diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/conftest.py b/env-llmeval/lib/python3.10/site-packages/scipy/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..577987a4ac744dc598f05cfea68ce357917a8874
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/conftest.py
@@ -0,0 +1,238 @@
+# Pytest customization
+import json
+import os
+import warnings
+import tempfile
+
+import numpy as np
+import numpy.testing as npt
+import pytest
+import hypothesis
+
+from scipy._lib._fpumode import get_fpu_mode
+from scipy._lib._testutils import FPUModeChangeWarning
+from scipy._lib import _pep440
+from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE
+
+
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "slow: Tests that are very slow.")
+ config.addinivalue_line("markers",
+ "xslow: mark test as extremely slow (not run unless explicitly requested)")
+ config.addinivalue_line("markers",
+ "xfail_on_32bit: mark test as failing on 32-bit platforms")
+ try:
+ import pytest_timeout # noqa:F401
+ except Exception:
+ config.addinivalue_line(
+ "markers", 'timeout: mark a test for a non-default timeout')
+ config.addinivalue_line("markers",
+ "skip_if_array_api(*backends, reasons=None, np_only=False, cpu_only=False): "
+ "mark the desired skip configuration for the `skip_if_array_api` fixture.")
+
+
+def _get_mark(item, name):
+ if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"):
+ mark = item.get_closest_marker(name)
+ else:
+ mark = item.get_marker(name)
+ return mark
+
+
+def pytest_runtest_setup(item):
+ mark = _get_mark(item, "xslow")
+ if mark is not None:
+ try:
+ v = int(os.environ.get('SCIPY_XSLOW', '0'))
+ except ValueError:
+ v = False
+ if not v:
+ pytest.skip("very slow test; "
+ "set environment variable SCIPY_XSLOW=1 to run it")
+ mark = _get_mark(item, 'xfail_on_32bit')
+ if mark is not None and np.intp(0).itemsize < 8:
+ pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}')
+
+ # Older versions of threadpoolctl have an issue that may lead to this
+ # warning being emitted, see gh-14441
+ with npt.suppress_warnings() as sup:
+ sup.filter(pytest.PytestUnraisableExceptionWarning)
+
+ try:
+ from threadpoolctl import threadpool_limits
+
+ HAS_THREADPOOLCTL = True
+ except Exception: # observed in gh-14441: (ImportError, AttributeError)
+ # Optional dependency only. All exceptions are caught, for robustness
+ HAS_THREADPOOLCTL = False
+
+ if HAS_THREADPOOLCTL:
+ # Set the number of openmp threads based on the number of workers
+ # xdist is using to prevent oversubscription. Simplified version of what
+ # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper
+ # functions)
+ try:
+ xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
+ except KeyError:
+ # raises when pytest-xdist is not installed
+ return
+
+ if not os.getenv('OMP_NUM_THREADS'):
+ max_openmp_threads = os.cpu_count() // 2 # use nr of physical cores
+ threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1)
+ try:
+ threadpool_limits(threads_per_worker, user_api='blas')
+ except Exception:
+ # May raise AttributeError for older versions of OpenBLAS.
+ # Catch any error for robustness.
+ return
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+ """
+ Check FPU mode was not changed during the test.
+ """
+ old_mode = get_fpu_mode()
+ yield
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ warnings.warn(f"FPU mode changed from {old_mode:#x} to {new_mode:#x} during "
+ "the test",
+ category=FPUModeChangeWarning, stacklevel=0)
+
+
+# Array API backend handling
+xp_available_backends = {'numpy': np}
+
+if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str):
+ # fill the dict of backends with available libraries
+ try:
+ import array_api_strict
+ xp_available_backends.update({'array_api_strict': array_api_strict})
+ except ImportError:
+ pass
+
+ try:
+ import torch # type: ignore[import]
+ xp_available_backends.update({'pytorch': torch})
+ # can use `mps` or `cpu`
+ torch.set_default_device(SCIPY_DEVICE)
+ except ImportError:
+ pass
+
+ try:
+ import cupy # type: ignore[import]
+ xp_available_backends.update({'cupy': cupy})
+ except ImportError:
+ pass
+
+ # by default, use all available backends
+ if SCIPY_ARRAY_API.lower() not in ("1", "true"):
+ SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API)
+
+ if 'all' in SCIPY_ARRAY_API_:
+ pass # same as True
+ else:
+ # only select a subset of backends by filtering the dict
+ try:
+ xp_available_backends = {
+ backend: xp_available_backends[backend]
+ for backend in SCIPY_ARRAY_API_
+ }
+ except KeyError:
+ msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}"
+ raise ValueError(msg)
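+
+# For example, assuming the listed libraries are installed, setting
+# SCIPY_ARRAY_API='["numpy", "array_api_strict"]' restricts the run to those
+# two backends, while SCIPY_ARRAY_API=1 (or "true") enables every available one.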
+
+if 'cupy' in xp_available_backends:
+ SCIPY_DEVICE = 'cuda'
+
+array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values())
+
+
+@pytest.fixture
+def skip_if_array_api(xp, request):
+ """
+ Skip based on the ``skip_if_array_api`` marker.
+
+ Parameters
+ ----------
+ *backends : tuple
+ Backends to skip, e.g. ``("array_api_strict", "torch")``.
+ These are overridden when ``np_only`` is ``True``, and are not
+ necessary to provide for non-CPU backends when ``cpu_only`` is ``True``.
+ reasons : list, optional
+ A list of reasons for each skip. When ``np_only`` is ``True``,
+ this should be a singleton list. Otherwise, this should be a list
+ of reasons, one for each corresponding backend in ``backends``.
+ If not provided, default reasons are used. Note that it is not possible
+ to specify a custom reason with ``cpu_only``. Default: ``None``.
+ np_only : bool, optional
+ When ``True``, the test is skipped for all backends other
+ than the default NumPy backend. There is no need to provide
+ any ``backends`` in this case. To specify a reason, pass a
+ singleton list to ``reasons``. Default: ``False``.
+ cpu_only : bool, optional
+ When ``True``, the test is skipped on non-CPU devices.
+ There is no need to provide any ``backends`` in this case,
+ but any ``backends`` will also be skipped on the CPU.
+ Default: ``False``.
+ """
+ if "skip_if_array_api" not in request.keywords:
+ return
+ backends = request.keywords["skip_if_array_api"].args
+ kwargs = request.keywords["skip_if_array_api"].kwargs
+ np_only = kwargs.get("np_only", False)
+ cpu_only = kwargs.get("cpu_only", False)
+ if np_only:
+ reasons = kwargs.get("reasons", ["do not run with non-NumPy backends."])
+ reason = reasons[0]
+ if xp.__name__ != 'numpy':
+ pytest.skip(reason=reason)
+ return
+ if cpu_only:
+ reason = "do not run with `SCIPY_ARRAY_API` set and not on CPU"
+ if SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu':
+ if xp.__name__ == 'cupy':
+ pytest.skip(reason=reason)
+ elif xp.__name__ == 'torch':
+ if 'cpu' not in torch.empty(0).device.type:
+ pytest.skip(reason=reason)
+ if backends is not None:
+ reasons = kwargs.get("reasons", False)
+ for i, backend in enumerate(backends):
+ if xp.__name__ == backend:
+ if not reasons:
+ reason = f"do not run with array API backend: {backend}"
+ else:
+ reason = reasons[i]
+ pytest.skip(reason=reason)
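+
+
+# Usage sketch (hypothetical test, shown for illustration only): tests opt in
+# by combining the ``array_api_compatible`` parametrisation defined above with
+# the ``skip_if_array_api`` marker and fixture, along these lines:
+#
+#     @array_api_compatible
+#     @pytest.mark.skip_if_array_api("array_api_strict",
+#                                    reasons=["not supported there"])
+#     def test_example(xp, skip_if_array_api):
+#         assert xp.asarray([1.0]).shape == (1,)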
+
+
+# Following the approach of NumPy's conftest.py...
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+ os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for SciPy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via scipy.test()
+hypothesis.settings.register_profile(
+ name="nondeterministic", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+ name="deterministic",
+ deadline=None, print_blob=True, database=None, derandomize=True,
+ suppress_health_check=list(hypothesis.HealthCheck),
+)
+
+# Profile is currently set by environment variable `SCIPY_HYPOTHESIS_PROFILE`
+# In the future, it would be good to work the choice into dev.py.
+SCIPY_HYPOTHESIS_PROFILE = os.environ.get("SCIPY_HYPOTHESIS_PROFILE",
+ "deterministic")
+hypothesis.settings.load_profile(SCIPY_HYPOTHESIS_PROFILE)
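+
+# For example, assuming a POSIX shell, a local run can opt out of determinism
+# with:
+#   SCIPY_HYPOTHESIS_PROFILE=nondeterministic python -m pytest scipy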
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ea5ba9d88b91c252e7533249aba47998a24610d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__init__.py
@@ -0,0 +1,201 @@
+"""
+========================================
+Interpolation (:mod:`scipy.interpolate`)
+========================================
+
+.. currentmodule:: scipy.interpolate
+
+Sub-package for objects used in interpolation.
+
+As listed below, this sub-package contains spline functions and classes,
+1-D and multidimensional (univariate and multivariate)
+interpolation classes, Lagrange and Taylor polynomial interpolators, and
+wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
+and DFITPACK functions.
+
+Univariate interpolation
+========================
+
+.. autosummary::
+ :toctree: generated/
+
+ interp1d
+ BarycentricInterpolator
+ KroghInterpolator
+ barycentric_interpolate
+ krogh_interpolate
+ pchip_interpolate
+ CubicHermiteSpline
+ PchipInterpolator
+ Akima1DInterpolator
+ CubicSpline
+ PPoly
+ BPoly
+
+
+Multivariate interpolation
+==========================
+
+Unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ griddata
+ LinearNDInterpolator
+ NearestNDInterpolator
+ CloughTocher2DInterpolator
+ RBFInterpolator
+ Rbf
+ interp2d
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ interpn
+ RegularGridInterpolator
+ RectBivariateSpline
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`
+
+Tensor product polynomials:
+
+.. autosummary::
+ :toctree: generated/
+
+ NdPPoly
+ NdBSpline
+
+1-D Splines
+===========
+
+.. autosummary::
+ :toctree: generated/
+
+ BSpline
+ make_interp_spline
+ make_lsq_spline
+ make_smoothing_spline
+
+Functional interface to FITPACK routines:
+
+.. autosummary::
+ :toctree: generated/
+
+ splrep
+ splprep
+ splev
+ splint
+ sproot
+ spalde
+ splder
+ splantider
+ insert
+
+Object-oriented FITPACK interface:
+
+.. autosummary::
+ :toctree: generated/
+
+ UnivariateSpline
+ InterpolatedUnivariateSpline
+ LSQUnivariateSpline
+
+
+
+2-D Splines
+===========
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ RectBivariateSpline
+ RectSphereBivariateSpline
+
+For unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ BivariateSpline
+ SmoothBivariateSpline
+ SmoothSphereBivariateSpline
+ LSQBivariateSpline
+ LSQSphereBivariateSpline
+
+Low-level interface to FITPACK functions:
+
+.. autosummary::
+ :toctree: generated/
+
+ bisplrep
+ bisplev
+
+Additional tools
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ lagrange
+ approximate_taylor_polynomial
+ pade
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`,
+ `scipy.ndimage.spline_filter`,
+ `scipy.signal.resample`,
+ `scipy.signal.bspline`,
+ `scipy.signal.gauss_spline`,
+ `scipy.signal.qspline1d`,
+ `scipy.signal.cspline1d`,
+ `scipy.signal.qspline1d_eval`,
+ `scipy.signal.cspline1d_eval`,
+ `scipy.signal.qspline2d`,
+ `scipy.signal.cspline2d`.
+
+``pchip`` is an alias of `PchipInterpolator` for backward compatibility
+(should not be used in new code).
+"""
+from ._interpolate import *
+from ._fitpack_py import *
+
+# New interface to fitpack library:
+from ._fitpack2 import *
+
+from ._rbf import Rbf
+
+from ._rbfinterp import *
+
+from ._polyint import *
+
+from ._cubic import *
+
+from ._ndgriddata import *
+
+from ._bsplines import *
+
+from ._pade import *
+
+from ._rgi import *
+
+from ._ndbspline import NdBSpline
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+# Backward compatibility
+pchip = PchipInterpolator
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71873336cb98548f3896b0066f19a0fd04062e50
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d64e4734378a4ad4ea049851122a2a32b03271c2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c8be5b2d9a5d6ccacd0d523096c91da134fb0d3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38dcb2128ea656467495028d9c58414b7ff39953
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3743acf69caa167f93a12b876b8134678aa09180
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1df1109dbf54c3f65d157aca1dd1f4106135acc2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a01788bb44da04e599b727b0c534c7d3a9750799
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..038b81e260bd1f6a206c74ef400fd6274dc8194a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..282dd1ad439a0e02ec32136b3c712555ec823be6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f4e7e96e4c3d53c982bd3e08bc1a30abac6b75a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80cbcb6ad2e996f1c52262e390fd9ef84f17f061
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20d1f45c4a98e47ae4a121f46db547818045795e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5aaf26fda19565f1262b46e87c186f6c30d32c12
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c14ac9e7e202e05ef6ccace45ca6d65467914207
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd322cd0d2b1b2dcfb61860ffbb964b99141c03c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6c9bdc4971f508442f6ee946cdf5201c34d165b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9979cb5e90af0911740aa9d610284c5fd50ae25
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f0fc7e3caa65b77e653a8adab6dad4487f2a355
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6504fa1673d711ec4878f0b2e894bad6efb913f1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b97735c5d512a8b79a08999dc69eb87112e442b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_cubic.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_cubic.py
new file mode 100644
index 0000000000000000000000000000000000000000..997776150afa44ff3f6d14356f2b1bc3b1fb5e5e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_cubic.py
@@ -0,0 +1,970 @@
+"""Interpolation algorithms using piecewise cubic polynomials."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import warnings
+
+import numpy as np
+
+from scipy.linalg import solve, solve_banded
+
+from . import PPoly
+from ._polyint import _isscalar
+
+if TYPE_CHECKING:
+ from typing import Literal
+
+__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
+ "Akima1DInterpolator", "CubicSpline"]
+
+
+def prepare_input(x, y, axis, dydx=None):
+ """Prepare input for cubic spline interpolators.
+
+ All data are converted to numpy arrays and checked for correctness.
+ Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
+ axis. The value of `axis` is converted to lie in
+ [0, number of dimensions of `y`).
+ """
+
+ x, y = map(np.asarray, (x, y))
+ if np.issubdtype(x.dtype, np.complexfloating):
+ raise ValueError("`x` must contain real values.")
+ x = x.astype(float)
+
+ if np.issubdtype(y.dtype, np.complexfloating):
+ dtype = complex
+ else:
+ dtype = float
+
+ if dydx is not None:
+ dydx = np.asarray(dydx)
+ if y.shape != dydx.shape:
+ raise ValueError("The shapes of `y` and `dydx` must be identical.")
+ if np.issubdtype(dydx.dtype, np.complexfloating):
+ dtype = complex
+ dydx = dydx.astype(dtype, copy=False)
+
+ y = y.astype(dtype, copy=False)
+ axis = axis % y.ndim
+ if x.ndim != 1:
+ raise ValueError("`x` must be 1-dimensional.")
+ if x.shape[0] < 2:
+ raise ValueError("`x` must contain at least 2 elements.")
+ if x.shape[0] != y.shape[axis]:
+ raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
+ "match the length of `x`")
+
+ if not np.all(np.isfinite(x)):
+ raise ValueError("`x` must contain only finite values.")
+ if not np.all(np.isfinite(y)):
+ raise ValueError("`y` must contain only finite values.")
+
+ if dydx is not None and not np.all(np.isfinite(dydx)):
+ raise ValueError("`dydx` must contain only finite values.")
+
+ dx = np.diff(x)
+ if np.any(dx <= 0):
+ raise ValueError("`x` must be strictly increasing sequence.")
+
+ y = np.moveaxis(y, axis, 0)
+ if dydx is not None:
+ dydx = np.moveaxis(dydx, axis, 0)
+
+ return x, dx, y, axis, dydx
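+
+# Sketch of the normalisation above (hypothetical shapes, for illustration):
+# for x of shape (3,) and y of shape (4, 3) with axis=1, prepare_input returns
+# dx = np.diff(x) and y moved to shape (3, 4), so that all the interpolators
+# below can work along axis 0.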
+
+
+class CubicHermiteSpline(PPoly):
+ """Piecewise-cubic interpolator matching values and first derivatives.
+
+ The result is represented as a `PPoly` instance.
+
+ Parameters
+ ----------
+ x : array_like, shape (n,)
+ 1-D array containing values of the independent variable.
+ Values must be real, finite and in strictly increasing order.
+ y : array_like
+ Array containing values of the dependent variable. It can have
+ arbitrary number of dimensions, but the length along ``axis``
+ (see below) must match the length of ``x``. Values must be finite.
+ dydx : array_like
+ Array containing derivatives of the dependent variable. It can have
+ arbitrary number of dimensions, but the length along ``axis``
+ (see below) must match the length of ``x``. Values must be finite.
+ axis : int, optional
+ Axis along which `y` is assumed to be varying. Meaning that for
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+ Default is 0.
+ extrapolate : {bool, 'periodic', None}, optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. If None (default), it is set to True.
+
+ Attributes
+ ----------
+ x : ndarray, shape (n,)
+ Breakpoints. The same ``x`` which was passed to the constructor.
+ c : ndarray, shape (4, n-1, ...)
+ Coefficients of the polynomials on each segment. The trailing
+ dimensions match the dimensions of `y`, excluding ``axis``.
+ For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
+ axis : int
+ Interpolation axis. The same axis which was passed to the
+ constructor.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ integrate
+ roots
+
+ See Also
+ --------
+ Akima1DInterpolator : Akima 1D interpolator.
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+ CubicSpline : Cubic spline data interpolator.
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
+
+ Notes
+ -----
+ If you want to create a higher-order spline matching higher-order
+ derivatives, use `BPoly.from_derivatives`.
+
+ References
+ ----------
+ .. [1] `Cubic Hermite spline
+ <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
+ on Wikipedia.
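+
+ Examples
+ --------
+ A minimal sketch (added here for illustration, not part of the original
+ docstring): the interpolant matches the prescribed values and first
+ derivatives at the breakpoints, so with y(0)=0, y(1)=1, y'(0)=0, y'(1)=3
+ it is exactly ``x**3``:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import CubicHermiteSpline
+ >>> s = CubicHermiteSpline([0.0, 1.0], [0.0, 1.0], dydx=[0.0, 3.0])
+ >>> xs = np.linspace(0, 1, 5)
+ >>> bool(np.allclose(s(xs), xs**3))
+ True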
+ """
+
+ def __init__(self, x, y, dydx, axis=0, extrapolate=None):
+ if extrapolate is None:
+ extrapolate = True
+
+ x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
+
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
+ slope = np.diff(y, axis=0) / dxr
+ t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
+
+ c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
+ c[0] = t / dxr
+ c[1] = (slope - dydx[:-1]) / dxr - t
+ c[2] = dydx[:-1]
+ c[3] = y[:-1]
+
+ super().__init__(c, x, extrapolate=extrapolate)
+ self.axis = axis
+
+
+class PchipInterpolator(CubicHermiteSpline):
+ r"""PCHIP 1-D monotonic cubic interpolation.
+
+ ``x`` and ``y`` are arrays of values used to approximate some function f,
+ with ``y = f(x)``. The interpolant uses monotonic cubic splines
+ to find the value of new points. (PCHIP stands for Piecewise Cubic
+ Hermite Interpolating Polynomial).
+
+ Parameters
+ ----------
+ x : ndarray, shape (npoints, )
+ A 1-D array of monotonically increasing real values. ``x`` cannot
+ include duplicate values (otherwise f is overspecified).
+ y : ndarray, shape (..., npoints, ...)
+ A N-D array of real values. ``y``'s length along the interpolation
+ axis must be equal to the length of ``x``. Use the ``axis``
+ parameter to select the interpolation axis.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
+ If you are trying to use the real components of the passed array,
+ use ``np.real`` on ``y``.
+
+ axis : int, optional
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
+ to ``axis=0``.
+ extrapolate : bool, optional
+ Whether to extrapolate to out-of-bounds points based on first
+ and last intervals, or to return NaNs.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ roots
+
+ See Also
+ --------
+ CubicHermiteSpline : Piecewise-cubic interpolator.
+ Akima1DInterpolator : Akima 1D interpolator.
+ CubicSpline : Cubic spline data interpolator.
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
+
+ Notes
+ -----
+ The interpolator preserves monotonicity in the interpolation data and does
+ not overshoot if the data is not smooth.
+
+ The first derivatives are guaranteed to be continuous, but the second
+ derivatives may jump at :math:`x_k`.
+
+ The derivatives :math:`f'_k` at the points :math:`x_k` are determined
+ using the PCHIP algorithm [1]_.
+
+ Let :math:`h_k = x_{k+1} - x_k`, and let :math:`d_k = (y_{k+1} - y_k) / h_k`
+ be the slopes at internal points :math:`x_k`.
+ If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
+ them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
+ weighted harmonic mean
+
+ .. math::
+
+ \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
+
+ where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
+
+ The end slopes are set using a one-sided scheme [2]_.
+
+
+ References
+ ----------
+ .. [1] F. N. Fritsch and J. Butland,
+ A method for constructing local
+ monotone piecewise cubic interpolants,
+ SIAM J. Sci. Comput., 5(2), 300-304 (1984).
+ :doi:`10.1137/0905021`.
+ .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
+ :doi:`10.1137/1.9780898717952`
+
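+ Examples
+ --------
+ A small doctest sketch (added for illustration): the interpolant passes
+ through the data and, for monotone data, stays monotone in between:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import PchipInterpolator
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0])
+ >>> y = np.array([0.0, 1.0, 1.0, 2.0])
+ >>> p = PchipInterpolator(x, y)
+ >>> bool(np.allclose(p(x), y))
+ True
+ >>> xs = np.linspace(0, 3, 31)
+ >>> bool(np.all(np.diff(p(xs)) >= 0))
+ True
+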
+ """
+
+ def __init__(self, x, y, axis=0, extrapolate=None):
+ x, _, y, axis, _ = prepare_input(x, y, axis)
+ if np.iscomplexobj(y):
+ msg = ("`PchipInterpolator` only works with real values for `y`. "
+ "Passing an array with a complex dtype for `y` is deprecated "
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
+ "use the real components of the passed array, use `np.real` on "
+ "the array before passing to `PchipInterpolator`.")
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
+ dk = self._find_derivatives(xp, y)
+ super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
+ self.axis = axis
+
+ @staticmethod
+ def _edge_case(h0, h1, m0, m1):
+ # one-sided three-point estimate for the derivative
+ d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
+
+ # try to preserve shape
+ mask = np.sign(d) != np.sign(m0)
+ mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
+ mmm = (~mask) & mask2
+
+ d[mask] = 0.
+ d[mmm] = 3.*m0[mmm]
+
+ return d
+
+ @staticmethod
+ def _find_derivatives(x, y):
+ # Determine the derivatives d_k at the points x_k using the PCHIP
+ # algorithm:
+ # Let m_k be the slope of the kth segment (between k and k+1).
+ # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0,
+ # else use the weighted harmonic mean (w_1 pairs with the left slope,
+ # matching the docstring and the code below):
+ #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
+ #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
+ # where h_k is the spacing between x_k and x_{k+1}.
+ # where h_k is the spacing between x_k and x_{k+1}
+ y_shape = y.shape
+ if y.ndim == 1:
+ # So that _edge_case doesn't end up assigning to scalars
+ x = x[:, None]
+ y = y[:, None]
+
+ hk = x[1:] - x[:-1]
+ mk = (y[1:] - y[:-1]) / hk
+
+ if y.shape[0] == 2:
+ # edge case: only have two points, use linear interpolation
+ dk = np.zeros_like(y)
+ dk[0] = mk
+ dk[1] = mk
+ return dk.reshape(y_shape)
+
+ smk = np.sign(mk)
+ condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
+
+ w1 = 2*hk[1:] + hk[:-1]
+ w2 = hk[1:] + 2*hk[:-1]
+
+ # values where division by zero occurs will be excluded
+ # by 'condition' afterwards
+ with np.errstate(divide='ignore', invalid='ignore'):
+ whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
+
+ dk = np.zeros_like(y)
+ dk[1:-1][condition] = 0.0
+ dk[1:-1][~condition] = 1.0 / whmean[~condition]
+
+ # special case endpoints, as suggested in
+ # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
+ dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
+ dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
+
+ return dk.reshape(y_shape)
+
+
+def pchip_interpolate(xi, yi, x, der=0, axis=0):
+ """
+ Convenience function for pchip interpolation.
+
+ xi and yi are arrays of values used to approximate some function f,
+ with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
+ to find the value of new points x and the derivatives there.
+
+ See `scipy.interpolate.PchipInterpolator` for details.
+
+ Parameters
+ ----------
+ xi : array_like
+ A sorted list of x-coordinates, of length N.
+ yi : array_like
+ A 1-D array of real values. `yi`'s length along the interpolation
+ axis must be equal to the length of `xi`. If N-D array, use axis
+ parameter to select correct axis.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated and will raise an error in
+ SciPy 1.15.0. If you are trying to use the real components of
+ the passed array, use ``np.real`` on `yi`.
+
+ x : scalar or array_like
+ Of length M.
+ der : int or list, optional
+ Derivatives to extract. The 0th derivative can be included to
+ return the function value.
+ axis : int, optional
+ Axis in the yi array corresponding to the x-coordinate values.
+
+ Returns
+ -------
+ y : scalar or array_like
+ The interpolated values (and/or derivatives) at `x`: a single array
+ of length M, or a list of such arrays when `der` is a list.
+
+ See Also
+ --------
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+
+ Examples
+ --------
+ We can interpolate 2D observed data using pchip interpolation:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import pchip_interpolate
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
+ >>> y_observed = np.sin(x_observed)
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+ >>> y = pchip_interpolate(x_observed, y_observed, x)
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
+ >>> plt.plot(x, y, label="pchip interpolation")
+ >>> plt.legend()
+ >>> plt.show()
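+
+ Derivatives can be requested through `der`; a short sketch (not part of
+ the original example):
+
+ >>> y_prime = pchip_interpolate(x_observed, y_observed, x, der=1)
+ >>> y_both = pchip_interpolate(x_observed, y_observed, x, der=[0, 1])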
+
+ """
+ P = PchipInterpolator(xi, yi, axis=axis)
+
+ if der == 0:
+ return P(x)
+ elif _isscalar(der):
+ return P.derivative(der)(x)
+ else:
+ return [P.derivative(nu)(x) for nu in der]
+
+
+class Akima1DInterpolator(CubicHermiteSpline):
+ r"""
+ Akima interpolator
+
+ Fit piecewise cubic polynomials, given vectors x and y. The interpolation
+ method by Akima uses a continuously differentiable sub-spline built from
+ piecewise cubic polynomials. The resultant curve passes through the given
+ data points and will appear smooth and natural.
+
+ Parameters
+ ----------
+ x : ndarray, shape (npoints, )
+ 1-D array of monotonically increasing real values.
+ y : ndarray, shape (..., npoints, ...)
+ N-D array of real values. The length of ``y`` along the interpolation axis
+ must be equal to the length of ``x``. Use the ``axis`` parameter to
+ select the interpolation axis.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
+ If you are trying to use the real components of the passed array,
+ use ``np.real`` on ``y``.
+
+ axis : int, optional
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
+ to ``axis=0``.
+ method : {'akima', 'makima'}, optional
+ If ``"makima"``, use the modified Akima interpolation [2]_.
+ Defaults to ``"akima"``, use the Akima interpolation [1]_.
+
+ .. versionadded:: 1.13.0
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ roots
+
+ See Also
+ --------
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+ CubicSpline : Cubic spline data interpolator.
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
+
+ Notes
+ -----
+ .. versionadded:: 0.14
+
+ Use only for precise data, as the fitted curve passes through the given
+ points exactly. This routine is useful for drawing a pleasingly smooth
+ curve through a few given points.
+
+ Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
+ the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
+ :math:`x_i` is defined as:
+
+ .. math::
+
+ d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
+
+ In the Akima interpolation [1]_ (``method="akima"``), the weights are:
+
+ .. math::
+
+ \begin{aligned}
+ w_1 &= |\delta_{i+1} - \delta_i| \\
+ w_2 &= |\delta_{i-1} - \delta_{i-2}|
+ \end{aligned}
+
+ In the modified Akima interpolation [2]_ (``method="makima"``),
+ to eliminate overshoot and avoid edge cases of both numerator and
+ denominator being equal to 0, the weights are modified as follows:
+
+ .. math::
+
+ \begin{align*}
+ w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
+ w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
+ \end{align*}
+
+ Examples
+ --------
+ Comparison of ``method="akima"`` and ``method="makima"``:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import Akima1DInterpolator
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.linspace(1, 7, 7)
+ >>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
+ >>> xs = np.linspace(min(x), max(x), num=100)
+ >>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
+ >>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
+
+ >>> fig, ax = plt.subplots()
+ >>> ax.plot(x, y, "o", label="data")
+ >>> ax.plot(xs, y_akima, label="akima")
+ >>> ax.plot(xs, y_makima, label="makima")
+ >>> ax.legend()
+ >>> fig.show()
+
+ The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
+
+ References
+ ----------
+ .. [1] A new method of interpolation and smooth curve fitting based
+ on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
+ 589-602. :doi:`10.1145/321607.321609`
+ .. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
+ https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
+
+ """
+
+ def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima"):
+ if method not in {"akima", "makima"}:
+ raise NotImplementedError(f"`method`={method} is unsupported.")
+ # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
+ # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
+
+ if np.iscomplexobj(y):
+ msg = ("`Akima1DInterpolator` only works with real values for `y`. "
+ "Passing an array with a complex dtype for `y` is deprecated "
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
+ "use the real components of the passed array, use `np.real` on "
+ "the array before passing to `Akima1DInterpolator`.")
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+ # determine slopes between breakpoints
+ m = np.empty((x.size + 3, ) + y.shape[1:])
+ dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
+ m[2:-2] = np.diff(y, axis=0) / dx
+
+ # add two additional points on the left ...
+ m[1] = 2. * m[2] - m[3]
+ m[0] = 2. * m[1] - m[2]
+ # ... and on the right
+ m[-2] = 2. * m[-3] - m[-4]
+ m[-1] = 2. * m[-2] - m[-3]
+
+ # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
+ # defined. This is the fill value:
+ t = .5 * (m[3:] + m[:-3])
+ # get the denominator of the slope t
+ dm = np.abs(np.diff(m, axis=0))
+ if method == "makima":
+ pm = np.abs(m[1:] + m[:-1])
+ f1 = dm[2:] + 0.5 * pm[2:]
+ f2 = dm[:-2] + 0.5 * pm[:-2]
+ else:
+ f1 = dm[2:]
+ f2 = dm[:-2]
+ f12 = f1 + f2
+ # These are the mask of where the slope at breakpoint is defined:
+ ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
+ x_ind, y_ind = ind[0], ind[1:]
+ # Set the slope at breakpoint
+ t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
+ f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
+
+ super().__init__(x, y, t, axis=0, extrapolate=False)
+ self.axis = axis
+
+ def extend(self, c, x, right=True):
+ raise NotImplementedError("Extending a 1-D Akima interpolator is not "
+ "yet implemented")
+
+ # These are inherited from PPoly, but they do not produce an Akima
+ # interpolator. Hence stub them out.
+ @classmethod
+ def from_spline(cls, tck, extrapolate=None):
+ raise NotImplementedError("This method does not make sense for "
+ "an Akima interpolator.")
+
+ @classmethod
+ def from_bernstein_basis(cls, bp, extrapolate=None):
+ raise NotImplementedError("This method does not make sense for "
+ "an Akima interpolator.")
+
+
+class CubicSpline(CubicHermiteSpline):
+ """Cubic spline data interpolator.
+
+ Interpolate data with a piecewise cubic polynomial which is twice
+ continuously differentiable [1]_. The result is represented as a `PPoly`
+ instance with breakpoints matching the given data.
+
+ Parameters
+ ----------
+ x : array_like, shape (n,)
+ 1-D array containing values of the independent variable.
+ Values must be real, finite and in strictly increasing order.
+ y : array_like
+ Array containing values of the dependent variable. It can have
+ arbitrary number of dimensions, but the length along ``axis``
+ (see below) must match the length of ``x``. Values must be finite.
+ axis : int, optional
+ Axis along which `y` is assumed to be varying. Meaning that for
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+ Default is 0.
+ bc_type : string or 2-tuple, optional
+ Boundary condition type. Two additional equations, given by the
+ boundary conditions, are required to determine all coefficients of
+ polynomials on each segment [2]_.
+
+ If `bc_type` is a string, then the specified condition will be applied
+ at both ends of a spline. Available conditions are:
+
+ * 'not-a-knot' (default): The first and second segment at a curve end
+ are the same polynomial. It is a good default when there is no
+ information on boundary conditions.
+ * 'periodic': The interpolated function is assumed to be periodic
+ with period ``x[-1] - x[0]``. The first and last value of `y` must be
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
+ * 'clamped': The first derivative at each curve end is zero. Assuming
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
+ * 'natural': The second derivative at each curve end is zero. Assuming
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
+
+ If `bc_type` is a 2-tuple, the first and the second value will be
+ applied at the curve start and end respectively. The tuple values can
+ be one of the previously mentioned strings (except 'periodic') or a
+ tuple ``(order, deriv_value)`` which allows specifying arbitrary
+ derivatives at curve ends:
+
+ * `order`: the derivative order, 1 or 2.
+ * `deriv_value`: array_like containing derivative values, shape must
+ be the same as `y`, excluding ``axis`` dimension. For example, if
+ `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
+ and have the shape (n0, n1).
+ extrapolate : {bool, 'periodic', None}, optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
+
+ Attributes
+ ----------
+ x : ndarray, shape (n,)
+ Breakpoints. The same ``x`` which was passed to the constructor.
+ c : ndarray, shape (4, n-1, ...)
+ Coefficients of the polynomials on each segment. The trailing
+ dimensions match the dimensions of `y`, excluding ``axis``.
+ For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
+ axis : int
+ Interpolation axis. The same axis which was passed to the
+ constructor.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ integrate
+ roots
+
+ See Also
+ --------
+ Akima1DInterpolator : Akima 1D interpolator.
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
+
+ Notes
+ -----
+ Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
+ former controls only construction of a spline, and the latter only
+ evaluation.
+
+ When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
+ a condition that the first derivative is equal to the linear interpolant
+ slope. When both boundary conditions are 'not-a-knot' and n = 3, the
+ solution is sought as a parabola passing through given points.
+
+ When the 'not-a-knot' boundary condition is applied at both ends, the
+ resulting spline will be the same as returned by `splrep` (with ``s=0``)
+ and `InterpolatedUnivariateSpline`, but these two methods use a
+ representation in B-spline basis.
+
+ .. versionadded:: 0.18.0
+
+ Examples
+ --------
+ In this example the cubic spline is used to interpolate a sampled sinusoid.
+ You can see that the spline continuity property holds for the first and
+ second derivatives and is violated only for the third derivative.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import CubicSpline
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+ >>> cs = CubicSpline(x, y)
+ >>> xs = np.arange(-0.5, 9.6, 0.1)
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+ >>> ax.plot(x, y, 'o', label='data')
+ >>> ax.plot(xs, np.sin(xs), label='true')
+ >>> ax.plot(xs, cs(xs), label="S")
+ >>> ax.plot(xs, cs(xs, 1), label="S'")
+ >>> ax.plot(xs, cs(xs, 2), label="S''")
+ >>> ax.plot(xs, cs(xs, 3), label="S'''")
+ >>> ax.set_xlim(-0.5, 9.5)
+ >>> ax.legend(loc='lower left', ncol=2)
+ >>> plt.show()
+
+ In the second example, the unit circle is interpolated with a spline. A
+ periodic boundary condition is used. You can see that the first derivative
+ values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
+ computed. Note that a circle cannot be exactly represented by a cubic
+ spline. To increase precision, more breakpoints would be required.
+
+ >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
+ >>> y = np.c_[np.cos(theta), np.sin(theta)]
+ >>> cs = CubicSpline(theta, y, bc_type='periodic')
+ >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
+ ds/dx=0.0 ds/dy=1.0
+ >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+ >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
+ >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
+ >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
+ >>> ax.axes.set_aspect('equal')
+ >>> ax.legend(loc='center')
+ >>> plt.show()
+
+ The third example is the interpolation of a polynomial y = x**3 on the
+ interval 0 <= x <= 1. A cubic spline can represent this function exactly.
+ To achieve that we need to specify values and first derivatives at
+ endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
+ y'(1) = 3.
+
+ >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
+ >>> x = np.linspace(0, 1)
+ >>> np.allclose(x**3, cs(x))
+ True
+
+ References
+ ----------
+ .. [1] `Cubic Spline Interpolation
+ <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
+ on Wikiversity.
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
+ """
+
+ def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
+ n = len(x)
+
+ bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
+
+ if extrapolate is None:
+ if bc[0] == 'periodic':
+ extrapolate = 'periodic'
+ else:
+ extrapolate = True
+
+ if y.size == 0:
+ # bail out early for zero-sized arrays
+ s = np.zeros_like(y)
+ else:
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
+ slope = np.diff(y, axis=0) / dxr
+
+ # If bc is 'not-a-knot' this change is just a convention.
+ # If bc is 'periodic' then we already checked that y[0] == y[-1],
+ # and the spline is just a constant, we handle this case in the
+ # same way by setting the first derivatives to slope, which is 0.
+ if n == 2:
+ if bc[0] in ['not-a-knot', 'periodic']:
+ bc[0] = (1, slope[0])
+ if bc[1] in ['not-a-knot', 'periodic']:
+ bc[1] = (1, slope[0])
+
+ # This is a special case, when both conditions are 'not-a-knot'
+ # and n == 3. In this case 'not-a-knot' can't be handled regularly
+ # as the both conditions are identical. We handle this case by
+ # constructing a parabola passing through given points.
+ if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
+ A = np.zeros((3, 3)) # This is a standard matrix.
+ b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
+
+ A[0, 0] = 1
+ A[0, 1] = 1
+ A[1, 0] = dx[1]
+ A[1, 1] = 2 * (dx[0] + dx[1])
+ A[1, 2] = dx[0]
+ A[2, 1] = 1
+ A[2, 2] = 1
+
+ b[0] = 2 * slope[0]
+ b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
+ b[2] = 2 * slope[1]
+
+ s = solve(A, b, overwrite_a=True, overwrite_b=True,
+ check_finite=False)
+ elif n == 3 and bc[0] == 'periodic':
+ # In case when number of points is 3 we compute the derivatives
+ # manually
+ t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
+ s = np.broadcast_to(t, (n,) + y.shape[1:])
+ else:
+ # Find derivative values at each x[i] by solving a tridiagonal
+ # system.
+ A = np.zeros((3, n)) # This is a banded matrix representation.
+ b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
+
+ # Filling the system for i=1..n-2
+ # (x[i-1] - x[i]) * s[i-1] +\
+ # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
+ # (x[i] - x[i-1]) * s[i+1] =\
+ # 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
+ # (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
+
+ A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
+ A[0, 2:] = dx[:-1] # The upper diagonal
+ A[-1, :-2] = dx[1:] # The lower diagonal
+
+ b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
+
+ bc_start, bc_end = bc
+
+ if bc_start == 'periodic':
+ # Due to the periodicity, and because y[-1] = y[0], the
+ # linear system has (n-1) unknowns/equations instead of n:
+ A = A[:, 0:-1]
+ A[1, 0] = 2 * (dx[-1] + dx[0])
+ A[0, 1] = dx[-1]
+
+ b = b[:-1]
+
+ # Also, due to the periodicity, the system is not tri-diagonal.
+ # We need to compute a "condensed" matrix of shape (n-2, n-2).
+ # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
+ # for more explanations.
+ # The condensed matrix is obtained by removing the last column
+ # and last row of the (n-1, n-1) system matrix. The removed
+ # values are saved in scalar variables with the (n-1, n-1)
+ # system matrix indices forming their names:
+ a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
+ a_m1_m2 = dx[-1]
+ a_m1_m1 = 2 * (dx[-1] + dx[-2])
+ a_m2_m1 = dx[-3]
+ a_0_m1 = dx[0]
+
+ b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
+ b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
+
+ Ac = A[:, :-1]
+ b1 = b[:-1]
+ b2 = np.zeros_like(b1)
+ b2[0] = -a_0_m1
+ b2[-1] = -a_m2_m1
+
+ # s1 and s2 are the solutions of (n-2, n-2) system
+ s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
+ overwrite_b=False, check_finite=False)
+
+ s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
+ overwrite_b=False, check_finite=False)
+
+ # computing the s[n-2] solution:
+ s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
+ (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
+
+ # s is the solution of the (n, n) system:
+ s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
+ s[:-2] = s1 + s_m1 * s2
+ s[-2] = s_m1
+ s[-1] = s[0]
+ else:
+ if bc_start == 'not-a-knot':
+ A[1, 0] = dx[1]
+ A[0, 1] = x[2] - x[0]
+ d = x[2] - x[0]
+ b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
+ dxr[0]**2 * slope[1]) / d
+ elif bc_start[0] == 1:
+ A[1, 0] = 1
+ A[0, 1] = 0
+ b[0] = bc_start[1]
+ elif bc_start[0] == 2:
+ A[1, 0] = 2 * dx[0]
+ A[0, 1] = dx[0]
+ b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
+
+ if bc_end == 'not-a-knot':
+ A[1, -1] = dx[-2]
+ A[-1, -2] = x[-1] - x[-3]
+ d = x[-1] - x[-3]
+ b[-1] = ((dxr[-1]**2*slope[-2] +
+ (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
+ elif bc_end[0] == 1:
+ A[1, -1] = 1
+ A[-1, -2] = 0
+ b[-1] = bc_end[1]
+ elif bc_end[0] == 2:
+ A[1, -1] = 2 * dx[-1]
+ A[-1, -2] = dx[-1]
+ b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
+
+ s = solve_banded((1, 1), A, b, overwrite_ab=True,
+ overwrite_b=True, check_finite=False)
+
+ super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
+ self.axis = axis
+
+ @staticmethod
+ def _validate_bc(bc_type, y, expected_deriv_shape, axis):
+ """Validate and prepare boundary conditions.
+
+ Returns
+ -------
+ validated_bc : 2-tuple
+ Boundary conditions for a curve start and end.
+ y : ndarray
+ y cast to complex dtype if one of the boundary conditions has
+ complex dtype.
+ """
+ if isinstance(bc_type, str):
+ if bc_type == 'periodic':
+ if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
+ raise ValueError(
+ f"The first and last `y` point along axis {axis} must "
+ "be identical (within machine precision) when "
+ "bc_type='periodic'.")
+
+ bc_type = (bc_type, bc_type)
+
+ else:
+ if len(bc_type) != 2:
+ raise ValueError("`bc_type` must contain 2 elements to "
+ "specify start and end conditions.")
+
+ if 'periodic' in bc_type:
+ raise ValueError("'periodic' `bc_type` is defined for both "
+ "curve ends and cannot be used with other "
+ "boundary conditions.")
+
+ validated_bc = []
+ for bc in bc_type:
+ if isinstance(bc, str):
+ if bc == 'clamped':
+ validated_bc.append((1, np.zeros(expected_deriv_shape)))
+ elif bc == 'natural':
+ validated_bc.append((2, np.zeros(expected_deriv_shape)))
+ elif bc in ['not-a-knot', 'periodic']:
+ validated_bc.append(bc)
+ else:
+ raise ValueError(f"bc_type={bc} is not allowed.")
+ else:
+ try:
+ deriv_order, deriv_value = bc
+ except Exception as e:
+ raise ValueError(
+ "A specified derivative value must be "
+ "given in the form (order, value)."
+ ) from e
+
+ if deriv_order not in [1, 2]:
+ raise ValueError("The specified derivative order must "
+ "be 1 or 2.")
+
+ deriv_value = np.asarray(deriv_value)
+ if deriv_value.shape != expected_deriv_shape:
+ raise ValueError(
+ "`deriv_value` shape {} is not the expected one {}."
+ .format(deriv_value.shape, expected_deriv_shape))
+
+ if np.issubdtype(deriv_value.dtype, np.complexfloating):
+ y = y.astype(complex, copy=False)
+
+ validated_bc.append((deriv_order, deriv_value))
+
+ return validated_bc, y
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..cc527980099835534aad06ab0dbcb4dc4f8c7364
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c660e2b183dca3f3f1d594bbbe6936a5eb9d7a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py
@@ -0,0 +1,805 @@
+"""
+fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
+ FITPACK is a collection of FORTRAN programs for curve and surface
+ fitting with splines and tensor product splines.
+
+See
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
+or
+ http://www.netlib.org/dierckx/
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+TODO: Make interfaces to the following fitpack functions:
+ For univariate splines: cocosp, concon, fourco, insert
+ For bivariate splines: profil, regrid, parsur, surev
+"""
+
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+import warnings
+import numpy as np
+from . import _fitpack
+from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
+ empty, iinfo, asarray)
+
+# Try to replace _fitpack interface with
+# f2py-generated version
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+def _int_overflow(x, exception, msg=None):
+ """Cast the value to an dfitpack_int and raise an OverflowError if the value
+ cannot fit.
+ """
+ if x > iinfo(dfitpack_int).max:
+ if msg is None:
+ msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
+ raise exception(msg)
+ return dfitpack_int.type(x)
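+
+# Hypothetical usage sketch (for illustration only): guard a FITPACK
+# workspace size before handing it to the Fortran wrappers, e.g.
+#   nest = _int_overflow(m + k + 1, OverflowError, msg='nest too large')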
+
+
+_iermess = {
+ 0: ["The spline has a residual sum of squares fp such that "
+ "abs(fp-s)/s<=0.001", None],
+ -1: ["The spline is an interpolating spline (fp=0)", None],
+ -2: ["The spline is weighted least-squares polynomial of degree k.\n"
+ "fp gives the upper bound fp0 for the smoothing factor s", None],
+ 1: ["The required storage space exceeds the available storage space.\n"
+ "Probable causes: data (x,y) size is too small or smoothing parameter"
+ "\ns is too small (fp>s).", ValueError],
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
+ "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
+ ValueError],
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 10: ["Error on input data", ValueError],
+ 'unknown': ["An error occurred", TypeError]
+}
+
+_iermess2 = {
+ 0: ["The spline has a residual sum of squares fp such that "
+ "abs(fp-s)/s<=0.001", None],
+ -1: ["The spline is an interpolating spline (fp=0)", None],
+ -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
+ "\nfp gives the upper bound fp0 for the smoothing factor s", None],
+ -3: ["Warning. The coefficients of the spline have been computed as the\n"
+ "minimal norm least-squares solution of a rank deficient system.",
+ None],
+ 1: ["The required storage space exceeds the available storage space.\n"
+ "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
+ ValueError],
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
+ "with fp = s. Probable causes: s too small or badly chosen eps.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 4: ["No more knots can be added because the number of B-spline\n"
+ "coefficients already exceeds the number of data points m.\n"
+ "Probable causes: either s or m too small. (fp>s)", ValueError],
+ 5: ["No more knots can be added because the additional knot would\n"
+ "coincide with an old one. Probable cause: s too small or too large\n"
+ "a weight to an inaccurate data point. (fp>s)", ValueError],
+ 10: ["Error on input data", ValueError],
+ 11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
+ "the minimal least-squares solution of a rank deficient system of\n"
+ "linear equations.", ValueError],
+ 'unknown': ["An error occurred", TypeError]
+}
+
+_parcur_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
+ 'ub': 0, 'ue': 1}
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+ full_output=0, nest=None, per=0, quiet=1):
+ # see the docstring of `_fitpack_py/splprep`
+ if task <= 0:
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
+ 'ub': 0, 'ue': 1}
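+        # NB: this rebinds `_parcur_cache` as a function-local dict; the
+        # module-level cache of the same name is left untouched.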
+ x = atleast_1d(x)
+ idim, m = x.shape
+ if per:
+ for i in range(idim):
+ if x[i][0] != x[i][-1]:
+ if not quiet:
+ warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
+ (i, m, i)),
+ stacklevel=2)
+ x[i][-1] = x[i][0]
+ if not 0 < idim < 11:
+ raise TypeError('0 < idim < 11 must hold')
+ if w is None:
+ w = ones(m, float)
+ else:
+ w = atleast_1d(w)
+ ipar = (u is not None)
+ if ipar:
+ _parcur_cache['u'] = u
+ if ub is None:
+ _parcur_cache['ub'] = u[0]
+ else:
+ _parcur_cache['ub'] = ub
+ if ue is None:
+ _parcur_cache['ue'] = u[-1]
+ else:
+ _parcur_cache['ue'] = ue
+ else:
+ _parcur_cache['u'] = zeros(m, float)
+ if not (1 <= k <= 5):
+        raise TypeError('1 <= k = %d <= 5 must hold' % k)
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
+ raise TypeError('Mismatch of input dimensions')
+ if s is None:
+ s = m - sqrt(2*m)
+ if t is None and task == -1:
+ raise TypeError('Knots must be given for task=-1')
+ if t is not None:
+ _parcur_cache['t'] = atleast_1d(t)
+ n = len(_parcur_cache['t'])
+ if task == -1 and n < 2*k + 2:
+ raise TypeError('There must be at least 2*k+2 knots for task=-1')
+ if m <= k:
+ raise TypeError('m > k must hold')
+ if nest is None:
+ nest = m + 2*k
+
+ if (task >= 0 and s == 0) or (nest < 0):
+ if per:
+ nest = m + 2*k
+ else:
+ nest = m + k + 1
+ nest = max(nest, 2*k + 3)
+ u = _parcur_cache['u']
+ ub = _parcur_cache['ub']
+ ue = _parcur_cache['ue']
+ t = _parcur_cache['t']
+ wrk = _parcur_cache['wrk']
+ iwrk = _parcur_cache['iwrk']
+ t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
+ task, ipar, s, t, nest, wrk, iwrk, per)
+ _parcur_cache['u'] = o['u']
+ _parcur_cache['ub'] = o['ub']
+ _parcur_cache['ue'] = o['ue']
+ _parcur_cache['t'] = t
+ _parcur_cache['wrk'] = o['wrk']
+ _parcur_cache['iwrk'] = o['iwrk']
+ ier = o['ier']
+ fp = o['fp']
+ n = len(t)
+ u = o['u']
+ c.shape = idim, n - k - 1
+ tcku = [t, list(c), k], u
+ if ier <= 0 and not quiet:
+ warnings.warn(RuntimeWarning(_iermess[ier][0] +
+ "\tk=%d n=%d m=%d fp=%f s=%f" %
+ (k, len(t), m, fp, s)),
+ stacklevel=2)
+ if ier > 0 and not full_output:
+ if ier in [1, 2, 3]:
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
+ else:
+ try:
+ raise _iermess[ier][1](_iermess[ier][0])
+ except KeyError as e:
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+ if full_output:
+ try:
+ return tcku, fp, ier, _iermess[ier][0]
+ except KeyError:
+ return tcku, fp, ier, _iermess['unknown'][0]
+ else:
+ return tcku
+
+
+_curfit_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int)}
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+ full_output=0, per=0, quiet=1):
+ # see the docstring of `_fitpack_py/splrep`
+ if task <= 0:
+ _curfit_cache = {}
+ x, y = map(atleast_1d, [x, y])
+ m = len(x)
+ if w is None:
+ w = ones(m, float)
+ if s is None:
+ s = 0.0
+ else:
+ w = atleast_1d(w)
+ if s is None:
+ s = m - sqrt(2*m)
+ if not len(w) == m:
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+ if (m != len(y)) or (m != len(w)):
+ raise TypeError('Lengths of the first three arguments (x,y,w) must '
+ 'be equal')
+ if not (1 <= k <= 5):
+ raise TypeError('Given degree of the spline (k=%d) is not supported. '
+ '(1<=k<=5)' % k)
+ if m <= k:
+ raise TypeError('m > k must hold')
+ if xb is None:
+ xb = x[0]
+ if xe is None:
+ xe = x[-1]
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if t is not None:
+ task = -1
+ if task == -1:
+ if t is None:
+ raise TypeError('Knots must be given for task=-1')
+ numknots = len(t)
+ _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
+ _curfit_cache['t'][k+1:-k-1] = t
+ nest = len(_curfit_cache['t'])
+ elif task == 0:
+ if per:
+ nest = max(m + 2*k, 2*k + 3)
+ else:
+ nest = max(m + k + 1, 2*k + 3)
+ t = empty((nest,), float)
+ _curfit_cache['t'] = t
+ if task <= 0:
+ if per:
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
+ else:
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
+ _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
+ try:
+ t = _curfit_cache['t']
+ wrk = _curfit_cache['wrk']
+ iwrk = _curfit_cache['iwrk']
+ except KeyError as e:
+ raise TypeError("must call with task=1 only after"
+ " call with task=0,-1") from e
+ if not per:
+ n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
+ xb, xe, k, s)
+ else:
+ n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
+ tck = (t[:n], c[:n], k)
+ if ier <= 0 and not quiet:
+ _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
+ (k, len(t), m, fp, s))
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
+ if ier > 0 and not full_output:
+ if ier in [1, 2, 3]:
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
+ else:
+ try:
+ raise _iermess[ier][1](_iermess[ier][0])
+ except KeyError as e:
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+ if full_output:
+ try:
+ return tck, fp, ier, _iermess[ier][0]
+ except KeyError:
+ return tck, fp, ier, _iermess['unknown'][0]
+ else:
+ return tck
+
+
+def splev(x, tck, der=0, ext=0):
+ # see the docstring of `_fitpack_py/splev`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, x=x, t=t, k=k, der=der:
+ splev(x, [t, c, k], der, ext), c))
+ else:
+ if not (0 <= der <= k):
+ raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
+ if ext not in (0, 1, 2, 3):
+ raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
+
+ x = asarray(x)
+ shape = x.shape
+ x = atleast_1d(x).ravel()
+ if der == 0:
+ y, ier = dfitpack.splev(t, c, k, x, ext)
+ else:
+ y, ier = dfitpack.splder(t, c, k, x, der, ext)
+
+ if ier == 10:
+ raise ValueError("Invalid input data")
+ if ier == 1:
+ raise ValueError("Found x value not in the domain")
+ if ier:
+ raise TypeError("An error occurred")
+
+ return y.reshape(shape)
+
+
+def splint(a, b, tck, full_output=0):
+ # see the docstring of `_fitpack_py/splint`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, a=a, b=b, t=t, k=k:
+ splint(a, b, [t, c, k]), c))
+ else:
+ aint, wrk = dfitpack.splint(t, c, k, a, b)
+ if full_output:
+ return aint, wrk
+ else:
+ return aint
+
+
+def sproot(tck, mest=10):
+ # see the docstring of `_fitpack_py/sproot`
+ t, c, k = tck
+ if k != 3:
+ raise ValueError("sproot works only for cubic (k=3) splines")
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, t=t, k=k, mest=mest:
+ sproot([t, c, k], mest), c))
+ else:
+ if len(t) < 8:
+ raise TypeError("The number of knots %d>=8" % len(t))
+ z, m, ier = dfitpack.sproot(t, c, mest)
+ if ier == 10:
+ raise TypeError("Invalid input data. "
+ "t1<=..<=t4 1:
+ return list(map(lambda x, tck=tck: spalde(x, tck), x))
+ d, ier = dfitpack.spalde(t, c, k+1, x[0])
+ if ier == 0:
+ return d
+ if ier == 10:
+ raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
+ raise TypeError("Unknown error")
+
+# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
+# full_output=0,nest=None,per=0,quiet=1):
+
+
+_surfit_cache = {'tx': array([], float), 'ty': array([], float),
+ 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
+
+
+def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
+ kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
+ full_output=0, nxest=None, nyest=None, quiet=1):
+ """
+ Find a bivariate B-spline representation of a surface.
+
+ Given a set of data points (x[i], y[i], z[i]) representing a surface
+ z=f(x,y), compute a B-spline representation of the surface. Based on
+ the routine SURFIT from FITPACK.
+
+ Parameters
+ ----------
+ x, y, z : ndarray
+ Rank-1 arrays of data points.
+ w : ndarray, optional
+ Rank-1 array of weights. By default ``w=np.ones(len(x))``.
+ xb, xe : float, optional
+ End points of approximation interval in `x`.
+ By default ``xb = x.min(), xe=x.max()``.
+ yb, ye : float, optional
+ End points of approximation interval in `y`.
+ By default ``yb=y.min(), ye = y.max()``.
+ kx, ky : int, optional
+ The degrees of the spline (1 <= kx, ky <= 5).
+ Third order (kx=ky=3) is recommended.
+ task : int, optional
+ If task=0, find knots in x and y and coefficients for a given
+ smoothing factor, s.
+ If task=1, find knots and coefficients for another value of the
+ smoothing factor, s. bisplrep must have been previously called
+ with task=0 or task=1.
+ If task=-1, find coefficients for a given set of knots tx, ty.
+ s : float, optional
+ A non-negative smoothing factor. If weights correspond
+ to the inverse of the standard-deviation of the errors in z,
+ then a good s-value should be found in the range
+ ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
+ eps : float, optional
+ A threshold for determining the effective rank of an
+ over-determined linear system of equations (0 < eps < 1).
+ `eps` is not likely to need changing.
+ tx, ty : ndarray, optional
+ Rank-1 arrays of the knots of the spline for task=-1
+ full_output : int, optional
+ Non-zero to return optional outputs.
+ nxest, nyest : int, optional
+ Over-estimates of the total number of knots. If None then
+ ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
+ ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
+ quiet : int, optional
+ Non-zero to suppress printing of messages.
+
+ Returns
+ -------
+ tck : array_like
+ A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
+ coefficients (c) of the bivariate B-spline representation of the
+ surface along with the degree of the spline.
+ fp : ndarray
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int
+ An integer flag about splrep success. Success is indicated if
+ ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+ Otherwise an error is raised.
+ msg : str
+ A message corresponding to the integer flag, ier.
+
+ See Also
+ --------
+ splprep, splrep, splint, sproot, splev
+ UnivariateSpline, BivariateSpline
+
+ Notes
+ -----
+ See `bisplev` to evaluate the value of the B-spline given its tck
+ representation.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolation.
+
+ References
+ ----------
+ .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
+ Ima J. Numer. Anal. 1 (1981) 267-283.
+ .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
+ .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
+
+ """
+ x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
+ m = len(x)
+ if not (m == len(y) == len(z)):
+ raise TypeError('len(x)==len(y)==len(z) must hold.')
+ if w is None:
+ w = ones(m, float)
+ else:
+ w = atleast_1d(w)
+ if not len(w) == m:
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+ if xb is None:
+ xb = x.min()
+ if xe is None:
+ xe = x.max()
+ if yb is None:
+ yb = y.min()
+ if ye is None:
+ ye = y.max()
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if s is None:
+ s = m - sqrt(2*m)
+ if tx is None and task == -1:
+ raise TypeError('Knots_x must be given for task=-1')
+ if tx is not None:
+ _surfit_cache['tx'] = atleast_1d(tx)
+ nx = len(_surfit_cache['tx'])
+ if ty is None and task == -1:
+ raise TypeError('Knots_y must be given for task=-1')
+ if ty is not None:
+ _surfit_cache['ty'] = atleast_1d(ty)
+ ny = len(_surfit_cache['ty'])
+ if task == -1 and nx < 2*kx+2:
+ raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
+ if task == -1 and ny < 2*ky+2:
+        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
+ if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
+ raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
+ 'supported. (1<=k<=5)' % (kx, ky))
+ if m < (kx + 1)*(ky + 1):
+ raise TypeError('m >= (kx+1)(ky+1) must hold')
+ if nxest is None:
+ nxest = int(kx + sqrt(m/2))
+ if nyest is None:
+ nyest = int(ky + sqrt(m/2))
+ nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
+ if task >= 0 and s == 0:
+ nxest = int(kx + sqrt(3*m))
+ nyest = int(ky + sqrt(3*m))
+ if task == -1:
+ _surfit_cache['tx'] = atleast_1d(tx)
+ _surfit_cache['ty'] = atleast_1d(ty)
+ tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
+ wrk = _surfit_cache['wrk']
+ u = nxest - kx - 1
+ v = nyest - ky - 1
+ km = max(kx, ky) + 1
+ ne = max(nxest, nyest)
+ bx, by = kx*v + ky + 1, ky*u + kx + 1
+ b1, b2 = bx, bx + v - ky
+ if bx > by:
+ b1, b2 = by, by + u - kx
+ msg = "Too many data points to interpolate"
+ lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
+ 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
+ OverflowError,
+ msg=msg)
+ lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
+ tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
+ task, s, eps, tx, ty, nxest, nyest,
+ wrk, lwrk1, lwrk2)
+    _surfit_cache['tx'] = tx
+    _surfit_cache['ty'] = ty
+    _surfit_cache['wrk'] = o['wrk']
+ ier, fp = o['ier'], o['fp']
+ tck = [tx, ty, c, kx, ky]
+
+ ierm = min(11, max(-3, ier))
+ if ierm <= 0 and not quiet:
+ _mess = (_iermess2[ierm][0] +
+ "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+ (kx, ky, len(tx), len(ty), m, fp, s))
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
+ if ierm > 0 and not full_output:
+ if ier in [1, 2, 3, 4, 5]:
+ _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+ (kx, ky, len(tx), len(ty), m, fp, s))
+ warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
+ else:
+ try:
+ raise _iermess2[ierm][1](_iermess2[ierm][0])
+ except KeyError as e:
+ raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
+ if full_output:
+ try:
+ return tck, fp, ier, _iermess2[ierm][0]
+ except KeyError:
+ return tck, fp, ier, _iermess2['unknown'][0]
+ else:
+ return tck
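+
+# Illustrative sketch (data assumed, mirrors the SciPy tutorial): fit a
+# smoothed surface and evaluate it on a finer grid with `bisplev`:
+#
+#     x, y = np.mgrid[-1:1:20j, -1:1:20j]
+#     z = (x + y) * np.exp(-6.0 * (x * x + y * y))
+#     tck = bisplrep(x.ravel(), y.ravel(), z.ravel(), s=0.1)
+#     xnew = np.linspace(-1, 1, 50)
+#     znew = bisplev(xnew, xnew, tck)   # shape (50, 50)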
+
+
+def bisplev(x, y, tck, dx=0, dy=0):
+ """
+ Evaluate a bivariate B-spline and its derivatives.
+
+ Return a rank-2 array of spline function values (or spline derivative
+ values) at points given by the cross-product of the rank-1 arrays `x` and
+ `y`. In special cases, return an array or just a float if either `x` or
+ `y` or both are floats. Based on BISPEV and PARDER from FITPACK.
+
+ Parameters
+ ----------
+ x, y : ndarray
+ Rank-1 arrays specifying the domain over which to evaluate the
+ spline or its derivative.
+ tck : tuple
+ A sequence of length 5 returned by `bisplrep` containing the knot
+ locations, the coefficients, and the degree of the spline:
+ [tx, ty, c, kx, ky].
+ dx, dy : int, optional
+ The orders of the partial derivatives in `x` and `y` respectively.
+
+ Returns
+ -------
+ vals : ndarray
+ The B-spline or its derivative evaluated over the set formed by
+ the cross-product of `x` and `y`.
+
+ See Also
+ --------
+ splprep, splrep, splint, sproot, splev
+ UnivariateSpline, BivariateSpline
+
+ Notes
+ -----
+ See `bisplrep` to generate the `tck` representation.
+
+ References
+ ----------
+ .. [1] Dierckx P. : An algorithm for surface fitting
+ with spline functions
+ Ima J. Numer. Anal. 1 (1981) 267-283.
+ .. [2] Dierckx P. : An algorithm for surface fitting
+ with spline functions
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
+ .. [3] Dierckx P. : Curve and surface fitting with splines,
+ Monographs on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
+
+ """
+ tx, ty, c, kx, ky = tck
+ if not (0 <= dx < kx):
+ raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
+ if not (0 <= dy < ky):
+ raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
+ x, y = map(atleast_1d, [x, y])
+ if (len(x.shape) != 1) or (len(y.shape) != 1):
+ raise ValueError("First two entries should be rank-1 arrays.")
+
+ msg = "Too many data points to interpolate."
+
+ _int_overflow(x.size * y.size, MemoryError, msg=msg)
+
+ if dx != 0 or dy != 0:
+ _int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
+ MemoryError, msg=msg)
+ z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
+ else:
+ z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
+
+ if ier == 10:
+ raise ValueError("Invalid input data")
+ if ier:
+ raise TypeError("An error occurred")
+ z.shape = len(x), len(y)
+ if len(z) > 1:
+ return z
+ if len(z[0]) > 1:
+ return z[0]
+ return z[0][0]
+
+
+def dblint(xa, xb, ya, yb, tck):
+ """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
+
+ Parameters
+ ----------
+ xa, xb : float
+ The end-points of the x integration interval.
+ ya, yb : float
+ The end-points of the y integration interval.
+ tck : list [tx, ty, c, kx, ky]
+ A sequence of length 5 returned by bisplrep containing the knot
+ locations tx, ty, the coefficients c, and the degrees kx, ky
+ of the spline.
+
+ Returns
+ -------
+ integ : float
+ The value of the resulting integral.
+ """
+ tx, ty, c, kx, ky = tck
+ return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
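+
+# Sketch (illustrative data): for the bilinear surface z = x*y fitted with
+# `bisplrep`, the integral over [0, 4] x [0, 4] is 8 * 8 = 64:
+#
+#     x, y = np.mgrid[0:5, 0:5]
+#     tck = bisplrep(x.ravel(), y.ravel(), (x * y).ravel(), s=0)
+#     dblint(0, 4, 0, 4, tck)   # ~ 64.0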
+
+
+def insert(x, tck, m=1, per=0):
+ # see the docstring of `_fitpack_py/insert`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ cc = []
+ for c_vals in c:
+ tt, cc_val, kk = insert(x, [t, c_vals, k], m)
+ cc.append(cc_val)
+ return (tt, cc, kk)
+ else:
+ tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
+ if ier == 10:
+ raise ValueError("Invalid input data")
+ if ier:
+ raise TypeError("An error occurred")
+ return (tt, cc, k)
+
+
+def splder(tck, n=1):
+ # see the docstring of `_fitpack_py/splder`
+ if n < 0:
+ return splantider(tck, -n)
+
+ t, c, k = tck
+
+ if n > k:
+ raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
+ f"order of spline (k = {tck[2]!r})")
+
+ # Extra axes for the trailing dims of the `c` array:
+ sh = (slice(None),) + ((None,)*len(c.shape[1:]))
+
+ with np.errstate(invalid='raise', divide='raise'):
+ try:
+ for j in range(n):
+ # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
+
+ # Compute the denominator in the differentiation formula.
+ # (and append trailing dims, if necessary)
+ dt = t[k+1:-1] - t[1:-k-1]
+ dt = dt[sh]
+ # Compute the new coefficients
+ c = (c[1:-1-k] - c[:-2-k]) * k / dt
+ # Pad coefficient array to same size as knots (FITPACK
+ # convention)
+ c = np.r_[c, np.zeros((k,) + c.shape[1:])]
+ # Adjust knots
+ t = t[1:-1]
+ k -= 1
+ except FloatingPointError as e:
+ raise ValueError(("The spline has internal repeated knots "
+ "and is not differentiable %d times") % n) from e
+
+ return t, c, k
+
+
+def splantider(tck, n=1):
+ # see the docstring of `_fitpack_py/splantider`
+ if n < 0:
+ return splder(tck, -n)
+
+ t, c, k = tck
+
+ # Extra axes for the trailing dims of the `c` array:
+ sh = (slice(None),) + (None,)*len(c.shape[1:])
+
+ for j in range(n):
+ # This is the inverse set of operations to splder.
+
+ # Compute the multiplier in the antiderivative formula.
+ dt = t[k+1:] - t[:-k-1]
+ dt = dt[sh]
+ # Compute the new coefficients
+ c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
+ c = np.r_[np.zeros((1,) + c.shape[1:]),
+ c,
+ [c[-1]] * (k+2)]
+ # New knots
+ t = np.r_[t[0], t, t[-1]]
+ k += 1
+
+ return t, c, k
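+
+
+# Round-trip sketch (illustrative): `splantider` followed by `splder` recovers
+# the original spline up to floating-point error:
+#
+#     x = np.linspace(0, np.pi, 30)
+#     tck = splrep(x, np.sin(x))
+#     t2, c2, k2 = splder(splantider(tck))
+#     # np.allclose(splev(x, (t2, c2, k2)), splev(x, tck))  -> True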
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..91ee711fead98bacd3f15b4175520a4d387390df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py
@@ -0,0 +1,796 @@
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+
+import numpy as np
+
+# These are in the API for fitpack even if not used in fitpack.py itself.
+from ._fitpack_impl import bisplrep, bisplev, dblint # noqa: F401
+from . import _fitpack_impl as _impl
+from ._bsplines import BSpline
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+ full_output=0, nest=None, per=0, quiet=1):
+ """
+ Find the B-spline representation of an N-D curve.
+
+ Given a list of N rank-1 arrays, `x`, which represent a curve in
+ N-dimensional space parametrized by `u`, find a smooth approximating
+ spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
+
+ Parameters
+ ----------
+ x : array_like
+ A list of sample vector arrays representing the curve.
+ w : array_like, optional
+ Strictly positive rank-1 array of weights the same length as `x[0]`.
+ The weights are used in computing the weighted least-squares spline
+ fit. If the errors in the `x` values have standard-deviation given by
+ the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
+ u : array_like, optional
+ An array of parameter values. If not given, these values are
+ calculated automatically as ``M = len(x[0])``, where
+
+ v[0] = 0
+
+ v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
+
+ u[i] = v[i] / v[M-1]
+
+ ub, ue : int, optional
+ The end-points of the parameters interval. Defaults to
+ u[0] and u[-1].
+ k : int, optional
+ Degree of the spline. Cubic splines are recommended.
+ Even values of `k` should be avoided especially with a small s-value.
+ ``1 <= k <= 5``, default is 3.
+ task : int, optional
+ If task==0 (default), find t and c for a given smoothing factor, s.
+ If task==1, find t and c for another value of the smoothing factor, s.
+ There must have been a previous call with task=0 or task=1
+ for the same set of data.
+ If task=-1 find the weighted least square spline for a given set of
+ knots, t.
+ s : float, optional
+ A smoothing condition. The amount of smoothness is determined by
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
+ where g(x) is the smoothed interpolation of (x,y). The user can
+ use `s` to control the trade-off between closeness and smoothness
+ of fit. Larger `s` means more smoothing while smaller values of `s`
+ indicate less smoothing. Recommended values of `s` depend on the
+ weights, w. If the weights represent the inverse of the
+ standard-deviation of y, then a good `s` value should be found in
+ the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
+ data points in x, y, and w.
+ t : array, optional
+ The knots needed for ``task=-1``.
+ There must be at least ``2*k+2`` knots.
+ full_output : int, optional
+ If non-zero, then return optional outputs.
+ nest : int, optional
+ An over-estimate of the total number of knots of the spline to
+ help in determining the storage space. By default nest=m/2.
+        ``nest=m+k+1`` is always large enough.
+ per : int, optional
+ If non-zero, data points are considered periodic with period
+ ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+ returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
+ quiet : int, optional
+ Non-zero to suppress messages.
+
+ Returns
+ -------
+ tck : tuple
+ A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
+ coefficients, and the degree of the spline.
+ u : array
+ An array of the values of the parameter.
+ fp : float
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int
+ An integer flag about splrep success. Success is indicated
+ if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+ Otherwise an error is raised.
+ msg : str
+ A message corresponding to the integer flag, ier.
+
+ See Also
+ --------
+ splrep, splev, sproot, spalde, splint,
+ bisplrep, bisplev
+ UnivariateSpline, BivariateSpline
+ BSpline
+ make_interp_spline
+
+ Notes
+ -----
+ See `splev` for evaluation of the spline and its derivatives.
+ The number of dimensions N must be smaller than 11.
+
+ The number of coefficients in the `c` array is ``k+1`` less than the number
+ of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
+ the array of coefficients to have the same length as the array of knots.
+ These additional coefficients are ignored by evaluation routines, `splev`
+ and `BSpline`.
+
+ References
+ ----------
+ .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
+ parametric splines, Computer Graphics and Image Processing",
+ 20 (1982) 171-184.
+ .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
+ parametric splines", report tw55, Dept. Computer Science,
+ K.U.Leuven, 1981.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ Generate a discretization of a limacon curve in the polar coordinates:
+
+ >>> import numpy as np
+ >>> phi = np.linspace(0, 2.*np.pi, 40)
+ >>> r = 0.5 + np.cos(phi) # polar coords
+ >>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
+
+ And interpolate:
+
+ >>> from scipy.interpolate import splprep, splev
+ >>> tck, u = splprep([x, y], s=0)
+ >>> new_points = splev(u, tck)
+
+ Notice that (i) we force interpolation by using `s=0`,
+ (ii) the parameterization, ``u``, is generated automatically.
+ Now plot the result:
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> ax.plot(x, y, 'ro')
+ >>> ax.plot(new_points[0], new_points[1], 'r-')
+ >>> plt.show()
+
+ """
+
+ res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
+ quiet)
+ return res
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+ full_output=0, per=0, quiet=1):
+ """
+ Find the B-spline representation of a 1-D curve.
+
+ Given the set of data points ``(x[i], y[i])`` determine a smooth spline
+ approximation of degree k on the interval ``xb <= x <= xe``.
+
+ Parameters
+ ----------
+ x, y : array_like
+ The data points defining a curve ``y = f(x)``.
+ w : array_like, optional
+ Strictly positive rank-1 array of weights the same length as `x` and `y`.
+ The weights are used in computing the weighted least-squares spline
+ fit. If the errors in the `y` values have standard-deviation given by the
+ vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
+ xb, xe : float, optional
+ The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
+ respectively.
+ k : int, optional
+ The degree of the spline fit. It is recommended to use cubic splines.
+ Even values of `k` should be avoided especially with small `s` values.
+ ``1 <= k <= 5``.
+ task : {1, 0, -1}, optional
+ If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
+
+ If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
+ `s`. There must have been a previous call with ``task=0`` or ``task=1`` for
+        the same set of data (``t`` will be stored and used internally).
+
+ If ``task=-1`` find the weighted least square spline for a given set of
+ knots, ``t``. These should be interior knots as knots on the ends will be
+ added automatically.
+ s : float, optional
+ A smoothing condition. The amount of smoothness is determined by
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
+ is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
+ the tradeoff between closeness and smoothness of fit. Larger `s` means
+ more smoothing while smaller values of `s` indicate less smoothing.
+ Recommended values of `s` depend on the weights, `w`. If the weights
+ represent the inverse of the standard-deviation of `y`, then a good `s`
+ value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
+ the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
+ weights are supplied. ``s = 0.0`` (interpolating) if no weights are
+ supplied.
+ t : array_like, optional
+ The knots needed for ``task=-1``. If given then task is automatically set
+ to ``-1``.
+ full_output : bool, optional
+ If non-zero, then return optional outputs.
+ per : bool, optional
+        If non-zero, data points are considered periodic with period
+        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+        returned. Values of
+ ``y[m-1]`` and ``w[m-1]`` are not used.
+ The default is zero, corresponding to boundary condition 'not-a-knot'.
+ quiet : bool, optional
+ Non-zero to suppress messages.
+
+ Returns
+ -------
+ tck : tuple
+ A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
+ coefficients, and the degree of the spline.
+ fp : array, optional
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int, optional
+ An integer flag about splrep success. Success is indicated if ``ier<=0``.
+ If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
+ error is raised.
+ msg : str, optional
+ A message corresponding to the integer flag, `ier`.
+
+ See Also
+ --------
+ UnivariateSpline, BivariateSpline
+ splprep, splev, sproot, spalde, splint
+ bisplrep, bisplev
+ BSpline
+ make_interp_spline
+
+ Notes
+ -----
+ See `splev` for evaluation of the spline and its derivatives. Uses the
+ FORTRAN routine ``curfit`` from FITPACK.
+
+ The user is responsible for assuring that the values of `x` are unique.
+ Otherwise, `splrep` will not return sensible results.
+
+ If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
+ i.e., there must be a subset of data points ``x[j]`` such that
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+ This routine zero-pads the coefficients array ``c`` to have the same length
+ as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
+ by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
+ `splprep`, which does not zero-pad the coefficients.
+
+ The default boundary condition is 'not-a-knot', i.e. the first and second
+ segment at a curve end are the same polynomial. More boundary conditions are
+ available in `CubicSpline`.
+
+ References
+ ----------
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+ integration of experimental data using spline functions",
+ J.Comp.Appl.Maths 1 (1975) 165-184.
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+ 1286-1304.
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ You can interpolate 1-D points with a B-spline curve.
+    Further examples are given
+    :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import splev, splrep
+ >>> x = np.linspace(0, 10, 10)
+ >>> y = np.sin(x)
+ >>> spl = splrep(x, y)
+ >>> x2 = np.linspace(0, 10, 200)
+ >>> y2 = splev(x2, spl)
+ >>> plt.plot(x, y, 'o', x2, y2)
+ >>> plt.show()
+
+ """
+ res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
+ return res
+
+
+def splev(x, tck, der=0, ext=0):
+ """
+ Evaluate a B-spline or its derivatives.
+
+ Given the knots and coefficients of a B-spline representation, evaluate
+ the value of the smoothing polynomial and its derivatives. This is a
+ wrapper around the FORTRAN routines splev and splder of FITPACK.
+
+ Parameters
+ ----------
+ x : array_like
+ An array of points at which to return the value of the smoothed
+ spline or its derivatives. If `tck` was returned from `splprep`,
+ then the parameter values, u should be given.
+ tck : 3-tuple or a BSpline object
+ If a tuple, then it should be a sequence of length 3 returned by
+ `splrep` or `splprep` containing the knots, coefficients, and degree
+ of the spline. (Also see Notes.)
+ der : int, optional
+ The order of derivative of the spline to compute (must be less than
+ or equal to k, the degree of the spline).
+ ext : int, optional
+ Controls the value returned for elements of ``x`` not in the
+ interval defined by the knot sequence.
+
+ * if ext=0, return the extrapolated value.
+ * if ext=1, return 0
+ * if ext=2, raise a ValueError
+ * if ext=3, return the boundary value.
+
+ The default value is 0.
+
+ Returns
+ -------
+ y : ndarray or list of ndarrays
+ An array of values representing the spline function evaluated at
+ the points in `x`. If `tck` was returned from `splprep`, then this
+ is a list of arrays representing the curve in an N-D space.
+
+ See Also
+ --------
+ splprep, splrep, sproot, spalde, splint
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using `BSpline` objects.
+
+ References
+ ----------
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+ Theory, 6, p.50-62, 1972.
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+ Applics, 10, p.134-149, 1972.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
+ "not allowed. Use BSpline.__call__(x) instead.")
+ raise ValueError(mesg)
+
+ # remap the out-of-bounds behavior
+ try:
+ extrapolate = {0: True, }[ext]
+ except KeyError as e:
+ raise ValueError("Extrapolation mode %s is not supported "
+ "by BSpline." % ext) from e
+
+ return tck(x, der, extrapolate=extrapolate)
+ else:
+ return _impl.splev(x, tck, der, ext)
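+
+# NB (illustrative, values assumed): for `BSpline` inputs the remap above
+# supports only ``ext=0``; other modes raise:
+#
+#     spl = BSpline(t, c, k)
+#     splev(2.5, spl)          # fine: extrapolates, like ext=0
+#     splev(2.5, spl, ext=2)   # ValueError; use BSpline(t, c, k, extrapolate=False)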
+
+
+def splint(a, b, tck, full_output=0):
+ """
+ Evaluate the definite integral of a B-spline between two given points.
+
+ Parameters
+ ----------
+ a, b : float
+ The end-points of the integration interval.
+ tck : tuple or a BSpline instance
+ If a tuple, then it should be a sequence of length 3, containing the
+ vector of knots, the B-spline coefficients, and the degree of the
+ spline (see `splev`).
+ full_output : int, optional
+ Non-zero to return optional output.
+
+ Returns
+ -------
+ integral : float
+ The resulting integral.
+ wrk : ndarray
+ An array containing the integrals of the normalized B-splines
+ defined on the set of knots.
+ (Only returned if `full_output` is non-zero)
+
+ See Also
+ --------
+ splprep, splrep, sproot, spalde, splev
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ `splint` silently assumes that the spline function is zero outside the data
+ interval (`a`, `b`).
+
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using the `BSpline` objects.
+
+ References
+ ----------
+ .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
+ J. Inst. Maths Applics, 17, p.37-41, 1976.
+ .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
+ "not allowed. Use BSpline.integrate() instead.")
+ raise ValueError(mesg)
+
+ if full_output != 0:
+ mesg = ("full_output = %s is not supported. Proceeding as if "
+ "full_output = 0" % full_output)
+
+ return tck.integrate(a, b, extrapolate=False)
+ else:
+ return _impl.splint(a, b, tck, full_output)
+
+
+def sproot(tck, mest=10):
+ """
+ Find the roots of a cubic B-spline.
+
+ Given the knots (>=8) and coefficients of a cubic B-spline return the
+ roots of the spline.
+
+ Parameters
+ ----------
+ tck : tuple or a BSpline object
+ If a tuple, then it should be a sequence of length 3, containing the
+ vector of knots, the B-spline coefficients, and the degree of the
+ spline.
+ The number of knots must be >= 8, and the degree must be 3.
+        The knots must be a monotonically increasing sequence.
+ mest : int, optional
+ An estimate of the number of zeros (Default is 10).
+
+ Returns
+ -------
+ zeros : ndarray
+ An array giving the roots of the spline.
+
+ See Also
+ --------
+ splprep, splrep, splint, spalde, splev
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using the `BSpline` objects.
+
+ References
+ ----------
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+ Theory, 6, p.50-62, 1972.
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+ Applics, 10, p.134-149, 1972.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+
+ For some data, this method may miss a root. This happens when one of
+ the spline knots (which FITPACK places automatically) happens to
+ coincide with the true root. A workaround is to convert to `PPoly`,
+ which uses a different root-finding algorithm.
+
+ For example,
+
+ >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
+ >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
+ ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
+ ... 6.520430e-03, 8.170770e-03]
+ >>> from scipy.interpolate import splrep, sproot, PPoly
+ >>> tck = splrep(x, y, s=0)
+ >>> sproot(tck)
+ array([], dtype=float64)
+
+    Converting to a PPoly object does find the root at ``x=2``:
+
+ >>> ppoly = PPoly.from_spline(tck)
+ >>> ppoly.roots(extrapolate=False)
+ array([2.])
+
+
+    Further examples are given :ref:`in the tutorial
+    <tutorial-interpolate_splXXX>`.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
+ "not allowed.")
+ raise ValueError(mesg)
+
+ t, c, k = tck.tck
+
+ # _impl.sproot expects the interpolation axis to be last, so roll it.
+ # NB: This transpose is a no-op if c is 1D.
+ sh = tuple(range(c.ndim))
+ c = c.transpose(sh[1:] + (0,))
+ return _impl.sproot((t, c, k), mest)
+ else:
+ return _impl.sproot(tck, mest)
+
+
+def spalde(x, tck):
+ """
+ Evaluate all derivatives of a B-spline.
+
+ Given the knots and coefficients of a cubic B-spline compute all
+ derivatives up to order k at a point (or set of points).
+
+ Parameters
+ ----------
+ x : array_like
+ A point or a set of points at which to evaluate the derivatives.
+ Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
+ tck : tuple
+ A tuple (t,c,k) containing the vector of knots,
+ the B-spline coefficients, and the degree of the spline.
+
+ Returns
+ -------
+ results : {ndarray, list of ndarrays}
+ An array (or a list of arrays) containing all derivatives
+ up to order k inclusive for each point `x`.
+
+ See Also
+ --------
+ splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
+ UnivariateSpline, BivariateSpline
+
+ References
+ ----------
+ .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
+ 6 (1972) 50-62.
+ .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
+ applics 10 (1972) 134-149.
+ .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ """
+ if isinstance(tck, BSpline):
+ raise TypeError("spalde does not accept BSpline instances.")
+ else:
+ return _impl.spalde(x, tck)
+
+
+def insert(x, tck, m=1, per=0):
+ """
+ Insert knots into a B-spline.
+
+ Given the knots and coefficients of a B-spline representation, create a
+ new B-spline with a knot inserted `m` times at point `x`.
+ This is a wrapper around the FORTRAN routine insert of FITPACK.
+
+ Parameters
+ ----------
+ x (u) : float
+ A knot value at which to insert a new knot. If `tck` was returned
+ from ``splprep``, then the parameter values, u should be given.
+ tck : a `BSpline` instance or a tuple
+ If tuple, then it is expected to be a tuple (t,c,k) containing
+ the vector of knots, the B-spline coefficients, and the degree of
+ the spline.
+ m : int, optional
+ The number of times to insert the given knot (its multiplicity).
+ Default is 1.
+ per : int, optional
+ If non-zero, the input spline is considered periodic.
+
+ Returns
+ -------
+ BSpline instance or a tuple
+ A new B-spline with knots t, coefficients c, and degree k.
+ ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
+ In case of a periodic spline (``per != 0``) there must be
+        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
+        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
+        A tuple is returned iff the input argument `tck` is a tuple, otherwise
+        a BSpline object is constructed and returned.
+
+    Notes
+    -----
+    Based on algorithms from [1]_ and [2]_.
+
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    References
+    ----------
+    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
+        Computer Aided Design, 12, p.199-201, 1980.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
+        Numerical Analysis", Oxford University Press, 1993.
+
+    Examples
+    --------
+    You can insert knots into a B-spline.
+
+    >>> from scipy.interpolate import splrep, insert
+ >>> import numpy as np
+ >>> x = np.linspace(0, 10, 5)
+ >>> y = np.sin(x)
+ >>> tck = splrep(x, y)
+ >>> tck[0]
+ array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
+
+ A knot is inserted:
+
+ >>> tck_inserted = insert(3, tck)
+ >>> tck_inserted[0]
+ array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
+
+ Some knots are inserted:
+
+ >>> tck_inserted2 = insert(8, tck, m=3)
+ >>> tck_inserted2[0]
+ array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
+
+ """
+ if isinstance(tck, BSpline):
+
+ t, c, k = tck.tck
+
+ # FITPACK expects the interpolation axis to be last, so roll it over
+ # NB: if c array is 1D, transposes are no-ops
+ sh = tuple(range(c.ndim))
+ c = c.transpose(sh[1:] + (0,))
+ t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
+
+ # and roll the last axis back
+ c_ = np.asarray(c_)
+ c_ = c_.transpose((sh[-1],) + sh[:-1])
+ return BSpline(t_, c_, k_)
+ else:
+ return _impl.insert(x, tck, m, per)
+
+
+def splder(tck, n=1):
+ """
+ Compute the spline representation of the derivative of a given spline
+
+ Parameters
+ ----------
+ tck : BSpline instance or a tuple of (t, c, k)
+ Spline whose derivative to compute
+ n : int, optional
+ Order of derivative to evaluate. Default: 1
+
+ Returns
+ -------
+ `BSpline` instance or tuple
+ Spline of order k2=k-n representing the derivative
+ of the input spline.
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
+ a BSpline object is constructed and returned.
+
+ See Also
+ --------
+ splantider, splev, spalde
+ BSpline
+
+ Notes
+ -----
+
+ .. versionadded:: 0.13.0
+
+ Examples
+ --------
+ This can be used for finding maxima of a curve:
+
+ >>> from scipy.interpolate import splrep, splder, sproot
+ >>> import numpy as np
+ >>> x = np.linspace(0, 10, 70)
+ >>> y = np.sin(x)
+ >>> spl = splrep(x, y, k=4)
+
+ Now, differentiate the spline and find the zeros of the
+ derivative. (NB: `sproot` only works for order 3 splines, so we
+ fit an order 4 spline):
+
+ >>> dspl = splder(spl)
+ >>> sproot(dspl) / np.pi
+ array([ 0.50000001, 1.5 , 2.49999998])
+
+ This agrees well with roots :math:`\\pi/2 + n\\pi` of
+ :math:`\\cos(x) = \\sin'(x)`.
+
+ """
+ if isinstance(tck, BSpline):
+ return tck.derivative(n)
+ else:
+ return _impl.splder(tck, n)
+
+
+def splantider(tck, n=1):
+ """
+ Compute the spline for the antiderivative (integral) of a given spline.
+
+ Parameters
+ ----------
+ tck : BSpline instance or a tuple of (t, c, k)
+ Spline whose antiderivative to compute
+ n : int, optional
+ Order of antiderivative to evaluate. Default: 1
+
+ Returns
+ -------
+ BSpline instance or a tuple of (t2, c2, k2)
+ Spline of order k2=k+n representing the antiderivative of the input
+ spline.
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
+ a BSpline object is constructed and returned.
+
+ See Also
+ --------
+ splder, splev, spalde
+ BSpline
+
+ Notes
+ -----
+ The `splder` function is the inverse operation of this function.
+ Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
+ rounding error.
+
+ .. versionadded:: 0.13.0
+
+ Examples
+ --------
+ >>> from scipy.interpolate import splrep, splder, splantider, splev
+ >>> import numpy as np
+ >>> x = np.linspace(0, np.pi/2, 70)
+ >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+ >>> spl = splrep(x, y)
+
+ The derivative is the inverse operation of the antiderivative,
+ although some floating point error accumulates:
+
+ >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
+ (array(2.1565429877197317), array(2.1565429877201865))
+
+ Antiderivative can be used to evaluate definite integrals:
+
+ >>> ispl = splantider(spl)
+ >>> splev(np.pi/2, ispl) - splev(0, ispl)
+ 2.2572053588768486
+
+ This is indeed an approximation to the complete elliptic integral
+ :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+ >>> from scipy.special import ellipk
+ >>> ellipk(0.8)
+ 2.2572053268208538
+
+ """
+ if isinstance(tck, BSpline):
+ return tck.antiderivative(n)
+ else:
+ return _impl.splantider(tck, n)
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py
new file mode 100644
index 0000000000000000000000000000000000000000..826dddb311d78bf8d5381b18e46ca1ba86c04b6d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py
@@ -0,0 +1,358 @@
+import itertools
+import functools
+import operator
+import numpy as np
+
+from math import prod
+
+from . import _bspl # type: ignore
+
+import scipy.sparse.linalg as ssl
+from scipy.sparse import csr_array
+
+from ._bsplines import _not_a_knot
+
+__all__ = ["NdBSpline"]
+
+
+def _get_dtype(dtype):
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
+ if np.issubdtype(dtype, np.complexfloating):
+ return np.complex128
+ else:
+ return np.float64
+
+
+class NdBSpline:
+ """Tensor product spline object.
+
+ The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
+ combination of products of one-dimensional b-splines in each of the ``N``
+ dimensions::
+
+ c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
+
+
+ Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
+ ``t`` evaluated at ``x``.
+
+ Parameters
+ ----------
+ t : tuple of 1D ndarrays
+ knot vectors in directions 1, 2, ... N,
+ ``len(t[i]) == n[i] + k + 1``
+ c : ndarray, shape (n1, n2, ..., nN, ...)
+ b-spline coefficients
+ k : int or length-d tuple of integers
+ spline degrees.
+ A single integer is interpreted as having this degree for
+ all dimensions.
+ extrapolate : bool, optional
+ Whether to extrapolate out-of-bounds inputs, or return `nan`.
+ Default is to extrapolate.
+
+ Attributes
+ ----------
+ t : tuple of ndarrays
+ Knots vectors.
+ c : ndarray
+        Coefficients of the tensor-product spline.
+ k : tuple of integers
+ Degrees for each dimension.
+ extrapolate : bool, optional
+ Whether to extrapolate or return nans for out-of-bounds inputs.
+ Defaults to true.
+
+ Methods
+ -------
+ __call__
+ design_matrix
+
+ See Also
+ --------
+ BSpline : a one-dimensional B-spline object
+ NdPPoly : an N-dimensional piecewise tensor product polynomial
+
+ """
+ def __init__(self, t, c, k, *, extrapolate=None):
+ ndim = len(t)
+
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ if len(k) != ndim:
+ raise ValueError(f"{len(t) = } != {len(k) = }.")
+
+ self.k = tuple(operator.index(ki) for ki in k)
+ self.t = tuple(np.ascontiguousarray(ti, dtype=float) for ti in t)
+ self.c = np.asarray(c)
+
+ if extrapolate is None:
+ extrapolate = True
+ self.extrapolate = bool(extrapolate)
+
+ for d in range(ndim):
+ td = self.t[d]
+ kd = self.k[d]
+ n = td.shape[0] - kd - 1
+ if kd < 0:
+ raise ValueError(f"Spline degree in dimension {d} cannot be"
+ f" negative.")
+ if td.ndim != 1:
+ raise ValueError(f"Knot vector in dimension {d} must be"
+ f" one-dimensional.")
+ if n < kd + 1:
+ raise ValueError(f"Need at least {2*kd + 2} knots for degree"
+ f" {kd} in dimension {d}.")
+ if (np.diff(td) < 0).any():
+ raise ValueError(f"Knots in dimension {d} must be in a"
+ f" non-decreasing order.")
+ if len(np.unique(td[kd:n + 1])) < 2:
+ raise ValueError(f"Need at least two internal knots in"
+ f" dimension {d}.")
+ if not np.isfinite(td).all():
+ raise ValueError(f"Knots in dimension {d} should not have"
+ f" nans or infs.")
+ if self.c.ndim < ndim:
+ raise ValueError(f"Coefficients must be at least"
+ f" {d}-dimensional.")
+ if self.c.shape[d] != n:
+ raise ValueError(f"Knots, coefficients and degree in dimension"
+ f" {d} are inconsistent:"
+ f" got {self.c.shape[d]} coefficients for"
+ f" {len(td)} knots, need at least {n} for"
+ f" k={k}.")
+
+ dt = _get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+ def __call__(self, xi, *, nu=None, extrapolate=None):
+ """Evaluate the tensor product b-spline at ``xi``.
+
+ Parameters
+ ----------
+ xi : array_like, shape(..., ndim)
+ The coordinates to evaluate the interpolator at.
+ This can be a list or tuple of ndim-dimensional points
+ or an array with the shape (num_points, ndim).
+ nu : array_like, optional, shape (ndim,)
+ Orders of derivatives to evaluate. Each must be non-negative.
+            Defaults to the zeroth derivative.
+        extrapolate : bool, optional
+            Whether to extrapolate based on first and last intervals in each
+            dimension, or return `nan`. Defaults to ``self.extrapolate``.
+
+ Returns
+ -------
+ values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
+ Interpolated values at ``xi``
+ """
+ ndim = len(self.t)
+
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ extrapolate = bool(extrapolate)
+
+ if nu is None:
+ nu = np.zeros((ndim,), dtype=np.intc)
+ else:
+ nu = np.asarray(nu, dtype=np.intc)
+ if nu.ndim != 1 or nu.shape[0] != ndim:
+ raise ValueError(
+ f"invalid number of derivative orders {nu = } for "
+ f"ndim = {len(self.t)}.")
+ if any(nu < 0):
+ raise ValueError(f"derivatives must be positive, got {nu = }")
+
+ # prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
+ xi = np.asarray(xi, dtype=float)
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi_shape[-1])
+ xi = np.ascontiguousarray(xi)
+
+ if xi_shape[-1] != ndim:
+ raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
+
+ # prepare k & t
+ _k = np.asarray(self.k, dtype=np.dtype("long"))
+
+ # pack the knots into a single array
+ len_t = [len(ti) for ti in self.t]
+ _t = np.empty((ndim, max(len_t)), dtype=float)
+ _t.fill(np.nan)
+ for d in range(ndim):
+ _t[d, :len(self.t[d])] = self.t[d]
+ len_t = np.asarray(len_t, dtype=np.dtype("long"))
+
+ # tabulate the flat indices for iterating over the (k+1)**ndim subarray
+ shape = tuple(kd + 1 for kd in self.k)
+ indices = np.unravel_index(np.arange(prod(shape)), shape)
+ _indices_k1d = np.asarray(indices, dtype=np.intp).T
+
+ # prepare the coefficients: flatten the trailing dimensions
+ c1 = self.c.reshape(self.c.shape[:ndim] + (-1,))
+ c1r = c1.ravel()
+
+ # replacement for np.ravel_multi_index for indexing of `c1`:
+ _strides_c1 = np.asarray([s // c1.dtype.itemsize
+ for s in c1.strides], dtype=np.intp)
+
+        num_c_tr = c1.shape[-1]  # number of trailing coefficients
+ out = np.empty(xi.shape[:-1] + (num_c_tr,), dtype=c1.dtype)
+
+ _bspl.evaluate_ndbspline(xi,
+ _t,
+ len_t,
+ _k,
+ nu,
+ extrapolate,
+ c1r,
+ num_c_tr,
+ _strides_c1,
+ _indices_k1d,
+ out,)
+
+ return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
+
+ @classmethod
+ def design_matrix(cls, xvals, t, k, extrapolate=True):
+ """Construct the design matrix as a CSR format sparse array.
+
+ Parameters
+ ----------
+ xvals : ndarray, shape(npts, ndim)
+ Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
+ ``ndim``-dimensional array.
+ t : tuple of 1D ndarrays, length-ndim
+ Knot vectors in directions 1, 2, ... ndim,
+ k : int
+ B-spline degree.
+ extrapolate : bool, optional
+            Whether to extrapolate out-of-bounds values or raise a `ValueError`.
+
+ Returns
+ -------
+ design_matrix : a CSR array
+ Each row of the design matrix corresponds to a value in `xvals` and
+ contains values of b-spline basis elements which are non-zero
+ at this value.
+
+ """
+ xvals = np.asarray(xvals, dtype=float)
+ ndim = xvals.shape[-1]
+ if len(t) != ndim:
+ raise ValueError(
+ f"Data and knots are inconsistent: len(t) = {len(t)} for "
+ f" {ndim = }."
+ )
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ kk = np.asarray(k, dtype=np.int32)
+ data, indices, indptr = _bspl._colloc_nd(xvals, t, kk)
+ return csr_array((data, indices, indptr))
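+
+# Usage sketch (hypothetical knot vectors `tx`, `ty` of a cubic 2-D spline):
+# each row of the CSR array has (k+1)**ndim = 16 structural non-zeros.
+#
+#     pts = np.array([[0.5, 1.5], [2.5, 3.5]])
+#     A = NdBSpline.design_matrix(pts, (tx, ty), k=3)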
+
+
+def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
+ # work around iterative solvers not accepting multiple r.h.s.
+
+ # also work around a.dtype == float64 and b.dtype == complex128
+ # cf https://github.com/scipy/scipy/issues/19644
+ if np.issubdtype(b.dtype, np.complexfloating):
+ real = _iter_solve(a, b.real, solver, **solver_args)
+ imag = _iter_solve(a, b.imag, solver, **solver_args)
+ return real + 1j*imag
+
+    if b.ndim == 2 and b.shape[1] != 1:
+ res = np.empty_like(b)
+ for j in range(b.shape[1]):
+ res[:, j], info = solver(a, b[:, j], **solver_args)
+ if info != 0:
+ raise ValueError(f"{solver = } returns {info =} for column {j}.")
+ return res
+ else:
+ res, info = solver(a, b, **solver_args)
+ if info != 0:
+ raise ValueError(f"{solver = } returns {info = }.")
+ return res
+
+
+def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
+ """Construct an interpolating NdBspline.
+
+ Parameters
+ ----------
+ points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
+ The points defining the regular grid in N dimensions. The points in
+ each dimension (i.e. every element of the `points` tuple) must be
+ strictly ascending or descending.
+ values : ndarray of float, shape (m1, ..., mN, ...)
+ The data on the regular grid in n dimensions.
+ k : int, optional
+ The spline degree. Must be odd. Default is cubic, k=3
+ solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
+ An iterative solver from `scipy.sparse.linalg` or a direct one,
+        `scipy.sparse.linalg.spsolve`.
+ Used to solve the sparse linear system
+ ``design_matrix @ coefficients = rhs`` for the coefficients.
+ Default is `scipy.sparse.linalg.gcrotmk`
+ solver_args : dict, optional
+ Additional arguments for the solver. The call signature is
+ ``solver(csr_array, rhs_vector, **solver_args)``
+
+ Returns
+ -------
+ spl : NdBSpline object
+
+ Notes
+ -----
+ Boundary conditions are not-a-knot in all dimensions.
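+
+    Examples
+    --------
+    As a minimal sketch, interpolate values on a small 2-D grid (the data
+    here are illustrative):
+
+    >>> import numpy as np
+    >>> x = np.linspace(0, 1, 6)
+    >>> y = np.linspace(0, 1, 7)
+    >>> values = np.sin(x[:, None] + y[None, :]**2)
+    >>> spl = make_ndbspl((x, y), values, k=3)
+    >>> v = spl([0.5, 0.5])   # interpolated value at (0.5, 0.5)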
+ """
+ ndim = len(points)
+ xi_shape = tuple(len(x) for x in points)
+
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ for d, point in enumerate(points):
+ numpts = len(np.atleast_1d(point))
+ if numpts <= k[d]:
+            raise ValueError(f"There are {numpts} points in dimension {d},"
+                             f" but order {k[d]} requires at least"
+                             f" {k[d]+1} points per dimension.")
+
+ t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
+ for d in range(ndim))
+ xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
+
+    # construct the collocation matrix
+ matr = NdBSpline.design_matrix(xvals, t, k)
+
+ # Solve for the coefficients given `values`.
+    # The first ndim dimensions of `values` are data dimensions, the rest
+    # are batch dimensions; stack `values` into a 2D array for the solver
+    # to understand.
+ v_shape = values.shape
+ vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
+ vals = values.reshape(vals_shape)
+
+ if solver != ssl.spsolve:
+ solver = functools.partial(_iter_solve, solver=solver)
+ if "atol" not in solver_args:
+ # avoid a DeprecationWarning, grumble grumble
+ solver_args["atol"] = 1e-6
+
+ coef = solver(matr, vals, **solver_args)
+ coef = coef.reshape(xi_shape + v_shape[ndim:])
+ return NdBSpline(t, coef, k)
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..2724d78f61416256357c4b2789c860a604845548
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py
@@ -0,0 +1,332 @@
+"""
+Convenience interface to N-D interpolation
+
+.. versionadded:: 0.9
+
+"""
+import numpy as np
+from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
+ CloughTocher2DInterpolator, _ndim_coords_from_arrays
+from scipy.spatial import cKDTree
+
+__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
+ 'CloughTocher2DInterpolator']
+
+#------------------------------------------------------------------------------
+# Nearest-neighbor interpolation
+#------------------------------------------------------------------------------
+
+
+class NearestNDInterpolator(NDInterpolatorBase):
+ """NearestNDInterpolator(x, y).
+
+ Nearest-neighbor interpolator in N > 1 dimensions.
+
+ .. versionadded:: 0.9
+
+ Methods
+ -------
+ __call__
+
+ Parameters
+ ----------
+ x : (npoints, ndims) 2-D ndarray of floats
+ Data point coordinates.
+ y : (npoints, ) 1-D ndarray of float or complex
+ Data values.
+ rescale : boolean, optional
+ Rescale points to unit cube before performing interpolation.
+ This is useful if some of the input dimensions have
+ incommensurable units and differ by many orders of magnitude.
+
+ .. versionadded:: 0.14.0
+ tree_options : dict, optional
+ Options passed to the underlying ``cKDTree``.
+
+ .. versionadded:: 0.17.0
+
+ See Also
+ --------
+ griddata :
+ Interpolate unstructured D-D data.
+ LinearNDInterpolator :
+ Piecewise linear interpolator in N dimensions.
+ CloughTocher2DInterpolator :
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
+ interpn : Interpolation on a regular grid or rectilinear grid.
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+
+ Notes
+ -----
+ Uses ``scipy.spatial.cKDTree``
+
+ .. note:: For data on a regular grid use `interpn` instead.
+
+ Examples
+ --------
+ We can interpolate values on a 2D plane:
+
+ >>> from scipy.interpolate import NearestNDInterpolator
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> rng = np.random.default_rng()
+ >>> x = rng.random(10) - 0.5
+ >>> y = rng.random(10) - 0.5
+ >>> z = np.hypot(x, y)
+ >>> X = np.linspace(min(x), max(x))
+ >>> Y = np.linspace(min(y), max(y))
+ >>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
+ >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
+ >>> Z = interp(X, Y)
+ >>> plt.pcolormesh(X, Y, Z, shading='auto')
+ >>> plt.plot(x, y, "ok", label="input point")
+ >>> plt.legend()
+ >>> plt.colorbar()
+ >>> plt.axis("equal")
+ >>> plt.show()
+
+ """
+
+ def __init__(self, x, y, rescale=False, tree_options=None):
+ NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
+ need_contiguous=False,
+ need_values=False)
+ if tree_options is None:
+ tree_options = dict()
+ self.tree = cKDTree(self.points, **tree_options)
+ self.values = np.asarray(y)
+
+ def __call__(self, *args, **query_options):
+ """
+ Evaluate interpolator at given points.
+
+ Parameters
+ ----------
+ x1, x2, ... xn : array-like of float
+            Points at which to interpolate data.
+            x1, x2, ... xn can be array-like of float with broadcastable shape,
+            or x1 can be array-like of float with shape ``(..., ndim)``.
+        **query_options
+            This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
+            to be passed to the cKDTree's query function explicitly.
+ See `scipy.spatial.cKDTree.query` for an overview of the different options.
+
+ .. versionadded:: 1.12.0
+
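+        Examples
+        --------
+        As a rough sketch, bound the neighbor search so that queries with no
+        data point within the given radius evaluate to nan (the data here
+        are arbitrary):
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import NearestNDInterpolator
+        >>> rng = np.random.default_rng()
+        >>> interp = NearestNDInterpolator(rng.random((10, 2)), rng.random(10))
+        >>> vals = interp([[0.5, 0.5]], distance_upper_bound=0.1)
+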
+ """
+ # For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
+ # some operations which are not required by NearestNDInterpolator.__call__,
+ # hence here we operate on xi directly, without calling a parent class function.
+ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
+ xi = self._check_call_shape(xi)
+ xi = self._scale_x(xi)
+
+ # We need to handle two important cases:
+ # (1) the case where xi has trailing dimensions (..., ndim), and
+ # (2) the case where y has trailing dimensions
+ # We will first flatten xi to deal with case (1),
+ # do the computation in flattened array while retaining y's dimensionality,
+ # and then reshape the interpolated values back to match xi's shape.
+
+ # Flatten xi for the query
+ xi_flat = xi.reshape(-1, xi.shape[-1])
+ original_shape = xi.shape
+ flattened_shape = xi_flat.shape
+
+        # If distance_upper_bound is set to be finite,
+        # then we need to consider the case where cKDTree
+        # does not find any points within distance_upper_bound to return.
+        # It marks those points as having infinite distance, which is what will
+        # be used below to mask the array and return only the points that were
+        # deemed to have a close enough neighbor to return something useful.
+ dist, i = self.tree.query(xi_flat, **query_options)
+ valid_mask = np.isfinite(dist)
+
+ # create a holder interp_values array and fill with nans.
+ if self.values.ndim > 1:
+ interp_shape = flattened_shape[:-1] + self.values.shape[1:]
+ else:
+ interp_shape = flattened_shape[:-1]
+
+ if np.issubdtype(self.values.dtype, np.complexfloating):
+ interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
+ else:
+ interp_values = np.full(interp_shape, np.nan)
+
+ interp_values[valid_mask] = self.values[i[valid_mask], ...]
+
+ if self.values.ndim > 1:
+ new_shape = original_shape[:-1] + self.values.shape[1:]
+ else:
+ new_shape = original_shape[:-1]
+ interp_values = interp_values.reshape(new_shape)
+
+ return interp_values
+
+
+#------------------------------------------------------------------------------
+# Convenience interface function
+#------------------------------------------------------------------------------
+
+
+def griddata(points, values, xi, method='linear', fill_value=np.nan,
+ rescale=False):
+ """
+ Interpolate unstructured D-D data.
+
+ Parameters
+ ----------
+ points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
+ Data point coordinates.
+ values : ndarray of float or complex, shape (n,)
+ Data values.
+ xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
+ Points at which to interpolate data.
+ method : {'linear', 'nearest', 'cubic'}, optional
+ Method of interpolation. One of
+
+ ``nearest``
+ return the value at the data point closest to
+ the point of interpolation. See `NearestNDInterpolator` for
+ more details.
+
+ ``linear``
+ tessellate the input point set to N-D
+ simplices, and interpolate linearly on each simplex. See
+ `LinearNDInterpolator` for more details.
+
+ ``cubic`` (1-D)
+ return the value determined from a cubic
+ spline.
+
+ ``cubic`` (2-D)
+ return the value determined from a
+ piecewise cubic, continuously differentiable (C1), and
+ approximately curvature-minimizing polynomial surface. See
+ `CloughTocher2DInterpolator` for more details.
+ fill_value : float, optional
+ Value used to fill in for requested points outside of the
+ convex hull of the input points. If not provided, then the
+ default is ``nan``. This option has no effect for the
+ 'nearest' method.
+ rescale : bool, optional
+ Rescale points to unit cube before performing interpolation.
+ This is useful if some of the input dimensions have
+ incommensurable units and differ by many orders of magnitude.
+
+ .. versionadded:: 0.14.0
+
+ Returns
+ -------
+ ndarray
+ Array of interpolated values.
+
+ See Also
+ --------
+ LinearNDInterpolator :
+ Piecewise linear interpolator in N dimensions.
+ NearestNDInterpolator :
+ Nearest-neighbor interpolator in N dimensions.
+ CloughTocher2DInterpolator :
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
+ interpn : Interpolation on a regular grid or rectilinear grid.
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+
+ Notes
+ -----
+
+ .. versionadded:: 0.9
+
+ .. note:: For data on a regular grid use `interpn` instead.
+
+ Examples
+ --------
+
+ Suppose we want to interpolate the 2-D function
+
+ >>> import numpy as np
+ >>> def func(x, y):
+ ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
+
+ on a grid in [0, 1]x[0, 1]
+
+ >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
+
+ but we only know its values at 1000 data points:
+
+ >>> rng = np.random.default_rng()
+ >>> points = rng.random((1000, 2))
+ >>> values = func(points[:,0], points[:,1])
+
+ This can be done with `griddata` -- below we try out all of the
+ interpolation methods:
+
+ >>> from scipy.interpolate import griddata
+ >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
+ >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
+ >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
+
+ One can see that the exact result is reproduced by all of the
+ methods to some degree, but for this smooth function the piecewise
+ cubic interpolant gives the best results:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.subplot(221)
+ >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
+ >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
+ >>> plt.title('Original')
+ >>> plt.subplot(222)
+ >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Nearest')
+ >>> plt.subplot(223)
+ >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Linear')
+ >>> plt.subplot(224)
+ >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Cubic')
+ >>> plt.gcf().set_size_inches(6, 6)
+ >>> plt.show()
+
+ """ # numpy/numpydoc#87 # noqa: E501
+
+ points = _ndim_coords_from_arrays(points)
+
+ if points.ndim < 2:
+ ndim = points.ndim
+ else:
+ ndim = points.shape[-1]
+
+ if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
+ from ._interpolate import interp1d
+ points = points.ravel()
+ if isinstance(xi, tuple):
+ if len(xi) != 1:
+ raise ValueError("invalid number of dimensions in xi")
+ xi, = xi
+ # Sort points/values together, necessary as input for interp1d
+ idx = np.argsort(points)
+ points = points[idx]
+ values = values[idx]
+ if method == 'nearest':
+ fill_value = 'extrapolate'
+ ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
+ fill_value=fill_value)
+ return ip(xi)
+ elif method == 'nearest':
+ ip = NearestNDInterpolator(points, values, rescale=rescale)
+ return ip(xi)
+ elif method == 'linear':
+ ip = LinearNDInterpolator(points, values, fill_value=fill_value,
+ rescale=rescale)
+ return ip(xi)
+ elif method == 'cubic' and ndim == 2:
+ ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
+ rescale=rescale)
+ return ip(xi)
+ else:
+ raise ValueError("Unknown interpolation method %r for "
+ "%d dimensional data" % (method, ndim))
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_polyint.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_polyint.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ed06d8abdba9597397295a0ca3c4a2bc3b25659
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_polyint.py
@@ -0,0 +1,938 @@
+import warnings
+
+import numpy as np
+from scipy.special import factorial
+from scipy._lib._util import _asarray_validated, float_factorial, check_random_state
+
+
+__all__ = ["KroghInterpolator", "krogh_interpolate",
+ "BarycentricInterpolator", "barycentric_interpolate",
+ "approximate_taylor_polynomial"]
+
+
+def _isscalar(x):
+    """Check whether x is a scalar type or a 0-dim array"""
+ return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
+
+
+class _Interpolator1D:
+ """
+ Common features in univariate interpolation
+
+ Deal with input data type and interpolation axis rolling. The
+ actual interpolator can assume the y-data is of shape (n, r) where
+ `n` is the number of x-points, and `r` the number of variables,
+ and use self.dtype as the y-data type.
+
+ Attributes
+ ----------
+ _y_axis
+ Axis along which the interpolation goes in the original array
+ _y_extra_shape
+ Additional trailing shape of the input arrays, excluding
+ the interpolation axis.
+ dtype
+ Dtype of the y-data arrays. Can be set via _set_dtype, which
+ forces it to be float or complex.
+
+ Methods
+ -------
+ __call__
+ _prepare_x
+ _finish_y
+ _reshape_yi
+ _set_yi
+ _set_dtype
+ _evaluate
+
+ """
+
+ __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
+
+ def __init__(self, xi=None, yi=None, axis=None):
+ self._y_axis = axis
+ self._y_extra_shape = None
+ self.dtype = None
+ if yi is not None:
+ self._set_yi(yi, xi=xi, axis=axis)
+
+ def __call__(self, x):
+ """
+ Evaluate the interpolant
+
+ Parameters
+ ----------
+ x : array_like
+ Point or points at which to evaluate the interpolant.
+
+ Returns
+ -------
+ y : array_like
+ Interpolated values. Shape is determined by replacing
+ the interpolation axis in the original array with the shape of `x`.
+
+ Notes
+ -----
+ Input values `x` must be convertible to `float` values like `int`
+ or `float`.
+
+ """
+ x, x_shape = self._prepare_x(x)
+ y = self._evaluate(x)
+ return self._finish_y(y, x_shape)
+
+ def _evaluate(self, x):
+ """
+ Actually evaluate the value of the interpolator.
+ """
+ raise NotImplementedError()
+
+ def _prepare_x(self, x):
+ """Reshape input x array to 1-D"""
+ x = _asarray_validated(x, check_finite=False, as_inexact=True)
+ x_shape = x.shape
+ return x.ravel(), x_shape
+
+ def _finish_y(self, y, x_shape):
+ """Reshape interpolated y back to an N-D array similar to initial y"""
+ y = y.reshape(x_shape + self._y_extra_shape)
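+        # `y` now has shape x_shape + self._y_extra_shape; if the
+        # interpolation axis was not the leading one, move the x axes back
+        # into its original position.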
+ if self._y_axis != 0 and x_shape != ():
+ nx = len(x_shape)
+ ny = len(self._y_extra_shape)
+ s = (list(range(nx, nx + self._y_axis))
+ + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
+ y = y.transpose(s)
+ return y
+
+ def _reshape_yi(self, yi, check=False):
+ yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
+ if check and yi.shape[1:] != self._y_extra_shape:
+ ok_shape = "{!r} + (N,) + {!r}".format(self._y_extra_shape[-self._y_axis:],
+ self._y_extra_shape[:-self._y_axis])
+ raise ValueError("Data must be of shape %s" % ok_shape)
+ return yi.reshape((yi.shape[0], -1))
+
+ def _set_yi(self, yi, xi=None, axis=None):
+ if axis is None:
+ axis = self._y_axis
+ if axis is None:
+ raise ValueError("no interpolation axis specified")
+
+ yi = np.asarray(yi)
+
+ shape = yi.shape
+ if shape == ():
+ shape = (1,)
+ if xi is not None and shape[axis] != len(xi):
+ raise ValueError("x and y arrays must be equal in length along "
+ "interpolation axis.")
+
+ self._y_axis = (axis % yi.ndim)
+ self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
+ self.dtype = None
+ self._set_dtype(yi.dtype)
+
+ def _set_dtype(self, dtype, union=False):
+ if np.issubdtype(dtype, np.complexfloating) \
+ or np.issubdtype(self.dtype, np.complexfloating):
+ self.dtype = np.complex128
+ else:
+ if not union or self.dtype != np.complex128:
+ self.dtype = np.float64
+
+
+class _Interpolator1DWithDerivatives(_Interpolator1D):
+ def derivatives(self, x, der=None):
+ """
+ Evaluate several derivatives of the polynomial at the point `x`
+
+ Produce an array of derivatives evaluated at the point `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ Point or points at which to evaluate the derivatives
+ der : int or list or None, optional
+ How many derivatives to evaluate, or None for all potentially
+ nonzero derivatives (that is, a number equal to the number
+ of points), or a list of derivatives to evaluate. This number
+ includes the function value as the '0th' derivative.
+
+ Returns
+ -------
+ d : ndarray
+ Array with derivatives; ``d[j]`` contains the jth derivative.
+ Shape of ``d[j]`` is determined by replacing the interpolation
+ axis in the original array with the shape of `x`.
+
+ Examples
+ --------
+ >>> from scipy.interpolate import KroghInterpolator
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
+ array([1.0,2.0,3.0])
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
+ array([[1.0,1.0],
+ [2.0,2.0],
+ [3.0,3.0]])
+
+ """
+ x, x_shape = self._prepare_x(x)
+ y = self._evaluate_derivatives(x, der)
+
+ y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
+ if self._y_axis != 0 and x_shape != ():
+ nx = len(x_shape)
+ ny = len(self._y_extra_shape)
+ s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ + list(range(1, nx+1)) +
+ list(range(nx+1+self._y_axis, nx+ny+1)))
+ y = y.transpose(s)
+ return y
+
+ def derivative(self, x, der=1):
+ """
+ Evaluate a single derivative of the polynomial at the point `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ Point or points at which to evaluate the derivatives
+
+ der : integer, optional
+ Which derivative to evaluate (default: first derivative).
+ This number includes the function value as 0th derivative.
+
+ Returns
+ -------
+ d : ndarray
+ Derivative interpolated at the x-points. Shape of `d` is
+ determined by replacing the interpolation axis in the
+ original array with the shape of `x`.
+
+ Notes
+ -----
+ This may be computed by evaluating all derivatives up to the desired
+ one (using self.derivatives()) and then discarding the rest.
+
+ """
+ x, x_shape = self._prepare_x(x)
+ y = self._evaluate_derivatives(x, der+1)
+ return self._finish_y(y[der], x_shape)
+
+ def _evaluate_derivatives(self, x, der=None):
+ """
+ Actually evaluate the derivatives.
+
+ Parameters
+ ----------
+ x : array_like
+ 1D array of points at which to evaluate the derivatives
+ der : integer, optional
+ The number of derivatives to evaluate, from 'order 0' (der=1)
+ to order der-1. If omitted, return all possibly-non-zero
+            derivatives, i.e., from order 0 to order n-1.
+
+ Returns
+ -------
+ d : ndarray
+ Array of shape ``(der, x.size, self.yi.shape[1])`` containing
+ the derivatives from 0 to der-1
+ """
+ raise NotImplementedError()
+
+
+class KroghInterpolator(_Interpolator1DWithDerivatives):
+ """
+ Interpolating polynomial for a set of points.
+
+ The polynomial passes through all the pairs ``(xi, yi)``. One may
+ additionally specify a number of derivatives at each point `xi`;
+ this is done by repeating the value `xi` and specifying the
+ derivatives as successive `yi` values.
+
+ Allows evaluation of the polynomial and all its derivatives.
+ For reasons of numerical stability, this function does not compute
+ the coefficients of the polynomial, although they can be obtained
+ by evaluating all the derivatives.
+
+ Parameters
+ ----------
+ xi : array_like, shape (npoints, )
+ Known x-coordinates. Must be sorted in increasing order.
+ yi : array_like, shape (..., npoints, ...)
+ Known y-coordinates. When an xi occurs two or more times in
+ a row, the corresponding yi's represent derivative values. The length of `yi`
+ along the interpolation axis must be equal to the length of `xi`. Use the
+ `axis` parameter to select the correct axis.
+ axis : int, optional
+ Axis in the `yi` array corresponding to the x-coordinate values. Defaults to
+ ``axis=0``.
+
+ Notes
+ -----
+ Be aware that the algorithms implemented here are not necessarily
+ the most numerically stable known. Moreover, even in a world of
+ exact computation, unless the x coordinates are chosen very
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+ polynomial interpolation itself is a very ill-conditioned process
+ due to the Runge phenomenon. In general, even with well-chosen
+ x values, degrees higher than about thirty cause problems with
+ numerical instability in this code.
+
+ Based on [1]_.
+
+ References
+ ----------
+ .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
+ and Numerical Differentiation", 1970.
+
+ Examples
+ --------
+ To produce a polynomial that is zero at 0 and 1 and has
+ derivative 2 at 0, call
+
+ >>> from scipy.interpolate import KroghInterpolator
+ >>> KroghInterpolator([0,0,1],[0,2,0])
+
+ This constructs the quadratic :math:`2x^2-2x`. The derivative condition
+ is indicated by the repeated zero in the `xi` array; the corresponding
+ yi values are 0, the function value, and 2, the derivative value.
+
+ For another example, given `xi`, `yi`, and a derivative `ypi` for each
+ point, appropriate arrays can be constructed as:
+
+ >>> import numpy as np
+ >>> rng = np.random.default_rng()
+ >>> xi = np.linspace(0, 1, 5)
+ >>> yi, ypi = rng.random((2, 5))
+ >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
+ >>> KroghInterpolator(xi_k, yi_k)
+
+ To produce a vector-valued polynomial, supply a higher-dimensional
+ array for `yi`:
+
+ >>> KroghInterpolator([0,1],[[2,3],[4,5]])
+
+ This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
+
+ """
+
+ def __init__(self, xi, yi, axis=0):
+ super().__init__(xi, yi, axis)
+
+ self.xi = np.asarray(xi)
+ self.yi = self._reshape_yi(yi)
+ self.n, self.r = self.yi.shape
+
+ if (deg := self.xi.size) > 30:
+ warnings.warn(f"{deg} degrees provided, degrees higher than about"
+ " thirty cause problems with numerical instability "
+ "with 'KroghInterpolator'", stacklevel=2)
+
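+        # Build the Newton-form coefficients c[k] via generalized divided
+        # differences (Krogh 1970). `Vk` is a work array, and `s` counts how
+        # many times xi[k] has been repeated so far, i.e. how many derivative
+        # values were supplied at that node.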
+ c = np.zeros((self.n+1, self.r), dtype=self.dtype)
+ c[0] = self.yi[0]
+ Vk = np.zeros((self.n, self.r), dtype=self.dtype)
+ for k in range(1, self.n):
+ s = 0
+ while s <= k and xi[k-s] == xi[k]:
+ s += 1
+ s -= 1
+ Vk[0] = self.yi[k]/float_factorial(s)
+ for i in range(k-s):
+ if xi[i] == xi[k]:
+ raise ValueError("Elements of `xi` can't be equal.")
+ if s == 0:
+ Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
+ else:
+ Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
+ c[k] = Vk[k-s]
+ self.c = c
+
+ def _evaluate(self, x):
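+        # Horner-style evaluation of the Newton form
+        #     p(x) = sum_k c[k] * prod_{j<k} (x - xi[j]),
+        # where `pi` carries the running product of the (x - xi[j]) factors.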
+ pi = 1
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
+ p += self.c[0,np.newaxis,:]
+ for k in range(1, self.n):
+ w = x - self.xi[k-1]
+ pi = w*pi
+ p += pi[:,np.newaxis] * self.c[k]
+ return p
+
+ def _evaluate_derivatives(self, x, der=None):
+ n = self.n
+ r = self.r
+
+ if der is None:
+ der = self.n
+
+ pi = np.zeros((n, len(x)))
+ w = np.zeros((n, len(x)))
+ pi[0] = 1
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
+ p += self.c[0, np.newaxis, :]
+
+ for k in range(1, n):
+ w[k-1] = x - self.xi[k-1]
+ pi[k] = w[k-1] * pi[k-1]
+ p += pi[k, :, np.newaxis] * self.c[k]
+
+ cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
+ cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
+ cn[0] = p
+ for k in range(1, n):
+ for i in range(1, n-k+1):
+ pi[i] = w[k+i-1]*pi[i-1] + pi[i]
+ cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
+ cn[k] *= float_factorial(k)
+
+ cn[n, :, :] = 0
+ return cn[:der]
+
+
+def krogh_interpolate(xi, yi, x, der=0, axis=0):
+ """
+ Convenience function for polynomial interpolation.
+
+ See `KroghInterpolator` for more details.
+
+ Parameters
+ ----------
+ xi : array_like
+ Interpolation points (known x-coordinates).
+ yi : array_like
+ Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
+ vectors of length R, or scalars if R=1.
+ x : array_like
+ Point or points at which to evaluate the derivatives.
+ der : int or list or None, optional
+ How many derivatives to evaluate, or None for all potentially
+ nonzero derivatives (that is, a number equal to the number
+ of points), or a list of derivatives to evaluate. This number
+ includes the function value as the '0th' derivative.
+ axis : int, optional
+ Axis in the `yi` array corresponding to the x-coordinate values.
+
+ Returns
+ -------
+ d : ndarray
+        If the interpolator's values are R-dimensional then the
+        returned array will have shape ``(number of derivatives, N, R)``.
+ If `x` is a scalar, the middle dimension will be dropped; if
+ the `yi` are scalars then the last dimension will be dropped.
+
+ See Also
+ --------
+ KroghInterpolator : Krogh interpolator
+
+ Notes
+ -----
+ Construction of the interpolating polynomial is a relatively expensive
+ process. If you want to evaluate it repeatedly consider using the class
+ KroghInterpolator (which is what this function uses).
+
+ Examples
+ --------
+ We can interpolate 2D observed data using Krogh interpolation:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import krogh_interpolate
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
+ >>> y_observed = np.sin(x_observed)
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+ >>> y = krogh_interpolate(x_observed, y_observed, x)
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
+ >>> plt.plot(x, y, label="krogh interpolation")
+ >>> plt.legend()
+ >>> plt.show()
+ """
+
+ P = KroghInterpolator(xi, yi, axis=axis)
+ if der == 0:
+ return P(x)
+ elif _isscalar(der):
+ return P.derivative(x, der=der)
+ else:
+ return P.derivatives(x, der=np.amax(der)+1)[der]
+
+
+def approximate_taylor_polynomial(f, x, degree, scale, order=None):
+ """
+ Estimate the Taylor polynomial of f at x by polynomial fitting.
+
+ Parameters
+ ----------
+ f : callable
+ The function whose Taylor polynomial is sought. Should accept
+ a vector of `x` values.
+ x : scalar
+ The point at which the polynomial is to be evaluated.
+ degree : int
+ The degree of the Taylor polynomial
+ scale : scalar
+ The width of the interval to use to evaluate the Taylor polynomial.
+ Function values spread over a range this wide are used to fit the
+ polynomial. Must be chosen carefully.
+ order : int or None, optional
+ The order of the polynomial to be used in the fitting; `f` will be
+ evaluated ``order+1`` times. If None, use `degree`.
+
+ Returns
+ -------
+ p : poly1d instance
+ The Taylor polynomial (translated to the origin, so that
+ for example p(0)=f(x)).
+
+ Notes
+ -----
+ The appropriate choice of "scale" is a trade-off; too large and the
+ function differs from its Taylor polynomial too much to get a good
+ answer, too small and round-off errors overwhelm the higher-order terms.
+ The algorithm used becomes numerically unstable around order 30 even
+ under ideal circumstances.
+
+ Choosing order somewhat larger than degree may improve the higher-order
+ terms.
+
+ Examples
+ --------
+    We can calculate Taylor approximation polynomials of the sin function with
+ various degrees:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import approximate_taylor_polynomial
+ >>> x = np.linspace(-10.0, 10.0, num=100)
+ >>> plt.plot(x, np.sin(x), label="sin curve")
+ >>> for degree in np.arange(1, 15, step=2):
+ ... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
+ ... order=degree + 2)
+ ... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
+ >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
+ ... borderaxespad=0.0, shadow=True)
+ >>> plt.tight_layout()
+ >>> plt.axis([-10, 10, -10, 10])
+ >>> plt.show()
+
+ """
+ if order is None:
+ order = degree
+
+ n = order+1
+ # Choose n points that cluster near the endpoints of the interval in
+ # a way that avoids the Runge phenomenon. Ensure, by including the
+ # endpoint or not as appropriate, that one point always falls at x
+ # exactly.
+ xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
+
+ P = KroghInterpolator(xs, f(xs))
+ d = P.derivatives(x,der=degree+1)
+
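+    # The Taylor coefficients are f^(k)(x)/k!, and poly1d expects the
+    # highest-degree coefficient first, hence the reversal.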
+ return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
+
+
+class BarycentricInterpolator(_Interpolator1DWithDerivatives):
+ r"""Interpolating polynomial for a set of points.
+
+ Constructs a polynomial that passes through a given set of points.
+ Allows evaluation of the polynomial and all its derivatives,
+ efficient changing of the y-values to be interpolated,
+ and updating by adding more x- and y-values.
+
+ For reasons of numerical stability, this function does not compute
+ the coefficients of the polynomial.
+
+ The values `yi` need to be provided before the function is
+ evaluated, but none of the preprocessing depends on them, so rapid
+ updates are possible.
+
+ Parameters
+ ----------
+ xi : array_like, shape (npoints, )
+ 1-D array of x coordinates of the points the polynomial
+ should pass through
+ yi : array_like, shape (..., npoints, ...), optional
+ N-D array of y coordinates of the points the polynomial should pass through.
+ If None, the y values will be supplied later via the `set_y` method.
+ The length of `yi` along the interpolation axis must be equal to the length
+ of `xi`. Use the ``axis`` parameter to select correct axis.
+ axis : int, optional
+ Axis in the yi array corresponding to the x-coordinate values. Defaults
+ to ``axis=0``.
+ wi : array_like, optional
+ The barycentric weights for the chosen interpolation points `xi`.
+ If absent or None, the weights will be computed from `xi` (default).
+ This allows for the reuse of the weights `wi` if several interpolants
+ are being calculated using the same nodes `xi`, without re-computation.
+ random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `random_state` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState`` instance
+        then that instance is used.
+
+ Notes
+ -----
+ This class uses a "barycentric interpolation" method that treats
+ the problem as a special case of rational function interpolation.
+ This algorithm is quite stable, numerically, but even in a world of
+ exact computation, unless the x coordinates are chosen very
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+ polynomial interpolation itself is a very ill-conditioned process
+ due to the Runge phenomenon.
+
+ Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
+
+ Examples
+ --------
+ To produce a quintic barycentric interpolant approximating the function
+ :math:`\sin x`, and its first four derivatives, using six randomly-spaced
+ nodes in :math:`(0, \frac{\pi}{2})`:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import BarycentricInterpolator
+ >>> rng = np.random.default_rng()
+ >>> xi = rng.random(6) * np.pi/2
+ >>> f, f_d1, f_d2, f_d3, f_d4 = np.sin, np.cos, lambda x: -np.sin(x), lambda x: -np.cos(x), np.sin
+ >>> P = BarycentricInterpolator(xi, f(xi), random_state=rng)
+ >>> fig, axs = plt.subplots(5, 1, sharex=True, layout='constrained', figsize=(7,10))
+ >>> x = np.linspace(0, np.pi, 100)
+ >>> axs[0].plot(x, P(x), 'r:', x, f(x), 'k--', xi, f(xi), 'xk')
+ >>> axs[1].plot(x, P.derivative(x), 'r:', x, f_d1(x), 'k--', xi, f_d1(xi), 'xk')
+ >>> axs[2].plot(x, P.derivative(x, 2), 'r:', x, f_d2(x), 'k--', xi, f_d2(xi), 'xk')
+ >>> axs[3].plot(x, P.derivative(x, 3), 'r:', x, f_d3(x), 'k--', xi, f_d3(xi), 'xk')
+ >>> axs[4].plot(x, P.derivative(x, 4), 'r:', x, f_d4(x), 'k--', xi, f_d4(xi), 'xk')
+ >>> axs[0].set_xlim(0, np.pi)
+ >>> axs[4].set_xlabel(r"$x$")
+ >>> axs[4].set_xticks([i * np.pi / 4 for i in range(5)],
+ ... ["0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"])
+ >>> axs[0].set_ylabel("$f(x)$")
+ >>> axs[1].set_ylabel("$f'(x)$")
+ >>> axs[2].set_ylabel("$f''(x)$")
+ >>> axs[3].set_ylabel("$f^{(3)}(x)$")
+ >>> axs[4].set_ylabel("$f^{(4)}(x)$")
+ >>> labels = ['Interpolation nodes', 'True function $f$', 'Barycentric interpolation']
+ >>> axs[0].legend(axs[0].get_lines()[::-1], labels, bbox_to_anchor=(0., 1.02, 1., .102),
+ ... loc='lower left', ncols=3, mode="expand", borderaxespad=0., frameon=False)
+ >>> plt.show()
+ """ # numpy/numpydoc#87 # noqa: E501
+
+ def __init__(self, xi, yi=None, axis=0, *, wi=None, random_state=None):
+ super().__init__(xi, yi, axis)
+
+ random_state = check_random_state(random_state)
+
+ self.xi = np.asarray(xi, dtype=np.float64)
+ self.set_yi(yi)
+ self.n = len(self.xi)
+
+ # cache derivative object to avoid re-computing the weights with every call.
+ self._diff_cij = None
+
+ if wi is not None:
+ self.wi = wi
+ else:
+ # See page 510 of Berrut and Trefethen 2004 for an explanation of the
+ # capacity scaling and the suggestion of using a random permutation of
+ # the input factors.
+ # At the moment, the permutation is not performed for xi that are
+ # appended later through the add_xi interface. It's not clear to me how
+ # to implement that and it seems that most situations that require
+ # these numerical stability improvements will be able to provide all
+ # the points to the constructor.
+ self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
+            permute = random_state.permutation(self.n)
+ inv_permute = np.zeros(self.n, dtype=np.int32)
+ inv_permute[permute] = np.arange(self.n)
+ self.wi = np.zeros(self.n)
+
+ for i in range(self.n):
+ dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
+ dist[inv_permute[i]] = 1.0
+ prod = np.prod(dist)
+ if prod == 0.0:
+ raise ValueError("Interpolation points xi must be"
+ " distinct.")
+ self.wi[i] = 1.0 / prod
+
+ def set_yi(self, yi, axis=None):
+ """
+ Update the y values to be interpolated
+
+ The barycentric interpolation algorithm requires the calculation
+ of weights, but these depend only on the `xi`. The `yi` can be changed
+ at any time.
+
+ Parameters
+ ----------
+ yi : array_like
+ The y-coordinates of the points the polynomial will pass through.
+ If None, the y values must be supplied later.
+ axis : int, optional
+ Axis in the `yi` array corresponding to the x-coordinate values.
+
+ """
+ if yi is None:
+ self.yi = None
+ return
+ self._set_yi(yi, xi=self.xi, axis=axis)
+ self.yi = self._reshape_yi(yi)
+ self.n, self.r = self.yi.shape
+ self._diff_baryint = None
+
+ def add_xi(self, xi, yi=None):
+ """
+ Add more x values to the set to be interpolated
+
+ The barycentric interpolation algorithm allows easy updating by
+ adding more points for the polynomial to pass through.
+
+ Parameters
+ ----------
+ xi : array_like
+ The x coordinates of the points that the polynomial should pass
+ through.
+ yi : array_like, optional
+ The y coordinates of the points the polynomial should pass through.
+ Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
+ vector-valued.
+ If `yi` is not given, the y values will be supplied later. `yi`
+ should be given if and only if the interpolator has y values
+ specified.
+
+ Notes
+ -----
+ The new points added by `add_xi` are not randomly permuted
+ so there is potential for numerical instability,
+ especially for a large number of points. If this
+ happens, please reconstruct interpolation from scratch instead.
+ """
+ if yi is not None:
+ if self.yi is None:
+ raise ValueError("No previous yi value to update!")
+ yi = self._reshape_yi(yi, check=True)
+ self.yi = np.vstack((self.yi,yi))
+ else:
+ if self.yi is not None:
+ raise ValueError("No update to yi provided!")
+ old_n = self.n
+ self.xi = np.concatenate((self.xi,xi))
+ self.n = len(self.xi)
+ self.wi **= -1
+ old_wi = self.wi
+ self.wi = np.zeros(self.n)
+ self.wi[:old_n] = old_wi
+ for j in range(old_n, self.n):
+ self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
+ self.wi[j] = np.multiply.reduce(
+ self._inv_capacity * (self.xi[:j]-self.xi[j])
+ )
+ self.wi **= -1
+ self._diff_cij = None
+ self._diff_baryint = None
+
+ def __call__(self, x):
+ """Evaluate the interpolating polynomial at the points x
+
+ Parameters
+ ----------
+ x : array_like
+ Point or points at which to evaluate the interpolant.
+
+ Returns
+ -------
+ y : array_like
+ Interpolated values. Shape is determined by replacing
+ the interpolation axis in the original array with the shape of `x`.
+
+ Notes
+ -----
+ Currently the code computes an outer product between `x` and the
+ weights, that is, it constructs an intermediate array of size
+ ``(N, len(x))``, where N is the degree of the polynomial.
+ """
+ return _Interpolator1D.__call__(self, x)
+
+ def _evaluate(self, x):
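+        # Vectorized evaluation of the second ("true") form of the
+        # barycentric formula (Berrut & Trefethen 2004, eq. 4.2):
+        #     p(x) = sum_j [w_j/(x - x_j)] y_j / sum_j [w_j/(x - x_j)],
+        # with exact hits x == x_j patched up afterwards.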
+ if x.size == 0:
+ p = np.zeros((0, self.r), dtype=self.dtype)
+ else:
+ c = x[..., np.newaxis] - self.xi
+ z = c == 0
+ c[z] = 1
+ c = self.wi / c
+ with np.errstate(divide='ignore'):
+ p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
+ # Now fix where x==some xi
+ r = np.nonzero(z)
+ if len(r) == 1: # evaluation at a scalar
+ if len(r[0]) > 0: # equals one of the points
+ p = self.yi[r[0][0]]
+ else:
+ p[r[:-1]] = self.yi[r[-1]]
+ return p
+
+ def derivative(self, x, der=1):
+ """
+ Evaluate a single derivative of the polynomial at the point x.
+
+ Parameters
+ ----------
+ x : array_like
+ Point or points at which to evaluate the derivatives
+ der : integer, optional
+ Which derivative to evaluate (default: first derivative).
+ This number includes the function value as 0th derivative.
+
+ Returns
+ -------
+ d : ndarray
+ Derivative interpolated at the x-points. Shape of `d` is
+ determined by replacing the interpolation axis in the
+ original array with the shape of `x`.
+ """
+ x, x_shape = self._prepare_x(x)
+ y = self._evaluate_derivatives(x, der+1, all_lower=False)
+ return self._finish_y(y, x_shape)
+
+ def _evaluate_derivatives(self, x, der=None, all_lower=True):
+ # NB: der here is not the order of the highest derivative;
+ # instead, it is the size of the derivatives matrix that
+ # would be returned with all_lower=True, including the
+ # '0th' derivative (the undifferentiated function).
+ # E.g. to evaluate the 5th derivative alone, call
+ # _evaluate_derivatives(x, der=6, all_lower=False).
+
+ if (not all_lower) and (x.size == 0 or self.r == 0):
+ return np.zeros((0, self.r), dtype=self.dtype)
+
+ if (not all_lower) and der == 1:
+ return self._evaluate(x)
+
+ if (not all_lower) and (der > self.n):
+ return np.zeros((len(x), self.r), dtype=self.dtype)
+
+ if der is None:
+ der = self.n
+
+ if all_lower and (x.size == 0 or self.r == 0):
+ return np.zeros((der, len(x), self.r), dtype=self.dtype)
+
+ if self._diff_cij is None:
+ # c[i,j] = xi[i] - xi[j]
+ c = self.xi[:, np.newaxis] - self.xi
+
+ # avoid division by 0 (diagonal entries are so far zero by construction)
+ np.fill_diagonal(c, 1)
+
+ # c[i,j] = (w[j] / w[i]) / (xi[i] - xi[j]) (equation 9.4)
+            c = self.wi / (c * self.wi[..., np.newaxis])
+
+ # fill in correct diagonal entries: each column sums to 0
+ np.fill_diagonal(c, 0)
+
+ # calculate diagonal
+ # c[j,j] = -sum_{i != j} c[i,j] (equation 9.5)
+ d = -c.sum(axis=1)
+ # c[i,j] = l_j(x_i)
+ np.fill_diagonal(c, d)
+
+ self._diff_cij = c
+
+ if self._diff_baryint is None:
+ # initialise and cache derivative interpolator and cijs;
+ # reuse weights wi (which depend only on interpolation points xi),
+ # to avoid unnecessary re-computation
+ self._diff_baryint = BarycentricInterpolator(xi=self.xi,
+ yi=self._diff_cij @ self.yi,
+ wi=self.wi)
+ self._diff_baryint._diff_cij = self._diff_cij
+
+ if all_lower:
+ # assemble matrix of derivatives from order 0 to order der-1,
+ # in the format required by _Interpolator1DWithDerivatives.
+ cn = np.zeros((der, len(x), self.r), dtype=self.dtype)
+ for d in range(der):
+ cn[d, :, :] = self._evaluate_derivatives(x, d+1, all_lower=False)
+ return cn
+
+ # recursively evaluate only the derivative requested
+ return self._diff_baryint._evaluate_derivatives(x, der-1, all_lower=False)
+
+
+def barycentric_interpolate(xi, yi, x, axis=0, *, der=0):
+ """
+ Convenience function for polynomial interpolation.
+
+ Constructs a polynomial that passes through a given set of points,
+ then evaluates the polynomial. For reasons of numerical stability,
+ this function does not compute the coefficients of the polynomial.
+
+ This function uses a "barycentric interpolation" method that treats
+ the problem as a special case of rational function interpolation.
+ This algorithm is quite stable, numerically, but even in a world of
+ exact computation, unless the `x` coordinates are chosen very
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+ polynomial interpolation itself is a very ill-conditioned process
+ due to the Runge phenomenon.
+
+ Parameters
+ ----------
+ xi : array_like
+ 1-D array of x coordinates of the points the polynomial should
+ pass through
+ yi : array_like
+ The y coordinates of the points the polynomial should pass through.
+ x : scalar or array_like
+ Point or points at which to evaluate the interpolant.
+ der : int or list or None, optional
+ How many derivatives to evaluate, or None for all potentially
+ nonzero derivatives (that is, a number equal to the number
+ of points), or a list of derivatives to evaluate. This number
+ includes the function value as the '0th' derivative.
+ axis : int, optional
+ Axis in the `yi` array corresponding to the x-coordinate values.
+
+ Returns
+ -------
+ y : scalar or array_like
+ Interpolated values. Shape is determined by replacing
+ the interpolation axis in the original array with the shape of `x`.
+
+ See Also
+ --------
+ BarycentricInterpolator : Barycentric interpolator
+
+ Notes
+ -----
+ Construction of the interpolation weights is a relatively slow process.
+ If you want to call this many times with the same xi (but possibly
+ varying yi or x) you should use the class `BarycentricInterpolator`.
+ This is what this function uses internally.
+
+ Examples
+ --------
+ We can interpolate 2D observed data using barycentric interpolation:
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import barycentric_interpolate
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
+ >>> y_observed = np.sin(x_observed)
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+ >>> y = barycentric_interpolate(x_observed, y_observed, x)
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
+ >>> plt.plot(x, y, label="barycentric interpolation")
+ >>> plt.legend()
+ >>> plt.show()
+
+ """
+ P = BarycentricInterpolator(xi, yi, axis=axis)
+ if der == 0:
+ return P(x)
+ elif _isscalar(der):
+ return P.derivative(x, der=der)
+ else:
+ return P.derivatives(x, der=np.amax(der)+1)[der]
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbf.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed52230dd1cce678e56ca4427e10bafd07e501c0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbf.py
@@ -0,0 +1,290 @@
+"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
+
+Written by John Travers, February 2007
+Based closely on Matlab code by Alex Chirokov
+Additional, large, improvements by Robert Hetland
+Some additional alterations by Travis Oliphant
+Interpolation with multi-dimensional target domain by Josua Sassen
+
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+Copyright (c) 2006-2007, Robert Hetland
+Copyright (c) 2007, John Travers
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of Robert Hetland nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import numpy as np
+
+from scipy import linalg
+from scipy.special import xlogy
+from scipy.spatial.distance import cdist, pdist, squareform
+
+__all__ = ['Rbf']
+
+
+class Rbf:
+ """
+ Rbf(*args, **kwargs)
+
+ A class for radial basis function interpolation of functions from
+ N-D scattered data to an M-D domain.
+
+ .. legacy:: class
+
+ `Rbf` is legacy code, for new usage please use `RBFInterpolator`
+ instead.
+
+ Parameters
+ ----------
+ *args : arrays
+ x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
+ and d is the array of values at the nodes
+ function : str or callable, optional
+ The radial basis function, based on the radius, r, given by the norm
+ (default is Euclidean distance); the default is 'multiquadric'::
+
+ 'multiquadric': sqrt((r/self.epsilon)**2 + 1)
+ 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
+ 'gaussian': exp(-(r/self.epsilon)**2)
+ 'linear': r
+ 'cubic': r**3
+ 'quintic': r**5
+ 'thin_plate': r**2 * log(r)
+
+ If callable, then it must take 2 arguments (self, r). The epsilon
+ parameter will be available as self.epsilon. Other keyword
+ arguments passed in will be available as well.
+
+ epsilon : float, optional
+ Adjustable constant for gaussian or multiquadrics functions
+ - defaults to approximate average distance between nodes (which is
+ a good start).
+ smooth : float, optional
+ Values greater than zero increase the smoothness of the
+ approximation. 0 is for interpolation (default), the function will
+ always go through the nodal points in this case.
+ norm : str, callable, optional
+ A function that returns the 'distance' between two points, with
+ inputs as arrays of positions (x, y, z, ...), and an output as an
+ array of distance. E.g., the default: 'euclidean', such that the result
+ is a matrix of the distances from each point in ``x1`` to each point in
+ ``x2``. For more options, see documentation of
+        `scipy.spatial.distance.cdist`.
+ mode : str, optional
+ Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
+ '1-D' the data `d` will be considered as 1-D and flattened
+ internally. When it is 'N-D' the data `d` is assumed to be an array of
+ shape (n_samples, m), where m is the dimension of the target domain.
+
+
+ Attributes
+ ----------
+ N : int
+ The number of data points (as determined by the input arrays).
+ di : ndarray
+ The 1-D array of data values at each of the data coordinates `xi`.
+ xi : ndarray
+ The 2-D array of data coordinates.
+ function : str or callable
+ The radial basis function. See description under Parameters.
+ epsilon : float
+ Parameter used by gaussian or multiquadrics functions. See Parameters.
+ smooth : float
+ Smoothing parameter. See description under Parameters.
+ norm : str or callable
+ The distance function. See description under Parameters.
+ mode : str
+ Mode of the interpolation. See description under Parameters.
+ nodes : ndarray
+ A 1-D array of node values for the interpolation.
+ A : internal property, do not use
+
+ See Also
+ --------
+ RBFInterpolator
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.interpolate import Rbf
+ >>> rng = np.random.default_rng()
+ >>> x, y, z, d = rng.random((4, 50))
+ >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
+ >>> xi = yi = zi = np.linspace(0, 1, 20)
+ >>> di = rbfi(xi, yi, zi) # interpolated values
+ >>> di.shape
+ (20,)
+
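+    As a rough sketch, vector-valued data can be interpolated with
+    ``mode='N-D'`` (the data here are illustrative):
+
+    >>> d2 = rng.random((50, 3))  # three target components per node
+    >>> rbfi2 = Rbf(x, y, z, d2, mode='N-D')
+    >>> rbfi2(xi, yi, zi).shape
+    (20, 3)
+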
+ """
+ # Available radial basis functions that can be selected as strings;
+ # they all start with _h_ (self._init_function relies on that)
+ def _h_multiquadric(self, r):
+ return np.sqrt((1.0/self.epsilon*r)**2 + 1)
+
+ def _h_inverse_multiquadric(self, r):
+ return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
+
+ def _h_gaussian(self, r):
+ return np.exp(-(1.0/self.epsilon*r)**2)
+
+ def _h_linear(self, r):
+ return r
+
+ def _h_cubic(self, r):
+ return r**3
+
+ def _h_quintic(self, r):
+ return r**5
+
+ def _h_thin_plate(self, r):
+ return xlogy(r**2, r)
+
+ # Setup self._function and do smoke test on initial r
+ def _init_function(self, r):
+ if isinstance(self.function, str):
+ self.function = self.function.lower()
+ _mapped = {'inverse': 'inverse_multiquadric',
+ 'inverse multiquadric': 'inverse_multiquadric',
+ 'thin-plate': 'thin_plate'}
+ if self.function in _mapped:
+ self.function = _mapped[self.function]
+
+ func_name = "_h_" + self.function
+ if hasattr(self, func_name):
+ self._function = getattr(self, func_name)
+ else:
+ functionlist = [x[3:] for x in dir(self)
+ if x.startswith('_h_')]
+ raise ValueError("function must be a callable or one of " +
+ ", ".join(functionlist))
+ elif callable(self.function):
+ allow_one = False
+ if hasattr(self.function, 'func_code') or \
+ hasattr(self.function, '__code__'):
+ val = self.function
+ allow_one = True
+ elif hasattr(self.function, "__call__"):
+ val = self.function.__call__.__func__
+ else:
+ raise ValueError("Cannot determine number of arguments to "
+ "function")
+
+ argcount = val.__code__.co_argcount
+ if allow_one and argcount == 1:
+ self._function = self.function
+ elif argcount == 2:
+ self._function = self.function.__get__(self, Rbf)
+ else:
+ raise ValueError("Function argument must take 1 or 2 "
+ "arguments.")
+
+ a0 = self._function(r)
+ if a0.shape != r.shape:
+ raise ValueError("Callable must take array and return array of "
+ "the same shape")
+ return a0
+
+ def __init__(self, *args, **kwargs):
+ # `args` can be a variable number of arrays; we flatten them and store
+ # them as a single 2-D array `xi` of shape (n_args-1, array_size),
+ # plus a 1-D array `di` for the values.
+ # All arrays must have the same number of elements
+ self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
+ for a in args[:-1]])
+ self.N = self.xi.shape[-1]
+
+ self.mode = kwargs.pop('mode', '1-D')
+
+ if self.mode == '1-D':
+ self.di = np.asarray(args[-1]).flatten()
+ self._target_dim = 1
+ elif self.mode == 'N-D':
+ self.di = np.asarray(args[-1])
+ self._target_dim = self.di.shape[-1]
+ else:
+ raise ValueError("Mode has to be 1-D or N-D.")
+
+ if not all([x.size == self.di.shape[0] for x in self.xi]):
+ raise ValueError("All arrays must be equal length.")
+
+ self.norm = kwargs.pop('norm', 'euclidean')
+ self.epsilon = kwargs.pop('epsilon', None)
+ if self.epsilon is None:
+            # default epsilon is "the average distance between nodes" based
+            # on a bounding hypercube: the volume per node, taken to the
+            # 1/ndim power, gives a typical node spacing
+ ximax = np.amax(self.xi, axis=1)
+ ximin = np.amin(self.xi, axis=1)
+ edges = ximax - ximin
+ edges = edges[np.nonzero(edges)]
+ self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
+
+ self.smooth = kwargs.pop('smooth', 0.0)
+ self.function = kwargs.pop('function', 'multiquadric')
+
+ # attach anything left in kwargs to self for use by any user-callable
+ # function or to save on the object returned.
+ for item, value in kwargs.items():
+ setattr(self, item, value)
+
+ # Compute weights
+ if self._target_dim > 1: # If we have more than one target dimension,
+ # we first factorize the matrix
+ self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
+ lu, piv = linalg.lu_factor(self.A)
+ for i in range(self._target_dim):
+ self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
+ else:
+ self.nodes = linalg.solve(self.A, self.di)
+
+ @property
+ def A(self):
+ # this only exists for backwards compatibility: self.A was available
+ # and, at least technically, public.
+ r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
+ return self._init_function(r) - np.eye(self.N)*self.smooth
+
+ def _call_norm(self, x1, x2):
+ return cdist(x1.T, x2.T, self.norm)
+
+ def __call__(self, *args):
+ args = [np.asarray(x) for x in args]
+ if not all([x.shape == y.shape for x in args for y in args]):
+ raise ValueError("Array lengths must be equal")
+ if self._target_dim > 1:
+ shp = args[0].shape + (self._target_dim,)
+ else:
+ shp = args[0].shape
+ xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
+ r = self._call_norm(xa, self.xi)
+ return np.dot(self._function(r), self.nodes).reshape(shp)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py
new file mode 100644
index 0000000000000000000000000000000000000000..6690e6ccf7d5499db10efffb0ef1c0139a90d2ba
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py
@@ -0,0 +1,550 @@
+"""Module for RBF interpolation."""
+import warnings
+from itertools import combinations_with_replacement
+
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy.spatial import KDTree
+from scipy.special import comb
+from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
+
+from ._rbfinterp_pythran import (_build_system,
+ _build_evaluation_coefficients,
+ _polynomial_matrix)
+
+
+__all__ = ["RBFInterpolator"]
+
+
+# These RBFs are implemented.
+_AVAILABLE = {
+ "linear",
+ "thin_plate_spline",
+ "cubic",
+ "quintic",
+ "multiquadric",
+ "inverse_multiquadric",
+ "inverse_quadratic",
+ "gaussian"
+ }
+
+
+# The shape parameter does not need to be specified when using these RBFs.
+_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
+
+
+# For RBFs that are conditionally positive definite of order m, the interpolant
+# should include polynomial terms with degree >= m - 1. Define the minimum
+# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
+# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
+# are positive definite and do not need polynomial terms.
+_NAME_TO_MIN_DEGREE = {
+ "multiquadric": 0,
+ "linear": 0,
+ "thin_plate_spline": 1,
+ "cubic": 1,
+ "quintic": 2
+ }
+
+
+def _monomial_powers(ndim, degree):
+ """Return the powers for each monomial in a polynomial.
+
+ Parameters
+ ----------
+ ndim : int
+ Number of variables in the polynomial.
+ degree : int
+ Degree of the polynomial.
+
+ Returns
+ -------
+ (nmonos, ndim) int ndarray
+ Array where each row contains the powers for each variable in a
+ monomial.
+
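+    Examples
+    --------
+    As a small illustration, for two variables and degree 1 the rows are
+    the constant, ``x`` and ``y`` monomials:
+
+    >>> _monomial_powers(2, 1)
+    array([[0, 0],
+           [1, 0],
+           [0, 1]])
+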
+ """
+ nmonos = comb(degree + ndim, ndim, exact=True)
+ out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
+ count = 0
+ for deg in range(degree + 1):
+ for mono in combinations_with_replacement(range(ndim), deg):
+ # `mono` is a tuple of variables in the current monomial with
+ # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
+ for var in mono:
+ out[count, var] += 1
+
+ count += 1
+
+ return out
+
+
+def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
+ """Build and solve the RBF interpolation system of equations.
+
+ Parameters
+ ----------
+ y : (P, N) float ndarray
+ Data point coordinates.
+ d : (P, S) float ndarray
+ Data values at `y`.
+ smoothing : (P,) float ndarray
+ Smoothing parameter for each data point.
+ kernel : str
+ Name of the RBF.
+ epsilon : float
+ Shape parameter.
+ powers : (R, N) int ndarray
+ The exponents for each monomial in the polynomial.
+
+ Returns
+ -------
+ coeffs : (P + R, S) float ndarray
+ Coefficients for each RBF and monomial.
+ shift : (N,) float ndarray
+ Domain shift used to create the polynomial matrix.
+ scale : (N,) float ndarray
+ Domain scaling used to create the polynomial matrix.
+
+ """
+ lhs, rhs, shift, scale = _build_system(
+ y, d, smoothing, kernel, epsilon, powers
+ )
+ _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
+ if info < 0:
+ raise ValueError(f"The {-info}-th argument had an illegal value.")
+ elif info > 0:
+ msg = "Singular matrix."
+ nmonos = powers.shape[0]
+ if nmonos > 0:
+ pmat = _polynomial_matrix((y - shift)/scale, powers)
+ rank = np.linalg.matrix_rank(pmat)
+ if rank < nmonos:
+ msg = (
+ "Singular matrix. The matrix of monomials evaluated at "
+ "the data point coordinates does not have full column "
+ f"rank ({rank}/{nmonos})."
+ )
+
+ raise LinAlgError(msg)
+
+ return shift, scale, coeffs
+
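+# Illustrative note: with `degree=1` in two dimensions the monomial matrix
+# has columns [1, x, y]; if the data points are all collinear it loses full
+# column rank, and `_build_and_solve_system` raises the augmented
+# "Singular matrix" message above.
+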
+
+class RBFInterpolator:
+ """Radial basis function (RBF) interpolation in N dimensions.
+
+ Parameters
+ ----------
+ y : (npoints, ndims) array_like
+ 2-D array of data point coordinates.
+ d : (npoints, ...) array_like
+ N-D array of data values at `y`. The length of `d` along the first
+ axis must be equal to the length of `y`. Unlike some interpolators, the
+ interpolation axis cannot be changed.
+ neighbors : int, optional
+ If specified, the value of the interpolant at each evaluation point
+ will be computed using only this many nearest data points. All the data
+ points are used by default.
+ smoothing : float or (npoints, ) array_like, optional
+ Smoothing parameter. The interpolant perfectly fits the data when this
+ is set to 0. For large values, the interpolant approaches a least
+ squares fit of a polynomial with the specified degree. Default is 0.
+ kernel : str, optional
+ Type of RBF. This should be one of
+
+ - 'linear' : ``-r``
+ - 'thin_plate_spline' : ``r**2 * log(r)``
+ - 'cubic' : ``r**3``
+ - 'quintic' : ``-r**5``
+ - 'multiquadric' : ``-sqrt(1 + r**2)``
+ - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
+ - 'inverse_quadratic' : ``1/(1 + r**2)``
+ - 'gaussian' : ``exp(-r**2)``
+
+ Default is 'thin_plate_spline'.
+ epsilon : float, optional
+ Shape parameter that scales the input to the RBF. If `kernel` is
+ 'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
+ 1 and can be ignored because it has the same effect as scaling the
+ smoothing parameter. Otherwise, this must be specified.
+ degree : int, optional
+ Degree of the added polynomial. For some RBFs the interpolant may not
+ be well-posed if the polynomial degree is too small. Those RBFs and
+ their corresponding minimum degrees are
+
+ - 'multiquadric' : 0
+ - 'linear' : 0
+ - 'thin_plate_spline' : 1
+ - 'cubic' : 1
+ - 'quintic' : 2
+
+ The default value is the minimum degree for `kernel` or 0 if there is
+ no minimum degree. Set this to -1 for no added polynomial.
+
+ Notes
+ -----
+ An RBF is a scalar valued function in N-dimensional space whose value at
+ :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
+ is the center of the RBF.
+
+ An RBF interpolant for the vector of data values :math:`d`, which are from
+ locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
+ plus a polynomial with a specified degree. The RBF interpolant is written
+ as
+
+ .. math::
+ f(x) = K(x, y) a + P(x) b,
+
+ where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
+ evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
+ monomials, which span polynomials with the specified degree, evaluated at
+ :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
+ linear equations
+
+ .. math::
+ (K(y, y) + \\lambda I) a + P(y) b = d
+
+ and
+
+ .. math::
+ P(y)^T a = 0,
+
+ where :math:`\\lambda` is a non-negative smoothing parameter that controls
+ how well we want to fit the data. The data are fit exactly when the
+ smoothing parameter is 0.
+
+ The above system is uniquely solvable if the following requirements are
+ met:
+
+ - :math:`P(y)` must have full column rank. :math:`P(y)` always has full
+ column rank when `degree` is -1 or 0. When `degree` is 1,
+ :math:`P(y)` has full column rank if the data point locations are not
+ all collinear (N=2), coplanar (N=3), etc.
+ - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
+ 'cubic', or 'quintic', then `degree` must not be lower than the
+ minimum value listed above.
+ - If `smoothing` is 0, then each data point location must be distinct.
+
+ When using an RBF that is not scale invariant ('multiquadric',
+ 'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
+ shape parameter must be chosen (e.g., through cross validation). Smaller
+ values for the shape parameter correspond to wider RBFs. The problem can
+ become ill-conditioned or singular when the shape parameter is too small.
+
+ The memory required to solve for the RBF interpolation coefficients
+ increases quadratically with the number of data points, which can become
+ impractical when interpolating more than about a thousand data points.
+ To overcome memory limitations for large interpolation problems, the
+ `neighbors` argument can be specified to compute an RBF interpolant for
+ each evaluation point using only the nearest data points.
+
+ .. versionadded:: 1.7.0
+
+ See Also
+ --------
+ NearestNDInterpolator
+ LinearNDInterpolator
+ CloughTocher2DInterpolator
+
+ References
+ ----------
+ .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
+ World Scientific Publishing Co.
+
+ .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
+
+ .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
+
+ .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
+
+ Examples
+ --------
+ Demonstrate interpolating scattered data to a grid in 2-D.
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import RBFInterpolator
+ >>> from scipy.stats.qmc import Halton
+
+ >>> rng = np.random.default_rng()
+ >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
+ >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
+
+ >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
+ >>> xflat = xgrid.reshape(2, -1).T
+ >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
+ >>> ygrid = yflat.reshape(50, 50)
+
+ >>> fig, ax = plt.subplots()
+ >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
+ >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
+ >>> fig.colorbar(p)
+ >>> plt.show()
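+
+    For large problems, the `neighbors` argument computes each value from
+    only the nearest data points, trading some accuracy for memory (20 here
+    is an arbitrary, illustrative choice):
+
+    >>> yflat_local = RBFInterpolator(xobs, yobs, neighbors=20)(xflat)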
+
+ """
+
+ def __init__(self, y, d,
+ neighbors=None,
+ smoothing=0.0,
+ kernel="thin_plate_spline",
+ epsilon=None,
+ degree=None):
+ y = np.asarray(y, dtype=float, order="C")
+ if y.ndim != 2:
+ raise ValueError("`y` must be a 2-dimensional array.")
+
+ ny, ndim = y.shape
+
+ d_dtype = complex if np.iscomplexobj(d) else float
+ d = np.asarray(d, dtype=d_dtype, order="C")
+ if d.shape[0] != ny:
+ raise ValueError(
+ f"Expected the first axis of `d` to have length {ny}."
+ )
+
+ d_shape = d.shape[1:]
+ d = d.reshape((ny, -1))
+ # If `d` is complex, convert it to a float array with twice as many
+ # columns. Otherwise, the LHS matrix would need to be converted to
+ # complex and take up 2x more memory than necessary.
+ d = d.view(float)
+
+ if np.isscalar(smoothing):
+ smoothing = np.full(ny, smoothing, dtype=float)
+ else:
+ smoothing = np.asarray(smoothing, dtype=float, order="C")
+ if smoothing.shape != (ny,):
+ raise ValueError(
+ "Expected `smoothing` to be a scalar or have shape "
+ f"({ny},)."
+ )
+
+ kernel = kernel.lower()
+ if kernel not in _AVAILABLE:
+ raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
+
+ if epsilon is None:
+ if kernel in _SCALE_INVARIANT:
+ epsilon = 1.0
+ else:
+ raise ValueError(
+ "`epsilon` must be specified if `kernel` is not one of "
+ f"{_SCALE_INVARIANT}."
+ )
+ else:
+ epsilon = float(epsilon)
+
+ min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
+ if degree is None:
+ degree = max(min_degree, 0)
+ else:
+ degree = int(degree)
+ if degree < -1:
+ raise ValueError("`degree` must be at least -1.")
+ elif -1 < degree < min_degree:
+            warnings.warn(
+                f"`degree` should not be below {min_degree} (except -1) "
+                f"when `kernel` is '{kernel}'. "
+                f"The interpolant may not be uniquely "
+                f"solvable, and the smoothing parameter may have an "
+                f"unintuitive effect.",
+                UserWarning, stacklevel=2
+            )
+
+ if neighbors is None:
+ nobs = ny
+ else:
+ # Make sure the number of nearest neighbors used for interpolation
+ # does not exceed the number of observations.
+ neighbors = int(min(neighbors, ny))
+ nobs = neighbors
+
+ powers = _monomial_powers(ndim, degree)
+ # The polynomial matrix must have full column rank in order for the
+ # interpolant to be well-posed, which is not possible if there are
+ # fewer observations than monomials.
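+        # For example, `degree=1` in two dimensions gives comb(3, 2) == 3
+        # monomials (1, x and y), so at least 3 observations are needed.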
+ if powers.shape[0] > nobs:
+ raise ValueError(
+ f"At least {powers.shape[0]} data points are required when "
+ f"`degree` is {degree} and the number of dimensions is {ndim}."
+ )
+
+ if neighbors is None:
+ shift, scale, coeffs = _build_and_solve_system(
+ y, d, smoothing, kernel, epsilon, powers
+ )
+
+ # Make these attributes private since they do not always exist.
+ self._shift = shift
+ self._scale = scale
+ self._coeffs = coeffs
+
+ else:
+ self._tree = KDTree(y)
+
+ self.y = y
+ self.d = d
+ self.d_shape = d_shape
+ self.d_dtype = d_dtype
+ self.neighbors = neighbors
+ self.smoothing = smoothing
+ self.kernel = kernel
+ self.epsilon = epsilon
+ self.powers = powers
+
+ def _chunk_evaluator(
+ self,
+ x,
+ y,
+ shift,
+ scale,
+ coeffs,
+ memory_budget=1000000
+ ):
+ """
+ Evaluate the interpolation while controlling memory consumption.
+ We chunk the input if we need more memory than specified.
+
+ Parameters
+ ----------
+        x : (Q, N) float ndarray
+            Array of points at which to evaluate the interpolant.
+        y : (P, N) float ndarray
+            Array of points at which the function values are known.
+        shift : (N,) float ndarray
+            Domain shift used to create the polynomial matrix.
+        scale : (N,) float ndarray
+            Domain scaling used to create the polynomial matrix.
+        coeffs : (P + R, S) float ndarray
+            Coefficients in front of the basis functions.
+        memory_budget : int
+            Total amount of memory (in units of sizeof(float)) to devote
+            to storing the array of coefficients for the interpolated
+            points. If more memory than this is needed, the input is
+            chunked.
+
+ Returns
+ -------
+ (Q, S) float ndarray
+ Interpolated array
+ """
+ nx, ndim = x.shape
+ if self.neighbors is None:
+ nnei = len(y)
+ else:
+ nnei = self.neighbors
+        # Size chunks so that each pass stores roughly ``memory_budget``
+        # floats of evaluation coefficients (R + P floats per point).
+ chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
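+        # (e.g., with the default budget of 1_000_000 floats, 100 neighbors
+        # and no polynomial terms, each chunk holds 10_001 points)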
+ if chunksize <= nx:
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
+ for i in range(0, nx, chunksize):
+ vec = _build_evaluation_coefficients(
+ x[i:i + chunksize, :],
+ y,
+ self.kernel,
+ self.epsilon,
+ self.powers,
+ shift,
+ scale)
+ out[i:i + chunksize, :] = np.dot(vec, coeffs)
+ else:
+ vec = _build_evaluation_coefficients(
+ x,
+ y,
+ self.kernel,
+ self.epsilon,
+ self.powers,
+ shift,
+ scale)
+ out = np.dot(vec, coeffs)
+ return out
+
+ def __call__(self, x):
+ """Evaluate the interpolant at `x`.
+
+ Parameters
+ ----------
+ x : (Q, N) array_like
+ Evaluation point coordinates.
+
+ Returns
+ -------
+ (Q, ...) ndarray
+ Values of the interpolant at `x`.
+
+ """
+ x = np.asarray(x, dtype=float, order="C")
+ if x.ndim != 2:
+ raise ValueError("`x` must be a 2-dimensional array.")
+
+ nx, ndim = x.shape
+ if ndim != self.y.shape[1]:
+ raise ValueError("Expected the second axis of `x` to have length "
+ f"{self.y.shape[1]}.")
+
+        # Our memory budget for storing RBF coefficients is based on how
+        # many floats we already hold in memory (but at least 1e6). The
+        # budget decides how the inputs are chunked.
+ memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
+
+ if self.neighbors is None:
+ out = self._chunk_evaluator(
+ x,
+ self.y,
+ self._shift,
+ self._scale,
+ self._coeffs,
+ memory_budget=memory_budget)
+ else:
+ # Get the indices of the k nearest observation points to each
+ # evaluation point.
+ _, yindices = self._tree.query(x, self.neighbors)
+ if self.neighbors == 1:
+ # `KDTree` squeezes the output when neighbors=1.
+ yindices = yindices[:, None]
+
+ # Multiple evaluation points may have the same neighborhood of
+ # observation points. Make the neighborhoods unique so that we only
+ # compute the interpolation coefficients once for each
+ # neighborhood.
+ yindices = np.sort(yindices, axis=1)
+ yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
+ inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
+ # `inv` tells us which neighborhood will be used by each evaluation
+ # point. Now we find which evaluation points will be using each
+ # neighborhood.
+ xindices = [[] for _ in range(len(yindices))]
+ for i, j in enumerate(inv):
+ xindices[j].append(i)
+
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
+ for xidx, yidx in zip(xindices, yindices):
+ # `yidx` are the indices of the observations in this
+ # neighborhood. `xidx` are the indices of the evaluation points
+ # that are using this neighborhood.
+ xnbr = x[xidx]
+ ynbr = self.y[yidx]
+ dnbr = self.d[yidx]
+ snbr = self.smoothing[yidx]
+ shift, scale, coeffs = _build_and_solve_system(
+ ynbr,
+ dnbr,
+ snbr,
+ self.kernel,
+ self.epsilon,
+ self.powers,
+ )
+ out[xidx] = self._chunk_evaluator(
+ xnbr,
+ ynbr,
+ shift,
+ scale,
+ coeffs,
+ memory_budget=memory_budget)
+
+ out = out.view(self.d_dtype)
+ out = out.reshape((nx, ) + self.d_shape)
+ return out
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb17bf9c8b57e8716be4fcfc6296c1ee21ebb985
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi.py
@@ -0,0 +1,766 @@
+__all__ = ['RegularGridInterpolator', 'interpn']
+
+import itertools
+import warnings
+
+import numpy as np
+
+import scipy.sparse.linalg as ssl
+
+from .interpnd import _ndim_coords_from_arrays
+from ._cubic import PchipInterpolator
+from ._rgi_cython import evaluate_linear_2d, find_indices
+from ._bsplines import make_interp_spline
+from ._fitpack2 import RectBivariateSpline
+from ._ndbspline import make_ndbspl
+
+
+def _check_points(points):
+ descending_dimensions = []
+ grid = []
+ for i, p in enumerate(points):
+ # early make points float
+ # see https://github.com/scipy/scipy/pull/17230
+ p = np.asarray(p, dtype=float)
+ if not np.all(p[1:] > p[:-1]):
+ if np.all(p[1:] < p[:-1]):
+ # input is descending, so make it ascending
+ descending_dimensions.append(i)
+ p = np.flip(p)
+ else:
+ raise ValueError(
+ "The points in dimension %d must be strictly "
+ "ascending or descending" % i)
+ # see https://github.com/scipy/scipy/issues/17716
+ p = np.ascontiguousarray(p)
+ grid.append(p)
+ return tuple(grid), tuple(descending_dimensions)
+
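+# Illustrative behaviour (hypothetical call, not part of the public API):
+# a descending axis is flipped to ascending and its index recorded so that
+# `values` can later be flipped to match:
+#
+#     >>> _check_points(([0., 1., 2.], [5., 3., 1.]))
+#     ((array([0., 1., 2.]), array([1., 3., 5.])), (1,))
+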
+
+def _check_dimensionality(points, values):
+ if len(points) > values.ndim:
+ raise ValueError("There are %d point arrays, but values has %d "
+ "dimensions" % (len(points), values.ndim))
+ for i, p in enumerate(points):
+ if not np.asarray(p).ndim == 1:
+ raise ValueError("The points in dimension %d must be "
+ "1-dimensional" % i)
+ if not values.shape[i] == len(p):
+ raise ValueError("There are %d points and %d values in "
+ "dimension %d" % (len(p), values.shape[i], i))
+
+
+class RegularGridInterpolator:
+ """
+ Interpolator on a regular or rectilinear grid in arbitrary dimensions.
+
+ The data must be defined on a rectilinear grid; that is, a rectangular
+ grid with even or uneven spacing. Linear, nearest-neighbor, spline
+ interpolations are supported. After setting up the interpolator object,
+ the interpolation method may be chosen at each evaluation.
+
+ Parameters
+ ----------
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+ The points defining the regular grid in n dimensions. The points in
+        each dimension (i.e. every element of the points tuple) must be
+ strictly ascending or descending.
+
+ values : array_like, shape (m1, ..., mn, ...)
+ The data on the regular grid in n dimensions. Complex data is
+ accepted.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated with ``method="pchip"`` and will raise an
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
+ works with real values. If you are trying to use the real components of
+ the passed array, use ``np.real`` on ``values``.
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic" and "pchip". This
+ parameter will become the default for the object's ``__call__``
+ method. Default is "linear".
+
+ bounds_error : bool, optional
+ If True, when interpolated values are requested outside of the
+ domain of the input data, a ValueError is raised.
+ If False, then `fill_value` is used.
+ Default is True.
+
+ fill_value : float or None, optional
+ The value to use for points outside of the interpolation domain.
+ If None, values outside the domain are extrapolated.
+ Default is ``np.nan``.
+
+ solver : callable, optional
+ Only used for methods "slinear", "cubic" and "quintic".
+ Sparse linear algebra solver for construction of the NdBSpline instance.
+ Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
+
+ .. versionadded:: 1.13
+
+    solver_args : dict, optional
+ Additional arguments to pass to `solver`, if any.
+
+ .. versionadded:: 1.13
+
+ Methods
+ -------
+ __call__
+
+ Attributes
+ ----------
+ grid : tuple of ndarrays
+ The points defining the regular grid in n dimensions.
+ This tuple defines the full grid via
+ ``np.meshgrid(*grid, indexing='ij')``
+ values : ndarray
+ Data values at the grid.
+ method : str
+ Interpolation method.
+ fill_value : float or ``None``
+ Use this value for out-of-bounds arguments to `__call__`.
+ bounds_error : bool
+        If ``True``, out-of-bounds arguments raise a ``ValueError``.
+
+ Notes
+ -----
+ Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
+ avoids expensive triangulation of the input data by taking advantage of the
+ regular grid structure.
+
+ In other words, this class assumes that the data is defined on a
+ *rectilinear* grid.
+
+ .. versionadded:: 0.14
+
+ The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
+    tensor-product spline interpolators, where `k` is the spline degree.
+ If any dimension has fewer points than `k` + 1, an error will be raised.
+
+ .. versionadded:: 1.9
+
+ If the input data is such that dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolating.
+
+ **Choosing a solver for spline methods**
+
+    The spline methods "slinear", "cubic" and "quintic" involve solving a
+ large sparse linear system at instantiation time. Depending on data,
+ the default solver may or may not be adequate. When it is not, you may
+ need to experiment with an optional `solver` argument, where you may
+ choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
+ iterative solvers from `scipy.sparse.linalg`. You may need to supply
+ additional parameters via the optional `solver_args` parameter (for instance,
+ you may supply the starting value or target tolerance). See the
+ `scipy.sparse.linalg` documentation for the full list of available options.
+
+ Alternatively, you may instead use the legacy methods, "slinear_legacy",
+ "cubic_legacy" and "quintic_legacy". These methods allow faster construction
+ but evaluations will be much slower.
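+
+    For instance (an illustrative sketch), the direct sparse solver can be
+    selected instead of the default iterative one:
+
+    >>> import numpy as np
+    >>> import scipy.sparse.linalg as ssl
+    >>> from scipy.interpolate import RegularGridInterpolator
+    >>> x = y = np.linspace(0, 1, 10)
+    >>> vals = np.sin(2*np.pi*x)[:, None] * np.cos(2*np.pi*y)[None, :]
+    >>> interp = RegularGridInterpolator((x, y), vals, method="cubic",
+    ...                                  solver=ssl.spsolve)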
+
+ Examples
+ --------
+ **Evaluate a function on the points of a 3-D grid**
+
+ As a first example, we evaluate a simple example function on the points of
+ a 3-D grid:
+
+ >>> from scipy.interpolate import RegularGridInterpolator
+ >>> import numpy as np
+ >>> def f(x, y, z):
+ ... return 2 * x**3 + 3 * y**2 - z
+ >>> x = np.linspace(1, 4, 11)
+ >>> y = np.linspace(4, 7, 22)
+ >>> z = np.linspace(7, 9, 33)
+    >>> xg, yg, zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
+ >>> data = f(xg, yg, zg)
+
+ ``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
+ Next, define an interpolating function from this data:
+
+ >>> interp = RegularGridInterpolator((x, y, z), data)
+
+ Evaluate the interpolating function at the two points
+ ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
+
+ >>> pts = np.array([[2.1, 6.2, 8.3],
+ ... [3.3, 5.2, 7.1]])
+ >>> interp(pts)
+ array([ 125.80469388, 146.30069388])
+
+ which is indeed a close approximation to
+
+ >>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
+ (125.54200000000002, 145.894)
+
+ **Interpolate and extrapolate a 2D dataset**
+
+ As a second example, we interpolate and extrapolate a 2D data set:
+
+ >>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
+ >>> def ff(x, y):
+ ... return x**2 + y**2
+
+ >>> xg, yg = np.meshgrid(x, y, indexing='ij')
+ >>> data = ff(xg, yg)
+ >>> interp = RegularGridInterpolator((x, y), data,
+ ... bounds_error=False, fill_value=None)
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax = fig.add_subplot(projection='3d')
+ >>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
+ ... s=60, c='k', label='data')
+
+ Evaluate and plot the interpolator on a finer grid
+
+ >>> xx = np.linspace(-4, 9, 31)
+ >>> yy = np.linspace(-4, 9, 31)
+ >>> X, Y = np.meshgrid(xx, yy, indexing='ij')
+
+ >>> # interpolator
+ >>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
+ ... alpha=0.4, color='m', label='linear interp')
+
+ >>> # ground truth
+ >>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
+ ... alpha=0.4, label='ground truth')
+ >>> plt.legend()
+ >>> plt.show()
+
+ Other examples are given
+    :ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
+
+ See Also
+ --------
+ NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
+ data in N dimensions
+
+ LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
+ in N dimensions
+
+ interpn : a convenience function which wraps `RegularGridInterpolator`
+
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+ (suitable for e.g., N-D image resampling)
+
+ References
+ ----------
+ .. [1] Python package *regulargrid* by Johannes Buchner, see
+ https://pypi.python.org/pypi/regulargrid/
+ .. [2] Wikipedia, "Trilinear interpolation",
+ https://en.wikipedia.org/wiki/Trilinear_interpolation
+ .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
+ and multilinear table interpolation in many dimensions." MATH.
+ COMPUT. 50.181 (1988): 189-196.
+ https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
+ :doi:`10.1090/S0025-5718-1988-0917826-0`
+
+ """
+ # this class is based on code originally programmed by Johannes Buchner,
+ # see https://github.com/JohannesBuchner/regulargrid
+
+    _SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, "pchip": 3,
+                          "slinear_legacy": 1, "cubic_legacy": 3,
+                          "quintic_legacy": 5}
+ _SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
+ "quintic_legacy", "pchip"}
+ _SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
+ _SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
+ _ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
+
+ def __init__(self, points, values, method="linear", bounds_error=True,
+ fill_value=np.nan, *, solver=None, solver_args=None):
+ if method not in self._ALL_METHODS:
+ raise ValueError("Method '%s' is not defined" % method)
+ elif method in self._SPLINE_METHODS:
+ self._validate_grid_dimensions(points, method)
+ self.method = method
+ self.bounds_error = bounds_error
+ self.grid, self._descending_dimensions = _check_points(points)
+ self.values = self._check_values(values)
+ self._check_dimensionality(self.grid, self.values)
+ self.fill_value = self._check_fill_value(self.values, fill_value)
+ if self._descending_dimensions:
+ self.values = np.flip(values, axis=self._descending_dimensions)
+ if self.method == "pchip" and np.iscomplexobj(self.values):
+ msg = ("`PchipInterpolator` only works with real values. Passing "
+ "complex-dtyped `values` with `method='pchip'` is deprecated "
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
+ "use the real components of the passed array, use `np.real` on "
+ "the array before passing to `RegularGridInterpolator`.")
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ if method in self._SPLINE_METHODS_ndbspl:
+ if solver_args is None:
+ solver_args = {}
+ self._spline = self._construct_spline(method, solver, **solver_args)
+ else:
+ if solver is not None or solver_args:
+ raise ValueError(
+                    f"{method =} does not accept the 'solver' argument. "
+                    f"Got {solver = } with arguments {solver_args}."
+ )
+
+ def _construct_spline(self, method, solver=None, **solver_args):
+ if solver is None:
+ solver = ssl.gcrotmk
+ spl = make_ndbspl(
+ self.grid, self.values, self._SPLINE_DEGREE_MAP[method],
+ solver=solver, **solver_args
+ )
+ return spl
+
+ def _check_dimensionality(self, grid, values):
+ _check_dimensionality(grid, values)
+
+ def _check_points(self, points):
+ return _check_points(points)
+
+ def _check_values(self, values):
+ if not hasattr(values, 'ndim'):
+ # allow reasonable duck-typed values
+ values = np.asarray(values)
+
+ if hasattr(values, 'dtype') and hasattr(values, 'astype'):
+ if not np.issubdtype(values.dtype, np.inexact):
+ values = values.astype(float)
+
+ return values
+
+ def _check_fill_value(self, values, fill_value):
+ if fill_value is not None:
+ fill_value_dtype = np.asarray(fill_value).dtype
+ if (hasattr(values, 'dtype') and not
+ np.can_cast(fill_value_dtype, values.dtype,
+ casting='same_kind')):
+ raise ValueError("fill_value must be either 'None' or "
+ "of a type compatible with values")
+ return fill_value
+
+ def __call__(self, xi, method=None, *, nu=None):
+ """
+ Interpolation at coordinates.
+
+ Parameters
+ ----------
+ xi : ndarray of shape (..., ndim)
+ The coordinates to evaluate the interpolator at.
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic" and "pchip". Default is
+ the method chosen when the interpolator was created.
+
+ nu : sequence of ints, length ndim, optional
+ If not None, the orders of the derivatives to evaluate.
+ Each entry must be non-negative.
+ Only allowed for methods "slinear", "cubic" and "quintic".
+
+ .. versionadded:: 1.13
+
+ Returns
+ -------
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+ Interpolated values at `xi`. See notes for behaviour when
+ ``xi.ndim == 1``.
+
+ Notes
+ -----
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
+ the 0 position of the returned array, values_x, so its shape is
+ instead ``(1,) + values.shape[ndim:]``.
+
+ Examples
+ --------
+ Here we define a nearest-neighbor interpolator of a simple function
+
+ >>> import numpy as np
+ >>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
+ >>> def f(x, y):
+ ... return x**2 + y**2
+ >>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
+ >>> from scipy.interpolate import RegularGridInterpolator
+ >>> interp = RegularGridInterpolator((x, y), data, method='nearest')
+
+ By construction, the interpolator uses the nearest-neighbor
+ interpolation
+
+ >>> interp([[1.5, 1.3], [0.3, 4.5]])
+ array([2., 9.])
+
+ We can however evaluate the linear interpolant by overriding the
+ `method` parameter
+
+ >>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
+ array([ 4.7, 24.3])
+ """
+ method = self.method if method is None else method
+ is_method_changed = self.method != method
+ if method not in self._ALL_METHODS:
+ raise ValueError("Method '%s' is not defined" % method)
+ if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
+ self._spline = self._construct_spline(method)
+
+ if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
+ raise ValueError(
+ f"Can only compute derivatives for methods "
+ f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
+ )
+
+ xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
+
+ if method == "linear":
+ indices, norm_distances = self._find_indices(xi.T)
+ if (ndim == 2 and hasattr(self.values, 'dtype') and
+ self.values.ndim == 2 and self.values.flags.writeable and
+ self.values.dtype in (np.float64, np.complex128) and
+ self.values.dtype.byteorder == '='):
+                # fast path; until Cython supports const fused types it
+                # cannot handle non-writeable values
+ out = np.empty(indices.shape[1], dtype=self.values.dtype)
+ result = evaluate_linear_2d(self.values,
+ indices,
+ norm_distances,
+ self.grid,
+ out)
+ else:
+ result = self._evaluate_linear(indices, norm_distances)
+ elif method == "nearest":
+ indices, norm_distances = self._find_indices(xi.T)
+ result = self._evaluate_nearest(indices, norm_distances)
+ elif method in self._SPLINE_METHODS:
+ if is_method_changed:
+ self._validate_grid_dimensions(self.grid, method)
+ if method in self._SPLINE_METHODS_recursive:
+ result = self._evaluate_spline(xi, method)
+ else:
+ result = self._spline(xi, nu=nu)
+
+ if not self.bounds_error and self.fill_value is not None:
+ result[out_of_bounds] = self.fill_value
+
+ # f(nan) = nan, if any
+ if np.any(nans):
+ result[nans] = np.nan
+ return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
+
+ def _prepare_xi(self, xi):
+ ndim = len(self.grid)
+ xi = _ndim_coords_from_arrays(xi, ndim=ndim)
+ if xi.shape[-1] != len(self.grid):
+ raise ValueError("The requested sample points xi have dimension "
+ f"{xi.shape[-1]} but this "
+ f"RegularGridInterpolator has dimension {ndim}")
+
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi_shape[-1])
+ xi = np.asarray(xi, dtype=float)
+
+ # find nans in input
+ nans = np.any(np.isnan(xi), axis=-1)
+
+ if self.bounds_error:
+ for i, p in enumerate(xi.T):
+ if not np.logical_and(np.all(self.grid[i][0] <= p),
+ np.all(p <= self.grid[i][-1])):
+ raise ValueError("One of the requested xi is out of bounds "
+ "in dimension %d" % i)
+ out_of_bounds = None
+ else:
+ out_of_bounds = self._find_out_of_bounds(xi.T)
+
+ return xi, xi_shape, ndim, nans, out_of_bounds
+
+ def _evaluate_linear(self, indices, norm_distances):
+ # slice for broadcasting over trailing dimensions in self.values
+ vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
+
+ # Compute shifting up front before zipping everything together
+ shift_norm_distances = [1 - yi for yi in norm_distances]
+ shift_indices = [i + 1 for i in indices]
+
+ # The formula for linear interpolation in 2d takes the form:
+ # values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
+ # self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
+ # self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
+ # self.values[(i0 + 1, i1 + 1)] * y0 * y1
+ # We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
+ zipped1 = zip(indices, shift_norm_distances)
+ zipped2 = zip(shift_indices, norm_distances)
+
+ # Take all products of zipped1 and zipped2 and iterate over them
+ # to get the terms in the above formula. This corresponds to iterating
+ # over the vertices of a hypercube.
+ hypercube = itertools.product(*zip(zipped1, zipped2))
+ value = np.array([0.])
+ for h in hypercube:
+ edge_indices, weights = zip(*h)
+ weight = np.array([1.])
+ for w in weights:
+ weight = weight * w
+ term = np.asarray(self.values[edge_indices]) * weight[vslice]
+ value = value + term # cannot use += because broadcasting
+ return value
+
+ def _evaluate_nearest(self, indices, norm_distances):
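+        # Ties at the midpoint (yi == 0.5) resolve to the lower grid index.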
+ idx_res = [np.where(yi <= .5, i, i + 1)
+ for i, yi in zip(indices, norm_distances)]
+ return self.values[tuple(idx_res)]
+
+ def _validate_grid_dimensions(self, points, method):
+ k = self._SPLINE_DEGREE_MAP[method]
+ for i, point in enumerate(points):
+ ndim = len(np.atleast_1d(point))
+ if ndim <= k:
+ raise ValueError(f"There are {ndim} points in dimension {i},"
+ f" but method {method} requires at least "
+ f" {k+1} points per dimension.")
+
+ def _evaluate_spline(self, xi, method):
+        # Ensure xi is a 2-D array of points to evaluate: `m` is the number
+        # of points and `n` the number of interpolation dimensions
+        # (``n == len(self.grid)``).
+ if xi.ndim == 1:
+ xi = xi.reshape((1, xi.size))
+ m, n = xi.shape
+
+ # Reorder the axes: n-dimensional process iterates over the
+ # interpolation axes from the last axis downwards: E.g. for a 4D grid
+ # the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
+ # the 0th axis of its argument array (for 1D routine it's its ``y``
+ # array). Thus permute the interpolation axes of `values` *and keep
+ # trailing dimensions trailing*.
+ axes = tuple(range(self.values.ndim))
+ axx = axes[:n][::-1] + axes[n:]
+ values = self.values.transpose(axx)
+
+ if method == 'pchip':
+ _eval_func = self._do_pchip
+ else:
+ _eval_func = self._do_spline_fit
+ k = self._SPLINE_DEGREE_MAP[method]
+
+ # Non-stationary procedure: difficult to vectorize this part entirely
+ # into numpy-level operations. Unfortunately this requires explicit
+ # looping over each point in xi.
+
+        # We can at least vectorize the first pass across all points in the
+ # last variable of xi.
+ last_dim = n - 1
+ first_values = _eval_func(self.grid[last_dim],
+ values,
+ xi[:, last_dim],
+ k)
+
+ # the rest of the dimensions have to be on a per point-in-xi basis
+ shape = (m, *self.values.shape[n:])
+ result = np.empty(shape, dtype=self.values.dtype)
+ for j in range(m):
+ # Main process: Apply 1D interpolate in each dimension
+ # sequentially, starting with the last dimension.
+ # These are then "folded" into the next dimension in-place.
+ folded_values = first_values[j, ...]
+ for i in range(last_dim-1, -1, -1):
+ # Interpolate for each 1D from the last dimensions.
+ # This collapses each 1D sequence into a scalar.
+ folded_values = _eval_func(self.grid[i],
+ folded_values,
+ xi[j, i],
+ k)
+ result[j, ...] = folded_values
+
+ return result
+
+ @staticmethod
+ def _do_spline_fit(x, y, pt, k):
+ local_interp = make_interp_spline(x, y, k=k, axis=0)
+ values = local_interp(pt)
+ return values
+
+ @staticmethod
+ def _do_pchip(x, y, pt, k):
+ local_interp = PchipInterpolator(x, y, axis=0)
+ values = local_interp(pt)
+ return values
+
+ def _find_indices(self, xi):
+ return find_indices(self.grid, xi)
+
+ def _find_out_of_bounds(self, xi):
+ # check for out of bounds xi
+ out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
+ # iterate through dimensions
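+        # (`+=` on a boolean array accumulates a logical OR)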
+ for x, grid in zip(xi, self.grid):
+ out_of_bounds += x < grid[0]
+ out_of_bounds += x > grid[-1]
+ return out_of_bounds
+
+
+def interpn(points, values, xi, method="linear", bounds_error=True,
+ fill_value=np.nan):
+ """
+ Multidimensional interpolation on regular or rectilinear grids.
+
+ Strictly speaking, not all regular grids are supported - this function
+ works on *rectilinear* grids, that is, a rectangular grid with even or
+ uneven spacing.
+
+ Parameters
+ ----------
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+ The points defining the regular grid in n dimensions. The points in
+        each dimension (i.e. every element of the points tuple) must be
+ strictly ascending or descending.
+
+ values : array_like, shape (m1, ..., mn, ...)
+ The data on the regular grid in n dimensions. Complex data is
+ accepted.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated with ``method="pchip"`` and will raise an
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
+ works with real values. If you are trying to use the real components of
+ the passed array, use ``np.real`` on ``values``.
+
+ xi : ndarray of shape (..., ndim)
+ The coordinates to sample the gridded data at
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
+ "splinef2d" is only supported for 2-dimensional data.
+
+ bounds_error : bool, optional
+ If True, when interpolated values are requested outside of the
+ domain of the input data, a ValueError is raised.
+ If False, then `fill_value` is used.
+
+ fill_value : number, optional
+ If provided, the value to use for points outside of the
+ interpolation domain. If None, values outside
+ the domain are extrapolated. Extrapolation is not supported by method
+ "splinef2d".
+
+ Returns
+ -------
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+ Interpolated values at `xi`. See notes for behaviour when
+ ``xi.ndim == 1``.
+
+ See Also
+ --------
+ NearestNDInterpolator : Nearest neighbor interpolation on unstructured
+ data in N dimensions
+ LinearNDInterpolator : Piecewise linear interpolant on unstructured data
+ in N dimensions
+ RegularGridInterpolator : interpolation on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+ RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+ (suitable for e.g., N-D image resampling)
+
+ Notes
+ -----
+
+ .. versionadded:: 0.14
+
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
+ the 0 position of the returned array, values_x, so its shape is
+ instead ``(1,) + values.shape[ndim:]``.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolation.
+
+ Examples
+ --------
+ Evaluate a simple example function on the points of a regular 3-D grid:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import interpn
+ >>> def value_func_3d(x, y, z):
+ ... return 2 * x + 3 * y - z
+ >>> x = np.linspace(0, 4, 5)
+ >>> y = np.linspace(0, 5, 6)
+ >>> z = np.linspace(0, 6, 7)
+ >>> points = (x, y, z)
+ >>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
+
+ Evaluate the interpolating function at a point
+
+ >>> point = np.array([2.21, 3.12, 1.15])
+ >>> print(interpn(points, values, point))
+ [12.63]
+
+ """
+ # sanity check 'method' kwarg
+ if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
+ "splinef2d", "slinear",
+ "slinear_legacy", "cubic_legacy", "quintic_legacy"]:
+ raise ValueError("interpn only understands the methods 'linear', "
+ "'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
+ f"and 'splinef2d'. You provided {method}.")
+
+ if not hasattr(values, 'ndim'):
+ values = np.asarray(values)
+
+ ndim = values.ndim
+ if ndim > 2 and method == "splinef2d":
+ raise ValueError("The method splinef2d can only be used for "
+ "2-dimensional input data")
+ if not bounds_error and fill_value is None and method == "splinef2d":
+ raise ValueError("The method splinef2d does not support extrapolation.")
+
+ # sanity check consistency of input dimensions
+ if len(points) > ndim:
+ raise ValueError("There are %d point arrays, but values has %d "
+ "dimensions" % (len(points), ndim))
+ if len(points) != ndim and method == 'splinef2d':
+ raise ValueError("The method splinef2d can only be used for "
+ "scalar data with one point per coordinate")
+
+ grid, descending_dimensions = _check_points(points)
+ _check_dimensionality(grid, values)
+
+ # sanity check requested xi
+ xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
+ if xi.shape[-1] != len(grid):
+ raise ValueError("The requested sample points xi have dimension "
+ "%d, but this RegularGridInterpolator has "
+ "dimension %d" % (xi.shape[-1], len(grid)))
+
+ if bounds_error:
+ for i, p in enumerate(xi.T):
+ if not np.logical_and(np.all(grid[i][0] <= p),
+ np.all(p <= grid[i][-1])):
+ raise ValueError("One of the requested xi is out of bounds "
+ "in dimension %d" % i)
+
+ # perform interpolation
+ if method in RegularGridInterpolator._ALL_METHODS:
+ interp = RegularGridInterpolator(points, values, method=method,
+ bounds_error=bounds_error,
+ fill_value=fill_value)
+ return interp(xi)
+ elif method == "splinef2d":
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi.shape[-1])
+
+ # RectBivariateSpline doesn't support fill_value; we need to wrap here
+ idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
+ grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
+ axis=0)
+ result = np.empty_like(xi[:, 0])
+
+ # make a copy of values for RectBivariateSpline
+ interp = RectBivariateSpline(points[0], points[1], values[:])
+ result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
+ result[np.logical_not(idx_valid)] = fill_value
+
+ return result.reshape(xi_shape[:-1])
+ else:
+ raise ValueError(f"unknown {method = }")
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0afeb4d77b43683326c00eeaf689e7f65f4a870d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..df9e100e29dad7228ffb5e5c1414d1797a9eb367
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/fitpack.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/fitpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..68a6a240961018cac8e59419245ee6791cba7a67
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/fitpack.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'BSpline',
+ 'bisplev',
+ 'bisplrep',
+ 'dblint',
+ 'insert',
+ 'spalde',
+ 'splantider',
+ 'splder',
+ 'splev',
+ 'splint',
+ 'splprep',
+ 'splrep',
+ 'sproot',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="fitpack",
+ private_modules=["_fitpack_py"], all=__all__,
+ attribute=name)
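+
+
+# Illustrative behaviour (a sketch; the exact warning text is not
+# guaranteed): accessing an attribute through this legacy namespace emits a
+# DeprecationWarning and forwards to the `scipy.interpolate` namespace:
+#
+#     >>> import warnings
+#     >>> from scipy.interpolate import fitpack
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     _ = fitpack.splev
+#     >>> issubclass(w[-1].category, DeprecationWarning)
+#     True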
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2b1694d244d00b6aea9784fc0ad384a793d57d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'CloughTocher2DInterpolator',
+ 'LinearNDInterpolator',
+ 'NDInterpolatorBase',
+ 'NearestNDInterpolator',
+ 'cKDTree',
+ 'griddata',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="ndgriddata",
+ private_modules=["_ndgriddata"], all=__all__,
+ attribute=name)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/rbf.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ae1facd687108fcba124e43f71eda01f45a48e3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/rbf.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'Rbf',
+ 'cdist',
+ 'linalg',
+ 'pdist',
+ 'squareform',
+ 'xlogy',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="rbf",
+ private_modules=["_rbf"], all=__all__,
+ attribute=name)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78a6247ab409a51294fdf1fdd7a44adc41f4f2ed
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9d51cbce00870b73c71814d88b7e78eebb2a631
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18e67bde8cfc4dbfb8dc668b768990c489114599
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a6c6afc271ce1af26ebd28bbc1ee1c8cc175fda
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d79c4225f3983a16f78a819cb0b0185d3689497a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acd7995c5f00c0183b4e731fbe672e606d93d404
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..896ff1c46d7c1f7d194a7ff79bd93c1555c60cb2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..011d08dd5a143c488a6bffd8ee623c91b3cbc461
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bc17cbc452292ed7fb650b7560e3d0f3142b67e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8bf69897c89f689d2239ded0b72ddc1eb145e404
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c95ff49cb6f9fef2804db88cd952818fa8548a93
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96ead6be0eaf467c0713b74ec7a55f1e1a5ec399
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..253d9f765460fa0ade598a7755535fdf1ca26ad2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a75f4d30743aee33676ee11b7ff9da0628fe340
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py
@@ -0,0 +1,2621 @@
+import os
+import operator
+import itertools
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (
+ BSpline, BPoly, PPoly, make_interp_spline, make_lsq_spline, _bspl,
+ splev, splrep, splprep, splder, splantider, sproot, splint, insert,
+ CubicSpline, NdBSpline, make_smoothing_spline, RegularGridInterpolator,
+)
+import scipy.linalg as sl
+import scipy.sparse.linalg as ssl
+
+from scipy.interpolate._bsplines import (_not_a_knot, _augknt,
+ _woodbury_algorithm, _periodic_knots,
+ _make_interp_per_full_matr)
+import scipy.interpolate._fitpack_impl as _impl
+from scipy._lib._util import AxisError
+
+# XXX: move to the interpolate namespace
+from scipy.interpolate._ndbspline import make_ndbspl
+
+from scipy.interpolate import dfitpack
+from scipy.interpolate import _bsplines as _b
+
+
+class TestBSpline:
+
+ def test_ctor(self):
+ # knots should be an ordered 1-D array of finite real numbers
+ assert_raises((TypeError, ValueError), BSpline,
+ **dict(t=[1, 1.j], c=[1.], k=0))
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
+
+ # for n+k+1 knots and degree k need at least n coefficients
+ assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
+ assert_raises(ValueError, BSpline,
+ **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
+
+ # non-integer orders
+ assert_raises(TypeError, BSpline,
+ **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
+ assert_raises(TypeError, BSpline,
+ **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
+
+ # basic interval cannot have measure zero (here: [1..1])
+ assert_raises(ValueError, BSpline,
+ **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
+
+ # tck vs self.tck
+ n, k = 11, 3
+ t = np.arange(n+k+1)
+ c = np.random.random(n)
+ b = BSpline(t, c, k)
+
+ assert_allclose(t, b.t)
+ assert_allclose(c, b.c)
+ assert_equal(k, b.k)
+
+ def test_tck(self):
+ b = _make_random_spline()
+ tck = b.tck
+
+ assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
+ assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
+ assert_equal(b.k, tck[2])
+
+ # b.tck is read-only
+ with pytest.raises(AttributeError):
+ b.tck = 'foo'
+
+ def test_degree_0(self):
+ xx = np.linspace(0, 1, 10)
+
+ b = BSpline(t=[0, 1], c=[3.], k=0)
+ assert_allclose(b(xx), 3)
+
+ b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
+ assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
+
+ def test_degree_1(self):
+ t = [0, 1, 2, 3, 4]
+ c = [1, 2, 3]
+ k = 1
+ b = BSpline(t, c, k)
+
+ x = np.linspace(1, 3, 50)
+ assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
+ b(x), atol=1e-14)
+ assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
+
+ def test_bernstein(self):
+ # a special knot vector: Bernstein polynomials
+ k = 3
+ t = np.asarray([0]*(k+1) + [1]*(k+1))
+ c = np.asarray([1., 2., 3., 4.])
+ bp = BPoly(c.reshape(-1, 1), [0, 1])
+ bspl = BSpline(t, c, k)
+
+ xx = np.linspace(-1., 2., 10)
+ assert_allclose(bp(xx, extrapolate=True),
+ bspl(xx, extrapolate=True), atol=1e-14)
+ assert_allclose(splev(xx, (t, c, k)),
+ bspl(xx), atol=1e-14)
+
+ def test_rndm_naive_eval(self):
+ # test random coefficient spline *on the base interval*,
+ # t[k] <= x < t[-k-1]
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 50)
+ y_b = b(xx)
+
+ y_n = [_naive_eval(x, t, c, k) for x in xx]
+ assert_allclose(y_b, y_n, atol=1e-14)
+
+ y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
+ assert_allclose(y_b, y_n2, atol=1e-14)
+
+ def test_rndm_splev(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 50)
+ assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+
+ def test_rndm_splrep(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20)
+
+ tck = splrep(x, y)
+ b = BSpline(*tck)
+
+ t, k = b.t, b.k
+ xx = np.linspace(t[k], t[-k-1], 80)
+ assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
+
+ def test_rndm_unity(self):
+ b = _make_random_spline()
+ b.c = np.ones_like(b.c)
+ xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
+ assert_allclose(b(xx), 1.)
+
+ def test_vectorization(self):
+ n, k = 22, 3
+ t = np.sort(np.random.random(n))
+ c = np.random.random(size=(n, 6, 7))
+ b = BSpline(t, c, k)
+ tm, tp = t[k], t[-k-1]
+ xx = tm + (tp - tm) * np.random.random((3, 4, 5))
+ assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
+
+ def test_len_c(self):
+        # for n+k+1 knots, only the first n coefficients are used;
+        # this is consistent with FITPACK
+ n, k = 33, 3
+ t = np.sort(np.random.random(n+k+1))
+ c = np.random.random(n)
+
+ # pad coefficients with random garbage
+ c_pad = np.r_[c, np.random.random(k+1)]
+
+ b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
+
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
+ assert_allclose(b(xx), b_pad(xx), atol=1e-14)
+ assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+ assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
+
+ def test_endpoints(self):
+ # base interval is closed
+ b = _make_random_spline()
+ t, _, k = b.tck
+ tm, tp = t[k], t[-k-1]
+ for extrap in (True, False):
+ assert_allclose(b([tm, tp], extrap),
+ b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
+
+ def test_continuity(self):
+ # assert continuity at internal knots
+ b = _make_random_spline()
+ t, _, k = b.tck
+ assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
+ atol=1e-9)
+
+ def test_extrap(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
+ mask = (t[k] < xx) & (xx < t[-k-1])
+
+ # extrap has no effect within the base interval
+ assert_allclose(b(xx[mask], extrapolate=True),
+ b(xx[mask], extrapolate=False))
+
+ # extrapolated values agree with FITPACK
+ assert_allclose(b(xx, extrapolate=True),
+ splev(xx, (t, c, k), ext=0))
+
+ def test_default_extrap(self):
+ # BSpline defaults to extrapolate=True
+ b = _make_random_spline()
+ t, _, k = b.tck
+ xx = [t[0] - 1, t[-1] + 1]
+ yy = b(xx)
+ assert_(not np.all(np.isnan(yy)))
+
+ def test_periodic_extrap(self):
+ np.random.seed(1234)
+ t = np.sort(np.random.random(8))
+ c = np.random.random(4)
+ k = 3
+ b = BSpline(t, c, k, extrapolate='periodic')
+ n = t.size - (k + 1)
+
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[k] - dt, t[n] + dt, 50)
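+        # fold the evaluation points back into the base interval [t[k], t[n]]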
+ xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+ assert_allclose(b(xx), splev(xy, (t, c, k)))
+
+ # Direct check
+ xx = [-1, 0, 0.5, 1]
+ xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+ assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))
+
+ def test_ppoly(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ pp = PPoly.from_spline((t, c, k))
+
+ xx = np.linspace(t[k], t[-k], 100)
+ assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
+
+ def test_derivative_rndm(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[0], t[-1], 50)
+ xx = np.r_[xx, t]
+
+ for der in range(1, k+1):
+ yd = splev(xx, (t, c, k), der=der)
+ assert_allclose(yd, b(xx, nu=der), atol=1e-14)
+
+ # higher derivatives all vanish
+ assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
+
+ def test_derivative_jumps(self):
+ # example from de Boor, Chap IX, example (24)
+ # NB: knots augmented & corresp coefs are zeroed out
+ # in agreement with the convention (29)
+ k = 2
+ t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
+ np.random.seed(1234)
+ c = np.r_[0, 0, np.random.random(5), 0, 0]
+ b = BSpline(t, c, k)
+
+ # b is continuous at x != 6 (triple knot)
+ x = np.asarray([1, 3, 4, 6])
+ assert_allclose(b(x[x != 6] - 1e-10),
+ b(x[x != 6] + 1e-10))
+ assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
+
+ # 1st derivative jumps at double knots, 1 & 6:
+ x0 = np.asarray([3, 4])
+ assert_allclose(b(x0 - 1e-10, nu=1),
+ b(x0 + 1e-10, nu=1))
+ x1 = np.asarray([1, 6])
+ assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
+ b(x1 + 1e-10, nu=1))))
+
+ # 2nd derivative is not guaranteed to be continuous either
+ assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
+ b(x + 1e-10, nu=2))))
+
+ def test_basis_element_quadratic(self):
+ xx = np.linspace(-1, 4, 20)
+ b = BSpline.basis_element(t=[0, 1, 2, 3])
+ assert_allclose(b(xx),
+ splev(xx, (b.t, b.c, b.k)), atol=1e-14)
+ assert_allclose(b(xx),
+ B_0123(xx), atol=1e-14)
+
+ b = BSpline.basis_element(t=[0, 1, 1, 2])
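+        # a double knot at x = 1: the element is continuous there, but its
+        # first derivative jumps (C^0 rather than C^1)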
+ xx = np.linspace(0, 2, 10)
+ assert_allclose(b(xx),
+ np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
+
+ def test_basis_element_rndm(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
+
+ def test_cmplx(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ cc = c * (1. + 3.j)
+
+ b = BSpline(t, cc, k)
+ b_re = BSpline(t, b.c.real, k)
+ b_im = BSpline(t, b.c.imag, k)
+
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
+ assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
+
+ def test_nan(self):
+ # nan in, nan out.
+ b = BSpline.basis_element([0, 1, 1, 2])
+ assert_(np.isnan(b(np.nan)))
+
+ def test_derivative_method(self):
+ b = _make_random_spline(k=5)
+ t, c, k = b.tck
+ b0 = BSpline(t, c, k)
+ xx = np.linspace(t[k], t[-k-1], 20)
+ for j in range(1, k):
+ b = b.derivative()
+ assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
+
+ def test_antiderivative_method(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b.antiderivative().derivative()(xx),
+ b(xx), atol=1e-14, rtol=1e-14)
+
+ # repeat with N-D array for c
+ c = np.c_[c, c, c]
+ c = np.dstack((c, c))
+ b = BSpline(t, c, k)
+ assert_allclose(b.antiderivative().derivative()(xx),
+ b(xx), atol=1e-14, rtol=1e-14)
+
+ def test_integral(self):
+ b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x
+ assert_allclose(b.integrate(0, 1), 0.5)
+        assert_allclose(b.integrate(1, 0), -0.5)
+
+        # either extrapolate outside of [0, 2] or treat the values there as
+        # zeros; extrapolation is the default
+ assert_allclose(b.integrate(-1, 1), 0)
+ assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
+ assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
+ assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)
+
+ # Test ``_fitpack._splint()``
+ assert_allclose(b.integrate(1, -1, extrapolate=False),
+ _impl.splint(1, -1, b.tck))
+
+ # Test ``extrapolate='periodic'``.
+ b.extrapolate = 'periodic'
+ i = b.antiderivative()
+ period_int = i(2) - i(0)
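+        # with ``extrapolate='periodic'``, an integral over any window splits
+        # into a whole number of periods plus a remainder folded back into
+        # the base interval [0, 2]; e.g. [1.5, 15] is six full periods plus
+        # [1.5, 2] and [0, 1], which is what the expected values below encode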
+
+ assert_allclose(b.integrate(0, 2), period_int)
+ assert_allclose(b.integrate(2, 0), -1 * period_int)
+ assert_allclose(b.integrate(-9, -7), period_int)
+ assert_allclose(b.integrate(-8, -4), 2 * period_int)
+
+ assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
+ assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
+ assert_allclose(b.integrate(1.5 + 12, 3 + 12),
+ i(1) - i(0) + i(2) - i(1.5))
+ assert_allclose(b.integrate(1.5, 3 + 12),
+ i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)
+
+ assert_allclose(b.integrate(0, -1), i(0) - i(1))
+ assert_allclose(b.integrate(-9, -10), i(0) - i(1))
+ assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)
+
+ def test_integrate_ppoly(self):
+        # test that the .integrate method is consistent with PPoly.integrate
+ x = [0, 1, 2, 3, 4]
+ b = make_interp_spline(x, x)
+ b.extrapolate = 'periodic'
+ p = PPoly.from_spline(b)
+
+ for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
+ assert_allclose(b.integrate(x0, x1),
+ p.integrate(x0, x1))
+
+ def test_subclassing(self):
+ # classmethods should not decay to the base class
+ class B(BSpline):
+ pass
+
+ b = B.basis_element([0, 1, 2, 2])
+ assert_equal(b.__class__, B)
+ assert_equal(b.derivative().__class__, B)
+ assert_equal(b.antiderivative().__class__, B)
+
+ @pytest.mark.parametrize('axis', range(-4, 4))
+ def test_axis(self, axis):
+ n, k = 22, 3
+ t = np.linspace(0, 1, n + k + 1)
+ sh = [6, 7, 8]
+ # We need the positive axis for some of the indexing and slices used
+ # in this test.
+ pos_axis = axis % 4
+ sh.insert(pos_axis, n) # [22, 6, 7, 8] etc
+ c = np.random.random(size=sh)
+ b = BSpline(t, c, k, axis=axis)
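+        # internally, the interpolation axis is rolled to be the first axis
+        # of the coefficient array: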
+ assert_equal(b.c.shape,
+ [sh[pos_axis],] + sh[:pos_axis] + sh[pos_axis+1:])
+
+ xp = np.random.random((3, 4, 5))
+ assert_equal(b(xp).shape,
+ sh[:pos_axis] + list(xp.shape) + sh[pos_axis+1:])
+
+ # -c.ndim <= axis < c.ndim
+ for ax in [-c.ndim - 1, c.ndim]:
+ assert_raises(AxisError, BSpline,
+ **dict(t=t, c=c, k=k, axis=ax))
+
+        # derivative and antiderivative keep the axis
+ for b1 in [BSpline(t, c, k, axis=axis).derivative(),
+ BSpline(t, c, k, axis=axis).derivative(2),
+ BSpline(t, c, k, axis=axis).antiderivative(),
+ BSpline(t, c, k, axis=axis).antiderivative(2)]:
+ assert_equal(b1.axis, b.axis)
+
+ def test_neg_axis(self):
+ k = 2
+ t = [0, 1, 2, 3, 4, 5, 6]
+ c = np.array([[-1, 2, 0, -1], [2, 0, -3, 1]])
+
+ spl = BSpline(t, c, k, axis=-1)
+ spl0 = BSpline(t, c[0], k)
+ spl1 = BSpline(t, c[1], k)
+ assert_equal(spl(2.5), [spl0(2.5), spl1(2.5)])
+
+ def test_design_matrix_bc_types(self):
+        '''
+        Splines with different boundary conditions are built on different
+        vectors of knots. Since the design matrix depends only on the knot
+        vector, `k` and `x`, it is useful to test different boundary
+        conditions (and, consequently, different knot vectors).
+        '''
+ def run_design_matrix_tests(n, k, bc_type):
+            '''Helper to avoid code repetition.'''
+ np.random.seed(1234)
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+ if bc_type == "periodic":
+ y[0] = y[-1]
+
+ bspl = make_interp_spline(x, y, k=k, bc_type=bc_type)
+
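+            # evaluating a spline with an identity coefficient matrix gives
+            # the basis elements themselves, i.e. the design matrix: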
+ c = np.eye(len(bspl.t) - k - 1)
+ des_matr_def = BSpline(bspl.t, c, k)(x)
+ des_matr_csr = BSpline.design_matrix(x,
+ bspl.t,
+ k).toarray()
+ assert_allclose(des_matr_csr @ bspl.c, y, atol=1e-14)
+ assert_allclose(des_matr_def, des_matr_csr, atol=1e-14)
+
+ # "clamped" and "natural" work only with `k = 3`
+ n = 11
+ k = 3
+ for bc in ["clamped", "natural"]:
+ run_design_matrix_tests(n, k, bc)
+
+ # "not-a-knot" works with odd `k`
+ for k in range(3, 8, 2):
+ run_design_matrix_tests(n, k, "not-a-knot")
+
+ # "periodic" works with any `k` (even more than `n`)
+ n = 5 # smaller `n` to test `k > n` case
+ for k in range(2, 7):
+ run_design_matrix_tests(n, k, "periodic")
+
+ @pytest.mark.parametrize('extrapolate', [False, True, 'periodic'])
+ @pytest.mark.parametrize('degree', range(5))
+ def test_design_matrix_same_as_BSpline_call(self, extrapolate, degree):
+ """Test that design_matrix(x) is equivalent to BSpline(..)(x)."""
+ np.random.seed(1234)
+ x = np.random.random_sample(10 * (degree + 1))
+ xmin, xmax = np.amin(x), np.amax(x)
+ k = degree
+ t = np.r_[np.linspace(xmin - 2, xmin - 1, degree),
+ np.linspace(xmin, xmax, 2 * (degree + 1)),
+ np.linspace(xmax + 1, xmax + 2, degree)]
+ c = np.eye(len(t) - k - 1)
+ bspline = BSpline(t, c, k, extrapolate)
+ assert_allclose(
+ bspline(x), BSpline.design_matrix(x, t, k, extrapolate).toarray()
+ )
+
+ # extrapolation regime
+ x = np.array([xmin - 10, xmin - 1, xmax + 1.5, xmax + 10])
+ if not extrapolate:
+ with pytest.raises(ValueError):
+ BSpline.design_matrix(x, t, k, extrapolate)
+ else:
+ assert_allclose(
+ bspline(x),
+ BSpline.design_matrix(x, t, k, extrapolate).toarray()
+ )
+
+ def test_design_matrix_x_shapes(self):
+ # test for different `x` shapes
+ np.random.seed(1234)
+ n = 10
+ k = 3
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+
+ bspl = make_interp_spline(x, y, k=k)
+ for i in range(1, 4):
+ xc = x[:i]
+ yc = y[:i]
+ des_matr_csr = BSpline.design_matrix(xc,
+ bspl.t,
+ k).toarray()
+ assert_allclose(des_matr_csr @ bspl.c, yc, atol=1e-14)
+
+ def test_design_matrix_t_shapes(self):
+ # test for minimal possible `t` shape
+ t = [1., 1., 1., 2., 3., 4., 4., 4.]
+ des_matr = BSpline.design_matrix(2., t, 3).toarray()
+ assert_allclose(des_matr,
+ [[0.25, 0.58333333, 0.16666667, 0.]],
+ atol=1e-14)
+
+ def test_design_matrix_asserts(self):
+ np.random.seed(1234)
+ n = 10
+ k = 3
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+ bspl = make_interp_spline(x, y, k=k)
+ # invalid vector of knots (should be a 1D non-descending array)
+ # here the actual vector of knots is reversed, so it is invalid
+ with assert_raises(ValueError):
+ BSpline.design_matrix(x, bspl.t[::-1], k)
+ k = 2
+ t = [0., 1., 2., 3., 4., 5.]
+ x = [1., 2., 3., 4.]
+ # out of bounds
+ with assert_raises(ValueError):
+ BSpline.design_matrix(x, t, k)
+
+ @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+ 'periodic', 'not-a-knot'])
+ def test_from_power_basis(self, bc_type):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20)
+ if bc_type == 'periodic':
+ y[-1] = y[0]
+ cb = CubicSpline(x, y, bc_type=bc_type)
+ bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+ xx = np.linspace(0, 1, 20)
+ assert_allclose(cb(xx), bspl(xx), atol=1e-15)
+ bspl_new = make_interp_spline(x, y, bc_type=bc_type)
+ assert_allclose(bspl.c, bspl_new.c, atol=1e-15)
+
+ @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+ 'periodic', 'not-a-knot'])
+ def test_from_power_basis_complex(self, bc_type):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20) + np.random.random(20) * 1j
+ if bc_type == 'periodic':
+ y[-1] = y[0]
+ cb = CubicSpline(x, y, bc_type=bc_type)
+ bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+ bspl_new_real = make_interp_spline(x, y.real, bc_type=bc_type)
+ bspl_new_imag = make_interp_spline(x, y.imag, bc_type=bc_type)
+ assert_equal(bspl.c.dtype, (bspl_new_real.c
+ + 1j * bspl_new_imag.c).dtype)
+ assert_allclose(bspl.c, bspl_new_real.c
+ + 1j * bspl_new_imag.c, atol=1e-15)
+
+ def test_from_power_basis_exmp(self):
+        '''
+        For x = [0, 1, 2, 3, 4] and y = [1, 1, 1, 1, 1]
+        the coefficients of the cubic spline in the power basis are
+
+            [[0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0],
+             [1, 1, 1, 1, 1]]
+
+        It can be shown explicitly that the coefficients of the interpolating
+        function in the B-spline basis are c = [1, 1, 1, 1, 1, 1, 1].
+        '''
+ x = np.array([0, 1, 2, 3, 4])
+ y = np.array([1, 1, 1, 1, 1])
+ bspl = BSpline.from_power_basis(CubicSpline(x, y, bc_type='natural'),
+ bc_type='natural')
+ assert_allclose(bspl.c, [1, 1, 1, 1, 1, 1, 1], atol=1e-15)
+
+ def test_read_only(self):
+ # BSpline must work on read-only knots and coefficients.
+ t = np.array([0, 1])
+ c = np.array([3.0])
+ t.setflags(write=False)
+ c.setflags(write=False)
+
+ xx = np.linspace(0, 1, 10)
+ xx.setflags(write=False)
+
+ b = BSpline(t=t, c=c, k=0)
+ assert_allclose(b(xx), 3)
+
+
+class TestInsert:
+
+ @pytest.mark.parametrize('xval', [0.0, 1.0, 2.5, 4, 6.5, 7.0])
+ def test_insert(self, xval):
+ # insert a knot, incl edges (0.0, 7.0) and exactly at an existing knot (4.0)
+ x = np.arange(8)
+ y = np.sin(x)**3
+ spl = make_interp_spline(x, y, k=3)
+
+ spl_1f = insert(xval, spl) # FITPACK
+ spl_1 = spl.insert_knot(xval)
+
+ assert_allclose(spl_1.t, spl_1f.t, atol=1e-15)
+ assert_allclose(spl_1.c, spl_1f.c[:-spl.k-1], atol=1e-15)
+
+ # knot insertion preserves values, unless multiplicity >= k+1
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ # ... repeat with ndim > 1
+ y1 = np.cos(x)**3
+ spl_y1 = make_interp_spline(x, y1, k=3)
+ spl_yy = make_interp_spline(x, np.c_[y, y1], k=3)
+ spl_yy1 = spl_yy.insert_knot(xval)
+
+ assert_allclose(spl_yy1.t, spl_1.t, atol=1e-15)
+ assert_allclose(spl_yy1.c, np.c_[spl.insert_knot(xval).c,
+ spl_y1.insert_knot(xval).c], atol=1e-15)
+
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl_yy(xx), spl_yy1(xx), atol=1e-15)
+
+ @pytest.mark.parametrize(
+ 'xval, m', [(0.0, 2), (1.0, 3), (1.5, 5), (4, 2), (7.0, 2)]
+ )
+ def test_insert_multi(self, xval, m):
+ x = np.arange(8)
+ y = np.sin(x)**3
+ spl = make_interp_spline(x, y, k=3)
+
+ spl_1f = insert(xval, spl, m=m)
+ spl_1 = spl.insert_knot(xval, m)
+
+ assert_allclose(spl_1.t, spl_1f.t, atol=1e-15)
+ assert_allclose(spl_1.c, spl_1f.c[:-spl.k-1], atol=1e-15)
+
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ def test_insert_random(self):
+ rng = np.random.default_rng(12345)
+ n, k = 11, 3
+
+ t = np.sort(rng.uniform(size=n+k+1))
+ c = rng.uniform(size=(n, 3, 2))
+ spl = BSpline(t, c, k)
+
+ xv = rng.uniform(low=t[k+1], high=t[-k-1])
+ spl_1 = spl.insert_knot(xv)
+
+ xx = rng.uniform(low=t[k+1], high=t[-k-1], size=33)
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ @pytest.mark.parametrize('xv', [0, 0.1, 2.0, 4.0, 4.5, # l.h. edge
+ 5.5, 6.0, 6.1, 7.0] # r.h. edge
+ )
+ def test_insert_periodic(self, xv):
+ x = np.arange(8)
+ y = np.sin(x)**3
+ tck = splrep(x, y, k=3)
+ spl = BSpline(*tck, extrapolate="periodic")
+
+ spl_1 = spl.insert_knot(xv)
+ tf, cf, k = insert(xv, spl.tck, per=True)
+
+ assert_allclose(spl_1.t, tf, atol=1e-15)
+ assert_allclose(spl_1.c[:-k-1], cf[:-k-1], atol=1e-15)
+
+ xx = np.random.default_rng(1234).uniform(low=0, high=7, size=41)
+ assert_allclose(spl_1(xx), splev(xx, (tf, cf, k)), atol=1e-15)
+
+ def test_insert_periodic_too_few_internal_knots(self):
+        # both FITPACK and spl.insert_knot raise when there are not enough
+        # internal knots to make a periodic extension.
+        # Below, the internal knots are 2, 3, 4, 5 and the new knot is 3.5:
+        #
+        #     2, 3, 3.5, 4, 5
+        #
+        # i.e. only two knots on each side of the new one, while more are
+        # needed from either the left or the right.
+ xv = 3.5
+ k = 3
+ t = np.array([0]*(k+1) + [2, 3, 4, 5] + [7]*(k+1))
+ c = np.ones(len(t) - k - 1)
+ spl = BSpline(t, c, k, extrapolate="periodic")
+
+ with assert_raises(ValueError):
+ insert(xv, (t, c, k), per=True)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(xv)
+
+ def test_insert_no_extrap(self):
+ k = 3
+ t = np.array([0]*(k+1) + [2, 3, 4, 5] + [7]*(k+1))
+ c = np.ones(len(t) - k - 1)
+ spl = BSpline(t, c, k)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(-1)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(8)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(3, m=0)
+
+
+def test_knots_multiplicity():
+ # Take a spline w/ random coefficients, throw in knots of varying
+ # multiplicity.
+
+ def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
+ # check evaluations against FITPACK, incl extrapolations
+ t, c, k = b.tck
+ x = np.unique(t)
+        x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:-1]), t[-1]+0.1]
+ assert_allclose(splev(x, (t, c, k), der), b(x, der),
+ atol=atol, rtol=rtol, err_msg=f'der = {der} k = {b.k}')
+
+ # test loop itself
+ # [the index `j` is for interpreting the traceback in case of a failure]
+ for k in [1, 2, 3, 4, 5]:
+ b = _make_random_spline(k=k)
+ for j, b1 in enumerate(_make_multiples(b)):
+ check_splev(b1, j)
+ for der in range(1, k+1):
+ check_splev(b1, j, der, 1e-12, 1e-12)
+
+
+### stolen from @pv, verbatim
+def _naive_B(x, k, i, t):
+ """
+ Naive way to compute B-spline basis functions. Useful only for testing!
+    Computes B(x; t[i], ..., t[i+k+1]).
+ """
+ if k == 0:
+ return 1.0 if t[i] <= x < t[i+1] else 0.0
+ if t[i+k] == t[i]:
+ c1 = 0.0
+ else:
+ c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
+ if t[i+k+1] == t[i+1]:
+ c2 = 0.0
+ else:
+ c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
+ return (c1 + c2)
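+# The recursion above is the Cox-de Boor recurrence. Note the half-open
+# convention in the k == 0 branch (B = 1 on t[i] <= x < t[i+1]), which is
+# why the evaluators below special-case the interval lookup at x == t[k].
+# A quick sanity check (a minimal sketch using the helper above): on a
+# clamped knot vector the basis functions sum to one on the base interval,
+#
+#     t = np.array([0., 0, 0, 0, 1, 2, 3, 3, 3, 3])
+#     assert np.allclose(sum(_naive_B(1.5, 3, i, t) for i in range(6)), 1.0)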
+
+
+### stolen from @pv, verbatim
+def _naive_eval(x, t, c, k):
+ """
+ Naive B-spline evaluation. Useful only for testing!
+ """
+ if x == t[k]:
+ i = k
+ else:
+ i = np.searchsorted(t, x) - 1
+ assert t[i] <= x <= t[i+1]
+ assert i >= k and i < len(t) - k
+ return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
+
+
+def _naive_eval_2(x, t, c, k):
+ """Naive B-spline evaluation, another way."""
+ n = len(t) - (k+1)
+ assert n >= k+1
+ assert len(c) >= n
+ assert t[k] <= x <= t[n]
+ return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
+
+
+def _sum_basis_elements(x, t, c, k):
+ n = len(t) - (k+1)
+ assert n >= k+1
+ assert len(c) >= n
+ s = 0.
+ for i in range(n):
+ b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
+ s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
+ return s
+
+
+def B_012(x):
+ """ A linear B-spline function B(x | 0, 1, 2)."""
+ x = np.atleast_1d(x)
+ return np.piecewise(x, [(x < 0) | (x > 2),
+ (x >= 0) & (x < 1),
+ (x >= 1) & (x <= 2)],
+ [lambda x: 0., lambda x: x, lambda x: 2.-x])
+
+
+def B_0123(x, der=0):
+ """A quadratic B-spline function B(x | 0, 1, 2, 3)."""
+ x = np.atleast_1d(x)
+ conds = [x < 1, (x > 1) & (x < 2), x > 2]
+ if der == 0:
+ funcs = [lambda x: x*x/2.,
+ lambda x: 3./4 - (x-3./2)**2,
+ lambda x: (3.-x)**2 / 2]
+ elif der == 2:
+ funcs = [lambda x: 1.,
+ lambda x: -2.,
+ lambda x: 1.]
+ else:
+        raise ValueError(f'should never be here: der={der}')
+ pieces = np.piecewise(x, conds, funcs)
+ return pieces
+
+
+def _make_random_spline(n=35, k=3):
+ np.random.seed(123)
+ t = np.sort(np.random.random(n+k+1))
+ c = np.random.random(n)
+ return BSpline.construct_fast(t, c, k)
+
+
+def _make_multiples(b):
+ """Increase knot multiplicity."""
+ c, k = b.c, b.k
+
+ t1 = b.t.copy()
+ t1[17:19] = t1[17]
+ t1[22] = t1[21]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[:k+1] = t1[0]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[-k-1:] = t1[-1]
+ yield BSpline(t1, c, k)
+
+
+class TestInterop:
+ #
+ # Test that FITPACK-based spl* functions can deal with BSpline objects
+ #
+ def setup_method(self):
+ xx = np.linspace(0, 4.*np.pi, 41)
+ yy = np.cos(xx)
+ b = make_interp_spline(xx, yy)
+ self.tck = (b.t, b.c, b.k)
+ self.xx, self.yy, self.b = xx, yy, b
+
+ self.xnew = np.linspace(0, 4.*np.pi, 21)
+
+ c2 = np.c_[b.c, b.c, b.c]
+ self.c2 = np.dstack((c2, c2))
+ self.b2 = BSpline(b.t, self.c2, b.k)
+
+ def test_splev(self):
+ xnew, b, b2 = self.xnew, self.b, self.b2
+
+ # check that splev works with 1-D array of coefficients
+ # for array and scalar `x`
+ assert_allclose(splev(xnew, b),
+ b(xnew), atol=1e-15, rtol=1e-15)
+ assert_allclose(splev(xnew, b.tck),
+ b(xnew), atol=1e-15, rtol=1e-15)
+ assert_allclose([splev(x, b) for x in xnew],
+ b(xnew), atol=1e-15, rtol=1e-15)
+
+        # With N-D coefficients, there's a quirk:
+ # splev(x, BSpline) is equivalent to BSpline(x)
+ with assert_raises(ValueError, match="Calling splev.. with BSpline"):
+ splev(xnew, b2)
+
+ # However, splev(x, BSpline.tck) needs some transposes. This is because
+ # BSpline interpolates along the first axis, while the legacy FITPACK
+ # wrapper does list(map(...)) which effectively interpolates along the
+ # last axis. Like so:
+ sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)
+ cc = b2.c.transpose(sh)
+ tck = (b2.t, cc, b2.k)
+ assert_allclose(splev(xnew, tck),
+ b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
+
+ def test_splrep(self):
+ x, y = self.xx, self.yy
+ # test that "new" splrep is equivalent to _impl.splrep
+ tck = splrep(x, y)
+ t, c, k = _impl.splrep(x, y)
+ assert_allclose(tck[0], t, atol=1e-15)
+ assert_allclose(tck[1], c, atol=1e-15)
+ assert_equal(tck[2], k)
+
+ # also cover the `full_output=True` branch
+ tck_f, _, _, _ = splrep(x, y, full_output=True)
+ assert_allclose(tck_f[0], t, atol=1e-15)
+ assert_allclose(tck_f[1], c, atol=1e-15)
+ assert_equal(tck_f[2], k)
+
+ # test that the result of splrep roundtrips with splev:
+ # evaluate the spline on the original `x` points
+ yy = splev(x, tck)
+ assert_allclose(y, yy, atol=1e-15)
+
+ # ... and also it roundtrips if wrapped in a BSpline
+ b = BSpline(*tck)
+ assert_allclose(y, b(x), atol=1e-15)
+
+ def test_splrep_errors(self):
+ # test that both "old" and "new" splrep raise for an N-D ``y`` array
+ # with n > 1
+ x, y = self.xx, self.yy
+ y2 = np.c_[y, y]
+ with assert_raises(ValueError):
+ splrep(x, y2)
+ with assert_raises(ValueError):
+ _impl.splrep(x, y2)
+
+ # input below minimum size
+ with assert_raises(TypeError, match="m > k must hold"):
+ splrep(x[:3], y[:3])
+ with assert_raises(TypeError, match="m > k must hold"):
+ _impl.splrep(x[:3], y[:3])
+
+ def test_splprep(self):
+ x = np.arange(15).reshape((3, 5))
+ b, u = splprep(x)
+ tck, u1 = _impl.splprep(x)
+
+ # test the roundtrip with splev for both "old" and "new" output
+ assert_allclose(u, u1, atol=1e-15)
+ assert_allclose(splev(u, b), x, atol=1e-15)
+ assert_allclose(splev(u, tck), x, atol=1e-15)
+
+ # cover the ``full_output=True`` branch
+ (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
+ assert_allclose(u, u_f, atol=1e-15)
+ assert_allclose(splev(u_f, b_f), x, atol=1e-15)
+
+ def test_splprep_errors(self):
+ # test that both "old" and "new" code paths raise for x.ndim > 2
+ x = np.arange(3*4*5).reshape((3, 4, 5))
+ with assert_raises(ValueError, match="too many values to unpack"):
+ splprep(x)
+ with assert_raises(ValueError, match="too many values to unpack"):
+ _impl.splprep(x)
+
+ # input below minimum size
+ x = np.linspace(0, 40, num=3)
+ with assert_raises(TypeError, match="m > k must hold"):
+ splprep([x])
+ with assert_raises(TypeError, match="m > k must hold"):
+ _impl.splprep([x])
+
+ # automatically calculated parameters are non-increasing
+ # see gh-7589
+ x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
+ with assert_raises(ValueError, match="Invalid inputs"):
+ splprep([x])
+ with assert_raises(ValueError, match="Invalid inputs"):
+ _impl.splprep([x])
+
+ # given non-increasing parameter values u
+ x = [1, 3, 2, 4]
+ u = [0, 0.3, 0.2, 1]
+ with assert_raises(ValueError, match="Invalid inputs"):
+ splprep(*[[x], None, u])
+
+ def test_sproot(self):
+ b, b2 = self.b, self.b2
+ roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
+ # sproot accepts a BSpline obj w/ 1-D coef array
+ assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
+ assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
+
+ # ... and deals with trailing dimensions if coef array is N-D
+ with assert_raises(ValueError, match="Calling sproot.. with BSpline"):
+ sproot(b2, mest=50)
+
+ # and legacy behavior is preserved for a tck tuple w/ N-D coef
+ c2r = b2.c.transpose(1, 2, 0)
+ rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
+ assert_equal(rr.shape, (3, 2, 4))
+ assert_allclose(rr - roots, 0, atol=1e-12)
+
+ def test_splint(self):
+ # test that splint accepts BSpline objects
+ b, b2 = self.b, self.b2
+ assert_allclose(splint(0, 1, b),
+ splint(0, 1, b.tck), atol=1e-14)
+ assert_allclose(splint(0, 1, b),
+ b.integrate(0, 1), atol=1e-14)
+
+ # ... and deals with N-D arrays of coefficients
+ with assert_raises(ValueError, match="Calling splint.. with BSpline"):
+ splint(0, 1, b2)
+
+ # and the legacy behavior is preserved for a tck tuple w/ N-D coef
+ c2r = b2.c.transpose(1, 2, 0)
+ integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
+ assert_equal(integr.shape, (3, 2))
+ assert_allclose(integr,
+ splint(0, 1, b), atol=1e-14)
+
+ def test_splder(self):
+ for b in [self.b, self.b2]:
+ # pad the c array (FITPACK convention)
+ ct = len(b.t) - len(b.c)
+ if ct > 0:
+ b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+ for n in [1, 2, 3]:
+ bd = splder(b)
+ tck_d = _impl.splder((b.t, b.c, b.k))
+ assert_allclose(bd.t, tck_d[0], atol=1e-15)
+ assert_allclose(bd.c, tck_d[1], atol=1e-15)
+ assert_equal(bd.k, tck_d[2])
+ assert_(isinstance(bd, BSpline))
+ assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
+
+ def test_splantider(self):
+ for b in [self.b, self.b2]:
+ # pad the c array (FITPACK convention)
+ ct = len(b.t) - len(b.c)
+ if ct > 0:
+ b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+ for n in [1, 2, 3]:
+ bd = splantider(b)
+ tck_d = _impl.splantider((b.t, b.c, b.k))
+ assert_allclose(bd.t, tck_d[0], atol=1e-15)
+ assert_allclose(bd.c, tck_d[1], atol=1e-15)
+ assert_equal(bd.k, tck_d[2])
+ assert_(isinstance(bd, BSpline))
+ assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
+
+ def test_insert(self):
+ b, b2, xx = self.b, self.b2, self.xx
+
+ j = b.t.size // 2
+ tn = 0.5*(b.t[j] + b.t[j+1])
+
+ bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
+ assert_allclose(splev(xx, bn),
+ splev(xx, tck_n), atol=1e-15)
+ assert_(isinstance(bn, BSpline))
+ assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out
+
+ # for N-D array of coefficients, BSpline.c needs to be transposed
+ # after that, the results are equivalent.
+ sh = tuple(range(b2.c.ndim))
+ c_ = b2.c.transpose(sh[1:] + (0,))
+ tck_n2 = insert(tn, (b2.t, c_, b2.k))
+
+ bn2 = insert(tn, b2)
+
+ # need a transpose for comparing the results, cf test_splev
+ assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
+ bn2(xx), atol=1e-15)
+ assert_(isinstance(bn2, BSpline))
+ assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out
+
+
+class TestInterp:
+ #
+ # Test basic ways of constructing interpolating splines.
+ #
+ xx = np.linspace(0., 2.*np.pi)
+ yy = np.sin(xx)
+
+ def test_non_int_order(self):
+ with assert_raises(TypeError):
+ make_interp_spline(self.xx, self.yy, k=2.5)
+
+ def test_order_0(self):
+ b = make_interp_spline(self.xx, self.yy, k=0)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ b = make_interp_spline(self.xx, self.yy, k=0, axis=-1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_linear(self):
+ b = make_interp_spline(self.xx, self.yy, k=1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ b = make_interp_spline(self.xx, self.yy, k=1, axis=-1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ @pytest.mark.parametrize('k', [0, 1, 2, 3])
+ def test_incompatible_x_y(self, k):
+ x = [0, 1, 2, 3, 4, 5]
+ y = [0, 1, 2, 3, 4, 5, 6, 7]
+ with assert_raises(ValueError, match="Shapes of x"):
+ make_interp_spline(x, y, k=k)
+
+ @pytest.mark.parametrize('k', [0, 1, 2, 3])
+ def test_broken_x(self, k):
+ x = [0, 1, 1, 2, 3, 4] # duplicates
+ y = [0, 1, 2, 3, 4, 5]
+ with assert_raises(ValueError, match="x to not have duplicates"):
+ make_interp_spline(x, y, k=k)
+
+ x = [0, 2, 1, 3, 4, 5] # unsorted
+ with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
+ make_interp_spline(x, y, k=k)
+
+ x = [0, 1, 2, 3, 4, 5]
+ x = np.asarray(x).reshape((1, -1)) # 1D
+ with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
+ make_interp_spline(x, y, k=k)
+
+ def test_not_a_knot(self):
+ for k in [3, 5]:
+ b = make_interp_spline(self.xx, self.yy, k)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_periodic(self):
+ # k = 5 here for more derivatives
+ b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic')
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        # in the periodic case, the first k-1 derivatives are expected
+        # to be equal at the boundaries
+ for i in range(1, 5):
+ assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)
+ # tests for axis=-1
+ b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic', axis=-1)
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ for i in range(1, 5):
+ assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)
+
+ @pytest.mark.parametrize('k', [2, 3, 4, 5, 6, 7])
+ def test_periodic_random(self, k):
+ # tests for both cases (k > n and k <= n)
+ n = 5
+ np.random.seed(1234)
+ x = np.sort(np.random.random_sample(n) * 10)
+ y = np.random.random_sample(n) * 100
+ y[0] = y[-1]
+ b = make_interp_spline(x, y, k=k, bc_type='periodic')
+ assert_allclose(b(x), y, atol=1e-14)
+
+ def test_periodic_axis(self):
+ n = self.xx.shape[0]
+ np.random.seed(1234)
+ x = np.random.random_sample(n) * 2 * np.pi
+ x = np.sort(x)
+ x[0] = 0.
+ x[-1] = 2 * np.pi
+ y = np.zeros((2, n))
+ y[0] = np.sin(x)
+ y[1] = np.cos(x)
+ b = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
+ for i in range(n):
+ assert_allclose(b(x[i]), y[:, i], atol=1e-14)
+ assert_allclose(b(x[0]), b(x[-1]), atol=1e-14)
+
+ def test_periodic_points_exception(self):
+        # the first and last points should match when the periodic case is expected
+ np.random.seed(1234)
+ k = 5
+ n = 8
+ x = np.sort(np.random.random_sample(n))
+ y = np.random.random_sample(n)
+ y[0] = y[-1] - 1 # to be sure that they are not equal
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, k=k, bc_type='periodic')
+
+ def test_periodic_knots_exception(self):
+        # the `periodic` case does not work with a user-supplied vector of knots
+ np.random.seed(1234)
+ k = 3
+ n = 7
+ x = np.sort(np.random.random_sample(n))
+ y = np.random.random_sample(n)
+ t = np.zeros(n + 2 * k)
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, k, t, 'periodic')
+
+ @pytest.mark.parametrize('k', [2, 3, 4, 5])
+ def test_periodic_splev(self, k):
+        # compare values of the periodic b-spline with splev
+ b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
+ tck = splrep(self.xx, self.yy, per=True, k=k)
+ spl = splev(self.xx, tck)
+ assert_allclose(spl, b(self.xx), atol=1e-14)
+
+        # compare derivatives of the periodic b-spline with splev
+ for i in range(1, k):
+ spl = splev(self.xx, tck, der=i)
+ assert_allclose(spl, b(self.xx, nu=i), atol=1e-10)
+
+ def test_periodic_cubic(self):
+        # compare values of the cubic periodic b-spline with CubicSpline
+ b = make_interp_spline(self.xx, self.yy, k=3, bc_type='periodic')
+ cub = CubicSpline(self.xx, self.yy, bc_type='periodic')
+ assert_allclose(b(self.xx), cub(self.xx), atol=1e-14)
+
+ # edge case: Cubic interpolation on 3 points
+ n = 3
+ x = np.sort(np.random.random_sample(n) * 10)
+ y = np.random.random_sample(n) * 100
+ y[0] = y[-1]
+ b = make_interp_spline(x, y, k=3, bc_type='periodic')
+ cub = CubicSpline(x, y, bc_type='periodic')
+ assert_allclose(b(x), cub(x), atol=1e-14)
+
+ def test_periodic_full_matrix(self):
+        # compare values of the cubic periodic b-spline with the
+        # solution of the linear system with a full matrix
+ k = 3
+ b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
+ t = _periodic_knots(self.xx, k)
+ c = _make_interp_per_full_matr(self.xx, self.yy, t, k)
+ b1 = np.vectorize(lambda x: _naive_eval(x, t, c, k))
+ assert_allclose(b(self.xx), b1(self.xx), atol=1e-14)
+
+ def test_quadratic_deriv(self):
+ der = [(1, 8.)] # order, value: f'(x) = 8.
+
+ # derivative at right-hand edge
+ b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+ # derivative at left-hand edge
+ b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+ def test_cubic_deriv(self):
+ k = 3
+
+ # first derivatives at left & right edges:
+ der_l, der_r = [(1, 3.)], [(1, 4.)]
+ b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
+ [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+ # 'natural' cubic spline, zero out 2nd derivatives at the boundaries
+ der_l, der_r = [(2, 0)], [(2, 0)]
+ b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_quintic_derivs(self):
+ k, n = 5, 7
+ x = np.arange(n).astype(np.float64)
+ y = np.sin(x)
+ der_l = [(1, -12.), (2, 1)]
+ der_r = [(1, 8.), (2, 3.)]
+ b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
+ assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(x[0], 1), b(x[0], 2)],
+ [val for (nu, val) in der_l])
+ assert_allclose([b(x[-1], 1), b(x[-1], 2)],
+ [val for (nu, val) in der_r])
+
+ @pytest.mark.xfail(reason='unstable')
+ def test_cubic_deriv_unstable(self):
+ # 1st and 2nd derivative at x[0], no derivative information at x[-1]
+ # The problem is not that it fails [who would use this anyway],
+ # the problem is that it fails *silently*, and I've no idea
+ # how to detect this sort of instability.
+ # In this particular case: it's OK for len(t) < 20, goes haywire
+ # at larger `len(t)`.
+ k = 3
+ t = _augknt(self.xx, k)
+
+ der_l = [(1, 3.), (2, 4.)]
+ b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+ def test_knots_not_data_sites(self):
+ # Knots need not coincide with the data sites.
+ # use a quadratic spline, knots are at data averages,
+ # two additional constraints are zero 2nd derivatives at edges
+ k = 2
+ t = np.r_[(self.xx[0],)*(k+1),
+ (self.xx[1:] + self.xx[:-1]) / 2.,
+ (self.xx[-1],)*(k+1)]
+ b = make_interp_spline(self.xx, self.yy, k, t,
+ bc_type=([(2, 0)], [(2, 0)]))
+
+ assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
+ atol=1e-14)
+
+ def test_minimum_points_and_deriv(self):
+        # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * x**2, so
+        # f'(0) = 0 and f'(1) = 3.
+ k = 3
+ x = [0., 1.]
+ y = [0., 1.]
+ b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
+
+ xx = np.linspace(0., 1.)
+ yy = xx**3
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+
+ def test_deriv_spec(self):
+ # If one of the derivatives is omitted, the spline definition is
+ # incomplete.
+ x = y = [1.0, 2, 3, 4, 5, 6]
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=([(1, 0.)], None))
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=(1, 0.))
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=[(1, 0.)])
+
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=42)
+
+        # CubicSpline expects ``bc_type=(left_pair, right_pair)``, while
+        # here we expect ``bc_type=(iterable, iterable)``.
+ l, r = (1, 0.0), (1, 0.0)
+ with assert_raises(ValueError):
+ make_interp_spline(x, y, bc_type=(l, r))
+
+ def test_complex(self):
+ k = 3
+ xx = self.xx
+ yy = self.yy + 1.j*self.yy
+
+ # first derivatives at left & right edges:
+ der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
+ b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+ assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
+ [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+ # also test zero and first order
+ for k in (0, 1):
+ b = make_interp_spline(xx, yy, k=k)
+ assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+
+ def test_int_xy(self):
+ x = np.arange(10).astype(int)
+ y = np.arange(10).astype(int)
+
+ # Cython chokes on "buffer type mismatch" (construction) or
+ # "no matching signature found" (evaluation)
+ for k in (0, 1, 2, 3):
+ b = make_interp_spline(x, y, k=k)
+ b(x)
+
+ def test_sliced_input(self):
+ # Cython code chokes on non C contiguous arrays
+ xx = np.linspace(-1, 1, 100)
+
+ x = xx[::5]
+ y = xx[::5]
+
+ for k in (0, 1, 2, 3):
+ make_interp_spline(x, y, k=k)
+
+ def test_check_finite(self):
+ # check_finite defaults to True; nans and such trigger a ValueError
+ x = np.arange(10).astype(float)
+ y = x**2
+
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, make_interp_spline, x, y)
+
+ @pytest.mark.parametrize('k', [1, 2, 3, 5])
+ def test_list_input(self, k):
+ # regression test for gh-8714: TypeError for x, y being lists and k=2
+ x = list(range(10))
+ y = [a**2 for a in x]
+ make_interp_spline(x, y, k=k)
+
+ def test_multiple_rhs(self):
+ yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+ der_l = [(1, [1., 2.])]
+ der_r = [(1, [3., 4.])]
+
+ b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+ assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
+ assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)
+
+ def test_shapes(self):
+ np.random.seed(1234)
+ k, n = 3, 22
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ b = make_interp_spline(x, y, k)
+ assert_equal(b.c.shape, (n, 5, 6, 7))
+
+ # now throw in some derivatives
+ d_l = [(1, np.random.random((5, 6, 7)))]
+ d_r = [(1, np.random.random((5, 6, 7)))]
+ b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+ assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
+
+ def test_string_aliases(self):
+ yy = np.sin(self.xx)
+
+ # a single string is duplicated
+ b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # two strings are handled
+ b1 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=('natural', 'clamped'))
+ b2 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=([(2, 0)], [(1, 0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # one-sided BCs are OK
+ b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
+ b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # 'not-a-knot' is equivalent to None
+ b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # unknown strings do not pass
+ with assert_raises(ValueError):
+ make_interp_spline(self.xx, yy, k=3, bc_type='typo')
+
+ # string aliases are handled for 2D values
+ yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+ der_l = [(1, [0., 0.])]
+ der_r = [(2, [0., 0.])]
+ b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+ b1 = make_interp_spline(self.xx, yy, k=3,
+ bc_type=('clamped', 'natural'))
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ # ... and for N-D values:
+ np.random.seed(1234)
+ k, n = 3, 22
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ # now throw in some derivatives
+ d_l = [(1, np.zeros((5, 6, 7)))]
+ d_r = [(1, np.zeros((5, 6, 7)))]
+ b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+ b2 = make_interp_spline(x, y, k, bc_type='clamped')
+ assert_allclose(b1.c, b2.c, atol=1e-15)
+
+ def test_full_matrix(self):
+ np.random.seed(1234)
+ k, n = 3, 7
+ x = np.sort(np.random.random(size=n))
+ y = np.random.random(size=n)
+ t = _not_a_knot(x, k)
+
+ b = make_interp_spline(x, y, k, t)
+ cf = make_interp_full_matr(x, y, t, k)
+ assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
+
+ def test_woodbury(self):
+        '''
+        Build a banded matrix with random elements on the diagonals plus
+        blocks in the lower-left and upper-right corners, and check the
+        implementation of the Woodbury algorithm against a dense solve.
+        '''
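+        # The identity being exercised is the Woodbury matrix identity,
+        #   inv(A + U @ V.T) = inv(A)
+        #       - inv(A) @ U @ inv(I + V.T @ inv(A) @ U) @ V.T @ inv(A),
+        # where (presumably, inside ``_woodbury_algorithm``) A is the banded
+        # part and U, V carry the corner blocks, so a banded solve plus a
+        # small dense solve replaces the full dense solve.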
+ np.random.seed(1234)
+ n = 201
+ for k in range(3, 32, 2):
+ offset = int((k - 1) / 2)
+ a = np.diagflat(np.random.random((1, n)))
+ for i in range(1, offset + 1):
+ a[:-i, i:] += np.diagflat(np.random.random((1, n - i)))
+ a[i:, :-i] += np.diagflat(np.random.random((1, n - i)))
+ ur = np.random.random((offset, offset))
+ a[:offset, -offset:] = ur
+ ll = np.random.random((offset, offset))
+ a[-offset:, :offset] = ll
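+            # pack the diagonals of ``a`` into (k, n) banded storage: row i
+            # holds the diagonal with offset j, zero-padded at the start for
+            # j >= 0 and at the end for j < 0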
+ d = np.zeros((k, n))
+ for i, j in enumerate(range(offset, -offset - 1, -1)):
+ if j < 0:
+ d[i, :j] = np.diagonal(a, offset=j)
+ else:
+ d[i, j:] = np.diagonal(a, offset=j)
+ b = np.random.random(n)
+ assert_allclose(_woodbury_algorithm(d, ur, ll, b, k),
+ np.linalg.solve(a, b), atol=1e-14)
+
+
+def make_interp_full_matr(x, y, t, k):
+ """Assemble an spline order k with knots t to interpolate
+ y(x) using full matrices.
+ Not-a-knot BC only.
+
+ This routine is here for testing only (even though it's functional).
+ """
+ assert x.size == y.size
+ assert t.size == x.size + k + 1
+ n = x.size
+
+ A = np.zeros((n, n), dtype=np.float64)
+
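+    # collocation matrix: row j holds the k+1 b-splines that are
+    # nonzero at x[j]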
+ for j in range(n):
+ xval = x[j]
+ if xval == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, xval) - 1
+
+ # fill a row
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+ A[j, left-k:left+1] = bb
+
+ c = sl.solve(A, y)
+ return c
+
+
+def make_lsq_full_matrix(x, y, t, k=3):
+ """Make the least-square spline, full matrices."""
+ x, y, t = map(np.asarray, (x, y, t))
+ m = x.size
+ n = t.size - k - 1
+
+ A = np.zeros((m, n), dtype=np.float64)
+
+ for j in range(m):
+ xval = x[j]
+ # find interval
+ if xval == t[k]:
+ left = k
+ else:
+ left = np.searchsorted(t, xval) - 1
+
+ # fill a row
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+ A[j, left-k:left+1] = bb
+
+ # have observation matrix, can solve the LSQ problem
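+    # normal equations: minimize ||A c - y||^2 by solving (A.T @ A) c = A.T @ y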
+ B = np.dot(A.T, A)
+ Y = np.dot(A.T, y)
+ c = sl.solve(B, Y)
+
+ return c, (A, Y)
+
+
+class TestLSQ:
+ #
+ # Test make_lsq_spline
+ #
+ np.random.seed(1234)
+ n, k = 13, 3
+ x = np.sort(np.random.random(n))
+ y = np.random.random(n)
+ t = _augknt(np.linspace(x[0], x[-1], 7), k)
+
+ def test_lstsq(self):
+ # check LSQ construction vs a full matrix version
+ x, y, t, k = self.x, self.y, self.t, self.k
+
+ c0, AY = make_lsq_full_matrix(x, y, t, k)
+ b = make_lsq_spline(x, y, t, k)
+
+ assert_allclose(b.c, c0)
+ assert_equal(b.c.shape, (t.size - k - 1,))
+
+ # also check against numpy.lstsq
+ aa, yy = AY
+ c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
+ assert_allclose(b.c, c1)
+
+ def test_weights(self):
+ # weights = 1 is same as None
+ x, y, t, k = self.x, self.y, self.t, self.k
+ w = np.ones_like(x)
+
+ b = make_lsq_spline(x, y, t, k)
+ b_w = make_lsq_spline(x, y, t, k, w=w)
+
+ assert_allclose(b.t, b_w.t, atol=1e-14)
+ assert_allclose(b.c, b_w.c, atol=1e-14)
+ assert_equal(b.k, b_w.k)
+
+ def test_multiple_rhs(self):
+ x, t, k, n = self.x, self.t, self.k, self.n
+ y = np.random.random(size=(n, 5, 6, 7))
+
+ b = make_lsq_spline(x, y, t, k)
+ assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))
+
+ def test_complex(self):
+        # complex-valued `y`
+ x, t, k = self.x, self.t, self.k
+ yc = self.y * (1. + 2.j)
+
+ b = make_lsq_spline(x, yc, t, k)
+ b_re = make_lsq_spline(x, yc.real, t, k)
+ b_im = make_lsq_spline(x, yc.imag, t, k)
+
+ assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
+
+ def test_int_xy(self):
+ x = np.arange(10).astype(int)
+ y = np.arange(10).astype(int)
+ t = _augknt(x, k=1)
+ # Cython chokes on "buffer type mismatch"
+ make_lsq_spline(x, y, t, k=1)
+
+ def test_sliced_input(self):
+ # Cython code chokes on non C contiguous arrays
+ xx = np.linspace(-1, 1, 100)
+
+ x = xx[::3]
+ y = xx[::3]
+ t = _augknt(x, 1)
+ make_lsq_spline(x, y, t, k=1)
+
+ def test_checkfinite(self):
+ # check_finite defaults to True; nans and such trigger a ValueError
+ x = np.arange(12).astype(float)
+ y = x**2
+ t = _augknt(x, 3)
+
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, make_lsq_spline, x, y, t)
+
+ def test_read_only(self):
+ # Check that make_lsq_spline works with read only arrays
+ x, y, t = self.x, self.y, self.t
+ x.setflags(write=False)
+ y.setflags(write=False)
+ t.setflags(write=False)
+ make_lsq_spline(x=x, y=y, t=t)
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+class TestSmoothingSpline:
+ #
+ # test make_smoothing_spline
+ #
+ def test_invalid_input(self):
+ np.random.seed(1234)
+ n = 100
+ x = np.sort(np.random.random_sample(n) * 4 - 2)
+ y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+        # ``x`` and ``y`` should be 1-D arrays of the same shape
+ with assert_raises(ValueError):
+ make_smoothing_spline(x, y[1:])
+ with assert_raises(ValueError):
+ make_smoothing_spline(x[1:], y)
+ with assert_raises(ValueError):
+ make_smoothing_spline(x.reshape(1, n), y)
+
+ # ``x`` should be an ascending array
+ with assert_raises(ValueError):
+ make_smoothing_spline(x[::-1], y)
+
+ x_dupl = np.copy(x)
+ x_dupl[0] = x_dupl[1]
+
+ with assert_raises(ValueError):
+ make_smoothing_spline(x_dupl, y)
+
+ # x and y length must be >= 5
+ x = np.arange(4)
+ y = np.ones(4)
+ exception_message = "``x`` and ``y`` length must be at least 5"
+ with pytest.raises(ValueError, match=exception_message):
+ make_smoothing_spline(x, y)
+
+ def test_compare_with_GCVSPL(self):
+ """
+ Data is generated in the following way:
+ >>> np.random.seed(1234)
+ >>> n = 100
+ >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
+ >>> y = np.sin(x) + np.random.normal(scale=.5, size=n)
+ >>> np.savetxt('x.csv', x)
+ >>> np.savetxt('y.csv', y)
+
+        We obtain the reference result by running the GCV smoothing splines
+        package (gcvspl, by Woltring) on the sample data points, using its
+        Octave port (https://github.com/srkuberski/gcvspl). In order to use
+        this implementation, one should clone the repository and open the
+        folder in Octave.
+ In Octave, we load up ``x`` and ``y`` (generated from Python code
+ above):
+
+ >>> x = csvread('x.csv');
+ >>> y = csvread('y.csv');
+
+ Then, in order to access the implementation, we compile gcvspl files in
+ Octave:
+
+ >>> mex gcvsplmex.c gcvspl.c
+ >>> mex spldermex.c gcvspl.c
+
+        The first function computes the vector of unknowns from the dataset
+        (x, y), while the second one evaluates the spline at given points
+        using a known vector of coefficients.
+
+ >>> c = gcvsplmex( x, y, 2 );
+ >>> y0 = spldermex( x, c, 2, x, 0 );
+
+        In order to compare with the results of the gcvspl code, we save
+        ``y0`` in a csv file:
+
+ >>> csvwrite('y0.csv', y0);
+
+ """
+ # load the data sample
+ with np.load(data_file('gcvspl.npz')) as data:
+ # data points
+ x = data['x']
+ y = data['y']
+
+ y_GCVSPL = data['y_GCVSPL']
+ y_compr = make_smoothing_spline(x, y)(x)
+
+        # this tolerance is explained by the fact that the spline is built
+        # via an iterative algorithm for minimizing the GCV criterion; these
+        # algorithms may vary, so the tolerance has to be rather loose.
+ assert_allclose(y_compr, y_GCVSPL, atol=1e-4, rtol=1e-4)
+
+ def test_non_regularized_case(self):
+ """
+ In case the regularization parameter is 0, the resulting spline
+ is an interpolation spline with natural boundary conditions.
+ """
+ # create data sample
+ np.random.seed(1234)
+ n = 100
+ x = np.sort(np.random.random_sample(n) * 4 - 2)
+ y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+ spline_GCV = make_smoothing_spline(x, y, lam=0.)
+ spline_interp = make_interp_spline(x, y, 3, bc_type='natural')
+
+ grid = np.linspace(x[0], x[-1], 2 * n)
+ assert_allclose(spline_GCV(grid),
+ spline_interp(grid),
+ atol=1e-15)
+
+ def test_weighted_smoothing_spline(self):
+ # create data sample
+ np.random.seed(1234)
+ n = 100
+ x = np.sort(np.random.random_sample(n) * 4 - 2)
+ y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+ spl = make_smoothing_spline(x, y)
+
+ # in order not to iterate over all of the indices, we select 10 of
+ # them randomly
+ for ind in np.random.choice(range(100), size=10):
+ w = np.ones(n)
+ w[ind] = 30.
+ spl_w = make_smoothing_spline(x, y, w)
+            # check that the spline with an increased weight at a given point
+            # is closer to that data point than the unweighted one
+ orig = abs(spl(x[ind]) - y[ind])
+ weighted = abs(spl_w(x[ind]) - y[ind])
+
+ if orig < weighted:
+ raise ValueError(f'Spline with weights should be closer to the'
+ f' points than the original one: {orig:.4} < '
+ f'{weighted:.4}')
+
+
+################################
+# NdBSpline tests
+def bspline2(xy, t, c, k):
+ """A naive 2D tensort product spline evaluation."""
+ x, y = xy
+ tx, ty = t
+ nx = len(tx) - k - 1
+ assert (nx >= k+1)
+ ny = len(ty) - k - 1
+ assert (ny >= k+1)
+ return sum(c[ix, iy] * B(x, k, ix, tx) * B(y, k, iy, ty)
+ for ix in range(nx) for iy in range(ny))
+
+
+def B(x, k, i, t):
+ if k == 0:
+ return 1.0 if t[i] <= x < t[i+1] else 0.0
+ if t[i+k] == t[i]:
+ c1 = 0.0
+ else:
+ c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
+ if t[i+k+1] == t[i+1]:
+ c2 = 0.0
+ else:
+ c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
+ return c1 + c2
+
+
+def bspline(x, t, c, k):
+ n = len(t) - k - 1
+ assert (n >= k+1) and (len(c) >= n)
+ return sum(c[i] * B(x, k, i, t) for i in range(n))
+
+
+class NdBSpline0:
+ def __init__(self, t, c, k=3):
+ """Tensor product spline object.
+
+        The value at ``x = (x1, ..., xd)`` is the sum, over all index
+        combinations, of
+
+        c[i1, i2, ..., id] * B(x1, i1) * B(x2, i2) * ... * B(xd, id)
+
+ Parameters
+ ----------
+ c : ndarray, shape (n1, n2, ..., nd, ...)
+ b-spline coefficients
+ t : tuple of 1D ndarrays
+ knot vectors in directions 1, 2, ... d
+ ``len(t[i]) == n[i] + k + 1``
+ k : int or length-d tuple of integers
+ spline degrees.
+ """
+ ndim = len(t)
+ assert ndim <= len(c.shape)
+
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ self.k = tuple(operator.index(ki) for ki in k)
+ self.t = tuple(np.asarray(ti, dtype=float) for ti in t)
+ self.c = c
+
+ def __call__(self, x):
+ ndim = len(self.t)
+ # a single evaluation point: `x` is a 1D array_like, shape (ndim,)
+ assert len(x) == ndim
+
+ # get the indices in an ndim-dimensional vector
+ i = ['none', ]*ndim
+ for d in range(ndim):
+ td, xd = self.t[d], x[d]
+ k = self.k[d]
+
+ # find the index for x[d]
+ if xd == td[k]:
+ i[d] = k
+ else:
+ i[d] = np.searchsorted(td, xd) - 1
+ assert td[i[d]] <= xd <= td[i[d]+1]
+ assert i[d] >= k and i[d] < len(td) - k
+ i = tuple(i)
+
+ # iterate over the dimensions, form linear combinations of
+ # products B(x_1) * B(x_2) * ... B(x_N) of (k+1)**N b-splines
+ # which are non-zero at `i = (i_1, i_2, ..., i_N)`.
+ result = 0
+ iters = [range(i[d] - self.k[d], i[d] + 1) for d in range(ndim)]
+ for idx in itertools.product(*iters):
+ term = self.c[idx] * np.prod([B(x[d], self.k[d], idx[d], self.t[d])
+ for d in range(ndim)])
+ result += term
+ return result
+
+
+class TestNdBSpline:
+
+ def test_1D(self):
+ # test ndim=1 agrees with BSpline
+ rng = np.random.default_rng(12345)
+ n, k = 11, 3
+ n_tr = 7
+ t = np.sort(rng.uniform(size=n + k + 1))
+ c = rng.uniform(size=(n, n_tr))
+
+ b = BSpline(t, c, k)
+ nb = NdBSpline((t,), c, k)
+
+ xi = rng.uniform(size=21)
+ # NdBSpline expects xi.shape=(npts, ndim)
+ assert_allclose(nb(xi[:, None]),
+ b(xi), atol=1e-14)
+ assert nb(xi[:, None]).shape == (xi.shape[0], c.shape[1])
+
+ def make_2d_case(self):
+ # make a 2D separable spline
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ y_1 = x**3 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=3)
+
+ t2 = (spl.t, spl_1.t)
+ c2 = spl.c[:, None] * spl_1.c[None, :]
+
+ return t2, c2, 3
+
+ def make_2d_mixed(self):
+ # make a 2D separable spline w/ kx=3, ky=2
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ x = np.arange(5) + 1.5
+ y_1 = x**2 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=2)
+
+ t2 = (spl.t, spl_1.t)
+ c2 = spl.c[:, None] * spl_1.c[None, :]
+
+ return t2, c2, spl.k, spl_1.k
+
+ def test_2D_separable(self):
+ xi = [(1.5, 2.5), (2.5, 1), (0.5, 1.5)]
+ t2, c2, k = self.make_2d_case()
+ target = [x**3 * (y**3 + 2*y) for (x, y) in xi]
+
+ # sanity check: bspline2 gives the product as constructed
+ assert_allclose([bspline2(xy, t2, c2, k) for xy in xi],
+ target,
+ atol=1e-14)
+
+ # check evaluation on a 2D array: the 1D array of 2D points
+ bspl2 = NdBSpline(t2, c2, k=3)
+ assert bspl2(xi).shape == (len(xi), )
+ assert_allclose(bspl2(xi),
+ target, atol=1e-14)
+
+ # now check on a multidim xi
+ rng = np.random.default_rng(12345)
+ xi = rng.uniform(size=(4, 3, 2)) * 5
+ result = bspl2(xi)
+ assert result.shape == (4, 3)
+
+ # also check the values
+ x, y = xi.reshape((-1, 2)).T
+ assert_allclose(result.ravel(),
+ x**3 * (y**3 + 2*y), atol=1e-14)
+
+ def test_2D_separable_2(self):
+ # test `c` with trailing dimensions, i.e. c.ndim > ndim
+ ndim = 2
+ xi = [(1.5, 2.5), (2.5, 1), (0.5, 1.5)]
+ target = [x**3 * (y**3 + 2*y) for (x, y) in xi]
+
+ t2, c2, k = self.make_2d_case()
+        c2_4 = np.dstack((c2, c2, c2, c2))  # c2_4.shape = (6, 6, 4)
+
+ xy = (1.5, 2.5)
+ bspl2_4 = NdBSpline(t2, c2_4, k=3)
+ result = bspl2_4(xy)
+ val_single = NdBSpline(t2, c2, k)(xy)
+ assert result.shape == (4,)
+ assert_allclose(result,
+ [val_single, ]*4, atol=1e-14)
+
+        # now try the array xi: the output shape is (3, 4), where 3 is the
+        # number of points in xi and 4 is the trailing dimension of c
+ assert bspl2_4(xi).shape == np.shape(xi)[:-1] + bspl2_4.c.shape[ndim:]
+ assert_allclose(bspl2_4(xi) - np.asarray(target)[:, None],
+ 0, atol=5e-14)
+
+ # two trailing dimensions
+ c2_22 = c2_4.reshape((6, 6, 2, 2))
+ bspl2_22 = NdBSpline(t2, c2_22, k=3)
+
+ result = bspl2_22(xy)
+ assert result.shape == (2, 2)
+ assert_allclose(result,
+ [[val_single, val_single],
+ [val_single, val_single]], atol=1e-14)
+
+        # now try the array xi: the output shape is (3, 2, 2),
+        # for 3 points in xi and c trailing dimensions of (2, 2)
+ assert (bspl2_22(xi).shape ==
+ np.shape(xi)[:-1] + bspl2_22.c.shape[ndim:])
+ assert_allclose(bspl2_22(xi) - np.asarray(target)[:, None, None],
+ 0, atol=5e-14)
+
+ def test_2D_random(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size-k-1, ty.size-k-1))
+
+ spl = NdBSpline((tx, ty), c, k=k)
+
+ xi = (1., 1.)
+ assert_allclose(spl(xi),
+ bspline2(xi, (tx, ty), c, k), atol=1e-14)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+ assert_allclose(spl(xi),
+ [bspline2(xy, (tx, ty), c, k) for xy in xi],
+ atol=1e-14)
+
+ def test_2D_mixed(self):
+ t2, c2, kx, ky = self.make_2d_mixed()
+ xi = [(1.4, 4.5), (2.5, 2.4), (4.5, 3.5)]
+ target = [x**3 * (y**2 + 2*y) for (x, y) in xi]
+ bspl2 = NdBSpline(t2, c2, k=(kx, ky))
+ assert bspl2(xi).shape == (len(xi), )
+ assert_allclose(bspl2(xi),
+ target, atol=1e-14)
+
+ def test_2D_derivative(self):
+ t2, c2, kx, ky = self.make_2d_mixed()
+ xi = [(1.4, 4.5), (2.5, 2.4), (4.5, 3.5)]
+ bspl2 = NdBSpline(t2, c2, k=(kx, ky))
+
+ der = bspl2(xi, nu=(1, 0))
+ assert_allclose(der,
+ [3*x**2 * (y**2 + 2*y) for x, y in xi], atol=1e-14)
+
+ der = bspl2(xi, nu=(1, 1))
+ assert_allclose(der,
+ [3*x**2 * (2*y + 2) for x, y in xi], atol=1e-14)
+
+ der = bspl2(xi, nu=(0, 0))
+ assert_allclose(der,
+ [x**3 * (y**2 + 2*y) for x, y in xi], atol=1e-14)
+
+ with assert_raises(ValueError):
+ # all(nu >= 0)
+ der = bspl2(xi, nu=(-1, 0))
+
+ with assert_raises(ValueError):
+ # len(nu) == ndim
+ der = bspl2(xi, nu=(-1, 0, 1))
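+
+ def _derivative_fd_sketch(self):
+ # A hedged sketch, not collected by pytest: cross-check the analytic
+ # partial derivative requested via `nu` against a central finite
+ # difference.
+ t2, c2, kx, ky = self.make_2d_mixed()
+ bspl2 = NdBSpline(t2, c2, k=(kx, ky))
+ x0, y0, h = 2.5, 2.4, 1e-6
+ fd = (bspl2((x0 + h, y0)) - bspl2((x0 - h, y0))) / (2*h)
+ assert_allclose(bspl2((x0, y0), nu=(1, 0)), fd, atol=1e-5)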
+
+ def test_2D_mixed_random(self):
+ rng = np.random.default_rng(12345)
+ kx, ky = 2, 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size - kx - 1, ty.size - ky - 1))
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+
+ bspl2 = NdBSpline((tx, ty), c, k=(kx, ky))
+ bspl2_0 = NdBSpline0((tx, ty), c, k=(kx, ky))
+
+ assert_allclose(bspl2(xi),
+ [bspl2_0(xp) for xp in xi], atol=1e-14)
+
+ def test_tx_neq_ty(self):
+ # 2D separable spline w/ len(tx) != len(ty)
+ x = np.arange(6)
+ y = np.arange(7) + 1.5
+
+ spl_x = make_interp_spline(x, x**3, k=3)
+ spl_y = make_interp_spline(y, y**2 + 2*y, k=3)
+ cc = spl_x.c[:, None] * spl_y.c[None, :]
+ bspl = NdBSpline((spl_x.t, spl_y.t), cc, (spl_x.k, spl_y.k))
+
+ values = (x**3)[:, None] * (y**2 + 2*y)[None, :]
+ rgi = RegularGridInterpolator((x, y), values)
+
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+ bxi = bspl(xi)
+
+ assert not np.isnan(bxi).any()
+ assert_allclose(bxi, rgi(xi), atol=1e-14)
+ assert_allclose(bxi.reshape(values.shape), values, atol=1e-14)
+
+ def make_3d_case(self):
+ # make a 3D separable spline
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ y_1 = x**3 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=3)
+
+ y_2 = x**3 + 3*x + 1
+ spl_2 = make_interp_spline(x, y_2, k=3)
+
+ t2 = (spl.t, spl_1.t, spl_2.t)
+ c2 = (spl.c[:, None, None] *
+ spl_1.c[None, :, None] *
+ spl_2.c[None, None, :])
+
+ return t2, c2, 3
+
+ def test_3D_separable(self):
+ rng = np.random.default_rng(12345)
+ x, y, z = rng.uniform(size=(3, 11)) * 5
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ xi = list(zip(x, y, z))
+ result = bspl3(xi)
+ assert result.shape == (11,)
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_3D_derivative(self):
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+ rng = np.random.default_rng(12345)
+ x, y, z = rng.uniform(size=(3, 11)) * 5
+ xi = list(zip(x, y, z))
+
+ assert_allclose(bspl3(xi, nu=(1, 0, 0)),
+ 3*x**2 * (y**3 + 2*y) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 0, 0)),
+ 6*x * (y**3 + 2*y) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 0)),
+ 6*x * (3*y**2 + 2) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 3)),
+ 6*x * (3*y**2 + 2) * (6), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 4)),
+ np.zeros(len(xi)), atol=1e-14)
+
+ def test_3D_random(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ tz = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1))
+
+ spl = NdBSpline((tx, ty, tz), c, k=k)
+ spl_0 = NdBSpline0((tx, ty, tz), c, k=k)
+
+ xi = (1., 1., 1)
+ assert_allclose(spl(xi), spl_0(xi), atol=1e-14)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1],
+ [0.9, 1.4, 1.9]]
+ assert_allclose(spl(xi), [spl_0(xp) for xp in xi], atol=1e-14)
+
+ def test_3D_random_complex(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ tz = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = (rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1)) +
+ rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1))*1j)
+
+ spl = NdBSpline((tx, ty, tz), c, k=k)
+ spl_re = NdBSpline((tx, ty, tz), c.real, k=k)
+ spl_im = NdBSpline((tx, ty, tz), c.imag, k=k)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1],
+ [0.9, 1.4, 1.9]]
+ assert_allclose(spl(xi),
+ spl_re(xi) + 1j*spl_im(xi), atol=1e-14)
+
+ @pytest.mark.parametrize('cls_extrap', [None, True])
+ @pytest.mark.parametrize('call_extrap', [None, True])
+ def test_extrapolate_3D_separable(self, cls_extrap, call_extrap):
+ # test that extrapolate=True does extrapolate
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3, extrapolate=cls_extrap)
+
+ # evaluate out of bounds
+ x, y, z = [-2, -1, 7], [-3, -0.5, 6.5], [-1, -1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = list(zip(x, y, z))
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=call_extrap)
+ assert_allclose(result, target, atol=1e-14)
+
+ @pytest.mark.parametrize('extrap', [(False, True), (True, None)])
+ def test_extrapolate_3D_separable_2(self, extrap):
+ # test that __call__(..., extrapolate=None) defers to self.extrapolate,
+ # while an explicit extrapolate value supersedes self.extrapolate
+ t3, c3, k = self.make_3d_case()
+ cls_extrap, call_extrap = extrap
+ bspl3 = NdBSpline(t3, c3, k=3, extrapolate=cls_extrap)
+
+ # evaluate out of bounds
+ x, y, z = [-2, -1, 7], [-3, -0.5, 6.5], [-1, -1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = list(zip(x, y, z))
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=call_extrap)
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_extrapolate_false_3D_separable(self):
+ # test that extrapolate=False produces nans for out-of-bounds values
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ # evaluate out of bounds and inside
+ x, y, z = [-2, 1, 7], [-3, 0.5, 6.5], [-1, 1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = list(zip(x, y, z))
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=False)
+ assert np.isnan(result[0])
+ assert np.isnan(result[-1])
+ assert_allclose(result[1:-1], target[1:-1], atol=1e-14)
+
+ def test_x_nan_3D(self):
+ # test that spline(nan) is nan
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ # evaluate out of bounds and inside
+ x = np.asarray([-2, 3, np.nan, 1, 2, 7, np.nan])
+ y = np.asarray([-3, 3.5, 1, np.nan, 3, 6.5, 6.5])
+ z = np.asarray([-1, 3.5, 2, 3, np.nan, 7.5, 7.5])
+ xi = list(zip(x, y, z))
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+ mask = np.isnan(x) | np.isnan(y) | np.isnan(z)
+ target[mask] = np.nan
+
+ result = bspl3(xi)
+ assert np.isnan(result[mask]).all()
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_non_c_contiguous(self):
+ # check that non-C-contiguous inputs are OK
+ rng = np.random.default_rng(12345)
+ kx, ky = 3, 3
+ tx = np.sort(rng.uniform(low=0, high=4, size=16))
+ tx = np.r_[(tx[0],)*kx, tx, (tx[-1],)*kx]
+ ty = np.sort(rng.uniform(low=0, high=4, size=16))
+ ty = np.r_[(ty[0],)*ky, ty, (ty[-1],)*ky]
+
+ assert not tx[::2].flags.c_contiguous
+ assert not ty[::2].flags.c_contiguous
+
+ c = rng.uniform(size=(tx.size//2 - kx - 1, ty.size//2 - ky - 1))
+ c = c.T
+ assert not c.flags.c_contiguous
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+
+ bspl2 = NdBSpline((tx[::2], ty[::2]), c, k=(kx, ky))
+ bspl2_0 = NdBSpline0((tx[::2], ty[::2]), c, k=(kx, ky))
+
+ assert_allclose(bspl2(xi),
+ [bspl2_0(xp) for xp in xi], atol=1e-14)
+
+ def test_readonly(self):
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ for i in range(3):
+ t3[i].flags.writeable = False
+ c3.flags.writeable = False
+
+ bspl3_ = NdBSpline(t3, c3, k=3)
+
+ assert bspl3((1, 2, 3)) == bspl3_((1, 2, 3))
+
+ def test_design_matrix(self):
+ t3, c3, k = self.make_3d_case()
+
+ xi = np.asarray([[1, 2, 3], [4, 5, 6]])
+ dm = NdBSpline(t3, c3, k).design_matrix(xi, t3, k)
+ dm1 = NdBSpline.design_matrix(xi, t3, [k, k, k])
+ assert dm.shape[0] == xi.shape[0]
+ assert_allclose(dm.todense(), dm1.todense(), atol=1e-16)
+
+ with assert_raises(ValueError):
+ NdBSpline.design_matrix([1, 2, 3], t3, [k]*3)
+
+ with assert_raises(ValueError, match="Data and knots*"):
+ NdBSpline.design_matrix([[1, 2]], t3, [k]*3)
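+
+ def _design_matrix_roundtrip_sketch(self):
+ # A hedged sketch, not collected by pytest: each row of the design
+ # matrix holds the b-spline basis values at one point, so the product
+ # with the raveled coefficients should reproduce the spline
+ # evaluation itself.
+ t3, c3, k = self.make_3d_case()
+ xi = np.asarray([[1, 2, 3], [4, 5, 6]])
+ dm = NdBSpline.design_matrix(xi, t3, k)
+ assert_allclose(dm @ c3.ravel(), NdBSpline(t3, c3, k)(xi), atol=1e-14)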
+
+
+class TestMakeND:
+ def test_2D_separable_simple(self):
+ x = np.arange(6)
+ y = np.arange(6) + 0.5
+ values = x[:, None]**3 * (y**3 + 2*y)[None, :]
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ bspl = make_ndbspl((x, y), values, k=1)
+ assert_allclose(bspl(xi), values.ravel(), atol=1e-15)
+
+ # test the coefficients vs outer product of 1D coefficients
+ spl_x = make_interp_spline(x, x**3, k=1)
+ spl_y = make_interp_spline(y, y**3 + 2*y, k=1)
+ cc = spl_x.c[:, None] * spl_y.c[None, :]
+ assert_allclose(cc, bspl.c, atol=1e-11, rtol=0)
+
+ # test against RGI
+ from scipy.interpolate import RegularGridInterpolator as RGI
+ rgi = RGI((x, y), values, method='linear')
+ assert_allclose(rgi(xi), bspl(xi), atol=1e-14)
+
+ def test_2D_separable_trailing_dims(self):
+ # test `c` with trailing dimensions, i.e. c.ndim > ndim
+ x = np.arange(6)
+ y = np.arange(6)
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ # make values4.shape = (6, 6, 4)
+ values = x[:, None]**3 * (y**3 + 2*y)[None, :]
+ values4 = np.dstack((values, values, values, values))
+ bspl = make_ndbspl((x, y), values4, k=3, solver=ssl.spsolve)
+
+ result = bspl(xi)
+ target = np.dstack((values, values, values, values))
+ assert result.shape == (36, 4)
+ assert_allclose(result.reshape(6, 6, 4),
+ target, atol=1e-14)
+
+ # now two trailing dimensions
+ values22 = values4.reshape((6, 6, 2, 2))
+ bspl = make_ndbspl((x, y), values22, k=3, solver=ssl.spsolve)
+
+ result = bspl(xi)
+ assert result.shape == (36, 2, 2)
+ assert_allclose(result.reshape(6, 6, 2, 2),
+ target.reshape((6, 6, 2, 2)), atol=1e-14)
+
+ @pytest.mark.parametrize('k', [(3, 3), (1, 1), (3, 1), (1, 3), (3, 5)])
+ def test_2D_mixed(self, k):
+ # make a 2D separable spline w/ len(tx) != len(ty)
+ x = np.arange(6)
+ y = np.arange(7) + 1.5
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ values = (x**3)[:, None] * (y**2 + 2*y)[None, :]
+ bspl = make_ndbspl((x, y), values, k=k, solver=ssl.spsolve)
+ assert_allclose(bspl(xi), values.ravel(), atol=1e-15)
+
+ def _get_sample_2d_data(self):
+ # from test_rgi.py::TestInterpN
+ x = np.array([.5, 2., 3., 4., 5.5, 6.])
+ y = np.array([.5, 2., 3., 4., 5.5, 6.])
+ z = np.array(
+ [
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 3, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ ]
+ )
+ return x, y, z
+
+ def test_2D_vs_RGI_linear(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=1)
+ rgi = RegularGridInterpolator((x, y), z, method='linear')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ def test_2D_vs_RGI_cubic(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=3, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ @pytest.mark.parametrize('solver', [ssl.gmres, ssl.gcrotmk])
+ def test_2D_vs_RGI_cubic_iterative(self, solver):
+ # same as `test_2D_vs_RGI_cubic`, only with an iterative solver.
+ # Note the explicit `rtol` solver argument, needed to reach the
+ # target accuracy of 1e-14 (the relation between the solver's
+ # atol/rtol and the accuracy of the final result is not direct and
+ # requires experimentation).
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=3, solver=solver, rtol=1e-6)
+ rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
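+
+ def _solver_rtol_sketch(self):
+ # A hedged sketch, not collected by pytest: extra keyword arguments
+ # such as `rtol` above are forwarded to the iterative solver, so the
+ # reachable accuracy of the fit tracks the solver tolerance.
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=3, solver=ssl.gmres, rtol=1e-6)
+ rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
+ xi = np.array([[1.0, 1.0], [2.3, 3.3]])
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-10)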
+
+ def test_2D_vs_RGI_quintic(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=5, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y), z, method='quintic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ @pytest.mark.parametrize(
+ 'k, meth', [(1, 'linear'), (3, 'cubic_legacy'), (5, 'quintic_legacy')]
+ )
+ def test_3D_random_vs_RGI(self, k, meth):
+ rndm = np.random.default_rng(123456)
+ x = np.cumsum(rndm.uniform(size=6))
+ y = np.cumsum(rndm.uniform(size=7))
+ z = np.cumsum(rndm.uniform(size=8))
+ values = rndm.uniform(size=(6, 7, 8))
+
+ bspl = make_ndbspl((x, y, z), values, k=k, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y, z), values, method=meth)
+
+ # use the seeded generator defined above, not the global RNG
+ xi = rndm.uniform(low=0.7, high=2.1, size=(11, 3))
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ def test_solver_err_not_converged(self):
+ x, y, z = self._get_sample_2d_data()
+ solver_args = {'maxiter': 1}
+ with assert_raises(ValueError, match='solver'):
+ make_ndbspl((x, y), z, k=3, **solver_args)
+
+ with assert_raises(ValueError, match='solver'):
+ make_ndbspl((x, y), np.dstack((z, z)), k=3, **solver_args)
+
+
+class TestFpchec:
+ # https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
+
+ def test_1D_x_t(self):
+ k = 1
+ t = np.arange(12).reshape(2, 6)
+ x = np.arange(12)
+
+ with pytest.raises(ValueError, match="1D sequence"):
+ _b.fpcheck(x, t, k)
+
+ with pytest.raises(ValueError, match="1D sequence"):
+ _b.fpcheck(t, x, k)
+
+ def test_condition_1(self):
+ # c 1) k+1 <= n-k-1 <= m
+ k = 3
+ n = 2*(k + 1) - 1 # not OK
+ m = n + 11 # OK
+ t = np.arange(n)
+ x = np.arange(m)
+
+ assert dfitpack.fpchec(x, t, k) == 10
+ with pytest.raises(ValueError, match="Need k+1*"):
+ _b.fpcheck(x, t, k)
+
+ n = 2*(k+1) + 1 # OK
+ m = n - k - 2 # not OK
+ t = np.arange(n)
+ x = np.arange(m)
+
+ assert dfitpack.fpchec(x, t, k) == 10
+ with pytest.raises(ValueError, match="Need k+1*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_2(self):
+ # c 2) t(1) <= t(2) <= ... <= t(k+1)
+ # c t(n-k) <= t(n-k+1) <= ... <= t(n)
+ k = 3
+ t = [0]*(k+1) + [2] + [5]*(k+1) # this is OK
+ x = [1, 2, 3, 4, 4.5]
+
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None # does not raise
+
+ tt = t.copy()
+ tt[-1] = tt[0] # not OK
+ assert dfitpack.fpchec(x, tt, k) == 20
+ with pytest.raises(ValueError, match="Last k knots*"):
+ _b.fpcheck(x, tt, k)
+
+ tt = t.copy()
+ tt[0] = tt[-1] # not OK
+ assert dfitpack.fpchec(x, tt, k) == 20
+ with pytest.raises(ValueError, match="First k knots*"):
+ _b.fpcheck(x, tt, k)
+
+ def test_condition_3(self):
+ # c 3) t(k+1) < t(k+2) < ... < t(n-k)
+ k = 3
+ t = [0]*(k+1) + [2, 3] + [5]*(k+1) # this is OK
+ x = [1, 2, 3, 3.5, 4, 4.5]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ t = [0]*(k+1) + [2, 2] + [5]*(k+1) # this is not OK
+ assert dfitpack.fpchec(x, t, k) == 30
+ with pytest.raises(ValueError, match="Internal knots*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_4(self):
+ # c 4) t(k+1) <= x(i) <= t(n-k)
+ # NB: FITPACK's fpchec only checks x[0] & x[-1], so we follow.
+ k = 3
+ t = [0]*(k+1) + [5]*(k+1)
+ x = [1, 2, 3, 3.5, 4, 4.5] # this is OK
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ xx = x.copy()
+ xx[0] = t[0] # still OK
+ assert dfitpack.fpchec(xx, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ xx = x.copy()
+ xx[0] = t[0] - 1 # not OK
+ assert dfitpack.fpchec(xx, t, k) == 40
+ with pytest.raises(ValueError, match="Out of bounds*"):
+ _b.fpcheck(xx, t, k)
+
+ xx = x.copy()
+ xx[-1] = t[-1] + 1 # not OK
+ assert dfitpack.fpchec(xx, t, k) == 40
+ with pytest.raises(ValueError, match="Out of bounds*"):
+ _b.fpcheck(xx, t, k)
+
+ # ### Test the S-W condition (no 5)
+ # c 5) the conditions specified by schoenberg and whitney must hold
+ # c for at least one subset of data points, i.e. there must be a
+ # c subset of data points y(j) such that
+ # c t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
+ def test_condition_5_x1xm(self):
+ # x(1).ge.t(k2) .or. x(m).le.t(nk1)
+ k = 1
+ t = [0, 0, 1, 2, 2]
+ x = [1.1, 1.1, 1.1]
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ x = [0.5, 0.5, 0.5]
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
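+
+ # A hedged pure-Python sketch of the Schoenberg-Whitney check above
+ # (our own greedy formulation, not FITPACK's fpchec verbatim): walk the
+ # sorted data once, assigning to each knot interval (t[j], t[j+k+1])
+ # the first still-unused point strictly inside it.
+ @staticmethod
+ def _sw_holds(x, t, k):
+ x = np.sort(np.asarray(x, dtype=float))
+ t = np.asarray(t, dtype=float)
+ i = 0
+ for j in range(len(t) - k - 1):
+ # points are sorted: anything at or below t[j] can never serve a
+ # later interval, and once past t[j+k+1] this interval is lost
+ while i < len(x) and not (t[j] < x[i] < t[j + k + 1]):
+ i += 1
+ if i == len(x):
+ return False
+ i += 1 # x[i] is taken as y_j; later intervals use later points
+ return True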
+
+ def test_condition_5_k1(self):
+ # special case nk3 (== n - k - 2) < 2
+ k = 1
+ t = [0, 0, 1, 1]
+ x = [0.5, 0.6]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ def test_condition_5_1(self):
+ # basically, no interval t[j]..t[j+k+1] may contain zero data points
+ k = 3
+ t = [0]*(k+1) + [2] + [5]*(k+1)
+ x = [3]*5
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ t = [0]*(k+1) + [2] + [5]*(k+1)
+ x = [1]*5
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_5_2(self):
+ # same as _5_1, only the empty interval is in the middle
+ k = 3
+ t = [0]*(k+1) + [2, 3] + [5]*(k+1)
+ x = [1.1]*5 + [4]
+
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ # and this one is OK
+ x = [1.1]*4 + [4, 4]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ def test_condition_5_3(self):
+ # similar to _5_2, covers a different failure branch
+ k = 1
+ t = [0, 0, 2, 3, 4, 5, 6, 7, 7]
+ x = [1, 1, 1, 5.2, 5.2, 5.2, 6.5]
+
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76178681de063e21fda5afedd4759248e8e19bb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py
@@ -0,0 +1,503 @@
+import itertools
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_,
+ assert_almost_equal, assert_array_almost_equal)
+from pytest import raises as assert_raises
+import pytest
+from scipy._lib._testutils import check_free_memory
+
+from scipy.interpolate import RectBivariateSpline
+
+from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
+ sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
+from scipy.interpolate.dfitpack import regrid_smth
+from scipy.interpolate._fitpack2 import dfitpack_int
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+def norm2(x):
+ return np.sqrt(np.dot(x.T, x))
+
+
+def f1(x, d=0):
+ """Derivatives of sin->cos->-sin->-cos."""
+ if d % 4 == 0:
+ return np.sin(x)
+ if d % 4 == 1:
+ return np.cos(x)
+ if d % 4 == 2:
+ return -np.sin(x)
+ if d % 4 == 3:
+ return -np.cos(x)
+
+
+def makepairs(x, y):
+ """Helper function to create an array of pairs of x and y."""
+ xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
+ return xy.T
+
+
+class TestSmokeTests:
+ """
+ Smoke tests (with a few asserts) for fitpack routines -- mostly
+ check that they are runnable
+ """
+ def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
+ xb=None, xe=None):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+
+ N = 20
+ # nodes, and points in between the nodes
+ x = np.linspace(a, b, N + 1)
+ x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
+ v = f1(x)
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0 / N
+ tol = 5 * h**(.75*(k-d))
+ if s > 0:
+ tol += 1e5*s
+ return tol
+
+ for k in range(1, 6):
+ tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
+ tt = tck[0][k:-k] if at_nodes else x1
+
+ for d in range(k+1):
+ tol = err_est(k, d)
+ err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
+ assert err < tol
+
+ def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
+ a, b, dx = 0, 2*np.pi, 0.2*np.pi
+ x = np.linspace(a, b, N+1) # nodes
+ v = np.sin(x)
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0 / N
+ tol = 5 * h**(.75*(k-d))
+ return tol
+
+ nk = []
+ for k in range(1, 6):
+ tck = splrep(x, v, s=0, per=per, k=k, xe=b)
+ nk.append([splint(ia, ib, tck), spalde(dx, tck)])
+
+ k = 1
+ for r in nk:
+ d = 0
+ for dr in r[1]:
+ tol = err_est(k, d)
+ assert_allclose(dr, f1(dx, d), atol=0, rtol=tol)
+ d = d+1
+ k = k+1
+
+ def test_smoke_splrep_splev(self):
+ self.check_1(s=1e-6)
+ self.check_1(b=1.5*np.pi)
+ self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)
+
+ @pytest.mark.parametrize('per', [0, 1])
+ @pytest.mark.parametrize('at_nodes', [True, False])
+ def test_smoke_splrep_splev_2(self, per, at_nodes):
+ self.check_1(per=per, at_nodes=at_nodes)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('per', [0, 1])
+ def test_smoke_splint_spalde(self, N, per):
+ self.check_2(per=per, N=N)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('per', [0, 1])
+ def test_smoke_splint_spalde_iaib(self, N, per):
+ self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)
+
+ def test_smoke_sproot(self):
+ # sproot is only implemented for k=3
+ a, b = 0.1, 15
+ x = np.linspace(a, b, 20)
+ v = np.sin(x)
+
+ for k in [1, 2, 4, 5]:
+ tck = splrep(x, v, s=0, per=0, k=k, xe=b)
+ with assert_raises(ValueError):
+ sproot(tck)
+
+ k = 3
+ tck = splrep(x, v, s=0, k=3)
+ roots = sproot(tck)
+ assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
+ assert_allclose(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
+ def test_smoke_splprep_splrep_splev(self, N, k):
+ a, b, dx = 0, 2.*np.pi, 0.2*np.pi
+ x = np.linspace(a, b, N+1) # nodes
+ v = np.sin(x)
+
+ tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+ uv = splev(dx, tckp)
+ err1 = abs(uv[1] - np.sin(uv[0]))
+ assert err1 < 1e-2
+
+ tck = splrep(x, v, s=0, per=0, k=k)
+ err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
+ assert err2 < 1e-2
+
+ # Derivatives of parametric cubic spline at u (first function)
+ if k == 3:
+ tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+ for d in range(1, k+1):
+ uv = splev(dx, tckp, d)
+
+ def test_smoke_bisplrep_bisplev(self):
+ xb, xe = 0, 2.*np.pi
+ yb, ye = 0, 2.*np.pi
+ kx, ky = 3, 3
+ Nx, Ny = 20, 20
+
+ def f2(x, y):
+ return np.sin(x+y)
+
+ x = np.linspace(xb, xe, Nx + 1)
+ y = np.linspace(yb, ye, Ny + 1)
+ xy = makepairs(x, y)
+ tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
+
+ tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
+ t2 = makepairs(tt[0], tt[1])
+ v1 = bisplev(tt[0], tt[1], tck)
+ v2 = f2(t2[0], t2[1])
+ v2.shape = len(tt[0]), len(tt[1])
+
+ assert norm2(np.ravel(v1 - v2)) < 1e-2
+
+
+class TestSplev:
+ def test_1d_shape(self):
+ x = [1,2,3,4,5]
+ y = [4,5,6,7,8]
+ tck = splrep(x, y)
+ z = splev([1], tck)
+ assert_equal(z.shape, (1,))
+ z = splev(1, tck)
+ assert_equal(z.shape, ())
+
+ def test_2d_shape(self):
+ x = [1, 2, 3, 4, 5]
+ y = [4, 5, 6, 7, 8]
+ tck = splrep(x, y)
+ t = np.array([[1.0, 1.5, 2.0, 2.5],
+ [3.0, 3.5, 4.0, 4.5]])
+ z = splev(t, tck)
+ z0 = splev(t[0], tck)
+ z1 = splev(t[1], tck)
+ assert_equal(z, np.vstack((z0, z1)))
+
+ def test_extrapolation_modes(self):
+ # test extrapolation modes
+ # * if ext=0, return the extrapolated value.
+ # * if ext=1, return 0
+ # * if ext=2, raise a ValueError
+ # * if ext=3, return the boundary value.
+ x = [1,2,3]
+ y = [0,2,4]
+ tck = splrep(x, y, k=1)
+
+ rstl = [[-2, 6], [0, 0], None, [0, 4]]
+ for ext in (0, 1, 3):
+ assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
+
+ assert_raises(ValueError, splev, [0, 4], tck, ext=2)
+
+
+class TestSplder:
+ def setup_method(self):
+ # non-uniform grid, just to make sure
+ x = np.linspace(0, 1, 100)**3
+ y = np.sin(20 * x)
+ self.spl = splrep(x, y)
+
+ # double check that knots are non-uniform
+ assert_(np.ptp(np.diff(self.spl[0])) > 0)
+
+ def test_inverse(self):
+ # Check that antiderivative + derivative is identity.
+ for n in range(5):
+ spl2 = splantider(self.spl, n)
+ spl3 = splder(spl2, n)
+ assert_allclose(self.spl[0], spl3[0])
+ assert_allclose(self.spl[1], spl3[1])
+ assert_equal(self.spl[2], spl3[2])
+
+ def test_splder_vs_splev(self):
+ # Check derivative vs. FITPACK
+
+ for n in range(3+1):
+ # Also extrapolation!
+ xx = np.linspace(-1, 2, 2000)
+ if n == 3:
+ # ... except that FITPACK extrapolates strangely for
+ # order 0, so let's not check that.
+ xx = xx[(xx >= 0) & (xx <= 1)]
+
+ dy = splev(xx, self.spl, n)
+ spl2 = splder(self.spl, n)
+ dy2 = splev(xx, spl2)
+ if n == 1:
+ assert_allclose(dy, dy2, rtol=2e-6)
+ else:
+ assert_allclose(dy, dy2)
+
+ def test_splantider_vs_splint(self):
+ # Check antiderivative vs. FITPACK
+ spl2 = splantider(self.spl)
+
+ # no extrapolation; splint assumes the function is zero outside
+ # the data range
+ xx = np.linspace(0, 1, 20)
+
+ for x1 in xx:
+ for x2 in xx:
+ y1 = splint(x1, x2, self.spl)
+ y2 = splev(x2, spl2) - splev(x1, spl2)
+ assert_allclose(y1, y2)
+
+ def test_order0_diff(self):
+ assert_raises(ValueError, splder, self.spl, 4)
+
+ def test_kink(self):
+ # Should refuse to differentiate splines with kinks
+
+ spl2 = insert(0.5, self.spl, m=2)
+ splder(spl2, 2) # Should work
+ assert_raises(ValueError, splder, spl2, 3)
+
+ spl2 = insert(0.5, self.spl, m=3)
+ splder(spl2, 1) # Should work
+ assert_raises(ValueError, splder, spl2, 2)
+
+ spl2 = insert(0.5, self.spl, m=4)
+ assert_raises(ValueError, splder, spl2, 1)
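+
+ def _insert_multiplicity_sketch(self):
+ # A hedged sketch, not collected by pytest: `insert(x, tck, m=m)` adds
+ # the knot x with multiplicity m, and it is this multiplicity that
+ # lowers the smoothness splder checks for (a degree-k spline is only
+ # C^(k-m) at a knot of multiplicity m).
+ spl2 = insert(0.5, self.spl, m=2)
+ assert np.count_nonzero(spl2[0] == 0.5) == 2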
+
+ def test_multidim(self):
+ # c can have trailing dims
+ for n in range(3):
+ t, c, k = self.spl
+ c2 = np.c_[c, c, c]
+ c2 = np.dstack((c2, c2))
+
+ spl2 = splantider((t, c2, k), n)
+ spl3 = splder(spl2, n)
+
+ assert_allclose(t, spl3[0])
+ assert_allclose(c2, spl3[1])
+ assert_equal(k, spl3[2])
+
+
+class TestSplint:
+ def test_len_c(self):
+ n, k = 7, 3
+ x = np.arange(n)
+ y = x**3
+ t, c, k = splrep(x, y, s=0)
+
+ # note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
+ assert len(t) == len(c) == n + 2*(k-1)
+
+ # integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
+ res = splint(0, 6, (t, c, k))
+ assert_allclose(res, 6**4 / 4, atol=1e-15)
+
+ # check that the coefficients past len(t) - k - 1 are ignored
+ c0 = c.copy()
+ c0[len(t)-k-1:] = np.nan
+ res0 = splint(0, 6, (t, c0, k))
+ assert_allclose(res0, 6**4 / 4, atol=1e-15)
+
+ # however, all other coefficients *are* used
+ c0[6] = np.nan
+ assert np.isnan(splint(0, 6, (t, c0, k)))
+
+ # check that the coefficient array can have length `len(t) - k - 1`
+ c1 = c[:len(t) - k - 1]
+ res1 = splint(0, 6, (t, c1, k))
+ assert_allclose(res1, 6**4 / 4, atol=1e-15)
+
+ # however, shorter c arrays raise. The error from f2py is a
+ # `dfitpack.error`, which is an Exception but not a ValueError etc.
+ with assert_raises(Exception, match=r">=n-k-1"):
+ splint(0, 1, (np.ones(10), np.ones(5), 3))
+
+
+class TestBisplrep:
+ def test_overflow(self):
+ from numpy.lib.stride_tricks import as_strided
+ if dfitpack_int.itemsize == 8:
+ size = 1500000**2
+ else:
+ size = 400**2
+ # Don't allocate a real array, as it's very big, but rely on the
+ # fact that it is never referenced
+ x = as_strided(np.zeros(()), shape=(size,))
+ assert_raises(OverflowError, bisplrep, x, x, x, w=x,
+ xb=0, xe=1, yb=0, ye=1, s=0)
+
+ def test_regression_1310(self):
+ # Regression test for gh-1310
+ with np.load(data_file('bug-1310.npz')) as loaded_data:
+ data = loaded_data['data']
+
+ # Shouldn't crash -- the input data triggers work array sizes
+ # that previously caused some data not to be aligned on
+ # sizeof(double) boundaries in memory, which made the Fortran
+ # code crash when compiled with -O3
+ bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
+ full_output=True)
+
+ @pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
+ def test_ilp64_bisplrep(self):
+ check_free_memory(28000) # VM size, doesn't actually use the pages
+ x = np.linspace(0, 1, 400)
+ y = np.linspace(0, 1, 400)
+ x, y = np.meshgrid(x, y)
+ z = np.zeros_like(x)
+ tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
+ assert_allclose(bisplev(0.5, 0.5, tck), 0.0)
+
+
+def test_dblint():
+ # Basic test to see it runs and gives the correct result on a trivial
+ # problem. Note that `dblint` is not exposed in the interpolate namespace.
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ xx, yy = np.meshgrid(x, y)
+ rect = RectBivariateSpline(x, y, 4 * xx * yy)
+ tck = list(rect.tck)
+ tck.extend(rect.degrees)
+
+ assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
+ assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
+ assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
+ assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
+
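+
+# A hedged sketch (assuming the public RectBivariateSpline.integral method,
+# which wraps the same FITPACK routine): `dblint` on the flattened tck above
+# agrees with the object-oriented integral.
+def _dblint_vs_integral_sketch():
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ xx, yy = np.meshgrid(x, y)
+ rect = RectBivariateSpline(x, y, 4 * xx * yy)
+ tck = list(rect.tck) + list(rect.degrees)
+ assert_almost_equal(dblint(0, 1, 0, 1, tck), rect.integral(0, 1, 0, 1))
+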
+
+def test_splev_der_k():
+ # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
+ # for x outside of knot range
+
+ # test case from gh-2188
+ tck = (np.array([0., 0., 2.5, 2.5]),
+ np.array([-1.56679978, 2.43995873, 0., 0.]),
+ 1)
+ t, c, k = tck
+ x = np.array([-3, 0, 2.5, 3])
+
+ # an explicit form of the linear spline
+ assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
+ assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
+
+ # now check a random spline vs splder
+ np.random.seed(1234)
+ x = np.sort(np.random.random(30))
+ y = np.random.random(30)
+ t, c, k = splrep(x, y)
+
+ x = [t[0] - 1., t[-1] + 1.]
+ tck2 = splder((t, c, k), k)
+ assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
+
+
+def test_splprep_segfault():
+ # regression test for gh-3847: splprep segfaults if knots are specified
+ # for task=-1
+ t = np.arange(0, 1.1, 0.1)
+ x = np.sin(2*np.pi*t)
+ y = np.cos(2*np.pi*t)
+ tck, u = splprep([x, y], s=0)
+
+ uknots = tck[0] # using the knots from the previous fitting
+ tck, u = splprep([x, y], task=-1, t=uknots) # here is the crash
+
+
+def test_bisplev_integer_overflow():
+ np.random.seed(1)
+
+ x = np.linspace(0, 1, 11)
+ y = x
+ z = np.random.randn(11, 11).ravel()
+ kx = 1
+ ky = 1
+
+ nx, tx, ny, ty, c, fp, ier = regrid_smth(
+ x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
+ tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
+
+ xp = np.zeros([2621440])
+ yp = np.zeros([2621440])
+
+ assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
+
+
+@pytest.mark.xslow
+def test_gh_1766():
+ # this should fail gracefully instead of segfaulting (int overflow)
+ size = 22
+ kx, ky = 3, 3
+ def f2(x, y):
+ return np.sin(x+y)
+
+ x = np.linspace(0, 10, size)
+ y = np.linspace(50, 700, size)
+ xy = makepairs(x, y)
+ tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
+ # a knot-vector size this large used to segfault due to int
+ # overflow; it should now raise a MemoryError gracefully
+ tx_ty_size = 500000
+ tck[0] = np.arange(tx_ty_size)
+ tck[1] = np.arange(tx_ty_size) * 4
+ tt_0 = np.arange(50)
+ tt_1 = np.arange(50) * 3
+ with pytest.raises(MemoryError):
+ bisplev(tt_0, tt_1, tck, 1, 1)
+
+
+def test_spalde_scalar_input():
+ # Ticket #629
+ x = np.linspace(0, 10)
+ y = x**3
+ tck = splrep(x, y, k=3, t=[5])
+ res = spalde(np.float64(1), tck)
+ des = np.array([1., 3., 6., 6.])
+ assert_almost_equal(res, des)
+
+
+def test_spalde_nc():
+ # regression test for https://github.com/scipy/scipy/issues/19002
+ # here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
+ x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
+ -1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
+ dtype="float")
+ t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
+ -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
+ 5.0, 6.0, 6.0, 6.0, 6.0]
+ c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+ k = 3
+
+ res = spalde(x, (t, c, k))
+ res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
+ assert_allclose(res, res_splev.T, atol=1e-15)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ebf0d26e8795c5230b4336a02126a8e1e53096
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py
@@ -0,0 +1,1355 @@
+# Created by Pearu Peterson, June 2003
+import itertools
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+
+from numpy import array, diff, linspace, meshgrid, ones, pi, shape
+from scipy.interpolate._fitpack_py import bisplrep, bisplev, splrep, spalde
+from scipy.interpolate._fitpack2 import (UnivariateSpline,
+ LSQUnivariateSpline, InterpolatedUnivariateSpline,
+ LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
+ LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
+ RectSphereBivariateSpline)
+
+
+class TestUnivariateSpline:
+ def test_linear_constant(self):
+ x = [1,2,3]
+ y = [3,3,3]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
+
+ def test_preserve_shape(self):
+ x = [1, 2, 3]
+ y = [0, 2, 4]
+ lut = UnivariateSpline(x, y, k=1)
+ arg = 2
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+ arg = [1.5, 2, 2.5]
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+
+ def test_linear_1d(self):
+ x = [1,2,3]
+ y = [0,2,4]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[0,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
+
+ def test_subclassing(self):
+ # See #731
+
+ class ZeroSpline(UnivariateSpline):
+ def __call__(self, x):
+ return 0*array(x)
+
+ sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
+ assert_array_equal(sp([1.5, 2.5]), [0., 0.])
+
+ def test_empty_input(self):
+ # Test whether empty input returns an empty output. Ticket 1014
+ x = [1,3,5,7,9]
+ y = [0,4,9,12,21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_array_equal(spl([]), array([]))
+
+ def test_roots(self):
+ x = [1, 3, 5, 7, 9]
+ y = [0, 4, 9, 12, 21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_almost_equal(spl.roots()[0], 1.050290639101332)
+
+ def test_roots_length(self): # for gh18335
+ x = np.linspace(0, 50 * np.pi, 1000)
+ y = np.cos(x)
+ spl = UnivariateSpline(x, y, s=0)
+ assert_equal(len(spl.roots()), 50)
+
+ def test_derivatives(self):
+ x = [1, 3, 5, 7, 9]
+ y = [0, 4, 9, 12, 21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_almost_equal(spl.derivatives(3.5),
+ [5.5152902, 1.7146577, -0.1830357, 0.3125])
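+
+ def _derivatives_cross_check_sketch(self):
+ # A hedged sketch, not collected by pytest: derivatives(x) stacks the
+ # 0th..kth derivative values at x, and each entry can also be obtained
+ # from the new spline returned by .derivative(n).
+ x = [1, 3, 5, 7, 9]
+ y = [0, 4, 9, 12, 21]
+ spl = UnivariateSpline(x, y, k=3)
+ ders = spl.derivatives(3.5)
+ assert_allclose(ders[0], spl(3.5))
+ for n in range(1, 4):
+ assert_allclose(ders[n], spl.derivative(n)(3.5))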
+
+ def test_derivatives_2(self):
+ x = np.arange(8)
+ y = x**3 + 2.*x**2
+
+ tck = splrep(x, y, s=0)
+ ders = spalde(3, tck)
+ assert_allclose(ders, [45., # 3**3 + 2*(3)**2
+ 39., # 3*(3)**2 + 4*(3)
+ 22., # 6*(3) + 4
+ 6.], # 6*3**0
+ atol=1e-15)
+ spl = UnivariateSpline(x, y, s=0, k=3)
+ assert_allclose(spl.derivatives(3),
+ ders,
+ atol=1e-15)
+
+ def test_resize_regression(self):
+ """Regression test for #1375."""
+ x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
+ -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
+ 0.65016502, 1.]
+ y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
+ 0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
+ 0.62928599, 1.]
+ w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
+ 6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
+ 6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
+ 1.00000000e+12]
+ spl = UnivariateSpline(x=x, y=y, w=w, s=None)
+ desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
+ assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
+
+ def test_out_of_range_regression(self):
+ # Test different extrapolation modes. See ticket 3557
+ x = np.arange(5, dtype=float)
+ y = x**3
+
+ xp = linspace(-8, 13, 100)
+ xp_zeros = xp.copy()
+ xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
+ xp_clip = xp.copy()
+ xp_clip[xp_clip < x[0]] = x[0]
+ xp_clip[xp_clip > x[-1]] = x[-1]
+
+ for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
+ spl = cls(x=x, y=y)
+ for ext in [0, 'extrapolate']:
+ assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
+ for ext in [1, 'zeros']:
+ assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
+ for ext in [2, 'raise']:
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ for ext in [3, 'const']:
+ assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
+
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ spl = LSQUnivariateSpline(x, y, t)
+ assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
+ assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
+ assert_raises(ValueError, spl, xp, **dict(ext=2))
+ assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
+
+ # also make sure that unknown values for `ext` are caught early
+ for ext in [-1, 'unknown']:
+ spl = UnivariateSpline(x, y)
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, ext=ext))
+
+ def test_lsq_fpchec(self):
+ xs = np.arange(100) * 1.
+ ys = np.arange(100) * 1.
+ knots = np.linspace(0, 99, 10)
+ bbox = (-1, 101)
+ assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
+ bbox=bbox)
+
+ def test_derivative_and_antiderivative(self):
+ # Thin wrappers to splder/splantider, so light smoke test only.
+ x = np.linspace(0, 1, 70)**3
+ y = np.cos(x)
+
+ spl = UnivariateSpline(x, y, s=0)
+ spl2 = spl.antiderivative(2).derivative(2)
+ assert_allclose(spl(0.3), spl2(0.3))
+
+ spl2 = spl.antiderivative(1)
+ assert_allclose(spl2(0.6) - spl2(0.2),
+ spl.integral(0.2, 0.6))
+
+ def test_derivative_extrapolation(self):
+ # Regression test for gh-10195: for a const-extrapolation spline
+ # its derivative evaluates to zero for extrapolation
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 5]
+ f = UnivariateSpline(x_values, y_values, ext='const', k=3)
+
+ x = [-1, 0, -0.5, 9, 9.5, 10]
+ assert_allclose(f.derivative()(x), 0, atol=1e-15)
+
+ def test_integral_out_of_bounds(self):
+ # Regression test for gh-7906: .integral(a, b) is wrong if both
+ # a and b are out-of-bounds
+ x = np.linspace(0., 1., 7)
+ for ext in range(4):
+ f = UnivariateSpline(x, x, s=0, ext=ext)
+ for (a, b) in [(1, 1), (1, 5), (2, 5),
+ (0, 0), (-2, 0), (-2, -1)]:
+ assert_allclose(f.integral(a, b), 0, atol=1e-15)
+
+ def test_nan(self):
+ # bail out early if the input data contains nans
+ x = np.arange(10, dtype=float)
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(x, y, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ y_end = y[-1]
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, check_finite=True))
+ y[-1] = y_end # check valid y but invalid w
+ w[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_strictly_increasing_x(self):
+ # Test that x is required to be strictly increasing for
+ # UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
+ # but merely non-decreasing for UnivariateSpline if s>0
+ # and for LSQUnivariateSpline; see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0]
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
+ LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, s=0, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+
+ def test_increasing_x(self):
+ # Test that x is required to be increasing, see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0] - 1.0
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_invalid_input_for_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ UnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ UnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ UnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_invalid_input_for_interpolated_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ InterpolatedUnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ InterpolatedUnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_invalid_input_for_lsq_univariate_spline(self):
+
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ spl = UnivariateSpline(x_values, y_values, check_finite=True)
+ t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ LSQUnivariateSpline(x_values, y_values, t_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [1.0, 1.0, 1.0, 1.0]
+ LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ message = "Interior knots t must satisfy Schoenberg-Whitney conditions"
+ with assert_raises(ValueError, match=message) as info:
+ bbox = (100, -100)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQUnivariateSpline(x_values, y_values, t_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_array_like_input(self):
+ x_values = np.array([1, 2, 4, 6, 8.5])
+ y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
+ w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+ bbox = np.array([-100, 100])
+ # np.array input
+ spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
+ bbox=bbox)
+ # list input
+ spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
+ w=w_values.tolist(), bbox=bbox.tolist())
+
+ assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
+ spl2([0.1, 0.5, 0.9, 0.99]))
+
+ def test_fpknot_oob_crash(self):
+ # https://github.com/scipy/scipy/issues/3691
+ x = range(109)
+ y = [0., 0., 0., 0., 0., 10.9, 0., 11., 0.,
+ 0., 0., 10.9, 0., 0., 0., 0., 0., 0.,
+ 10.9, 0., 0., 0., 11., 0., 0., 0., 10.9,
+ 0., 0., 0., 10.5, 0., 0., 0., 10.7, 0.,
+ 0., 0., 11., 0., 0., 0., 0., 0., 0.,
+ 10.9, 0., 0., 10.7, 0., 0., 0., 10.6, 0.,
+ 0., 0., 10.5, 0., 0., 10.7, 0., 0., 10.5,
+ 0., 0., 11.5, 0., 0., 0., 10.7, 0., 0.,
+ 10.7, 0., 0., 10.9, 0., 0., 10.8, 0., 0.,
+ 0., 10.7, 0., 0., 10.6, 0., 0., 0., 10.4,
+ 0., 0., 10.6, 0., 0., 10.5, 0., 0., 0.,
+ 10.7, 0., 0., 0., 10.4, 0., 0., 0., 10.8, 0.]
+ with suppress_warnings() as sup:
+ r = sup.record(
+ UserWarning,
+ r"""
+The maximal number of iterations maxit \(set to 20 by the program\)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs\(fp-s\)/s < tol.""")
+ UnivariateSpline(x, y, k=1)
+ assert_equal(len(r), 1)
+
+
+class TestLSQBivariateSpline:
+ # NOTE: The systems in this test class are rank-deficient
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+ assert_equal(len(r), 1)
+
+ assert_almost_equal(lut(2,2), 3.)
+
+ def test_bilinearity(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,7,8,3,4,7,1,3,4]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+
+ tx, ty = lut.get_knots()
+ for xa, xb in zip(tx[:-1], tx[1:]):
+ for ya, yb in zip(ty[:-1], ty[1:]):
+ for t in [0.1, 0.5, 0.9]:
+ for s in [0.3, 0.4, 0.7]:
+ xp = xa*(1-t) + xb*t
+ yp = ya*(1-s) + yb*s
+ zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ + lut(xb, ya)*t*(1-s)
+ + lut(xa, yb)*(1-t)*s
+ + lut(xb, yb)*t*s)
+ assert_almost_equal(lut(xp,yp), zp)
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,8,8,8]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+ tx, ty = lut.get_knots()
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
+ trpz)
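+
+ def _trapz_cross_check_sketch(self):
+ # A hedged sketch, not collected by pytest: the hand-rolled 2D
+ # trapezoid sum used above is the composite trapezoid rule on the
+ # tensor grid, i.e. np.trapz applied along each axis in turn.
+ tx = np.array([1., 2., 4.])
+ ty = np.array([1., 2., 3.])
+ tz = np.arange(9.).reshape(3, 3)
+ trpz = .25*(diff(tx)[:, None]*diff(ty)[None, :]
+ * (tz[:-1, :-1] + tz[1:, :-1] + tz[:-1, 1:] + tz[1:, 1:])).sum()
+ assert_almost_equal(trpz, np.trapz(np.trapz(tz, ty, axis=1), tx))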
+
+ def test_empty_input(self):
+ # Test whether empty inputs return an empty output. Ticket 1014
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+
+ assert_array_equal(lut([], []), np.zeros((0,0)))
+ assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ s = 0.1
+ tx = [1 + s, 3 - s]
+ ty = [1 + s, 3 - s]
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ LSQBivariateSpline(x, y, z, tx, ty)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
+ str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ s = 0.1
+ tx = np.array([1 + s, 3 - s])
+ ty = np.array([1 + s, 3 - s])
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0)
+ bbox = np.array([1.0, 10.0, 1.0, 10.0])
+
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ # np.array input
+ spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
+ # list input
+ spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ tx.tolist(), ty.tolist(), w=w.tolist(),
+ bbox=bbox)
+ assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
+ assert_equal(len(r), 2)
+
+ def test_unequal_length_of_knots(self):
+ """Test for the case when the input knot-location arrays in x and y are
+ of different lengths.
+ """
+ x, y = np.mgrid[0:100, 0:100]
+ x = x.ravel()
+ y = y.ravel()
+ z = 3.0 * np.ones_like(x)
+ tx = np.linspace(0.1, 98.0, 29)
+ ty = np.linspace(0.1, 98.0, 33)
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty)
+ assert_equal(len(r), 1)
+
+ assert_almost_equal(lut(x, y, grid=False), z)
+
+
+class TestSmoothBivariateSpline:
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
+
+ def test_linear_1d(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,0,0,2,2,2,4,4,4]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,4,4,4]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe required storage space")
+ lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
+
+ tx = [1,2,4]
+ ty = [1,2,3]
+
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+
+ lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
+ assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
+ decimal=0) # the quadratures give 23.75 and 23.85
+
+ tz = lut(tx[:-1], ty[:-1])
+ trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
+
+ def test_rerun_lwrk2_too_small(self):
+ # in this setting, lwrk2 is too small in the default run. Here we
+ # check for equality with the bisplrep/bisplev output, because
+ # bisplrep automatically re-runs the spline representation when
+ # ier > 10.
+ x = np.linspace(-2, 2, 80)
+ y = np.linspace(-2, 2, 80)
+ z = x + y
+ xi = np.linspace(-1, 1, 100)
+ yi = np.linspace(-2, 2, 100)
+ tck = bisplrep(x, y, z)
+ res1 = bisplev(xi, yi, tck)
+ interp_ = SmoothBivariateSpline(x, y, z)
+ res2 = interp_(xi, yi)
+ assert_almost_equal(res1, res2)
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ SmoothBivariateSpline(x, y, z)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ SmoothBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
+ str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+ y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+ bbox = np.array([1.0, 3.0, 1.0, 3.0])
+ # np.array input
+ spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
+ # list input
+ spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist(), w=w.tolist(),
+ kx=1, ky=1)
+ assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
+
+
+class TestLSQSphereBivariateSpline:
+ def setup_method(self):
+ # define the input data and coordinates
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
+ phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ knotdata = data[::5, ::5]
+ # calculate spline coefficients
+ lats, lons = meshgrid(theta, phi)
+ lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ self.lut_lsq = lut_lsq
+ self.data = knotdata
+ self.new_lons, self.new_lats = knotsp, knotst
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
+ assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
+ self.data)
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ lats, lons = meshgrid(theta, phi)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = 2 * pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, w=invalid_w)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
+ nphi) * 2. * pi
+ lats, lons = meshgrid(theta, phi)
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ w = ones(lats.ravel().shape[0])
+
+ # np.array input
+ spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp, w=w)
+ # list input
+ spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
+ lons.ravel().tolist(),
+ data.T.ravel().tolist(),
+ knotst.tolist(),
+ knotsp.tolist(), w=w.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestSmoothSphereBivariateSpline:
+ def setup_method(self):
+ theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
+ .75*pi, .75*pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut.get_residual(), 0.)
+ assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
+ [[3, 3], [3, 3], [3, 3]])
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
+ .75 * pi, .75 * pi, .75 * pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ .75 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ 1.1 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 1.5 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 2.1 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
+ assert "s should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
+ .5 * pi, .75 * pi, .75 * pi, .75 * pi])
+ phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
+ pi, 1.5 * pi])
+ r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+
+ # np.array input
+ spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
+
+ # list input
+ spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
+ r.tolist(), w=w.tolist(), s=1E10)
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestRectBivariateSpline:
+ def test_defaults(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+
+ xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
+ yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+
+ assert_almost_equal(zi, zi2)
+
+ def test_derivatives_grid(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
+ [0,0,-11,0,0],[0,0,4,0,0]])/6.
+ dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
+ [2,.25,0,-.25,-2],[4,-1,0,1,-4]])
+ dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
+ [-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1),dx)
+ assert_array_almost_equal(lut(x,y,dy=1),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
+
+ def test_derivatives(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([0,0,2./3,0,0])
+ dy = array([4,-1,0,-.25,-4])
+ dxdy = array([160,65,0,55,32])/24.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
+ assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
+
+ def test_partial_derivative_method_grid(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1],
+ [1, 2, 1, 2, 1]])
+ dx = array([[0, 0, -20, 0, 0],
+ [0, 0, 13, 0, 0],
+ [0, 0, 4, 0, 0],
+ [0, 0, -11, 0, 0],
+ [0, 0, 4, 0, 0]]) / 6.
+ dy = array([[4, -1, 0, 1, -4],
+ [4, -1, 0, 1, -4],
+ [0, 1.5, 0, -1.5, 0],
+ [2, .25, 0, -.25, -2],
+ [4, -1, 0, 1, -4]])
+ dxdy = array([[40, -25, 0, 25, -40],
+ [-26, 16.25, 0, -16.25, 26],
+ [-8, 5, 0, -5, 8],
+ [22, -13.75, 0, 13.75, -22],
+ [-8, 5, 0, -5, 8]]) / 6.
+ lut = RectBivariateSpline(x, y, z)
+ assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y), dx)
+ assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y), dy)
+ assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y), dxdy)
+
+ def test_partial_derivative_method(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1],
+ [1, 2, 1, 2, 1]])
+ dx = array([0, 0, 2./3, 0, 0])
+ dy = array([4, -1, 0, -.25, -4])
+ dxdy = array([160, 65, 0, 55, 32]) / 24.
+ lut = RectBivariateSpline(x, y, z)
+ assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y,
+ grid=False),
+ dx)
+ assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y,
+ grid=False),
+ dy)
+ assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y,
+ grid=False),
+ dxdy)
+
+ def test_partial_derivative_order_too_large(self):
+ x = array([0, 1, 2, 3, 4], dtype=float)
+ y = x.copy()
+ z = ones((x.size, y.size))
+ lut = RectBivariateSpline(x, y, z)
+ with assert_raises(ValueError):
+ lut.partial_derivative(4, 1)
+
+ def test_broadcast(self):
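+        # Grid evaluation must agree with pointwise (grid=False)
+        # evaluation broadcast over the outer product of x and y.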
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = array([6, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([2, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "y must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x dimension of z must have same number of elements as x"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
+ [1, 2, 2, 2], [1, 2, 1, 2]])
+ RectBivariateSpline(x, y, z)
+ assert "y dimension of z must have same number of elements as y"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = (-100, 100, -100)
+ RectBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ RectBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_array_like_input(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = array([1, 5, 1, 5])
+
+ spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
+ spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+ def test_not_increasing_input(self):
+ # gh-8565
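+        # Construction accepts unsorted scattered points, but grid-mode
+        # evaluation requires strictly increasing coordinates.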
+ NSamp = 20
+ Theta = np.random.uniform(0, np.pi, NSamp)
+ Phi = np.random.uniform(0, 2 * np.pi, NSamp)
+ Data = np.ones(NSamp)
+
+ Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
+
+ NLon = 6
+ NLat = 3
+ GridPosLats = np.arange(NLat) / NLat * np.pi
+ GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
+
+ # No error
+ Interpolator(GridPosLats, GridPosLons)
+
+ nonGridPosLats = GridPosLats.copy()
+ nonGridPosLats[2] = 0.001
+ with assert_raises(ValueError) as exc_info:
+ Interpolator(nonGridPosLats, GridPosLons)
+ assert "x must be strictly increasing" in str(exc_info.value)
+
+ nonGridPosLons = GridPosLons.copy()
+ nonGridPosLons[2] = 0.001
+ with assert_raises(ValueError) as exc_info:
+ Interpolator(GridPosLats, nonGridPosLons)
+ assert "y must be strictly increasing" in str(exc_info.value)
+
+
+class TestRectSphereBivariateSpline:
+ def test_defaults(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
+ xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+ assert_almost_equal(zi, zi2)
+
+ def test_invalid_input(self):
+ data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(-1, 170, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 181, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-181, 10, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-10, 360, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(10, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data, s=-1)
+ assert "s should be positive" in str(exc_info.value)
+
+ def test_derivatives_grid(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1),
+ _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ assert_array_equal(lut(x, y, dtheta=1),
+ lut.partial_derivative(1, 0)(x, y))
+ assert_array_equal(lut(x, y, dphi=1),
+ lut.partial_derivative(0, 1)(x, y))
+ assert_array_equal(lut(x, y, dtheta=1, dphi=1),
+ lut.partial_derivative(1, 1)(x, y))
+
+ assert_array_equal(lut(x, y, dtheta=1, grid=False),
+ lut.partial_derivative(1, 0)(x, y, grid=False))
+ assert_array_equal(lut(x, y, dphi=1, grid=False),
+ lut.partial_derivative(0, 1)(x, y, grid=False))
+ assert_array_equal(lut(x, y, dtheta=1, dphi=1, grid=False),
+ lut.partial_derivative(1, 1)(x, y, grid=False))
+
+ def test_derivatives(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
+ assert_allclose(lut(x, y, dtheta=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False),
+ x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ def test_invalid_input_2(self):
+ data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(0, 170, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 180, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-181, 10, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-10, 360, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(10, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data, s=-1)
+ assert "s should be positive" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ y = linspace(0.01, 2 * pi - 0.01, 7)
+ x = linspace(0.01, pi - 0.01, 7)
+ z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1, 2, 1]])
+ # np.array input
+ spl1 = RectSphereBivariateSpline(x, y, z)
+ # list input
+ spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
+ assert_array_almost_equal(spl1(x, y), spl2(x, y))
+
+ def test_negative_evaluation(self):
+ lats = np.array([25, 30, 35, 40, 45])
+ lons = np.array([-90, -85, -80, -75, 70])
+ mesh = np.meshgrid(lats, lons)
+ data = mesh[0] + mesh[1] # lon + lat value
+ lat_r = np.radians(lats)
+ lon_r = np.radians(lons)
+ interpolator = RectSphereBivariateSpline(lat_r, lon_r, data)
+ query_lat = np.radians(np.array([35, 37.5]))
+ query_lon = np.radians(np.array([-80, -77.5]))
+ data_interp = interpolator(query_lat, query_lon)
+ ans = np.array([[-45.0, -42.480862],
+ [-49.0625, -46.54315]])
+ assert_array_almost_equal(data_interp, ans)
+
+ def test_pole_continuity_gh_14591(self):
+ # regression test for https://github.com/scipy/scipy/issues/14591
+        # with pole_continuity=(True, True), the internal work array size
+        # was too small, leading to a FITPACK data validation error.
+
+        # The reproducer in gh-14591 used a NetCDF4 file with 361x507
+        # arrays; here we shrink the arrays to the minimum size which
+        # still demonstrates the issue.
+ u = np.arange(1, 10) * np.pi / 10
+ v = np.arange(1, 10) * np.pi / 10
+ r = np.zeros((9, 9))
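+        # none of the tested pole_continuity combinations should raise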
+ for p in [(True, True), (True, False), (False, False)]:
+ RectSphereBivariateSpline(u, v, r, s=0, pole_continuity=p)
+
+
+def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
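+    # Second-order central differences: first derivatives use
+    # (f(. + eps) - f(. - eps)) / (2*eps); the mixed derivative uses the
+    # four-point cross stencil. For a bilinear f(x, y) = x*y the mixed
+    # stencil is exact and returns 1 everywhere.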
+ if dx == 0 and dy == 0:
+ return func(x, y)
+ elif dx == 1 and dy == 0:
+ return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
+ elif dx == 0 and dy == 1:
+ return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
+ elif dx == 1 and dy == 1:
+ return (func(x + eps, y + eps) - func(x - eps, y + eps)
+ - func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
+ else:
+ raise ValueError("invalid derivative order")
+
+
+class Test_DerivedBivariateSpline:
+ """Test the creation, usage, and attribute access of the (private)
+ _DerivedBivariateSpline class.
+ """
+ def setup_method(self):
+ x = np.concatenate(list(zip(range(10), range(10))))
+ y = np.concatenate(list(zip(range(10), range(1, 11))))
+ z = np.concatenate((np.linspace(3, 1, 10), np.linspace(1, 3, 10)))
+ with suppress_warnings() as sup:
+ sup.record(UserWarning, "\nThe coefficients of the spline")
+ self.lut_lsq = LSQBivariateSpline(x, y, z,
+ linspace(0.5, 19.5, 4),
+ linspace(1.5, 20.5, 4),
+ eps=1e-2)
+ self.lut_smooth = SmoothBivariateSpline(x, y, z)
+ xx = linspace(0, 1, 20)
+ yy = xx + 1.0
+ zz = array([np.roll(z, i) for i in range(z.size)])
+ self.lut_rect = RectBivariateSpline(xx, yy, zz)
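+        # all derivative-order pairs (nux, nuy) with 0 <= nux, nuy <= 2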
+ self.orders = list(itertools.product(range(3), range(3)))
+
+ def test_creation_from_LSQ(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_lsq.partial_derivative(nux, nuy)
+ a = lut_der(3.5, 3.5, grid=False)
+ b = self.lut_lsq(3.5, 3.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_creation_from_Smooth(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_smooth.partial_derivative(nux, nuy)
+ a = lut_der(5.5, 5.5, grid=False)
+ b = self.lut_smooth(5.5, 5.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_creation_from_Rect(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_rect.partial_derivative(nux, nuy)
+ a = lut_der(0.5, 1.5, grid=False)
+ b = self.lut_rect(0.5, 1.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_invalid_attribute_fp(self):
+ der = self.lut_rect.partial_derivative(1, 1)
+ with assert_raises(AttributeError):
+ der.fp
+
+ def test_invalid_attribute_get_residual(self):
+ der = self.lut_smooth.partial_derivative(1, 1)
+ with assert_raises(AttributeError):
+ der.get_residual()
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py
new file mode 100644
index 0000000000000000000000000000000000000000..0902308fb6af6802ba216e3aeec499d0ddfb1407
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py
@@ -0,0 +1,65 @@
+import itertools
+import threading
+import time
+
+import numpy as np
+from numpy.testing import assert_equal
+import pytest
+import scipy.interpolate
+
+
+class TestGIL:
+ """Check if the GIL is properly released by scipy.interpolate functions."""
+
+ def setup_method(self):
+ self.messages = []
+
+ def log(self, message):
+ self.messages.append(message)
+
+ def make_worker_thread(self, target, args):
+ log = self.log
+
+ class WorkerThread(threading.Thread):
+ def run(self):
+ log('interpolation started')
+ target(*args)
+ log('interpolation complete')
+
+ return WorkerThread()
+
+ @pytest.mark.slow
+ @pytest.mark.xfail(reason='race conditions, may depend on system load')
+ def test_rectbivariatespline(self):
+ def generate_params(n_points):
+ x = y = np.linspace(0, 1000, n_points)
+ x_grid, y_grid = np.meshgrid(x, y)
+ z = x_grid * y_grid
+ return x, y, z
+
+ def calibrate_delay(requested_time):
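+            # grow the problem until a single fit takes longer than
+            # requested_time, so the main thread has time to log while
+            # the worker is busy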
+ for n_points in itertools.count(5000, 1000):
+ args = generate_params(n_points)
+ time_started = time.time()
+ interpolate(*args)
+ if time.time() - time_started > requested_time:
+ return args
+
+ def interpolate(x, y, z):
+ scipy.interpolate.RectBivariateSpline(x, y, z)
+
+ args = calibrate_delay(requested_time=3)
+ worker_thread = self.make_worker_thread(interpolate, args)
+ worker_thread.start()
+ for i in range(3):
+ time.sleep(0.5)
+ self.log('working')
+ worker_thread.join()
+ assert_equal(self.messages, [
+ 'interpolation started',
+ 'working',
+ 'working',
+ 'working',
+ 'interpolation complete',
+ ])
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c7d52b422fb79971ddf86247196e92ea606a22d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py
@@ -0,0 +1,387 @@
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+ suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+import scipy.interpolate.interpnd as interpnd
+import scipy.spatial._qhull as qhull
+
+import pickle
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+class TestLinearNDInterpolation:
+ def test_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_smoketest_alternate(self):
+ # Test at single points, alternate calling convention
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
+ assert_almost_equal(y, yi)
+
+ def test_complex_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square(self):
+ # Test barycentric interpolation on a square against a manual
+ # implementation
+
+ points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64)
+ values = np.array([1., 2., -3., 5.], dtype=np.float64)
+
+ # NB: assume triangles (0, 1, 3) and (1, 2, 3)
+ #
+ # 1----2
+ # | \ |
+ # | \ |
+ # 0----3
+
+ def ip(x, y):
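+            # x + y <= 1 selects triangle (0, 1, 3); the remaining points
+            # fall in triangle (1, 2, 3). Each value is the barycentric-
+            # weighted combination of its triangle's vertex values.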
+ t1 = (x + y <= 1)
+ t2 = ~t1
+
+ x1 = x[t1]
+ y1 = y[t1]
+
+ x2 = x[t2]
+ y2 = y[t2]
+
+ z = 0*x
+
+ z[t1] = (values[0]*(1 - x1 - y1)
+ + values[1]*y1
+ + values[3]*x1)
+
+ z[t2] = (values[2]*(x2 + y2 - 1)
+ + values[1]*(1 - x2)
+ + values[3]*(1 - y2))
+ return z
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
+ np.linspace(0, 1, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+
+ assert_almost_equal(zi, ip(xx, yy))
+
+ def test_smoketest_rescale(self):
+ # Test at single points
+ x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square_rescale(self):
+ # Test barycentric interpolation on a rectangle with rescaling
+        # against the same implementation without rescaling
+
+ points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.float64)
+ values = np.array([1., 2., -3., 5.], dtype=np.float64)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+ zi_rescaled = interpnd.LinearNDInterpolator(points, values,
+ rescale=True)(xi)
+
+ assert_almost_equal(zi, zi_rescaled)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
+ rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.LinearNDInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+
+class TestEstimateGradients2DGlobal:
+ def test_smoketest(self):
+ x = np.array([(0, 0), (0, 2),
+ (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
+ tri = qhull.Delaunay(x)
+
+ # Should be exact for linear functions, independent of triangulation
+
+ funcs = [
+ (lambda x, y: 0*x + 1, (0, 0)),
+ (lambda x, y: 0 + x, (1, 0)),
+ (lambda x, y: -2 + y, (0, 1)),
+ (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
+ ]
+
+ for j, (func, grad) in enumerate(funcs):
+ z = func(x[:,0], x[:,1])
+ dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
+
+ assert_equal(dz.shape, (6, 2))
+ assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
+ rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
+
+ def test_regression_2359(self):
+ # Check regression --- for certain point sets, gradient
+ # estimation could end up in an infinite loop
+ points = np.load(data_file('estimate_gradients_hang.npy'))
+ values = np.random.rand(points.shape[0])
+ tri = qhull.Delaunay(points)
+
+ # This should not hang
+ with suppress_warnings() as sup:
+ sup.filter(interpnd.GradientEstimationWarning,
+ "Gradient estimation did not converge")
+ interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
+
+
+class TestCloughTocher2DInterpolator:
+
+ def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False,
+ rescale=False, **kw):
+ np.random.seed(1234)
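+        # Fit a Clough-Tocher interpolant to func sampled at x, then
+        # compare it against the exact function values at 50 random
+        # points in the unit square.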
+ if x is None:
+ x = np.array([(0, 0), (0, 1),
+ (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
+ (0.5, 0.2)],
+ dtype=float)
+
+ if not alternate:
+ ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+ else:
+ ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
+ func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+
+ p = np.random.rand(50, 2)
+
+ if not alternate:
+ a = ip(p)
+ else:
+ a = ip(p[:,0], p[:,1])
+ b = func(p[:,0], p[:,1])
+
+ try:
+ assert_allclose(a, b, **kw)
+ except AssertionError:
+ print("_check_accuracy: abs(a-b):", abs(a - b))
+ print("ip.grad:", ip.grad)
+ raise
+
+ def test_linear_smoketest(self):
+ # Should be exact for linear functions, independent of triangulation
+ funcs = [
+ lambda x, y: 0*x + 1,
+ lambda x, y: 0 + x,
+ lambda x, y: -2 + y,
+ lambda x, y: 3 + 3*x + 14.15*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True,
+ err_msg="Function (alternate) %d" % j)
+ # check rescaling
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function (rescaled) %d" % j, rescale=True)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True, rescale=True,
+ err_msg="Function (alternate, rescaled) %d" % j)
+
+ def test_quadratic_smoketest(self):
+ # Should be reasonably accurate for quadratic functions
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_dense(self):
+ # Should be more accurate for dense meshes
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
+ ]
+
+ np.random.seed(4321) # use a different seed than the check!
+ grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
+ np.random.rand(30*30, 2)]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_wrong_ndim(self):
+ x = np.random.randn(30, 3)
+ y = np.random.randn(30)
+ assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.CloughTocher2DInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+ def test_boundary_tri_symmetry(self):
+        # Interpolation at neighbourless triangles should be symmetric
+        # under mirroring of the triangle.
+
+ # Equilateral triangle
+ points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
+ values = np.array([1, 0, 0])
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+
+ # Set gradient to zero at vertices
+ ip.grad[...] = 0
+
+ # Interpolation should be symmetric vs. bisector
+ alpha = 0.3
+ p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
+ p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
+
+ v1 = ip(p1)
+ v2 = ip(p2)
+ assert_allclose(v1, v2)
+
+ # ... and affine invariant
+ np.random.seed(1)
+ A = np.random.randn(2, 2)
+ b = np.random.randn(2)
+
+ points = A.dot(points.T).T + b[None,:]
+ p1 = A.dot(p1) + b
+ p2 = A.dot(p2) + b
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+ ip.grad[...] = 0
+
+ w1 = ip(p1)
+ w2 = ip(p2)
+ assert_allclose(w1, v1)
+ assert_allclose(w2, v2)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5a1dd600c16cbe1886a31d979b561438df7c13a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py
@@ -0,0 +1,2584 @@
+from numpy.testing import (assert_, assert_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_array_equal,
+ assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
+import numpy as np
+
+from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
+ splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
+ NdPPoly, BSpline)
+
+from scipy.special import poch, gamma
+
+from scipy.interpolate import _ppoly
+
+from scipy._lib._gcutils import assert_deallocated, IS_PYPY
+
+from scipy.integrate import nquad
+
+from scipy.special import binom
+
+
+class TestInterp2D:
+ def test_interp2d(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x+0.5*y)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ II = interp2d(x, y, z)
+ assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+ v, u = ogrid[0:2:24j, 0:pi:25j]
+ assert_almost_equal(II(u.ravel(), v.ravel()),
+ sin(u+0.5*v), decimal=2)
+
+ def test_interp2d_meshgrid_input(self):
+ # Ticket #703
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+ z = sin(x[None, :] + y[:, None]/2.)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ II = interp2d(x, y, z)
+ assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+ def test_interp2d_meshgrid_input_unsorted(self):
+ np.random.seed(1234)
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+
+ z = sin(x[None, :] + y[:, None] / 2.)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ z = sin(x[None, :] + y[:, None]/2.)
+ ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ np.random.shuffle(y)
+ z = sin(x[None, :] + y[:, None] / 2.)
+ ip3 = interp2d(x, y, z, kind='cubic')
+
+ x = linspace(0, 2, 31)
+ y = linspace(0, pi, 30)
+
+ assert_equal(ip1(x, y), ip2(x, y))
+ assert_equal(ip1(x, y), ip3(x, y))
+
+ def test_interp2d_eval_unsorted(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x + 0.5*y)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ func = interp2d(x, y, z)
+
+ xe = np.array([3, 4, 5])
+ ye = np.array([5.3, 7.1])
+ assert_allclose(func(xe, ye), func(xe, ye[::-1]))
+
+ assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
+
+ def test_interp2d_linear(self):
+ # Ticket #898
+ a = np.zeros([5, 5])
+ a[2, 2] = 1.0
+ x = y = np.arange(5)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ b = interp2d(x, y, a, 'linear')
+ assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
+ assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
+
+ def test_interp2d_bounds(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 2, 7)
+ z = x[None, :]**2 + y[:, None]
+
+ ix = np.linspace(-1, 3, 31)
+ iy = np.linspace(-1, 3, 33)
+
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+
+ b = interp2d(x, y, z, bounds_error=True)
+ assert_raises(ValueError, b, ix, iy)
+
+ b = interp2d(x, y, z, fill_value=np.nan)
+ iz = b(ix, iy)
+ mx = (ix < 0) | (ix > 1)
+ my = (iy < 0) | (iy > 2)
+ assert_(np.isnan(iz[my, :]).all())
+ assert_(np.isnan(iz[:, mx]).all())
+ assert_(np.isfinite(iz[~my, :][:, ~mx]).all())
+
+
+class TestInterp1D:
+
+ def setup_method(self):
+ self.x5 = np.arange(5.)
+ self.x10 = np.arange(10.)
+ self.y10 = np.arange(10.)
+ self.x25 = self.x10.reshape((2,5))
+ self.x2 = np.arange(2.)
+ self.y2 = np.arange(2.)
+ self.x1 = np.array([0.])
+ self.y1 = np.array([0.])
+
+ self.y210 = np.arange(20.).reshape((2, 10))
+ self.y102 = np.arange(20.).reshape((10, 2))
+ self.y225 = np.arange(20.).reshape((2, 2, 5))
+ self.y25 = np.arange(10.).reshape((2, 5))
+ self.y235 = np.arange(30.).reshape((2, 3, 5))
+ self.y325 = np.arange(30.).reshape((3, 2, 5))
+
+ # Edge updated test matrix 1
+ # array([[ 30, 1, 2, 3, 4, 5, 6, 7, 8, -30],
+ # [ 30, 11, 12, 13, 14, 15, 16, 17, 18, -30]])
+ self.y210_edge_updated = np.arange(20.).reshape((2, 10))
+ self.y210_edge_updated[:, 0] = 30
+ self.y210_edge_updated[:, -1] = -30
+
+ # Edge updated test matrix 2
+ # array([[ 30, 30],
+ # [ 2, 3],
+ # [ 4, 5],
+ # [ 6, 7],
+ # [ 8, 9],
+ # [ 10, 11],
+ # [ 12, 13],
+ # [ 14, 15],
+ # [ 16, 17],
+ # [-30, -30]])
+ self.y102_edge_updated = np.arange(20.).reshape((10, 2))
+ self.y102_edge_updated[0, :] = 30
+ self.y102_edge_updated[-1, :] = -30
+
+ self.fill_value = -100.0
+
+ def test_validation(self):
+ # Make sure that appropriate exceptions are raised when invalid values
+ # are given to the constructor.
+
+ # These should all work.
+ for kind in ('nearest', 'nearest-up', 'zero', 'linear', 'slinear',
+ 'quadratic', 'cubic', 'previous', 'next'):
+ interp1d(self.x10, self.y10, kind=kind)
+ interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
+ interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=np.array([-1]))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1,))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=-1)
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1))
+ interp1d(self.x10, self.y10, kind=0)
+ interp1d(self.x10, self.y10, kind=1)
+ interp1d(self.x10, self.y10, kind=2)
+ interp1d(self.x10, self.y10, kind=3)
+ interp1d(self.x10, self.y210, kind='linear', axis=-1,
+ fill_value=(-1, -1))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=np.ones(10))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), np.ones(10)))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), -1))
+
+ # x array must be 1D.
+ assert_raises(ValueError, interp1d, self.x25, self.y10)
+
+ # y array cannot be a scalar.
+ assert_raises(ValueError, interp1d, self.x10, np.array(0))
+
+ # Check for x and y arrays having the same length.
+ assert_raises(ValueError, interp1d, self.x10, self.y2)
+ assert_raises(ValueError, interp1d, self.x2, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y102)
+ interp1d(self.x10, self.y210)
+ interp1d(self.x10, self.y102, axis=0)
+
+ # Check for x and y having at least 1 element.
+ assert_raises(ValueError, interp1d, self.x1, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y1)
+
+ # Bad fill values
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1, -1)) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array((-1, -1, -1))) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[[-1]]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array([])) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=()) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
+
+ def test_init(self):
+ # Check that the attributes are initialized appropriately by the
+ # constructor.
+ assert_(interp1d(self.x10, self.y10).copy)
+ assert_(not interp1d(self.x10, self.y10, copy=False).copy)
+ assert_(interp1d(self.x10, self.y10).bounds_error)
+ assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
+ assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
+ assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
+ 3.0)
+ assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
+ (1.0, 2.0))
+ assert_equal(interp1d(self.x10, self.y10).axis, 0)
+ assert_equal(interp1d(self.x10, self.y210).axis, 1)
+ assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
+ assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
+ assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
+ assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
+
+ def test_assume_sorted(self):
+ # Check for unsorted arrays
+ interp10 = interp1d(self.x10, self.y10)
+ interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
+
+ assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
+ assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
+ interp10([2.4, 5.6, 6.0]))
+
+ # Check assume_sorted keyword (defaults to False)
+ interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=False)
+ assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
+
+ interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=True)
+ # Should raise an error for unsorted input if assume_sorted=True
+ assert_raises(ValueError, interp10_assume_kw2, self.x10)
+
+ # Check that if y is a 2-D array, things are still consistent
+ interp10_y_2d = interp1d(self.x10, self.y210)
+ interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
+ assert_array_almost_equal(interp10_y_2d(self.x10),
+ interp10_y_2d_unsorted(self.x10))
+
+ def test_linear(self):
+ for kind in ['linear', 'slinear']:
+ self._check_linear(kind)
+
+ def _check_linear(self, kind):
+ # Check the actual implementation of linear interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind=kind)
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]))
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind=kind,
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [-1, 0, 9, 11], rtol=1e-14)
+
+ opts = dict(kind=kind,
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_linear_dtypes(self):
+ # regression test for gh-5898, where 1D linear interpolation has been
+ # delegated to numpy.interp for all float dtypes, and the latter was
+ # not handling e.g. np.float128.
+ for dtyp in [np.float16,
+ np.float32,
+ np.float64,
+ np.longdouble]:
+ x = np.arange(8, dtype=dtyp)
+ y = x
+ yp = interp1d(x, y, kind='linear')(x)
+ assert_equal(yp.dtype, dtyp)
+ assert_allclose(yp, y, atol=1e-15)
+
+        # regression test for gh-14531, where 1D linear interpolation
+        # has been extended to delegate to numpy.interp for integer dtypes
+ x = [0, 1, 2]
+ y = [np.nan, 0, 1]
+ yp = interp1d(x, y)(x)
+ assert_allclose(yp, y, atol=1e-15)
+
+ def test_slinear_dtypes(self):
+ # regression test for gh-7273: 1D slinear interpolation fails with
+ # float32 inputs
+ dt_r = [np.float16, np.float32, np.float64]
+ dt_rc = dt_r + [np.complex64, np.complex128]
+ spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
+ for dtx in dt_r:
+ x = np.arange(0, 10, dtype=dtx)
+ for dty in dt_rc:
+ y = np.exp(-x/3.0).astype(dty)
+ for dtn in dt_r:
+ xnew = x.astype(dtn)
+ for kind in spline_kinds:
+ f = interp1d(x, y, kind=kind, bounds_error=False)
+ assert_allclose(f(xnew), y, atol=1e-7,
+ err_msg=f"{dtx}, {dty} {dtn}")
+
+ def test_cubic(self):
+ # Check the actual implementation of spline interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='cubic')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10(1.5), np.array([1.5]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]),)
+
+ def test_nearest(self):
+ # Check the actual implementation of nearest-neighbour interpolation.
+ # Nearest asserts that half-integer case (1.5) rounds down to 1
+ interp10 = interp1d(self.x10, self.y10, kind='nearest')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='nearest',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, 9], rtol=1e-14)
+
+ opts = dict(kind='nearest',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_nearest_up(self):
+ # Check the actual implementation of nearest-neighbour interpolation.
+ # Nearest-up asserts that half-integer case (1.5) rounds up to 2
+ interp10 = interp1d(self.x10, self.y10, kind='nearest-up')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(2.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='nearest-up',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, 9], rtol=1e-14)
+
+ opts = dict(kind='nearest-up',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_previous(self):
+ # Check the actual implementation of previous interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='previous')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 5., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='previous',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [np.nan, 0, 9, 9], rtol=1e-14)
+
+ # Tests for gh-9591
+ interpolator1D = interp1d(self.x10, self.y10, kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+ [np.nan, np.nan, 5, 8, 9, 9])
+
+ interpolator2D = interp1d(self.x10, self.y210, kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[np.nan, np.nan, 5, 8, 9, 9],
+ [np.nan, np.nan, 15, 18, 19, 19]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="previous",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[np.nan, np.nan],
+ [10, 11],
+ [18, 19]])
+
+ opts = dict(kind='previous',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ # Tests for gh-16813
+ interpolator1D = interp1d([0, 1, 2],
+ [0, 1, -1], kind="previous",
+ fill_value='extrapolate',
+ assume_sorted=True)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+ interpolator1D = interp1d([2, 0, 1], # x is not ascending
+ [-1, 0, 1], kind="previous",
+ fill_value='extrapolate',
+ assume_sorted=False)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+ interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+ kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[np.nan, np.nan, 5, 8, -30, -30],
+ [np.nan, np.nan, 15, 18, -30, -30]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+ kind="previous",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[np.nan, np.nan],
+ [10, 11],
+ [-30, -30]])
+
+ def test_next(self):
+ # Check the actual implementation of next interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='next')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(2.))
+ assert_array_almost_equal(interp10(1.5), np.array(2.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([3., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='next',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, np.nan], rtol=1e-14)
+
+ # Tests for gh-9591
+ interpolator1D = interp1d(self.x10, self.y10, kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+ [0, 0, 5, 8, np.nan, np.nan])
+
+ interpolator2D = interp1d(self.x10, self.y210, kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[0, 0, 5, 8, np.nan, np.nan],
+ [10, 10, 15, 18, np.nan, np.nan]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="next",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[0, 1],
+ [10, 11],
+ [np.nan, np.nan]])
+
+ opts = dict(kind='next',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ # Tests for gh-16813
+ interpolator1D = interp1d([0, 1, 2],
+ [0, 1, -1], kind="next",
+ fill_value='extrapolate',
+ assume_sorted=True)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [0, 0, 0, 1, -1, np.nan, np.nan])
+
+ interpolator1D = interp1d([2, 0, 1], # x is not ascending
+ [-1, 0, 1], kind="next",
+ fill_value='extrapolate',
+ assume_sorted=False)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [0, 0, 0, 1, -1, np.nan, np.nan])
+
+ interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+ kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[30, 30, 5, 8, np.nan, np.nan],
+ [30, 30, 15, 18, np.nan, np.nan]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+ kind="next",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[30, 30],
+ [10, 11],
+ [np.nan, np.nan]])
+
+ def test_zero(self):
+ # Check the actual implementation of zero-order spline interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='zero')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 5., 6.]))
+
+ def bounds_check_helper(self, interpolant, test_array, fail_value):
+ # Asserts that a ValueError is raised and that the error message
+ # contains the value causing this exception.
+ assert_raises(ValueError, interpolant, test_array)
+ try:
+ interpolant(test_array)
+ except ValueError as err:
+ assert (f"{fail_value}" in str(err))
+
+ def _bounds_check(self, kind='linear'):
+ # Test that our handling of out-of-bounds input is correct.
+ extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
+ bounds_error=False, kind=kind)
+
+ assert_array_equal(extrap10(11.2), np.array(self.fill_value))
+ assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
+ assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
+                           np.array(self.fill_value))
+ assert_array_equal(extrap10._check_bounds(
+ np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
+ np.array([[True, False, False, False, False],
+ [False, False, False, False, True]]))
+
+ raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
+ kind=kind)
+
+ self.bounds_check_helper(raises_bounds_error, -1.0, -1.0)
+ self.bounds_check_helper(raises_bounds_error, 11.0, 11.0)
+ self.bounds_check_helper(raises_bounds_error, [0.0, -1.0, 0.0], -1.0)
+ self.bounds_check_helper(raises_bounds_error, [0.0, 1.0, 21.0], 21.0)
+
+ raises_bounds_error([0.0, 5.0, 9.0])
+
+ def _bounds_check_int_nan_fill(self, kind='linear'):
+ x = np.arange(10).astype(int)
+ y = np.arange(10).astype(int)
+ c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
+ yi = c(x - 1)
+ assert_(np.isnan(yi[0]))
+ assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
+
+ def test_bounds(self):
+ for kind in ('linear', 'cubic', 'nearest', 'previous', 'next',
+ 'slinear', 'zero', 'quadratic'):
+ self._bounds_check(kind)
+ self._bounds_check_int_nan_fill(kind)
+
+ def _check_fill_value(self, kind):
+ interp = interp1d(self.x10, self.y10, kind=kind,
+ fill_value=(-100, 100), bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), -100)
+ assert_array_almost_equal(interp([-10, 10]), [-100, 100])
+
+ # Proper broadcasting:
+ # interp along axis of length 5
+ # other dim=(2, 3), (3, 2), (2, 2), or (2,)
+
+ # one singleton fill_value (works for all)
+ for y in (self.y235, self.y325, self.y225, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=100, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), 100)
+ assert_array_almost_equal(interp([-10, 10]), 100)
+
+ # singleton lower, singleton upper
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=(-100, 100), bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), -100)
+ if y.ndim == 3:
+ result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
+ else:
+ result = [[-100, 100]] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # one broadcastable (3,) fill_value
+ fill_value = [100, 200, 300]
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+ [200, 200],
+ [300, 300]]] * 2)
+
+ # one broadcastable (2,) fill_value
+ fill_value = [100, 200]
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y225, self.y325, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ result = [100, 200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(10), result)
+ assert_array_almost_equal(interp(-10), result)
+ result = [[100, 100], [200, 200]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # broadcastable (3,) lower, singleton upper
+ fill_value = (np.array([-100, -200, -300]), 100)
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 100],
+ [-300, 100]]] * 2)
+
+ # broadcastable (2,) lower, singleton upper
+ fill_value = (np.array([-100, -200]), 100)
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y225, self.y325, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ result = [-100, -200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(-10), result)
+ result = [[-100, 100], [-200, 100]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # broadcastable (3,) lower, broadcastable (3,) upper
+ fill_value = ([-100, -200, -300], [100, 200, 300])
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2): # check ndarray as well as list here
+ if ii == 1:
+ fill_value = tuple(np.array(f) for f in fill_value)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 200],
+ [-300, 300]]] * 2)
+ # broadcastable (2,) lower, broadcastable (2,) upper
+ fill_value = ([-100, -200], [100, 200])
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y325, self.y225, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ result = [100, 200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(10), result)
+ result = [-100, -200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(-10), result)
+ result = [[-100, 100], [-200, 200]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # one broadcastable (2, 2) array-like
+ fill_value = [[100, 200], [1000, 2000]]
+ for y in (self.y235, self.y325, self.y25):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2):
+ if ii == 1:
+ fill_value = np.array(fill_value)
+ interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+ [200, 200]],
+ [[1000, 1000],
+ [2000, 2000]]])
+
+ # broadcastable (2, 2) lower, broadcastable (2, 2) upper
+ fill_value = ([[-100, -200], [-1000, -2000]],
+ [[100, 200], [1000, 2000]])
+ for y in (self.y235, self.y325, self.y25):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2):
+ if ii == 1:
+ fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
+ interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp(-10), [[-100, -200],
+ [-1000, -2000]])
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 200]],
+ [[-1000, 1000],
+ [-2000, 2000]]])
+
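+    def _demo_fill_value_broadcast(self):
+        # Illustrative sketch, not collected by pytest: how a two-element
+        # fill_value=(below, above) broadcasts against the axes that are
+        # not interpolated over.  Only the public interp1d API exercised
+        # above is used; the array values are made up for illustration.
+        x = np.arange(5.)
+        y = np.arange(10.).reshape(2, 5)   # interpolation axis is -1
+        f = interp1d(x, y, axis=-1, bounds_error=False,
+                     fill_value=([-1., -2.], [10., 20.]))
+        assert_allclose(f(-3.), [-1., -2.])   # below-range fill, per row
+        assert_allclose(f(99.), [10., 20.])   # above-range fill, per row
+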
+ def test_fill_value(self):
+ # test that two-element fill value works
+ for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+ 'zero', 'previous', 'next'):
+ self._check_fill_value(kind)
+
+ def test_fill_value_writeable(self):
+ # backwards compat: fill_value is a public writeable attribute
+ interp = interp1d(self.x10, self.y10, fill_value=123.0)
+ assert_equal(interp.fill_value, 123.0)
+ interp.fill_value = 321.0
+ assert_equal(interp.fill_value, 321.0)
+
+ def _nd_check_interp(self, kind='linear'):
+ # Check the behavior when the inputs and outputs are multidimensional.
+
+ # Multidimensional input.
+ interp10 = interp1d(self.x10, self.y10, kind=kind)
+ assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
+ np.array([[3., 5.], [2., 7.]]))
+
+ # Scalar input -> 0-dim scalar array output
+ assert_(isinstance(interp10(1.2), np.ndarray))
+ assert_equal(interp10(1.2).shape, ())
+
+ # Multidimensional outputs.
+ interp210 = interp1d(self.x10, self.y210, kind=kind)
+ assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
+ assert_array_almost_equal(interp210(np.array([1., 2.])),
+ np.array([[1., 2.], [11., 12.]]))
+
+ interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
+ assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
+ assert_array_almost_equal(interp102(np.array([1., 3.])),
+ np.array([[2., 3.], [6., 7.]]))
+
+ # Both at the same time!
+ x_new = np.array([[3., 5.], [2., 7.]])
+ assert_array_almost_equal(interp210(x_new),
+ np.array([[[3., 5.], [2., 7.]],
+ [[13., 15.], [12., 17.]]]))
+ assert_array_almost_equal(interp102(x_new),
+ np.array([[[6., 7.], [10., 11.]],
+ [[4., 5.], [14., 15.]]]))
+
+ def _nd_check_shape(self, kind='linear'):
+ # Check large N-D output shape
+ a = [4, 5, 6, 7]
+ y = np.arange(np.prod(a)).reshape(*a)
+ for n, s in enumerate(a):
+ x = np.arange(s)
+ z = interp1d(x, y, axis=n, kind=kind)
+ assert_array_almost_equal(z(x), y, err_msg=kind)
+
+            x2 = np.arange(2*3*1).reshape((2, 3, 1)) / 12.
+            b = list(a)
+            b[n:n+1] = [2, 3, 1]
+ assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
+
+ def test_nd(self):
+ for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
+ 'zero', 'previous', 'next'):
+ self._nd_check_interp(kind)
+ self._nd_check_shape(kind)
+
+ def _check_complex(self, dtype=np.complex128, kind='linear'):
+ x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
+ y = x * x ** (1 + 2j)
+ y = y.astype(dtype)
+
+ # simple test
+ c = interp1d(x, y, kind=kind)
+ assert_array_almost_equal(y[:-1], c(x)[:-1])
+
+ # check against interpolating real+imag separately
+ xi = np.linspace(1, 10, 31)
+ cr = interp1d(x, y.real, kind=kind)
+ ci = interp1d(x, y.imag, kind=kind)
+ assert_array_almost_equal(c(xi).real, cr(xi))
+ assert_array_almost_equal(c(xi).imag, ci(xi))
+
+ def test_complex(self):
+ for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+ 'zero', 'previous', 'next'):
+ self._check_complex(np.complex64, kind)
+ self._check_complex(np.complex128, kind)
+
+ @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+ def test_circular_refs(self):
+ # Test interp1d can be automatically garbage collected
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ # Confirm interp can be released from memory after use
+ with assert_deallocated(interp1d, x, y) as interp:
+ interp([0.1, 0.2])
+ del interp
+
+ def test_overflow_nearest(self):
+ # Test that the x range doesn't overflow when given integers as input
+ for kind in ('nearest', 'previous', 'next'):
+ x = np.array([0, 50, 127], dtype=np.int8)
+ ii = interp1d(x, x, kind=kind)
+ assert_array_almost_equal(ii(x), x)
+
+ def test_local_nans(self):
+ # check that for local interpolation kinds (slinear, zero) a single nan
+ # only affects its local neighborhood
+ x = np.arange(10).astype(float)
+ y = x.copy()
+ y[6] = np.nan
+ for kind in ('zero', 'slinear'):
+ ir = interp1d(x, y, kind=kind)
+ vals = ir([4.9, 7.0])
+ assert_(np.isfinite(vals).all())
+
+ def test_spline_nans(self):
+        # Backwards compat: a single nan makes the whole spline interpolation
+        # return nans in an array of the correct shape.  It does not raise;
+        # it just returns quiet nans, for backwards compatibility.
+ x = np.arange(8).astype(float)
+ y = x.copy()
+ yn = y.copy()
+ yn[3] = np.nan
+
+ for kind in ['quadratic', 'cubic']:
+ ir = interp1d(x, y, kind=kind)
+ irn = interp1d(x, yn, kind=kind)
+ for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
+ xnew = np.asarray(xnew)
+                out, outn = ir(xnew), irn(xnew)
+ assert_(np.isnan(outn).all())
+ assert_equal(out.shape, outn.shape)
+
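+    def _demo_nan_propagation(self):
+        # Sketch contrasting the two behaviours asserted above, under the
+        # same assumptions the tests make: a nan in the data stays local
+        # for piecewise-local kinds, but poisons the whole output for the
+        # global spline kinds.
+        x = np.arange(10.)
+        y = x.copy()
+        y[6] = np.nan
+        assert_(np.isfinite(interp1d(x, y, kind='slinear')([1.5])).all())
+        assert_(np.isnan(interp1d(x, y, kind='cubic')([1.5])).all())
+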
+ def test_all_nans(self):
+ # regression test for gh-11637: interp1d core dumps with all-nan `x`
+ x = np.ones(10) * np.nan
+ y = np.arange(10)
+ with assert_raises(ValueError):
+ interp1d(x, y, kind='cubic')
+
+ def test_read_only(self):
+ x = np.arange(0, 10)
+ y = np.exp(-x / 3.0)
+ xnew = np.arange(0, 9, 0.1)
+ # Check both read-only and not read-only:
+ for xnew_writeable in (True, False):
+ xnew.flags.writeable = xnew_writeable
+ x.flags.writeable = False
+ for kind in ('linear', 'nearest', 'zero', 'slinear', 'quadratic',
+ 'cubic'):
+ f = interp1d(x, y, kind=kind)
+ vals = f(xnew)
+ assert_(np.isfinite(vals).all())
+
+ @pytest.mark.parametrize(
+ "kind", ("linear", "nearest", "nearest-up", "previous", "next")
+ )
+ def test_single_value(self, kind):
+ # https://github.com/scipy/scipy/issues/4043
+ f = interp1d([1.5], [6], kind=kind, bounds_error=False,
+ fill_value=(2, 10))
+ assert_array_equal(f([1, 1.5, 2]), [2, 6, 10])
+ # check still error if bounds_error=True
+ f = interp1d([1.5], [6], kind=kind, bounds_error=True)
+ with assert_raises(ValueError, match="x_new is above"):
+ f(2.0)
+
+
+class TestLagrange:
+
+ def test_lagrange(self):
+        p = poly1d([5, 2, 1, 4, 3])
+        xs = np.arange(len(p.coeffs))
+        ys = p(xs)
+        pl = lagrange(xs, ys)
+        assert_array_almost_equal(p.coeffs, pl.coeffs)
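+
+    def _demo_lagrange_exactness(self):
+        # Minimal sketch (hypothetical helper, not collected by pytest):
+        # lagrange() returns the unique interpolating polynomial, so n
+        # samples of a degree n-1 polynomial are reproduced exactly.
+        xs = np.array([0., 1., 2.])
+        pl = lagrange(xs, 3*xs**2 + 1)
+        assert_allclose(pl(0.5), 3*0.5**2 + 1)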
+
+
+class TestAkima1DInterpolator:
+ def test_eval(self):
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ ak = Akima1DInterpolator(x, y)
+ xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+ 8.6, 9.9, 10.])
+ yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+ 4.1363636363636366866103344, 5.9803623910336236590978842,
+ 5.5067291516462386624652936, 5.2031367459745245795943447,
+ 4.1796554159017080820603951, 3.4110386597938129327189927,
+ 3.])
+ assert_allclose(ak(xi), yi)
+
+ def test_eval_mod(self):
+ # Reference values generated with the following MATLAB code:
+ # format longG
+ # x = 0:10; y = [0. 2. 1. 3. 2. 6. 5.5 5.5 2.7 5.1 3.];
+ # xi = [0. 0.5 1. 1.5 2.5 3.5 4.5 5.1 6.5 7.2 8.6 9.9 10.];
+ # makima(x, y, xi)
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ ak = Akima1DInterpolator(x, y, method="makima")
+ xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+ 8.6, 9.9, 10.])
+ yi = np.array([
+ 0.0, 1.34471153846154, 2.0, 1.44375, 1.94375, 2.51939102564103,
+ 4.10366931918656, 5.98501550899192, 5.51756330960439, 5.1757231914014,
+ 4.12326636931311, 3.32931513157895, 3.0])
+ assert_allclose(ak(xi), yi)
+
+ def test_eval_2d(self):
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ y = np.column_stack((y, 2. * y))
+ ak = Akima1DInterpolator(x, y)
+ xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+ 8.6, 9.9, 10.])
+ yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+ 4.1363636363636366866103344,
+ 5.9803623910336236590978842,
+ 5.5067291516462386624652936,
+ 5.2031367459745245795943447,
+ 4.1796554159017080820603951,
+ 3.4110386597938129327189927, 3.])
+ yi = np.column_stack((yi, 2. * yi))
+ assert_allclose(ak(xi), yi)
+
+ def test_eval_3d(self):
+ x = np.arange(0., 11.)
+ y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ y = np.empty((11, 2, 2))
+ y[:, 0, 0] = y_
+ y[:, 1, 0] = 2. * y_
+ y[:, 0, 1] = 3. * y_
+ y[:, 1, 1] = 4. * y_
+ ak = Akima1DInterpolator(x, y)
+ xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+ 8.6, 9.9, 10.])
+ yi = np.empty((13, 2, 2))
+ yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+ 4.1363636363636366866103344,
+ 5.9803623910336236590978842,
+ 5.5067291516462386624652936,
+ 5.2031367459745245795943447,
+ 4.1796554159017080820603951,
+ 3.4110386597938129327189927, 3.])
+ yi[:, 0, 0] = yi_
+ yi[:, 1, 0] = 2. * yi_
+ yi[:, 0, 1] = 3. * yi_
+ yi[:, 1, 1] = 4. * yi_
+ assert_allclose(ak(xi), yi)
+
+ def test_degenerate_case_multidimensional(self):
+ # This test is for issue #5683.
+ x = np.array([0, 1, 2])
+ y = np.vstack((x, x**2)).T
+ ak = Akima1DInterpolator(x, y)
+ x_eval = np.array([0.5, 1.5])
+ y_eval = ak(x_eval)
+ assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)
+
+ def test_extend(self):
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ ak = Akima1DInterpolator(x, y)
+ match = "Extending a 1-D Akima interpolator is not yet implemented"
+ with pytest.raises(NotImplementedError, match=match):
+ ak.extend(None, None)
+
+ def test_mod_invalid_method(self):
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ match = "`method`=invalid is unsupported."
+ with pytest.raises(NotImplementedError, match=match):
+ Akima1DInterpolator(x, y, method="invalid") # type: ignore
+
+ def test_complex(self):
+ # Complex-valued data deprecated
+ x = np.arange(0., 11.)
+ y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+ y = y - 2j*y
+ # actually raises ComplexWarning, which subclasses RuntimeWarning, see
+ # https://github.com/numpy/numpy/blob/main/numpy/exceptions.py
+ msg = "Passing an array with a complex.*|Casting complex values to real.*"
+ with pytest.warns((RuntimeWarning, DeprecationWarning), match=msg):
+ Akima1DInterpolator(x, y)
+
+
+class TestPPolyCommon:
+ # test basic functionality for PPoly and BPoly
+ def test_sort_check(self):
+ c = np.array([[1, 4], [2, 5], [3, 6]])
+ x = np.array([0, 1, 0.5])
+ assert_raises(ValueError, PPoly, c, x)
+ assert_raises(ValueError, BPoly, c, x)
+
+ def test_ctor_c(self):
+ # wrong shape: `c` must be at least 2D
+ with assert_raises(ValueError):
+ PPoly([1, 2], [0, 1])
+
+ def test_extend(self):
+ # Test adding new points to the piecewise polynomial
+ np.random.seed(1234)
+
+ order = 3
+ x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
+ c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
+
+ for cls in (PPoly, BPoly):
+ pp = cls(c[:,:9], x[:10])
+ pp.extend(c[:,9:], x[10:])
+
+ pp2 = cls(c[:, 10:], x[10:])
+ pp2.extend(c[:, :10], x[:10])
+
+ pp3 = cls(c, x)
+
+ assert_array_equal(pp.c, pp3.c)
+ assert_array_equal(pp.x, pp3.x)
+ assert_array_equal(pp2.c, pp3.c)
+ assert_array_equal(pp2.x, pp3.x)
+
+ def test_extend_diff_orders(self):
+        # Test extending a polynomial with one of a different order
+ np.random.seed(1234)
+
+ x = np.linspace(0, 1, 6)
+ c = np.random.rand(2, 5)
+
+ x2 = np.linspace(1, 2, 6)
+ c2 = np.random.rand(4, 5)
+
+ for cls in (PPoly, BPoly):
+ pp1 = cls(c, x)
+ pp2 = cls(c2, x2)
+
+ pp_comb = cls(c, x)
+ pp_comb.extend(c2, x2[1:])
+
+            # NB. doesn't match pp1 at the endpoint, because pp1 is not
+            # continuous with pp2: the coefficients are random.
+ xi1 = np.linspace(0, 1, 300, endpoint=False)
+ xi2 = np.linspace(1, 2, 300)
+
+ assert_allclose(pp1(xi1), pp_comb(xi1))
+ assert_allclose(pp2(xi2), pp_comb(xi2))
+
+ def test_extend_descending(self):
+ np.random.seed(0)
+
+ order = 3
+ x = np.sort(np.random.uniform(0, 10, 20))
+ c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
+
+ for cls in (PPoly, BPoly):
+ p = cls(c, x)
+
+ p1 = cls(c[:, :9], x[:10])
+ p1.extend(c[:, 9:], x[10:])
+
+ p2 = cls(c[:, 10:], x[10:])
+ p2.extend(c[:, :10], x[:10])
+
+ assert_array_equal(p1.c, p.c)
+ assert_array_equal(p1.x, p.x)
+ assert_array_equal(p2.c, p.c)
+ assert_array_equal(p2.x, p.x)
+
+ def test_shape(self):
+ np.random.seed(1234)
+ c = np.random.rand(8, 12, 5, 6, 7)
+ x = np.sort(np.random.rand(13))
+ xp = np.random.rand(3, 4)
+ for cls in (PPoly, BPoly):
+ p = cls(c, x)
+ assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
+
+ # 'scalars'
+ for cls in (PPoly, BPoly):
+ p = cls(c[..., 0, 0, 0], x)
+
+ assert_equal(np.shape(p(0.5)), ())
+ assert_equal(np.shape(p(np.array(0.5))), ())
+
+ assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]], dtype=object))
+
+ def test_complex_coef(self):
+ np.random.seed(12345)
+ x = np.sort(np.random.random(13))
+ c = np.random.random((8, 12)) * (1. + 0.3j)
+ c_re, c_im = c.real, c.imag
+ xp = np.random.random(5)
+ for cls in (PPoly, BPoly):
+ p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
+ for nu in [0, 1, 2]:
+ assert_allclose(p(xp, nu).real, p_re(xp, nu))
+ assert_allclose(p(xp, nu).imag, p_im(xp, nu))
+
+ def test_axis(self):
+ np.random.seed(12345)
+ c = np.random.rand(3, 4, 5, 6, 7, 8)
+ c_s = c.shape
+ xp = np.random.random((1, 2))
+ for axis in (0, 1, 2, 3):
+ m = c.shape[axis+1]
+ x = np.sort(np.random.rand(m+1))
+ for cls in (PPoly, BPoly):
+ p = cls(c, x, axis=axis)
+ assert_equal(p.c.shape,
+ c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
+ res = p(xp)
+ targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
+ assert_equal(res.shape, targ_shape)
+
+ # deriv/antideriv does not drop the axis
+ for p1 in [cls(c, x, axis=axis).derivative(),
+ cls(c, x, axis=axis).derivative(2),
+ cls(c, x, axis=axis).antiderivative(),
+ cls(c, x, axis=axis).antiderivative(2)]:
+ assert_equal(p1.axis, p.axis)
+
+ # c array needs two axes for the coefficients and intervals, so
+ # 0 <= axis < c.ndim-1; raise otherwise
+ for axis in (-1, 4, 5, 6):
+ for cls in (BPoly, PPoly):
+ assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
+
+
+class TestPolySubclassing:
+ class P(PPoly):
+ pass
+
+ class B(BPoly):
+ pass
+
+ def _make_polynomials(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(3))
+ c = np.random.random((4, 2))
+ return self.P(c, x), self.B(c, x)
+
+ def test_derivative(self):
+ pp, bp = self._make_polynomials()
+ for p in (pp, bp):
+ pd = p.derivative()
+ assert_equal(p.__class__, pd.__class__)
+
+ ppa = pp.antiderivative()
+ assert_equal(pp.__class__, ppa.__class__)
+
+ def test_from_spline(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0)
+ pp = self.P.from_spline(spl)
+ assert_equal(pp.__class__, self.P)
+
+ def test_conversions(self):
+ pp, bp = self._make_polynomials()
+
+ pp1 = self.P.from_bernstein_basis(bp)
+ assert_equal(pp1.__class__, self.P)
+
+ bp1 = self.B.from_power_basis(pp)
+ assert_equal(bp1.__class__, self.B)
+
+ def test_from_derivatives(self):
+ x = [0, 1, 2]
+ y = [[1], [2], [3]]
+ bp = self.B.from_derivatives(x, y)
+ assert_equal(bp.__class__, self.B)
+
+
+class TestPPoly:
+ def test_simple(self):
+ c = np.array([[1, 4], [2, 5], [3, 6]])
+ x = np.array([0, 0.5, 1])
+ p = PPoly(c, x)
+ assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+ assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
+ def test_periodic(self):
+ c = np.array([[1, 4], [2, 5], [3, 6]])
+ x = np.array([0, 0.5, 1])
+ p = PPoly(c, x, extrapolate='periodic')
+
+ assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
+ assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)
+
+ assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
+ assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
+
+ def test_read_only(self):
+ c = np.array([[1, 4], [2, 5], [3, 6]])
+ x = np.array([0, 0.5, 1])
+ xnew = np.array([0, 0.1, 0.2])
+ PPoly(c, x, extrapolate='periodic')
+
+ for writeable in (True, False):
+ x.flags.writeable = writeable
+ c.flags.writeable = writeable
+ f = PPoly(c, x)
+ vals = f(xnew)
+ assert_(np.isfinite(vals).all())
+
+ def test_descending(self):
+ def binom_matrix(power):
+ n = np.arange(power + 1).reshape(-1, 1)
+ k = np.arange(power + 1)
+ B = binom(n, k)
+ return B[::-1, ::-1]
+
+ np.random.seed(0)
+
+ power = 3
+ for m in [10, 20, 30]:
+ x = np.sort(np.random.uniform(0, 10, m + 1))
+ ca = np.random.uniform(-2, 2, size=(power + 1, m))
+
+ h = np.diff(x)
+ h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
+ B = binom_matrix(power)
+ cap = ca * h_powers
+ cdp = np.dot(B.T, cap)
+ cd = cdp / h_powers
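+            # (this re-expresses each local polynomial about the opposite
+            # breakpoint: rescale by the interval lengths, apply the
+            # flipped binomial change-of-basis matrix, undo the scaling)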
+
+ pa = PPoly(ca, x, extrapolate=True)
+ pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
+
+ x_test = np.random.uniform(-10, 20, 100)
+ assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
+ assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
+
+ pa_d = pa.derivative()
+ pd_d = pd.derivative()
+
+ assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
+
+            # Antiderivatives won't be equal because fixing continuity is
+            # done in the reverse order, but the definite integrals
+            # (differences of the antiderivative) should agree.
+ pa_i = pa.antiderivative()
+ pd_i = pd.antiderivative()
+ for a, b in np.random.uniform(-10, 20, (5, 2)):
+ int_a = pa.integrate(a, b)
+ int_d = pd.integrate(a, b)
+ assert_allclose(int_a, int_d, rtol=1e-13)
+ assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
+ rtol=1e-13)
+
+ roots_d = pd.roots()
+ roots_a = pa.roots()
+ assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
+
+ def test_multi_shape(self):
+ c = np.random.rand(6, 2, 1, 2, 3)
+ x = np.array([0, 0.5, 1])
+ p = PPoly(c, x)
+ assert_equal(p.x.shape, x.shape)
+ assert_equal(p.c.shape, c.shape)
+ assert_equal(p(0.3).shape, c.shape[2:])
+
+ assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])
+
+ dp = p.derivative()
+ assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
+ ip = p.antiderivative()
+ assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
+
+ def test_construct_fast(self):
+ np.random.seed(1234)
+ c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
+ x = np.array([0, 0.5, 1])
+ p = PPoly.construct_fast(c, x)
+ assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+ assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
+ def test_vs_alternative_implementations(self):
+ np.random.seed(1234)
+ c = np.random.rand(3, 12, 22)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+
+ p = PPoly(c, x)
+
+ xp = np.r_[0.3, 0.5, 0.33, 0.6]
+ expected = _ppoly_eval_1(c, x, xp)
+ assert_allclose(p(xp), expected)
+
+ expected = _ppoly_eval_2(c[:,:,0], x, xp)
+ assert_allclose(p(xp)[:,0], expected)
+
+ def test_from_spline(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0)
+ pp = PPoly.from_spline(spl)
+
+ xi = np.linspace(0, 1, 200)
+ assert_allclose(pp(xi), splev(xi, spl))
+
+ # make sure .from_spline accepts BSpline objects
+ b = BSpline(*spl)
+ ppp = PPoly.from_spline(b)
+ assert_allclose(ppp(xi), b(xi))
+
+ # BSpline's extrapolate attribute propagates unless overridden
+ t, c, k = spl
+ for extrap in (None, True, False):
+ b = BSpline(t, c, k, extrapolate=extrap)
+ p = PPoly.from_spline(b)
+ assert_equal(p.extrapolate, b.extrapolate)
+
+ def test_derivative_simple(self):
+ np.random.seed(1234)
+ c = np.array([[4, 3, 2, 1]]).T
+ dc = np.array([[3*4, 2*3, 2]]).T
+ ddc = np.array([[2*3*4, 1*2*3]]).T
+ x = np.array([0, 1])
+
+ pp = PPoly(c, x)
+ dpp = PPoly(dc, x)
+ ddpp = PPoly(ddc, x)
+
+ assert_allclose(pp.derivative().c, dpp.c)
+ assert_allclose(pp.derivative(2).c, ddpp.c)
+
+ def test_derivative_eval(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0)
+ pp = PPoly.from_spline(spl)
+
+ xi = np.linspace(0, 1, 200)
+ for dx in range(0, 3):
+ assert_allclose(pp(xi, dx), splev(xi, spl, dx))
+
+ def test_derivative(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0, k=5)
+ pp = PPoly.from_spline(spl)
+
+ xi = np.linspace(0, 1, 200)
+ for dx in range(0, 10):
+ assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
+ err_msg="dx=%d" % (dx,))
+
+ def test_antiderivative_of_constant(self):
+ # https://github.com/scipy/scipy/issues/4216
+ p = PPoly([[1.]], [0, 1])
+ assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
+ assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
+
+ def test_antiderivative_regression_4355(self):
+ # https://github.com/scipy/scipy/issues/4355
+ p = PPoly([[1., 0.5]], [0, 1, 2])
+ q = p.antiderivative()
+ assert_equal(q.c, [[1, 0.5], [0, 1]])
+ assert_equal(q.x, [0, 1, 2])
+ assert_allclose(p.integrate(0, 2), 1.5)
+ assert_allclose(q(2) - q(0), 1.5)
+
+ def test_antiderivative_simple(self):
+ np.random.seed(1234)
+ # [ p1(x) = 3*x**2 + 2*x + 1,
+ # p2(x) = 1.6875]
+ c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
+ # [ pp1(x) = x**3 + x**2 + x,
+ # pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
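+        #   (pp1(0.25) = 0.25**3 + 0.25**2 + 0.25 = 0.328125)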
+ ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
+ # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
+ # ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
+ iic = np.array([[1/4, 1/3, 1/2, 0, 0],
+ [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
+ x = np.array([0, 0.25, 1])
+
+ pp = PPoly(c, x)
+ ipp = pp.antiderivative()
+ iipp = pp.antiderivative(2)
+ iipp2 = ipp.antiderivative()
+
+ assert_allclose(ipp.x, x)
+ assert_allclose(ipp.c.T, ic.T)
+ assert_allclose(iipp.c.T, iic.T)
+ assert_allclose(iipp2.c.T, iic.T)
+
+ def test_antiderivative_vs_derivative(self):
+ np.random.seed(1234)
+ x = np.linspace(0, 1, 30)**2
+ y = np.random.rand(len(x))
+ spl = splrep(x, y, s=0, k=5)
+ pp = PPoly.from_spline(spl)
+
+ for dx in range(0, 10):
+ ipp = pp.antiderivative(dx)
+
+ # check that derivative is inverse op
+ pp2 = ipp.derivative(dx)
+ assert_allclose(pp.c, pp2.c)
+
+ # check continuity
+ for k in range(dx):
+ pp2 = ipp.derivative(k)
+
+ r = 1e-13
+ endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
+
+ assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
+ rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
+
+ def test_antiderivative_vs_spline(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0, k=5)
+ pp = PPoly.from_spline(spl)
+
+ for dx in range(0, 10):
+ pp2 = pp.antiderivative(dx)
+ spl2 = splantider(spl, dx)
+
+ xi = np.linspace(0, 1, 200)
+ assert_allclose(pp2(xi), splev(xi, spl2),
+ rtol=1e-7)
+
+ def test_antiderivative_continuity(self):
+ c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
+ x = np.array([0, 0.5, 1])
+
+ p = PPoly(c, x)
+ ip = p.antiderivative()
+
+ # check continuity
+ assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
+
+ # check that only lowest order coefficients were changed
+ p2 = ip.derivative()
+ assert_allclose(p2.c, p.c)
+
+ def test_integrate(self):
+ np.random.seed(1234)
+ x = np.sort(np.r_[0, np.random.rand(11), 1])
+ y = np.random.rand(len(x))
+
+ spl = splrep(x, y, s=0, k=5)
+ pp = PPoly.from_spline(spl)
+
+ a, b = 0.3, 0.9
+ ig = pp.integrate(a, b)
+
+ ipp = pp.antiderivative()
+ assert_allclose(ig, ipp(b) - ipp(a))
+ assert_allclose(ig, splint(a, b, spl))
+
+ a, b = -0.3, 0.9
+ ig = pp.integrate(a, b, extrapolate=True)
+ assert_allclose(ig, ipp(b) - ipp(a))
+
+ assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
+
+ def test_integrate_readonly(self):
+ x = np.array([1, 2, 4])
+ c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+ for writeable in (True, False):
+ x.flags.writeable = writeable
+
+ P = PPoly(c, x)
+ vals = P.integrate(1, 4)
+
+ assert_(np.isfinite(vals).all())
+
+ def test_integrate_periodic(self):
+ x = np.array([1, 2, 4])
+ c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+ P = PPoly(c, x, extrapolate='periodic')
+ I = P.antiderivative()
+
+ period_int = I(4) - I(1)
+
+ assert_allclose(P.integrate(1, 4), period_int)
+ assert_allclose(P.integrate(-10, -7), period_int)
+ assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+ assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
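+        # Wrap-around: [3.5, 5] over the period [1, 4] covers [3.5, 4] plus
+        # the wrapped piece [1, 2], hence I(4) - I(3.5) + I(2) - I(1).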
+ assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+ assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+ I(2) - I(1) + I(4) - I(3.5))
+ assert_allclose(P.integrate(3.5, 5 + 12),
+ I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+ assert_allclose(P.integrate(0, -1), I(2) - I(3))
+ assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+ assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+ def test_roots(self):
+ x = np.linspace(0, 1, 31)**2
+ y = np.sin(30*x)
+
+ spl = splrep(x, y, s=0, k=3)
+ pp = PPoly.from_spline(spl)
+
+ r = pp.roots()
+ r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
+ assert_allclose(r, sproot(spl), atol=1e-15)
+
+ def test_roots_idzero(self):
+ # Roots for piecewise polynomials with identically zero
+ # sections.
+ c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
+ x = np.array([0, 0.4, 0.6, 1.0])
+
+ pp = PPoly(c, x)
+ assert_array_equal(pp.roots(),
+ [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+        # ditto for .solve(const) with sections identically equal to const
+ const = 2.
+ c1 = c.copy()
+ c1[1, :] += const
+ pp1 = PPoly(c1, x)
+
+ assert_array_equal(pp1.solve(const),
+ [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+ def test_roots_all_zero(self):
+ # test the code path for the polynomial being identically zero everywhere
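+        # (convention: an identically-zero stretch is reported as its left
+        # endpoint followed by a nan marker)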
+ c = [[0], [0]]
+ x = [0, 1]
+ p = PPoly(c, x)
+ assert_array_equal(p.roots(), [0, np.nan])
+ assert_array_equal(p.solve(0), [0, np.nan])
+ assert_array_equal(p.solve(1), [])
+
+ c = [[0, 0], [0, 0]]
+ x = [0, 1, 2]
+ p = PPoly(c, x)
+ assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
+ assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
+ assert_array_equal(p.solve(1), [])
+
+ def test_roots_repeated(self):
+ # Check roots repeated in multiple sections are reported only
+ # once.
+
+ # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
+ c = np.array([[1, 0, -1], [-1, 0, 0]]).T
+ x = np.array([-1, 0, 1])
+
+ pp = PPoly(c, x)
+ assert_array_equal(pp.roots(), [-2, 0])
+ assert_array_equal(pp.roots(extrapolate=False), [0])
+
+ def test_roots_discont(self):
+        # Check that a discontinuity across zero is reported as a root
+ c = np.array([[1], [-1]]).T
+ x = np.array([0, 0.5, 1])
+ pp = PPoly(c, x)
+ assert_array_equal(pp.roots(), [0.5])
+ assert_array_equal(pp.roots(discontinuity=False), [])
+
+ # ditto for a discontinuity across y:
+ assert_array_equal(pp.solve(0.5), [0.5])
+ assert_array_equal(pp.solve(0.5, discontinuity=False), [])
+
+ assert_array_equal(pp.solve(1.5), [])
+ assert_array_equal(pp.solve(1.5, discontinuity=False), [])
+
+ def test_roots_random(self):
+ # Check high-order polynomials with random coefficients
+ np.random.seed(1234)
+
+ num = 0
+
+ for extrapolate in (True, False):
+ for order in range(0, 20):
+ x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
+ c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
+
+ pp = PPoly(c, x)
+ for y in [0, np.random.random()]:
+ r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
+
+ for i in range(2):
+ for j in range(3):
+ rr = r[i,j]
+ if rr.size > 0:
+ # Check that the reported roots indeed are roots
+ num += rr.size
+ val = pp(rr, extrapolate=extrapolate)[:,i,j]
+ cmpval = pp(rr, nu=1,
+ extrapolate=extrapolate)[:,i,j]
+ msg = f"({extrapolate!r}) r = {repr(rr)}"
+ assert_allclose((val-y) / cmpval, 0, atol=1e-7,
+ err_msg=msg)
+
+        # Check that a substantial number of roots was actually tested
+ assert_(num > 100, repr(num))
+
+ def test_roots_croots(self):
+ # Test the complex root finding algorithm
+ np.random.seed(1234)
+
+ for k in range(1, 15):
+ c = np.random.rand(k, 1, 130)
+
+ if k == 3:
+ # add a case with zero discriminant
+ c[:,0,0] = 1, 2, 1
+
+ for y in [0, np.random.random()]:
+ w = np.empty(c.shape, dtype=complex)
+ _ppoly._croots_poly1(c, w)
+
+ if k == 1:
+ assert_(np.isnan(w).all())
+ continue
+
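+                # Evaluate the polynomial at each reported root and
+                # normalise by the sum of term magnitudes so that the
+                # tolerance below is scale-free.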
+ res = 0
+ cres = 0
+ for i in range(k):
+ res += c[i,None] * w**(k-1-i)
+ cres += abs(c[i,None] * w**(k-1-i))
+ with np.errstate(invalid='ignore'):
+ res /= cres
+ res = res.ravel()
+ res = res[~np.isnan(res)]
+ assert_allclose(res, 0, atol=1e-10)
+
+ def test_extrapolate_attr(self):
+ # [ 1 - x**2 ]
+ c = np.array([[-1, 0, 1]]).T
+ x = np.array([0, 1])
+
+ for extrapolate in [True, False, None]:
+ pp = PPoly(c, x, extrapolate=extrapolate)
+ pp_d = pp.derivative()
+ pp_i = pp.antiderivative()
+
+ if extrapolate is False:
+ assert_(np.isnan(pp([-0.1, 1.1])).all())
+ assert_(np.isnan(pp_i([-0.1, 1.1])).all())
+ assert_(np.isnan(pp_d([-0.1, 1.1])).all())
+ assert_equal(pp.roots(), [1])
+ else:
+ assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
+ assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
+ assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
+ assert_allclose(pp.roots(), [1, -1])
+
+
+class TestBPoly:
+ def test_simple(self):
+ x = [0, 1]
+ c = [[3]]
+ bp = BPoly(c, x)
+ assert_allclose(bp(0.1), 3.)
+
+ def test_simple2(self):
+ x = [0, 1]
+ c = [[3], [1]]
+ bp = BPoly(c, x) # 3*(1-x) + 1*x
+ assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
+
+ def test_simple3(self):
+ x = [0, 1]
+ c = [[3], [1], [4]]
+ bp = BPoly(c, x) # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
+ assert_allclose(bp(0.2),
+ 3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
+
+ def test_simple4(self):
+ x = [0, 1]
+ c = [[1], [1], [1], [2]]
+ bp = BPoly(c, x)
+ assert_allclose(bp(0.3), 0.7**3 +
+ 3 * 0.7**2 * 0.3 +
+ 3 * 0.7 * 0.3**2 +
+ 2 * 0.3**3)
+
+ def test_simple5(self):
+ x = [0, 1]
+ c = [[1], [1], [8], [2], [1]]
+ bp = BPoly(c, x)
+ assert_allclose(bp(0.3), 0.7**4 +
+ 4 * 0.7**3 * 0.3 +
+ 8 * 6 * 0.7**2 * 0.3**2 +
+ 2 * 4 * 0.7 * 0.3**3 +
+ 0.3**4)
+
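+    def _demo_bernstein_form(self):
+        # Sketch of the closed form the simple tests above spell out by
+        # hand: on [0, 1], BPoly(c, x) evaluates
+        #     sum_a c[a] * binom(n, a) * t**a * (1 - t)**(n - a)
+        # with n = len(c) - 1.  scipy.special.comb is imported locally
+        # here as an assumption, not something the tests rely on.
+        from scipy.special import comb
+        c = [1., 1., 8., 2., 1.]            # same data as test_simple5
+        n = len(c) - 1
+        t = 0.3
+        ref = sum(c[a] * comb(n, a) * t**a * (1 - t)**(n - a)
+                  for a in range(n + 1))
+        assert_allclose(BPoly([[ci] for ci in c], [0, 1])(t), ref)
+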
+ def test_periodic(self):
+ x = [0, 1, 3]
+ c = [[3, 0], [0, 0], [0, 2]]
+ # [3*(1-x)**2, 2*((x-1)/2)**2]
+ bp = BPoly(c, x, extrapolate='periodic')
+
+ assert_allclose(bp(3.4), 3 * 0.6**2)
+ assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
+
+ assert_allclose(bp(3.4, 1), -6 * 0.6)
+ assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
+
+ def test_descending(self):
+ np.random.seed(0)
+
+ power = 3
+ for m in [10, 20, 30]:
+ x = np.sort(np.random.uniform(0, 10, m + 1))
+ ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
+            # We only need to flip the coefficients to get it right!
+ cd = ca[::-1].copy()
+
+ pa = BPoly(ca, x, extrapolate=True)
+ pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
+
+ x_test = np.random.uniform(-10, 20, 100)
+ assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
+ assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
+
+ pa_d = pa.derivative()
+ pd_d = pd.derivative()
+
+ assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
+
+            # Antiderivatives won't be equal because fixing continuity is
+            # done in the reverse order, but the definite integrals
+            # (differences of the antiderivative) should agree.
+ pa_i = pa.antiderivative()
+ pd_i = pd.antiderivative()
+ for a, b in np.random.uniform(-10, 20, (5, 2)):
+ int_a = pa.integrate(a, b)
+ int_d = pd.integrate(a, b)
+ assert_allclose(int_a, int_d, rtol=1e-12)
+ assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
+ rtol=1e-12)
+
+ def test_multi_shape(self):
+ c = np.random.rand(6, 2, 1, 2, 3)
+ x = np.array([0, 0.5, 1])
+ p = BPoly(c, x)
+ assert_equal(p.x.shape, x.shape)
+ assert_equal(p.c.shape, c.shape)
+ assert_equal(p(0.3).shape, c.shape[2:])
+        assert_equal(p(np.random.rand(5, 6)).shape,
+                     (5, 6) + c.shape[2:])
+
+ dp = p.derivative()
+ assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
+
+ def test_interval_length(self):
+ x = [0, 2]
+ c = [[3], [1], [4]]
+ bp = BPoly(c, x)
+ xval = 0.1
+ s = xval / 2 # s = (x - xa) / (xb - xa)
+ assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
+
+ def test_two_intervals(self):
+ x = [0, 1, 3]
+ c = [[3, 0], [0, 0], [0, 2]]
+ bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
+
+ assert_allclose(bp(0.4), 3 * 0.6*0.6)
+ assert_allclose(bp(1.7), 2 * (0.7/2)**2)
+
+ def test_extrapolate_attr(self):
+ x = [0, 2]
+ c = [[3], [1], [4]]
+ bp = BPoly(c, x)
+
+ for extrapolate in (True, False, None):
+ bp = BPoly(c, x, extrapolate=extrapolate)
+ bp_d = bp.derivative()
+ if extrapolate is False:
+ assert_(np.isnan(bp([-0.1, 2.1])).all())
+ assert_(np.isnan(bp_d([-0.1, 2.1])).all())
+ else:
+ assert_(not np.isnan(bp([-0.1, 2.1])).any())
+ assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
+
+
+class TestBPolyCalculus:
+ def test_derivative(self):
+ x = [0, 1, 3]
+ c = [[3, 0], [0, 0], [0, 2]]
+ bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
+ bp_der = bp.derivative()
+ assert_allclose(bp_der(0.4), -6*(0.6))
+ assert_allclose(bp_der(1.7), 0.7)
+
+        # derivatives evaluated via the nu argument of __call__
+ assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
+ [-6*(1-0.4), 6., 0.])
+ assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
+ [0.7, 1., 0])
+
+ def test_derivative_ppoly(self):
+ # make sure it's consistent w/ power basis
+ np.random.seed(1234)
+ m, k = 5, 8 # number of intervals, order
+ x = np.sort(np.random.random(m))
+ c = np.random.random((k, m-1))
+ bp = BPoly(c, x)
+ pp = PPoly.from_bernstein_basis(bp)
+
+ for d in range(k):
+ bp = bp.derivative()
+ pp = pp.derivative()
+ xp = np.linspace(x[0], x[-1], 21)
+ assert_allclose(bp(xp), pp(xp))
+
+ def test_deriv_inplace(self):
+ np.random.seed(1234)
+ m, k = 5, 8 # number of intervals, order
+ x = np.sort(np.random.random(m))
+ c = np.random.random((k, m-1))
+
+ # test both real and complex coefficients
+ for cc in [c.copy(), c*(1. + 2.j)]:
+ bp = BPoly(cc, x)
+ xp = np.linspace(x[0], x[-1], 21)
+ for i in range(k):
+ assert_allclose(bp(xp, i), bp.derivative(i)(xp))
+
+ def test_antiderivative_simple(self):
+ # f(x) = x for x \in [0, 1),
+ # (x-1)/2 for x \in [1, 3]
+ #
+ # antiderivative is then
+ # F(x) = x**2 / 2 for x \in [0, 1),
+ # 0.5*x*(x/2 - 1) + A for x \in [1, 3]
+ # where A = 3/4 for continuity at x = 1.
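+        # (at x = 1 the first piece gives 1/2 while 0.5*x*(x/2 - 1) gives
+        # -1/4, hence A = 1/2 + 1/4 = 3/4)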
+ x = [0, 1, 3]
+ c = [[0, 0], [1, 1]]
+
+ bp = BPoly(c, x)
+ bi = bp.antiderivative()
+
+ xx = np.linspace(0, 3, 11)
+ assert_allclose(bi(xx),
+ np.where(xx < 1, xx**2 / 2.,
+ 0.5 * xx * (xx/2. - 1) + 3./4),
+ atol=1e-12, rtol=1e-12)
+
+ def test_der_antider(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(11))
+ c = np.random.random((4, 10, 2, 3))
+ bp = BPoly(c, x)
+
+ xx = np.linspace(x[0], x[-1], 100)
+ assert_allclose(bp.antiderivative().derivative()(xx),
+ bp(xx), atol=1e-12, rtol=1e-12)
+
+ def test_antider_ppoly(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(11))
+ c = np.random.random((4, 10, 2, 3))
+ bp = BPoly(c, x)
+ pp = PPoly.from_bernstein_basis(bp)
+
+ xx = np.linspace(x[0], x[-1], 10)
+
+ assert_allclose(bp.antiderivative(2)(xx),
+ pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
+
+ def test_antider_continuous(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(11))
+ c = np.random.random((4, 10))
+ bp = BPoly(c, x).antiderivative()
+
+ xx = bp.x[1:-1]
+ assert_allclose(bp(xx - 1e-14),
+ bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
+
+ def test_integrate(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(11))
+ c = np.random.random((4, 10))
+ bp = BPoly(c, x)
+ pp = PPoly.from_bernstein_basis(bp)
+ assert_allclose(bp.integrate(0, 1),
+ pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
+
+ def test_integrate_extrap(self):
+ c = [[1]]
+ x = [0, 1]
+ b = BPoly(c, x)
+
+ # default is extrapolate=True
+ assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
+
+ # .integrate argument overrides self.extrapolate
+ b1 = BPoly(c, x, extrapolate=False)
+ assert_(np.isnan(b1.integrate(0, 2)))
+ assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
+
+ def test_integrate_periodic(self):
+ x = np.array([1, 2, 4])
+ c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+ P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
+ I = P.antiderivative()
+
+ period_int = I(4) - I(1)
+
+ assert_allclose(P.integrate(1, 4), period_int)
+ assert_allclose(P.integrate(-10, -7), period_int)
+ assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+ assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+ assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+ assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+ I(2) - I(1) + I(4) - I(3.5))
+ assert_allclose(P.integrate(3.5, 5 + 12),
+ I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+ assert_allclose(P.integrate(0, -1), I(2) - I(3))
+ assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+ assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+ def test_antider_neg(self):
+        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
+ c = [[1]]
+ x = [0, 1]
+ b = BPoly(c, x)
+
+ xx = np.linspace(0, 1, 21)
+
+ assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
+ atol=1e-12, rtol=1e-12)
+ assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
+ atol=1e-12, rtol=1e-12)
+
+
+class TestPolyConversions:
+ def test_bp_from_pp(self):
+ x = [0, 1, 3]
+ c = [[3, 2], [1, 8], [4, 3]]
+ pp = PPoly(c, x)
+ bp = BPoly.from_power_basis(pp)
+ pp1 = PPoly.from_bernstein_basis(bp)
+
+ xp = [0.1, 1.4]
+ assert_allclose(pp(xp), bp(xp))
+ assert_allclose(pp(xp), pp1(xp))
+
+ def test_bp_from_pp_random(self):
+ np.random.seed(1234)
+ m, k = 5, 8 # number of intervals, order
+ x = np.sort(np.random.random(m))
+ c = np.random.random((k, m-1))
+ pp = PPoly(c, x)
+ bp = BPoly.from_power_basis(pp)
+ pp1 = PPoly.from_bernstein_basis(bp)
+
+ xp = np.linspace(x[0], x[-1], 21)
+ assert_allclose(pp(xp), bp(xp))
+ assert_allclose(pp(xp), pp1(xp))
+
+ def test_pp_from_bp(self):
+ x = [0, 1, 3]
+ c = [[3, 3], [1, 1], [4, 2]]
+ bp = BPoly(c, x)
+ pp = PPoly.from_bernstein_basis(bp)
+ bp1 = BPoly.from_power_basis(pp)
+
+ xp = [0.1, 1.4]
+ assert_allclose(bp(xp), pp(xp))
+ assert_allclose(bp(xp), bp1(xp))
+
+ def test_broken_conversions(self):
+ # regression test for gh-10597: from_power_basis only accepts PPoly etc.
+ x = [0, 1, 3]
+ c = [[3, 3], [1, 1], [4, 2]]
+ pp = PPoly(c, x)
+ with assert_raises(TypeError):
+ PPoly.from_bernstein_basis(pp)
+
+ bp = BPoly(c, x)
+ with assert_raises(TypeError):
+ BPoly.from_power_basis(bp)
+
+
+class TestBPolyFromDerivatives:
+ def test_make_poly_1(self):
+ c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
+ assert_allclose(c1, [2., 3.])
+
+ def test_make_poly_2(self):
+ c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
+ assert_allclose(c1, [1., 1., 1.])
+
+ # f'(0) = 3
+ c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
+ assert_allclose(c2, [2., 7./2, 1.])
+
+ # f'(1) = 3
+ c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
+ assert_allclose(c3, [2., -0.5, 1.])
+
+ def test_make_poly_3(self):
+ # f'(0)=2, f''(0)=3
+ c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
+ assert_allclose(c1, [1., 5./3, 17./6, 4.])
+
+ # f'(1)=2, f''(1)=3
+ c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
+ assert_allclose(c2, [1., 19./6, 10./3, 4.])
+
+ # f'(0)=2, f'(1)=3
+ c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
+ assert_allclose(c3, [1., 5./3, 3., 4.])
+
+ def test_make_poly_12(self):
+ np.random.seed(12345)
+ ya = np.r_[0, np.random.random(5)]
+ yb = np.r_[0, np.random.random(5)]
+
+ c = BPoly._construct_from_derivatives(0, 1, ya, yb)
+ pp = BPoly(c[:, None], [0, 1])
+ for j in range(6):
+ assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
+ pp = pp.derivative()
+
+ def test_raise_degree(self):
+ np.random.seed(12345)
+ x = [0, 1]
+ k, d = 8, 5
+ c = np.random.random((k, 1, 2, 3, 4))
+ bp = BPoly(c, x)
+
+ c1 = BPoly._raise_degree(c, d)
+ bp1 = BPoly(c1, x)
+
+ xp = np.linspace(0, 1, 11)
+ assert_allclose(bp(xp), bp1(xp))
+
+ def test_xi_yi(self):
+ assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
+
+ def test_coords_order(self):
+ xi = [0, 0, 1]
+ yi = [[0], [0], [0]]
+ assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
+
+ def test_zeros(self):
+ xi = [0, 1, 2, 3]
+ yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree
+ pp = BPoly.from_derivatives(xi, yi)
+ assert_(pp.c.shape == (4, 3))
+
+ ppd = pp.derivative()
+ for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
+ assert_allclose([pp(xp), ppd(xp)], [0., 0.])
+
+ def _make_random_mk(self, m, k):
+ # k derivatives at each breakpoint
+ np.random.seed(1234)
+ xi = np.asarray([1. * j**2 for j in range(m+1)])
+ yi = [np.random.random(k) for j in range(m+1)]
+ return xi, yi
+
+ def test_random_12(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+ pp = BPoly.from_derivatives(xi, yi)
+
+ for order in range(k//2):
+ assert_allclose(pp(xi), [yy[order] for yy in yi])
+ pp = pp.derivative()
+
+ def test_order_zero(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+ assert_raises(ValueError, BPoly.from_derivatives,
+ **dict(xi=xi, yi=yi, orders=0))
+
+ def test_orders_too_high(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+
+ BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok
+ assert_raises(ValueError, BPoly.from_derivatives, # but this is not
+ **dict(xi=xi, yi=yi, orders=2*k))
+
+ def test_orders_global(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+
+        # NB: the local polynomials are of order 5, which means that
+        # derivatives up to the 2nd are used at each breakpoint
+ order = 5
+ pp = BPoly.from_derivatives(xi, yi, orders=order)
+
+ for j in range(order//2+1):
+ assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+        # now repeat with `order` being even: on each interval, order//2
+        # derivatives are used at the right-hand endpoint and
+        # order//2 + 1 derivatives at the left-hand endpoint
+ order = 6
+ pp = BPoly.from_derivatives(xi, yi, orders=order)
+ for j in range(order//2):
+ assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
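+
+        # Concrete counts for the two cases above: order=5 gives 6
+        # conditions, split 3/3 between the endpoints (the value plus two
+        # derivatives each); order=6 gives 7 conditions, split 4 at the
+        # left endpoint against 3 at the right.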
+
+ def test_orders_local(self):
+ m, k = 7, 12
+ xi, yi = self._make_random_mk(m, k)
+
+ orders = [o + 1 for o in range(m)]
+ for i, x in enumerate(xi[1:-1]):
+ pp = BPoly.from_derivatives(xi, yi, orders=orders)
+ for j in range(orders[i] // 2 + 1):
+ assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
+
+ def test_yi_trailing_dims(self):
+ m, k = 7, 5
+ xi = np.sort(np.random.random(m+1))
+ yi = np.random.random((m+1, k, 6, 7, 8))
+ pp = BPoly.from_derivatives(xi, yi)
+ assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
+
+ def test_gh_5430(self):
+ # At least one of these raises an error unless gh-5430 is
+ # fixed. In py2k an int is implemented using a C long, so
+ # which one fails depends on your system. In py3k there is only
+ # one arbitrary precision integer type, so both should fail.
+ orders = np.int32(1)
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+ orders = np.int64(1)
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+ orders = 1
+ # This worked before; make sure it still works
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+
+
+class TestNdPPoly:
+ def test_simple_1d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5)
+ x = np.linspace(0, 1, 5+1)
+
+ xi = np.random.rand(200)
+
+ p = NdPPoly(c, (x,))
+ v1 = p((xi,))
+
+ v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
+ assert_allclose(v1, v2)
+
+ def test_simple_2d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7)
+ x = np.linspace(0, 1, 6+1)
+ y = np.linspace(0, 1, 7+1)**2
+
+ xi = np.random.rand(200)
+ yi = np.random.rand(200)
+
+ v1 = np.empty([len(xi), 1], dtype=c.dtype)
+ v1.fill(np.nan)
+ _ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
+ (x, y),
+ np.array([4, 5], dtype=np.intc),
+ np.c_[xi, yi],
+ np.array([0, 0], dtype=np.intc),
+ 1,
+ v1)
+ v1 = v1.ravel()
+ v2 = _ppoly2d_eval(c, (x, y), xi, yi)
+ assert_allclose(v1, v2)
+
+ p = NdPPoly(c, (x, y))
+ for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
+ v1 = p(np.c_[xi, yi], nu=nu)
+ v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
+ assert_allclose(v1, v2, err_msg=repr(nu))
+
+ def test_simple_3d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9)
+ x = np.linspace(0, 1, 7+1)
+ y = np.linspace(0, 1, 8+1)**2
+ z = np.linspace(0, 1, 9+1)**3
+
+ xi = np.random.rand(40)
+ yi = np.random.rand(40)
+ zi = np.random.rand(40)
+
+ p = NdPPoly(c, (x, y, z))
+
+ for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
+ (6, 0, 2)):
+ v1 = p((xi, yi, zi), nu=nu)
+ v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
+ assert_allclose(v1, v2, err_msg=repr(nu))
+
+ def test_simple_4d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
+ x = np.linspace(0, 1, 8+1)
+ y = np.linspace(0, 1, 9+1)**2
+ z = np.linspace(0, 1, 10+1)**3
+ u = np.linspace(0, 1, 11+1)**4
+
+ xi = np.random.rand(20)
+ yi = np.random.rand(20)
+ zi = np.random.rand(20)
+ ui = np.random.rand(20)
+
+ p = NdPPoly(c, (x, y, z, u))
+ v1 = p((xi, yi, zi, ui))
+
+ v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
+ assert_allclose(v1, v2)
+
+ def test_deriv_1d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5)
+ x = np.linspace(0, 1, 5+1)
+
+ p = NdPPoly(c, (x,))
+
+ # derivative
+ dp = p.derivative(nu=[1])
+ p1 = PPoly(c, x)
+ dp1 = p1.derivative()
+ assert_allclose(dp.c, dp1.c)
+
+ # antiderivative
+ dp = p.antiderivative(nu=[2])
+ p1 = PPoly(c, x)
+ dp1 = p1.antiderivative(2)
+ assert_allclose(dp.c, dp1.c)
+
+ def test_deriv_3d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9)
+ x = np.linspace(0, 1, 7+1)
+ y = np.linspace(0, 1, 8+1)**2
+ z = np.linspace(0, 1, 9+1)**3
+
+ p = NdPPoly(c, (x, y, z))
+
+ # differentiate vs x
+ p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
+ dp = p.derivative(nu=[2])
+ dp1 = p1.derivative(2)
+ assert_allclose(dp.c,
+ dp1.c.transpose(0, 2, 3, 1, 4, 5))
+
+ # antidifferentiate vs y
+ p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
+ dp = p.antiderivative(nu=[0, 1, 0])
+ dp1 = p1.antiderivative(1)
+ assert_allclose(dp.c,
+ dp1.c.transpose(2, 0, 3, 4, 1, 5))
+
+ # differentiate vs z
+ p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
+ dp = p.derivative(nu=[0, 0, 3])
+ dp1 = p1.derivative(3)
+ assert_allclose(dp.c,
+ dp1.c.transpose(2, 3, 0, 4, 5, 1))
+
+ def test_deriv_3d_simple(self):
+        # Antidifferentiate the constant 1 to obtain x * y**2 * z**4 / (2! 4!)
+        # (gamma(3) = 2! and gamma(5) = 4! in the check below)
+
+ c = np.ones((1, 1, 1, 3, 4, 5))
+ x = np.linspace(0, 1, 3+1)**1
+ y = np.linspace(0, 1, 4+1)**2
+ z = np.linspace(0, 1, 5+1)**3
+
+ p = NdPPoly(c, (x, y, z))
+ ip = p.antiderivative((1, 0, 4))
+ ip = ip.antiderivative((0, 2, 0))
+
+ xi = np.random.rand(20)
+ yi = np.random.rand(20)
+ zi = np.random.rand(20)
+
+ assert_allclose(ip((xi, yi, zi)),
+ xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
+
+ def test_integrate_2d(self):
+ np.random.seed(1234)
+ c = np.random.rand(4, 5, 16, 17)
+ x = np.linspace(0, 1, 16+1)**1
+ y = np.linspace(0, 1, 17+1)**2
+
+ # make continuously differentiable so that nquad() has an
+ # easier time
+ c = c.transpose(0, 2, 1, 3)
+ cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+ _ppoly.fix_continuity(cx, x, 2)
+ c = cx.reshape(c.shape)
+ c = c.transpose(0, 2, 1, 3)
+ c = c.transpose(1, 3, 0, 2)
+ cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+ _ppoly.fix_continuity(cx, y, 2)
+ c = cx.reshape(c.shape)
+ c = c.transpose(2, 0, 3, 1).copy()
+
+ # Check integration
+ p = NdPPoly(c, (x, y))
+
+ for ranges in [[(0, 1), (0, 1)],
+ [(0, 0.5), (0, 1)],
+ [(0, 1), (0, 0.5)],
+ [(0.3, 0.7), (0.6, 0.2)]]:
+
+ ig = p.integrate(ranges)
+ ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
+ opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
+ assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
+ err_msg=repr(ranges))
+
+ def test_integrate_1d(self):
+ np.random.seed(1234)
+ c = np.random.rand(4, 5, 6, 16, 17, 18)
+ x = np.linspace(0, 1, 16+1)**1
+ y = np.linspace(0, 1, 17+1)**2
+ z = np.linspace(0, 1, 18+1)**3
+
+ # Check 1-D integration
+ p = NdPPoly(c, (x, y, z))
+
+ u = np.random.rand(200)
+ v = np.random.rand(200)
+ a, b = 0.2, 0.7
+
+ px = p.integrate_1d(a, b, axis=0)
+ pax = p.antiderivative((1, 0, 0))
+ assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
+
+ py = p.integrate_1d(a, b, axis=1)
+ pay = p.antiderivative((0, 1, 0))
+ assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
+
+ pz = p.integrate_1d(a, b, axis=2)
+ paz = p.antiderivative((0, 0, 1))
+ assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
+
+
+def _ppoly_eval_1(c, x, xps):
+ """Evaluate piecewise polynomial manually"""
+ out = np.zeros((len(xps), c.shape[2]))
+ for i, xp in enumerate(xps):
+ if xp < 0 or xp > 1:
+ out[i,:] = np.nan
+ continue
+ j = np.searchsorted(x, xp) - 1
+ d = xp - x[j]
+ assert_(x[j] <= xp < x[j+1])
+ r = sum(c[k,j] * d**(c.shape[0]-k-1)
+ for k in range(c.shape[0]))
+ out[i,:] = r
+ return out
+
+
+def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
+ """Evaluate piecewise polynomial manually (another way)"""
+ a = breaks[0]
+ b = breaks[-1]
+ K = coeffs.shape[0]
+
+ saveshape = np.shape(xnew)
+ xnew = np.ravel(xnew)
+ res = np.empty_like(xnew)
+ mask = (xnew >= a) & (xnew <= b)
+ res[~mask] = fill
+ xx = xnew.compress(mask)
+ indxs = np.searchsorted(breaks, xx)-1
+ indxs = indxs.clip(0, len(breaks))
+ pp = coeffs
+ diff = xx - breaks.take(indxs)
+ V = np.vander(diff, N=K)
+ values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
+ res[mask] = values
+ res.shape = saveshape
+ return res
+
+
+def _dpow(x, y, n):
+ """
+ d^n (x**y) / dx^n
+ """
+ if n < 0:
+ raise ValueError("invalid derivative order")
+ elif n > y:
+ return 0
+ else:
+ return poch(y - n + 1, n) * x**(y - n)
+
+
+def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
+ """
+ Straightforward evaluation of 2-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ nx, ny = c.shape[:2]
+
+ for jout, (x, y) in enumerate(zip(xnew, ynew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+
+ val = 0
+
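+        # Tensor-product evaluation: each local coefficient multiplies the
+        # nu-th derivatives of the corresponding monomials s1**k1 and s2**k2.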
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ val += (c[nx-k1-1,ny-k2-1,j1,j2]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1]))
+
+ out[jout] = val
+
+ return out
+
+
+def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
+ """
+ Straightforward evaluation of 3-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ nx, ny, nz = c.shape[:3]
+
+ for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1]) and
+ (xs[2][0] <= z <= xs[2][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+ j3 = np.searchsorted(xs[2], z) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+ s3 = z - xs[2][j3]
+
+ val = 0
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ for k3 in range(c.shape[2]):
+ val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1])
+ * _dpow(s3, k3, nu[2]))
+
+ out[jout] = val
+
+ return out
+
+
+def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
+ """
+ Straightforward evaluation of 4-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0, 0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ mx, my, mz, mu = c.shape[:4]
+
+ for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1]) and
+ (xs[2][0] <= z <= xs[2][-1]) and
+ (xs[3][0] <= u <= xs[3][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+ j3 = np.searchsorted(xs[2], z) - 1
+ j4 = np.searchsorted(xs[3], u) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+ s3 = z - xs[2][j3]
+ s4 = u - xs[3][j4]
+
+ val = 0
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ for k3 in range(c.shape[2]):
+ for k4 in range(c.shape[3]):
+ val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1])
+ * _dpow(s3, k3, nu[2])
+ * _dpow(s4, k4, nu[3]))
+
+ out[jout] = val
+
+ return out
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3e1ed8968ae5fd55e2f55d8a24c0ee2fa4ade45
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py
@@ -0,0 +1,284 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (griddata, NearestNDInterpolator,
+ LinearNDInterpolator,
+ CloughTocher2DInterpolator)
+
+
+parametrize_interpolators = pytest.mark.parametrize(
+ "interpolator", [NearestNDInterpolator, LinearNDInterpolator,
+ CloughTocher2DInterpolator]
+)
+
+
+class TestGriddata:
+ def test_fill_value(self):
+ x = [(0,0), (0,1), (1,0)]
+ y = [1, 2, 3]
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
+ assert_array_equal(yi, [-1., -1, 1])
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)])
+ assert_array_equal(yi, [np.nan, np.nan, 1])
+
+ def test_alternative_call(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
+ rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multivalue_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, x, method=method, rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multipoint_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
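+        # Broadcast each data point into 3 identical query points, giving
+        # xi shape (5, 3, 2).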
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_complex_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 2j*y[::-1]
+
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_1d(self):
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-14)
+
+ def test_1d_borders(self):
+        # Test the nearest-neighbor case with xi outside
+        # the range of the data points.
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+ xi = np.array([0.9, 6.5])
+ yi_should = np.array([1.0, 1.0])
+
+ method = 'nearest'
+ assert_allclose(griddata(x, y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata((x, ), y, (xi, ),
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+
+ def test_1d_unsorted(self):
+ x = np.array([2.5, 1, 4.5, 5, 6, 3])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-10)
+
+ def test_square_rescale_manual(self):
+ points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.float64)
+ points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)],
+ dtype=np.float64)
+ values = np.array([1., 2., -3., 5., 9.], dtype=np.float64)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+
+ for method in ('nearest', 'linear', 'cubic'):
+ msg = method
+ zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
+ method=method)
+ zi_rescaled = griddata(points, values, xi, method=method,
+ rescale=True)
+ assert_allclose(zi, zi_rescaled, err_msg=msg,
+ atol=1e-12)
+
+ def test_xi_1d(self):
+ # Check that 1-D xi is interpreted as a coordinate
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 2j*y[::-1]
+
+ xi = np.array([0.5, 0.5])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ p1 = griddata(x, y, xi, method=method)
+ p2 = griddata(x, y, xi[None,:], method=method)
+ assert_allclose(p1, p2, err_msg=method)
+
+ xi1 = np.array([0.5])
+ xi3 = np.array([0.5, 0.5, 0.5])
+ assert_raises(ValueError, griddata, x, y, xi1,
+ method=method)
+ assert_raises(ValueError, griddata, x, y, xi3,
+ method=method)
+
+
+class TestNearestNDInterpolator:
+ def test_nearest_options(self):
+        # smoke test that NearestNDInterpolator accepts cKDTree options
+ npts, nd = 4, 3
+ x = np.arange(npts*nd).reshape((npts, nd))
+ y = np.arange(npts)
+ nndi = NearestNDInterpolator(x, y)
+
+ opts = {'balanced_tree': False, 'compact_nodes': False}
+ nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
+ assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
+
+ def test_nearest_list_argument(self):
+ nd = np.array([[0, 0, 0, 0, 1, 0, 1],
+ [0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 2]])
+ d = nd[:, 3:]
+
+ # z is np.array
+ NI = NearestNDInterpolator((d[0], d[1]), d[2])
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+ # z is list
+ NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+ def test_nearest_query_options(self):
+ nd = np.array([[0, 0.5, 0, 1],
+ [0, 0, 0.5, 1],
+ [0, 1, 1, 2]])
+ delta = 0.1
+ query_points = [0 + delta, 1 + delta], [0 + delta, 1 + delta]
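+        # Each query point is offset by `delta` in both coordinates from a
+        # data point, so its nearest Euclidean distance is sqrt(2)*delta.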
+
+ # case 1 - query max_dist is smaller than
+ # the query points' nearest distance to nd.
+ NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
+ assert_array_equal(NI(query_points, distance_upper_bound=distance_upper_bound),
+ [np.nan, np.nan])
+
+        # case 2 - with p = np.inf (Chebyshev distance) the nearest distance
+        # is only `delta`, which is within the bound, so this returns [0, 2]
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
+ p = np.inf
+ assert_array_equal(
+ NI(query_points, distance_upper_bound=distance_upper_bound, p=p),
+ [0, 2]
+ )
+
+        # case 3 - query max_dist is larger, so it should return non-NaN values
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) + 1e-7
+ assert_array_equal(
+ NI(query_points, distance_upper_bound=distance_upper_bound),
+ [0, 2]
+ )
+
+ def test_nearest_query_valid_inputs(self):
+ nd = np.array([[0, 1, 0, 1],
+ [0, 0, 1, 1],
+ [0, 1, 1, 2]])
+ NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
+ with assert_raises(TypeError):
+ NI([0.5, 0.5], query_options="not a dictionary")
+
+
+class TestNDInterpolators:
+ @parametrize_interpolators
+ def test_broadcastable_input(self, interpolator):
+ # input data
+ np.random.seed(0)
+ x = np.random.random(10)
+ y = np.random.random(10)
+ z = np.hypot(x, y)
+
+ # x-y grid for interpolation
+ X = np.linspace(min(x), max(x))
+ Y = np.linspace(min(y), max(y))
+ X, Y = np.meshgrid(X, Y)
+ XY = np.vstack((X.ravel(), Y.ravel())).T
+ interp = interpolator(list(zip(x, y)), z)
+ # single array input
+ interp_points0 = interp(XY)
+ # tuple input
+ interp_points1 = interp((X, Y))
+ interp_points2 = interp((X, 0.0))
+ # broadcastable input
+ interp_points3 = interp(X, Y)
+ interp_points4 = interp(X, 0.0)
+
+ assert_equal(interp_points0.size ==
+ interp_points1.size ==
+ interp_points2.size ==
+ interp_points3.size ==
+ interp_points4.size, True)
+
+ @parametrize_interpolators
+ def test_read_only(self, interpolator):
+ # input data
+ np.random.seed(0)
+ xy = np.random.random((10, 2))
+ x, y = xy[:, 0], xy[:, 1]
+ z = np.hypot(x, y)
+
+ # interpolation points
+ XY = np.random.random((50, 2))
+
+ xy.setflags(write=False)
+ z.setflags(write=False)
+ XY.setflags(write=False)
+
+ interp = interpolator(xy, z)
+ interp(XY)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py
new file mode 100644
index 0000000000000000000000000000000000000000..f58e01e5e730d2e5c4630f41933a0b77a17efc77
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py
@@ -0,0 +1,104 @@
+from numpy.testing import (assert_array_equal, assert_array_almost_equal)
+from scipy.interpolate import pade
+
+
+def test_pade_trivial():
+ nump, denomp = pade([1.0], 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+ nump, denomp = pade([1.0], 0, 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+
+def test_pade_4term_exp():
+ # First four Taylor coefficients of exp(x).
+ # Unlike poly1d, the first array element is the zero-order term.
+ an = [1.0, 1.0, 0.5, 1.0/6]
+
+ nump, denomp = pade(an, 0)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+    # Testing explicit specification of the optional parameter n.
+ nump, denomp = pade(an, 0, 3)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2, 1)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+    # Testing truncation of the input array when m + n + 1 < len(an).
+ nump, denomp = pade(an, 0, 2)
+ assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [1.0/2, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])
+
+ nump, denomp = pade(an, 2, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])
+
+
+def test_pade_ints():
+ # Simple test sequences (one of ints, one of floats).
+ an_int = [1, 2, 3, 4]
+ an_flt = [1.0, 2.0, 3.0, 4.0]
+
+    # Make sure integer arrays give the same result as float arrays with the same values.
+ for i in range(0, len(an_int)):
+ for j in range(0, len(an_int) - i):
+
+ # Create float and int pade approximation for given order.
+ nump_int, denomp_int = pade(an_int, i, j)
+ nump_flt, denomp_flt = pade(an_flt, i, j)
+
+ # Check that they are the same.
+ assert_array_equal(nump_int.c, nump_flt.c)
+ assert_array_equal(denomp_int.c, denomp_flt.c)
+
+
+def test_pade_complex():
+ # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
+    # Variable x is a parameter - these tests will work with any complex number.
+ x = 0.2 + 0.6j
+ an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
+ -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 2, 2)
+ assert_array_almost_equal(
+ nump.c,
+ [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0]
+ )
+ assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py
new file mode 100644
index 0000000000000000000000000000000000000000..31215b1e986a9fc57a3b0f18823bcc6ffd9e81f9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py
@@ -0,0 +1,941 @@
+import warnings
+import io
+import numpy as np
+
+from numpy.testing import (
+ assert_almost_equal, assert_array_equal, assert_array_almost_equal,
+ assert_allclose, assert_equal, assert_)
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (
+ KroghInterpolator, krogh_interpolate,
+ BarycentricInterpolator, barycentric_interpolate,
+ approximate_taylor_polynomial, CubicHermiteSpline, pchip,
+ PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
+ make_interp_spline)
+
+
+def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
+ extra_args={}):
+ np.random.seed(1234)
+
+ x = [-1, 0, 1, 2, 3, 4]
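+    # Build a transpose order that places the leading sample axis (length 6)
+    # at position `axis` within y.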
+ s = list(range(1, len(y_shape)+1))
+ s.insert(axis % (len(y_shape)+1), 0)
+ y = np.random.rand(*((6,) + y_shape)).transpose(s)
+
+ xi = np.zeros(x_shape)
+ if interpolator_cls is CubicHermiteSpline:
+ dydx = np.random.rand(*((6,) + y_shape)).transpose(s)
+ yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
+ else:
+ yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
+
+ target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ + x_shape + y.shape[axis:][1:])
+ assert_equal(yi.shape, target_shape)
+
+ # check it works also with lists
+ if x_shape and y.size > 0:
+ if interpolator_cls is CubicHermiteSpline:
+ interpolator_cls(list(x), list(y), list(dydx), axis=axis,
+ **extra_args)(list(xi))
+ else:
+ interpolator_cls(list(x), list(y), axis=axis,
+ **extra_args)(list(xi))
+
+ # check also values
+ if xi.size > 0 and deriv_shape is None:
+ bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
+ yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
+ yv = yv.reshape(bs_shape)
+
+ yi, y = np.broadcast_arrays(yi, yv)
+ assert_allclose(yi, y)
+
+
+SHAPES = [(), (0,), (1,), (6, 2, 5)]
+
+
+def test_shapes():
+
+ def spl_interp(x, y, axis):
+ return make_interp_spline(x, y, axis=axis)
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
+ pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ if ip != CubicSpline:
+ check_shape(ip, s1, s2, None, axis)
+ else:
+ for bc in ['natural', 'clamped']:
+ extra = {'bc_type': bc}
+ check_shape(ip, s1, s2, None, axis, extra)
+
+
+def test_derivs_shapes():
+ for ip in [KroghInterpolator, BarycentricInterpolator]:
+ def interpolator_derivs(x, y, axis=0):
+ return ip(x, y, axis).derivatives
+
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(interpolator_derivs, s1, s2, (6,), axis)
+
+
+def test_deriv_shapes():
+ def krogh_deriv(x, y, axis=0):
+ return KroghInterpolator(x, y, axis).derivative
+
+ def bary_deriv(x, y, axis=0):
+ return BarycentricInterpolator(x, y, axis).derivative
+
+ def pchip_deriv(x, y, axis=0):
+ return pchip(x, y, axis).derivative()
+
+ def pchip_deriv2(x, y, axis=0):
+ return pchip(x, y, axis).derivative(2)
+
+ def pchip_antideriv(x, y, axis=0):
+ return pchip(x, y, axis).antiderivative()
+
+ def pchip_antideriv2(x, y, axis=0):
+ return pchip(x, y, axis).antiderivative(2)
+
+ def pchip_deriv_inplace(x, y, axis=0):
+ class P(PchipInterpolator):
+ def __call__(self, x):
+ return PchipInterpolator.__call__(self, x, 1)
+ return P(x, y, axis)
+
+ def akima_deriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).derivative()
+
+ def akima_antideriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).antiderivative()
+
+ def cspline_deriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).derivative()
+
+ def cspline_antideriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).antiderivative()
+
+ def bspl_deriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).derivative()
+
+ def bspl_antideriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).antiderivative()
+
+ for ip in [krogh_deriv, bary_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
+ pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
+ cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(ip, s1, s2, (), axis)
+
+
+def test_complex():
+ x = [1, 2, 3, 4]
+ y = [1, 2, 1j, 3]
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, CubicSpline]:
+ p = ip(x, y)
+ assert_allclose(y, p(x))
+
+ dydx = [0, -1j, 2, 3j]
+ p = CubicHermiteSpline(x, y, dydx)
+ assert_allclose(y, p(x))
+ assert_allclose(dydx, p(x, 1))
+
+
+class TestKrogh:
+ def setup_method(self):
+ self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
+ self.test_xs = np.linspace(-1,1,100)
+ self.xs = np.linspace(-1,1,5)
+ self.ys = self.true_poly(self.xs)
+
+ def test_lagrange(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_scalar(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(7),P(7))
+ assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
+
+ def test_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_low_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs,len(self.xs)+2)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ m = 10
+ r = P.derivatives(self.test_xs,m)
+ for i in range(m):
+ assert_almost_equal(P.derivative(self.test_xs,i),r[i])
+
+ def test_high_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ for i in range(len(self.xs), 2*len(self.xs)):
+ assert_almost_equal(P.derivative(self.test_xs,i),
+ np.zeros(len(self.test_xs)))
+
+ def test_ndim_derivatives(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = KroghInterpolator(self.xs, ys, axis=0)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(D[i],
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1))
+
+ def test_ndim_derivative(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = KroghInterpolator(self.xs, ys, axis=0)
+ for i in range(P.n):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1))
+
+ def test_hermite(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_vector(self):
+ xs = [0, 1, 2]
+ ys = np.array([[0,1],[1,0],[2,1]])
+ P = KroghInterpolator(xs,ys)
+ Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
+ test_xs = np.linspace(-1,3,100)
+ assert_almost_equal(P(test_xs),
+ np.asarray([p(test_xs) for p in Pi]).T)
+ assert_almost_equal(P.derivatives(test_xs),
+ np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
+ (1,2,0)))
+
+ def test_empty(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(P([]), [])
+
+ def test_shapes_scalarvalue(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(np.shape(P(0)), ())
+ assert_array_equal(np.shape(P(np.array(0))), ())
+ assert_array_equal(np.shape(P([0])), (1,))
+ assert_array_equal(np.shape(P([0,1])), (2,))
+
+ def test_shapes_scalarvalue_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,))
+ assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+ def test_shapes_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ assert_array_equal(np.shape(P(0)), (3,))
+ assert_array_equal(np.shape(P([0])), (1,3))
+ assert_array_equal(np.shape(P([0,1])), (2,3))
+
+ def test_shapes_1d_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
+ assert_array_equal(np.shape(P(0)), (1,))
+ assert_array_equal(np.shape(P([0])), (1,1))
+ assert_array_equal(np.shape(P([0,1])), (2,1))
+
+ def test_shapes_vectorvalue_derivative(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+ def test_wrapper(self):
+ P = KroghInterpolator(self.xs, self.ys)
+ ki = krogh_interpolate
+ assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
+ assert_almost_equal(P.derivative(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=2))
+ assert_almost_equal(P.derivatives(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+ def test_int_inputs(self):
+ # Check input args are cast correctly to floats, gh-3669
+ x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
+ 13104, 60000]
+ offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
+ -0.48002351, -0.34925329, -0.26503107,
+ -0.13148093, -0.12988833, -0.12979296,
+ -0.12973574, -0.08582937, 0.05])
+ f = KroghInterpolator(x, offset_cdf)
+
+ assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
+ 0, atol=1e-10)
+
+ def test_derivatives_complex(self):
+        # regression test for gh-7381: krogh.derivatives(0) fails for complex y
+ x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
+ func = KroghInterpolator(x, y)
+ cmplx = func.derivatives(0)
+
+ cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
+ 1j*KroghInterpolator(x, y.imag).derivatives(0))
+ assert_allclose(cmplx, cmplx2, atol=1e-15)
+
+ def test_high_degree_warning(self):
+ with pytest.warns(UserWarning, match="40 degrees provided,"):
+ KroghInterpolator(np.arange(40), np.ones(40))
+
+
+class TestTaylor:
+ def test_exponential(self):
+ degree = 5
+ p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
+ for i in range(degree+1):
+ assert_almost_equal(p(0),1)
+ p = p.deriv()
+ assert_almost_equal(p(0),0)
+
+
+class TestBarycentric:
+ def setup_method(self):
+ self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
+ self.test_xs = np.linspace(-1, 1, 100)
+ self.xs = np.linspace(-1, 1, 5)
+ self.ys = self.true_poly(self.xs)
+
+ def test_lagrange(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_allclose(P(self.test_xs), self.true_poly(self.test_xs))
+
+ def test_scalar(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_allclose(P(7), self.true_poly(7))
+ assert_allclose(P(np.array(7)), self.true_poly(np.array(7)))
+
+ def test_derivatives(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(self.true_poly.deriv(i)(self.test_xs), D[i])
+
+ def test_low_derivatives(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ D = P.derivatives(self.test_xs, len(self.xs)+2)
+ for i in range(D.shape[0]):
+ assert_allclose(self.true_poly.deriv(i)(self.test_xs),
+ D[i],
+ atol=1e-12)
+
+ def test_derivative(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ m = 10
+ r = P.derivatives(self.test_xs, m)
+ for i in range(m):
+ assert_allclose(P.derivative(self.test_xs, i), r[i])
+
+ def test_high_derivative(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ for i in range(len(self.xs), 5*len(self.xs)):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.zeros(len(self.test_xs)))
+
+ def test_ndim_derivatives(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = BarycentricInterpolator(self.xs, ys, axis=0)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(D[i],
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1),
+ atol=1e-12)
+
+ def test_ndim_derivative(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = BarycentricInterpolator(self.xs, ys, axis=0)
+ for i in range(P.n):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1),
+ atol=1e-12)
+
+ def test_delayed(self):
+ P = BarycentricInterpolator(self.xs)
+ P.set_yi(self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+ def test_append(self):
+ P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
+ P.add_xi(self.xs[3:], self.ys[3:])
+ assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+ def test_vector(self):
+ xs = [0, 1, 2]
+ ys = np.array([[0, 1], [1, 0], [2, 1]])
+ BI = BarycentricInterpolator
+ P = BI(xs, ys)
+ Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
+ test_xs = np.linspace(-1, 3, 100)
+ assert_almost_equal(P(test_xs),
+ np.asarray([p(test_xs) for p in Pi]).T)
+
+ def test_shapes_scalarvalue(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_array_equal(np.shape(P(0)), ())
+ assert_array_equal(np.shape(P(np.array(0))), ())
+ assert_array_equal(np.shape(P([0])), (1,))
+ assert_array_equal(np.shape(P([0, 1])), (2,))
+
+ def test_shapes_scalarvalue_derivative(self):
+ P = BarycentricInterpolator(self.xs,self.ys)
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,))
+ assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+ def test_shapes_vectorvalue(self):
+ P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
+ assert_array_equal(np.shape(P(0)), (3,))
+ assert_array_equal(np.shape(P([0])), (1, 3))
+ assert_array_equal(np.shape(P([0, 1])), (2, 3))
+
+ def test_shapes_1d_vectorvalue(self):
+ P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
+ assert_array_equal(np.shape(P(0)), (1,))
+ assert_array_equal(np.shape(P([0])), (1, 1))
+ assert_array_equal(np.shape(P([0,1])), (2, 1))
+
+ def test_shapes_vectorvalue_derivative(self):
+ P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+ def test_wrapper(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ bi = barycentric_interpolate
+ assert_allclose(P(self.test_xs), bi(self.xs, self.ys, self.test_xs))
+ assert_allclose(P.derivative(self.test_xs, 2),
+ bi(self.xs, self.ys, self.test_xs, der=2))
+ assert_allclose(P.derivatives(self.test_xs, 2),
+ bi(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+ def test_int_input(self):
+ x = 1000 * np.arange(1, 11) # np.prod(x[-1] - x[:-1]) overflows
+ y = np.arange(1, 11)
+ value = barycentric_interpolate(x, y, 1000 * 9.5)
+ assert_almost_equal(value, 9.5)
+
+ def test_large_chebyshev(self):
+        # The barycentric weights for Chebyshev points of the second kind
+        # are known analytically. Naive calculation of barycentric weights will fail
+ # for large N because of numerical underflow and overflow. We test
+ # correctness for large N against analytical Chebyshev weights.
+
+        # Without capacity scaling or permutation, n=800 fails;
+        # with just capacity scaling, n=1097 fails;
+        # with both capacity scaling and random permutation, n=30000 succeeds.
+ n = 1100
+ j = np.arange(n + 1).astype(np.float64)
+ x = np.cos(j * np.pi / n)
+
+ # See page 506 of Berrut and Trefethen 2004 for this formula
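+        # (w_j = (-1)**j * delta_j, with delta_j = 1/2 at the two endpoints
+        # and delta_j = 1 otherwise)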
+ w = (-1) ** j
+ w[0] *= 0.5
+ w[-1] *= 0.5
+
+ P = BarycentricInterpolator(x)
+
+ # It's okay to have a constant scaling factor in the weights because it
+ # cancels out in the evaluation of the polynomial.
+ factor = P.wi[0]
+ assert_almost_equal(P.wi / (2 * factor), w)
+
+ def test_warning(self):
+        # Test that the divide-by-zero warning is properly suppressed when the
+        # evaluation points coincide with the interpolation points
+ P = BarycentricInterpolator([0, 1], [1, 2])
+ with np.errstate(divide='raise'):
+ yi = P(P.xi)
+
+ # Check if the interpolated values match the input values
+ # at the nodes
+ assert_almost_equal(yi, P.yi.ravel())
+
+ def test_repeated_node(self):
+ # check that a repeated node raises a ValueError
+ # (computing the weights requires division by xi[i] - xi[j])
+ xis = np.array([0.1, 0.5, 0.9, 0.5])
+ ys = np.array([1, 2, 3, 4])
+ with pytest.raises(ValueError,
+ match="Interpolation points xi must be distinct."):
+ BarycentricInterpolator(xis, ys)
+
+
+class TestPCHIP:
+ def _make_random(self, npts=20):
+ np.random.seed(1234)
+ xi = np.sort(np.random.random(npts))
+ yi = np.random.random(npts)
+ return pchip(xi, yi), xi, yi
+
+ def test_overshoot(self):
+ # PCHIP should not overshoot
+ p, xi, yi = self._make_random()
+ for i in range(len(xi)-1):
+ x1, x2 = xi[i], xi[i+1]
+ y1, y2 = yi[i], yi[i+1]
+ if y1 > y2:
+ y1, y2 = y2, y1
+ xp = np.linspace(x1, x2, 10)
+ yp = p(xp)
+ assert_(((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all())
+
+ def test_monotone(self):
+        # PCHIP should preserve monotonicity
+ p, xi, yi = self._make_random()
+ for i in range(len(xi)-1):
+ x1, x2 = xi[i], xi[i+1]
+ y1, y2 = yi[i], yi[i+1]
+ xp = np.linspace(x1, x2, 10)
+ yp = p(xp)
+            assert_(((y2-y1) * (yp[1:] - yp[:-1]) > 0).all())
+
+ def test_cast(self):
+ # regression test for integer input data, see gh-3453
+ data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
+ [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
+ xx = np.arange(100)
+ curve = pchip(data[0], data[1])(xx)
+
+ data1 = data * 1.0
+ curve1 = pchip(data1[0], data1[1])(xx)
+
+ assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
+
+ def test_nag(self):
+ # Example from NAG C implementation,
+ # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
+ # suggested in gh-5326 as a smoke test for the way the derivatives
+ # are computed (see also gh-3453)
+ dataStr = '''
+ 7.99 0.00000E+0
+ 8.09 0.27643E-4
+ 8.19 0.43750E-1
+ 8.70 0.16918E+0
+ 9.20 0.46943E+0
+ 10.00 0.94374E+0
+ 12.00 0.99864E+0
+ 15.00 0.99992E+0
+ 20.00 0.99999E+0
+ '''
+ data = np.loadtxt(io.StringIO(dataStr))
+ pch = pchip(data[:,0], data[:,1])
+
+ resultStr = '''
+ 7.9900 0.0000
+ 9.1910 0.4640
+ 10.3920 0.9645
+ 11.5930 0.9965
+ 12.7940 0.9992
+ 13.9950 0.9998
+ 15.1960 0.9999
+ 16.3970 1.0000
+ 17.5980 1.0000
+ 18.7990 1.0000
+ 20.0000 1.0000
+ '''
+ result = np.loadtxt(io.StringIO(resultStr))
+ assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
+
+ def test_endslopes(self):
+ # this is a smoke test for gh-3453: PCHIP interpolator should not
+ # set edge slopes to zero if the data do not suggest zero edge derivatives
+ x = np.array([0.0, 0.1, 0.25, 0.35])
+ y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
+ y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
+ for pp in (pchip(x, y1), pchip(x, y2)):
+ for t in (x[0], x[-1]):
+ assert_(pp(t, 1) != 0)
+
+ def test_all_zeros(self):
+ x = np.arange(10)
+ y = np.zeros_like(x)
+
+ # this should work and not generate any warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error')
+ pch = pchip(x, y)
+
+ xx = np.linspace(0, 9, 101)
+ assert_equal(pch(xx), 0.)
+
+ def test_two_points(self):
+ # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
+ # it tries to use a three-point scheme to estimate edge derivatives,
+ # while there are only two points available.
+ # Instead, it should construct a linear interpolator.
+ x = np.linspace(0, 1, 11)
+ p = pchip([0, 1], [0, 2])
+ assert_allclose(p(x), 2*x, atol=1e-15)
+
+ def test_pchip_interpolate(self):
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
+ [1.])
+
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
+ [3.5])
+
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
+ [[3.5], [1]])
+
+ def test_roots(self):
+ # regression test for gh-6357: .roots method should work
+ p = pchip([0, 1], [-1, 1])
+ r = p.roots()
+ assert_allclose(r, 0.5)
+
+
+class TestCubicSpline:
+ @staticmethod
+ def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
+ tol=1e-14):
+ """Check that spline coefficients satisfy the continuity and boundary
+ conditions."""
+ x = S.x
+ c = S.c
+ dx = np.diff(x)
+ dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
+ dxi = dx[:-1]
+
+ # Check C2 continuity.
+ assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
+ c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
+ assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
+ 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
+ assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
+ rtol=tol, atol=tol)
+
+        # Check that we found a parabola: the third derivative is 0.
+ if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
+ assert_allclose(c[0], 0, rtol=tol, atol=tol)
+ return
+
+ # Check periodic boundary conditions.
+ if bc_start == 'periodic':
+ assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
+ assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
+ assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
+ return
+
+ # Check other boundary conditions.
+ if bc_start == 'not-a-knot':
+ if x.size == 2:
+ slope = (S(x[1]) - S(x[0])) / dx[0]
+ assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
+ else:
+ assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
+ elif bc_start == 'clamped':
+ assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
+ elif bc_start == 'natural':
+ assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
+ else:
+ order, value = bc_start
+ assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
+
+ if bc_end == 'not-a-knot':
+ if x.size == 2:
+ slope = (S(x[1]) - S(x[0])) / dx[0]
+ assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
+ else:
+ assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
+ elif bc_end == 'clamped':
+ assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
+ elif bc_end == 'natural':
+ assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)
+ else:
+ order, value = bc_end
+ assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
+
+ def check_all_bc(self, x, y, axis):
+ deriv_shape = list(y.shape)
+ del deriv_shape[axis]
+ first_deriv = np.empty(deriv_shape)
+ first_deriv.fill(2)
+ second_deriv = np.empty(deriv_shape)
+ second_deriv.fill(-1)
+ bc_all = [
+ 'not-a-knot',
+ 'natural',
+ 'clamped',
+ (1, first_deriv),
+ (2, second_deriv)
+ ]
+ for bc in bc_all[:3]:
+ S = CubicSpline(x, y, axis=axis, bc_type=bc)
+ self.check_correctness(S, bc, bc)
+
+ for bc_start in bc_all:
+ for bc_end in bc_all:
+ S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
+ self.check_correctness(S, bc_start, bc_end, tol=2e-14)
+
+ def test_general(self):
+ x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
+ y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
+ for n in [2, 3, x.size]:
+ self.check_all_bc(x[:n], y[:n], 0)
+
+ Y = np.empty((2, n, 2))
+ Y[0, :, 0] = y[:n]
+ Y[0, :, 1] = y[:n] - 1
+ Y[1, :, 0] = y[:n] + 2
+ Y[1, :, 1] = y[:n] + 3
+ self.check_all_bc(x[:n], Y, 1)
+
+ def test_periodic(self):
+ for n in [2, 3, 5]:
+ x = np.linspace(0, 2 * np.pi, n)
+ y = np.cos(x)
+ S = CubicSpline(x, y, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ Y = np.empty((2, n, 2))
+ Y[0, :, 0] = y
+ Y[0, :, 1] = y + 2
+ Y[1, :, 0] = y - 1
+ Y[1, :, 1] = y + 5
+ S = CubicSpline(x, Y, axis=1, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ def test_periodic_eval(self):
+ x = np.linspace(0, 2 * np.pi, 10)
+ y = np.cos(x)
+ S = CubicSpline(x, y, bc_type='periodic')
+ assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
+
+ def test_second_derivative_continuity_gh_11758(self):
+ # gh-11758: C2 continuity fail
+ x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
+ 7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
+ y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
+ 2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
+ S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ def test_three_points(self):
+ # gh-11758: Fails computing a_m2_m1
+        # In this case, s (the first derivatives) can be found manually by
+        # solving a system of 2 linear equations. The solution of this system is
+        # s[i] = (h1*m2 + h2*m1) / (h1 + h2), where h1 = x[1] - x[0], h2 = x[2] - x[1],
+        # m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2.
+ x = np.array([1.0, 2.75, 3.0])
+ y = np.array([1.0, 15.0, 1.0])
+ S = CubicSpline(x, y, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+ assert_allclose(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
+
+ def test_periodic_three_points_multidim(self):
+ # make sure one multidimensional interpolator does the same as multiple
+ # one-dimensional interpolators
+ x = np.array([0.0, 1.0, 3.0])
+ y = np.array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
+ S = CubicSpline(x, y, bc_type="periodic")
+ self.check_correctness(S, 'periodic', 'periodic')
+ S0 = CubicSpline(x, y[:, 0], bc_type="periodic")
+ S1 = CubicSpline(x, y[:, 1], bc_type="periodic")
+ q = np.linspace(0, 2, 5)
+ assert_allclose(S(q)[:, 0], S0(q))
+ assert_allclose(S(q)[:, 1], S1(q))
+
+ def test_dtypes(self):
+ x = np.array([0, 1, 2, 3], dtype=int)
+ y = np.array([-5, 2, 3, 1], dtype=int)
+ S = CubicSpline(x, y)
+ self.check_correctness(S)
+
+ y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
+ S = CubicSpline(x, y)
+ self.check_correctness(S)
+
+ S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
+ self.check_correctness(S, "natural", (1, 2j))
+
+ y = np.array([-5, 2, 3, 1])
+ S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
+ self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
+
+ def test_small_dx(self):
+ rng = np.random.RandomState(0)
+ x = np.sort(rng.uniform(size=100))
+ y = 1e4 + rng.uniform(size=100)
+ S = CubicSpline(x, y)
+ self.check_correctness(S, tol=1e-13)
+
+ def test_incorrect_inputs(self):
+ x = np.array([1, 2, 3, 4])
+ y = np.array([1, 2, 3, 4])
+ xc = np.array([1 + 1j, 2, 3, 4])
+ xn = np.array([np.nan, 2, 3, 4])
+ xo = np.array([2, 1, 3, 4])
+ yn = np.array([np.nan, 2, 3, 4])
+ y3 = [1, 2, 3]
+ x1 = [1]
+ y1 = [1]
+
+ assert_raises(ValueError, CubicSpline, xc, y)
+ assert_raises(ValueError, CubicSpline, xn, y)
+ assert_raises(ValueError, CubicSpline, x, yn)
+ assert_raises(ValueError, CubicSpline, xo, y)
+ assert_raises(ValueError, CubicSpline, x, y3)
+ assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
+ assert_raises(ValueError, CubicSpline, x1, y1)
+
+ wrong_bc = [('periodic', 'clamped'),
+ ((2, 0), (3, 10)),
+ ((1, 0), ),
+ (0., 0.),
+ 'not-a-typo']
+
+ for bc_type in wrong_bc:
+ assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
+
+ # Shapes mismatch when giving arbitrary derivative values:
+ Y = np.c_[y, y]
+ bc1 = ('clamped', (1, 0))
+ bc2 = ('clamped', (1, [0, 0, 0]))
+ bc3 = ('clamped', (1, [[0, 0]]))
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
+
+ # periodic condition, y[-1] must be equal to y[0]:
+ assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
+
+
+def test_CubicHermiteSpline_correctness():
+ x = [0, 2, 7]
+ y = [-1, 2, 3]
+ dydx = [0, 3, 7]
+ s = CubicHermiteSpline(x, y, dydx)
+ assert_allclose(s(x), y, rtol=1e-15)
+ assert_allclose(s(x, 1), dydx, rtol=1e-15)
+
+
+def test_CubicHermiteSpline_error_handling():
+ x = [1, 2, 3]
+ y = [0, 3, 5]
+ dydx = [1, -1, 2, 3]
+ assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
+
+ dydx_with_nan = [1, 0, np.nan]
+ assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
+
+
+def test_roots_extrapolate_gh_11185():
+ x = np.array([0.001, 0.002])
+ y = np.array([1.66066935e-06, 1.10410807e-06])
+ dy = np.array([-1.60061854, -1.600619])
+ p = CubicHermiteSpline(x, y, dy)
+
+ # roots(extrapolate=True) for a polynomial with a single interval
+ # should return all three real roots
+ r = p.roots(extrapolate=True)
+ assert_equal(p.c.shape[1], 1)
+ assert_equal(r.size, 3)
+
+
+class TestZeroSizeArrays:
+    # regression tests for gh-17241: CubicSpline et al must not segfault
+ # when y.size == 0
+ # The two methods below are _almost_ the same, but not quite:
+ # one is for objects which have the `bc_type` argument (CubicSpline)
+ # and the other one is for those which do not (Pchip, Akima1D)
+
+ @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+ np.zeros((10, 5, 0))])
+ @pytest.mark.parametrize('bc_type',
+ ['not-a-knot', 'periodic', 'natural', 'clamped'])
+ @pytest.mark.parametrize('axis', [0, 1, 2])
+ @pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
+ def test_zero_size(self, cls, y, bc_type, axis):
+ x = np.arange(10)
+ xval = np.arange(3)
+
+ obj = cls(x, y, bc_type=bc_type)
+ assert obj(xval).size == 0
+ assert obj(xval).shape == xval.shape + y.shape[1:]
+
+ # Also check with an explicit non-default axis
+ yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+ obj = cls(x, yt, bc_type=bc_type, axis=axis)
+ sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+ assert obj(xval).size == 0
+ assert obj(xval).shape == sh
+
+ @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+ np.zeros((10, 5, 0))])
+ @pytest.mark.parametrize('axis', [0, 1, 2])
+ @pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
+ def test_zero_size_2(self, cls, y, axis):
+ x = np.arange(10)
+ xval = np.arange(3)
+
+ obj = cls(x, y)
+ assert obj(xval).size == 0
+ assert obj(xval).shape == xval.shape + y.shape[1:]
+
+ # Also check with an explicit non-default axis
+ yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+ obj = cls(x, yt, axis=axis)
+ sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+ assert obj(xval).size == 0
+ assert obj(xval).shape == sh
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..418042c65a906430ddecc5dabd98af2777747184
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py
@@ -0,0 +1,222 @@
+# Created by John Travers, Robert Hetland, 2007
+""" Test functions for rbf module """
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_almost_equal,
+ assert_almost_equal)
+from numpy import linspace, sin, cos, random, exp, allclose
+from scipy.interpolate._rbf import Rbf
+
+FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
+ 'cubic', 'quintic', 'thin-plate', 'linear')
+
+
+def check_rbf1d_interpolation(function):
+ # Check that the Rbf function interpolates through the nodes (1D)
+ x = linspace(0,10,9)
+ y = sin(x)
+ rbf = Rbf(x, y, function=function)
+ yi = rbf(x)
+ assert_array_almost_equal(y, yi)
+ assert_almost_equal(rbf(float(x[0])), y[0])
+
+
+def check_rbf2d_interpolation(function):
+ # Check that the Rbf function interpolates through the nodes (2D).
+ x = random.rand(50,1)*4-2
+ y = random.rand(50,1)*4-2
+ z = x*exp(-x**2-1j*y**2)
+ rbf = Rbf(x, y, z, epsilon=2, function=function)
+ zi = rbf(x, y)
+ zi.shape = x.shape
+ assert_array_almost_equal(z, zi)
+
+
+def check_rbf3d_interpolation(function):
+ # Check that the Rbf function interpolates through the nodes (3D).
+ x = random.rand(50, 1)*4 - 2
+ y = random.rand(50, 1)*4 - 2
+ z = random.rand(50, 1)*4 - 2
+ d = x*exp(-x**2 - y**2)
+ rbf = Rbf(x, y, z, d, epsilon=2, function=function)
+ di = rbf(x, y, z)
+ di.shape = x.shape
+ assert_array_almost_equal(di, d)
+
+
+def test_rbf_interpolation():
+ for function in FUNCTIONS:
+ check_rbf1d_interpolation(function)
+ check_rbf2d_interpolation(function)
+ check_rbf3d_interpolation(function)
+
+
+def check_2drbf1d_interpolation(function):
+ # Check that the 2-D Rbf function interpolates through the nodes (1D)
+ x = linspace(0, 10, 9)
+ y0 = sin(x)
+ y1 = cos(x)
+ y = np.vstack([y0, y1]).T
+ rbf = Rbf(x, y, function=function, mode='N-D')
+ yi = rbf(x)
+ assert_array_almost_equal(y, yi)
+ assert_almost_equal(rbf(float(x[0])), y[0])
+
+
+def check_2drbf2d_interpolation(function):
+ # Check that the 2-D Rbf function interpolates through the nodes (2D).
+ x = random.rand(50, ) * 4 - 2
+ y = random.rand(50, ) * 4 - 2
+ z0 = x * exp(-x ** 2 - 1j * y ** 2)
+ z1 = y * exp(-y ** 2 - 1j * x ** 2)
+ z = np.vstack([z0, z1]).T
+ rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
+ zi = rbf(x, y)
+ zi.shape = z.shape
+ assert_array_almost_equal(z, zi)
+
+
+def check_2drbf3d_interpolation(function):
+ # Check that the 2-D Rbf function interpolates through the nodes (3D).
+ x = random.rand(50, ) * 4 - 2
+ y = random.rand(50, ) * 4 - 2
+ z = random.rand(50, ) * 4 - 2
+ d0 = x * exp(-x ** 2 - y ** 2)
+ d1 = y * exp(-y ** 2 - x ** 2)
+ d = np.vstack([d0, d1]).T
+ rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
+ di = rbf(x, y, z)
+ di.shape = d.shape
+ assert_array_almost_equal(di, d)
+
+
+def test_2drbf_interpolation():
+ for function in FUNCTIONS:
+ check_2drbf1d_interpolation(function)
+ check_2drbf2d_interpolation(function)
+ check_2drbf3d_interpolation(function)
+
+
+def check_rbf1d_regularity(function, atol):
+ # Check that the Rbf function approximates a smooth function well away
+ # from the nodes.
+ x = linspace(0, 10, 9)
+ y = sin(x)
+ rbf = Rbf(x, y, function=function)
+ xi = linspace(0, 10, 100)
+ yi = rbf(xi)
+ msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
+ assert_(allclose(yi, sin(xi), atol=atol), msg)
+
+
+def test_rbf_regularity():
+ tolerances = {
+ 'multiquadric': 0.1,
+ 'inverse multiquadric': 0.15,
+ 'gaussian': 0.15,
+ 'cubic': 0.15,
+ 'quintic': 0.1,
+ 'thin-plate': 0.1,
+ 'linear': 0.2
+ }
+ for function in FUNCTIONS:
+ check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
+def check_2drbf1d_regularity(function, atol):
+ # Check that the 2-D Rbf function approximates a smooth function well away
+ # from the nodes.
+ x = linspace(0, 10, 9)
+ y0 = sin(x)
+ y1 = cos(x)
+ y = np.vstack([y0, y1]).T
+ rbf = Rbf(x, y, function=function, mode='N-D')
+ xi = linspace(0, 10, 100)
+ yi = rbf(xi)
+ msg = "abs-diff: %f" % abs(yi - np.vstack([sin(xi), cos(xi)]).T).max()
+ assert_(allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg)
+
+
+def test_2drbf_regularity():
+ tolerances = {
+ 'multiquadric': 0.1,
+ 'inverse multiquadric': 0.15,
+ 'gaussian': 0.15,
+ 'cubic': 0.15,
+ 'quintic': 0.1,
+ 'thin-plate': 0.15,
+ 'linear': 0.2
+ }
+ for function in FUNCTIONS:
+ check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
+def check_rbf1d_stability(function):
+ # Check that the Rbf function with default epsilon is not subject
+ # to overshoot. Regression for issue #4523.
+ #
+ # Generate some data (fixed random seed hence deterministic)
+ np.random.seed(1234)
+ x = np.linspace(0, 10, 50)
+ z = x + 4.0 * np.random.randn(len(x))
+
+ rbf = Rbf(x, z, function=function)
+ xi = np.linspace(0, 10, 1000)
+ yi = rbf(xi)
+
+    # subtract the linear trend and make sure there are no spikes: the
+    # interpolant's deviation must stay within 10% of the noise amplitude
+ assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
+
+
+def test_rbf_stability():
+ for function in FUNCTIONS:
+ check_rbf1d_stability(function)
+
+
+def test_default_construction():
+ # Check that the Rbf class can be constructed with the default
+ # multiquadric basis function. Regression test for ticket #1228.
+ x = linspace(0,10,9)
+ y = sin(x)
+ rbf = Rbf(x, y)
+ yi = rbf(x)
+ assert_array_almost_equal(y, yi)
+
+
+def test_function_is_callable():
+ # Check that the Rbf class can be constructed with function=callable.
+ x = linspace(0,10,9)
+ y = sin(x)
+ def linfunc(x):
+ return x
+ rbf = Rbf(x, y, function=linfunc)
+ yi = rbf(x)
+ assert_array_almost_equal(y, yi)
+
+
+def test_two_arg_function_is_callable():
+ # Check that the Rbf class can be constructed with a two argument
+ # function=callable.
+ def _func(self, r):
+ return self.epsilon + r
+
+ x = linspace(0,10,9)
+ y = sin(x)
+ rbf = Rbf(x, y, function=_func)
+ yi = rbf(x)
+ assert_array_almost_equal(y, yi)
+
+
+def test_rbf_epsilon_none():
+ x = linspace(0, 10, 9)
+ y = sin(x)
+ Rbf(x, y, epsilon=None)
+
+
+def test_rbf_epsilon_none_collinear():
+    # Check that collinear points in one dimension don't cause an error
+ # due to epsilon = 0
+ x = [1, 2, 3]
+ y = [4, 4, 4]
+ z = [5, 6, 7]
+ rbf = Rbf(x, y, z, epsilon=None)
+ assert_(rbf.epsilon > 0)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py
new file mode 100644
index 0000000000000000000000000000000000000000..188d5e1d8ad9b6e0d93ba0d02736dc11042dacaf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py
@@ -0,0 +1,516 @@
+import pickle
+import pytest
+import numpy as np
+from numpy.linalg import LinAlgError
+from numpy.testing import assert_allclose, assert_array_equal
+from scipy.stats.qmc import Halton
+from scipy.spatial import cKDTree
+from scipy.interpolate._rbfinterp import (
+ _AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
+ RBFInterpolator
+ )
+from scipy.interpolate import _rbfinterp_pythran
+
+
+def _vandermonde(x, degree):
+ # Returns a matrix of monomials that span polynomials with the specified
+ # degree evaluated at x.
+ powers = _monomial_powers(x.shape[1], degree)
+ return _rbfinterp_pythran._polynomial_matrix(x, powers)
+
+
+def _1d_test_function(x):
+ # Test function used in Wahba's "Spline Models for Observational Data".
+ # domain ~= (0, 3), range ~= (-1.0, 0.2)
+ x = x[:, 0]
+ y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
+ return y
+
+
+def _2d_test_function(x):
+ # Franke's test function.
+ # domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
+ x1, x2 = x[:, 0], x[:, 1]
+ term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
+ term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
+ term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
+ term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
+ y = term1 + term2 + term3 + term4
+ return y
+
+
+def _is_conditionally_positive_definite(kernel, m):
+ # Tests whether the kernel is conditionally positive definite of order m.
+ # See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
+ # MATLAB".
+ nx = 10
+ ntests = 100
+ for ndim in [1, 2, 3, 4, 5]:
+ # Generate sample points with a Halton sequence to avoid samples that
+ # are too close to each other, which can make the matrix singular.
+ seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
+ for _ in range(ntests):
+ x = 2*seq.random(nx) - 1
+ A = _rbfinterp_pythran._kernel_matrix(x, kernel)
+ P = _vandermonde(x, m - 1)
+ Q, R = np.linalg.qr(P, mode='complete')
+ # Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
+ # A onto this space, and then see if it is positive definite using
+ # the Cholesky decomposition. If not, then the kernel is not c.p.d.
+ # of order m.
+ Q2 = Q[:, P.shape[1]:]
+ B = Q2.T.dot(A).dot(Q2)
+ try:
+ np.linalg.cholesky(B)
+ except np.linalg.LinAlgError:
+ return False
+
+ return True
+
+
+# Sorting the parametrize arguments is necessary to avoid a parallelization
+# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
+@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+def test_conditionally_positive_definite(kernel):
+ # Test if each kernel in _AVAILABLE is conditionally positive definite of
+ # order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
+ # condition for the smoothed RBF interpolant to be well-posed in general.
+ m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
+ assert _is_conditionally_positive_definite(kernel, m)
+
+
+class _TestRBFInterpolator:
+ @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
+ def test_scale_invariance_1d(self, kernel):
+ # Verify that the functions in _SCALE_INVARIANT are insensitive to the
+ # shape parameter (when smoothing == 0) in 1d.
+ seq = Halton(1, scramble=False, seed=np.random.RandomState())
+ x = 3*seq.random(50)
+ y = _1d_test_function(x)
+ xitp = 3*seq.random(50)
+ yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
+ yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
+ def test_scale_invariance_2d(self, kernel):
+ # Verify that the functions in _SCALE_INVARIANT are insensitive to the
+ # shape parameter (when smoothing == 0) in 2d.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+ x = seq.random(100)
+ y = _2d_test_function(x)
+ xitp = seq.random(100)
+ yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
+ yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+ def test_extreme_domains(self, kernel):
+ # Make sure the interpolant remains numerically stable for very
+ # large/small domains.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+ scale = 1e50
+ shift = 1e55
+
+ x = seq.random(100)
+ y = _2d_test_function(x)
+ xitp = seq.random(100)
+
+ if kernel in _SCALE_INVARIANT:
+ yitp1 = self.build(x, y, kernel=kernel)(xitp)
+ yitp2 = self.build(
+ x*scale + shift, y,
+ kernel=kernel
+ )(xitp*scale + shift)
+ else:
+ yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+ yitp2 = self.build(
+ x*scale + shift, y,
+ epsilon=5.0/scale,
+ kernel=kernel
+ )(xitp*scale + shift)
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ def test_polynomial_reproduction(self):
+ # If the observed data comes from a polynomial, then the interpolant
+ # should be able to reproduce the polynomial exactly, provided that
+ # `degree` is sufficiently high.
+ rng = np.random.RandomState(0)
+ seq = Halton(2, scramble=False, seed=rng)
+ degree = 3
+
+ x = seq.random(50)
+ xitp = seq.random(50)
+
+ P = _vandermonde(x, degree)
+ Pitp = _vandermonde(xitp, degree)
+
+ poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+
+ y = P.dot(poly_coeffs)
+ yitp1 = Pitp.dot(poly_coeffs)
+ yitp2 = self.build(x, y, degree=degree)(xitp)
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ @pytest.mark.slow
+ def test_chunking(self, monkeypatch):
+        # Evaluating in chunks should give the same result as evaluating
+        # all points at once. The observed data comes from a polynomial, so
+        # the interpolant reproduces it exactly and any mismatch would come
+        # from the chunking logic.
+ rng = np.random.RandomState(0)
+ seq = Halton(2, scramble=False, seed=rng)
+ degree = 3
+
+        largeN = 1000 + 33
+        # deliberately large so that the chunked evaluation path of
+        # RBFInterpolator is actually exercised
+ x = seq.random(50)
+ xitp = seq.random(largeN)
+
+ P = _vandermonde(x, degree)
+ Pitp = _vandermonde(xitp, degree)
+
+ poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+
+ y = P.dot(poly_coeffs)
+ yitp1 = Pitp.dot(poly_coeffs)
+ interp = self.build(x, y, degree=degree)
+ ce_real = interp._chunk_evaluator
+
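+        # Wrap the real chunk evaluator and force a tiny memory budget so
+        # that the 1033 evaluation points are processed in several chunks
+        # rather than in a single pass.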
+ def _chunk_evaluator(*args, **kwargs):
+ kwargs.update(memory_budget=100)
+ return ce_real(*args, **kwargs)
+
+ monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
+ yitp2 = interp(xitp)
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ def test_vector_data(self):
+ # Make sure interpolating a vector field is the same as interpolating
+ # each component separately.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+ x = seq.random(100)
+ xitp = seq.random(100)
+
+ y = np.array([_2d_test_function(x),
+ _2d_test_function(x[:, ::-1])]).T
+
+ yitp1 = self.build(x, y)(xitp)
+ yitp2 = self.build(x, y[:, 0])(xitp)
+ yitp3 = self.build(x, y[:, 1])(xitp)
+
+ assert_allclose(yitp1[:, 0], yitp2)
+ assert_allclose(yitp1[:, 1], yitp3)
+
+ def test_complex_data(self):
+        # Interpolating complex input should be the same as interpolating
+        # the real and imaginary components separately.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+ x = seq.random(100)
+ xitp = seq.random(100)
+
+ y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])
+
+ yitp1 = self.build(x, y)(xitp)
+ yitp2 = self.build(x, y.real)(xitp)
+ yitp3 = self.build(x, y.imag)(xitp)
+
+ assert_allclose(yitp1.real, yitp2)
+ assert_allclose(yitp1.imag, yitp3)
+
+ @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+ def test_interpolation_misfit_1d(self, kernel):
+ # Make sure that each kernel, with its default `degree` and an
+ # appropriate `epsilon`, does a good job at interpolation in 1d.
+ seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+ x = 3*seq.random(50)
+ xitp = 3*seq.random(50)
+
+ y = _1d_test_function(x)
+ ytrue = _1d_test_function(xitp)
+ yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+
+ mse = np.mean((yitp - ytrue)**2)
+ assert mse < 1.0e-4
+
+ @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+ def test_interpolation_misfit_2d(self, kernel):
+ # Make sure that each kernel, with its default `degree` and an
+ # appropriate `epsilon`, does a good job at interpolation in 2d.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+ x = seq.random(100)
+ xitp = seq.random(100)
+
+ y = _2d_test_function(x)
+ ytrue = _2d_test_function(xitp)
+ yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+
+ mse = np.mean((yitp - ytrue)**2)
+ assert mse < 2.0e-4
+
+ @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+ def test_smoothing_misfit(self, kernel):
+ # Make sure we can find a smoothing parameter for each kernel that
+ # removes a sufficient amount of noise.
+ rng = np.random.RandomState(0)
+ seq = Halton(1, scramble=False, seed=rng)
+
+ noise = 0.2
+ rmse_tol = 0.1
+ smoothing_range = 10**np.linspace(-4, 1, 20)
+
+ x = 3*seq.random(100)
+ y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
+ ytrue = _1d_test_function(x)
+ rmse_within_tol = False
+ for smoothing in smoothing_range:
+ ysmooth = self.build(
+ x, y,
+ epsilon=1.0,
+ smoothing=smoothing,
+ kernel=kernel)(x)
+ rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
+ if rmse < rmse_tol:
+ rmse_within_tol = True
+ break
+
+ assert rmse_within_tol
+
+ def test_array_smoothing(self):
+ # Test using an array for `smoothing` to give less weight to a known
+ # outlier.
+ rng = np.random.RandomState(0)
+ seq = Halton(1, scramble=False, seed=rng)
+ degree = 2
+
+ x = seq.random(50)
+ P = _vandermonde(x, degree)
+ poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+ y = P.dot(poly_coeffs)
+ y_with_outlier = np.copy(y)
+ y_with_outlier[10] += 1.0
+ smoothing = np.zeros((50,))
+ smoothing[10] = 1000.0
+ yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
+ # Should be able to reproduce the uncorrupted data almost exactly.
+ assert_allclose(yitp, y, atol=1e-4)
+
+ def test_inconsistent_x_dimensions_error(self):
+ # ValueError should be raised if the observation points and evaluation
+ # points have a different number of dimensions.
+ y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
+ d = _2d_test_function(y)
+ x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
+ match = 'Expected the second axis of `x`'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d)(x)
+
+ def test_inconsistent_d_length_error(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(1)
+ match = 'Expected the first axis of `d`'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d)
+
+ def test_y_not_2d_error(self):
+ y = np.linspace(0, 1, 5)
+ d = np.zeros(5)
+ match = '`y` must be a 2-dimensional array.'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d)
+
+ def test_inconsistent_smoothing_length_error(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(5)
+ smoothing = np.ones(1)
+ match = 'Expected `smoothing` to be'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d, smoothing=smoothing)
+
+ def test_invalid_kernel_name_error(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(5)
+ match = '`kernel` must be one of'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d, kernel='test')
+
+ def test_epsilon_not_specified_error(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(5)
+ for kernel in _AVAILABLE:
+ if kernel in _SCALE_INVARIANT:
+ continue
+
+ match = '`epsilon` must be specified'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d, kernel=kernel)
+
+ def test_x_not_2d_error(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ x = np.linspace(0, 1, 5)
+ d = np.zeros(5)
+ match = '`x` must be a 2-dimensional array.'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d)(x)
+
+ def test_not_enough_observations_error(self):
+ y = np.linspace(0, 1, 1)[:, None]
+ d = np.zeros(1)
+ match = 'At least 2 data points are required'
+ with pytest.raises(ValueError, match=match):
+ self.build(y, d, kernel='thin_plate_spline')
+
+ def test_degree_warning(self):
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(5)
+ for kernel, deg in _NAME_TO_MIN_DEGREE.items():
+            # Only test kernels whose minimum degree is not 0.
+ if deg >= 1:
+ match = f'`degree` should not be below {deg}'
+ with pytest.warns(Warning, match=match):
+ self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)
+
+ def test_minus_one_degree(self):
+ # Make sure a degree of -1 is accepted without any warning.
+ y = np.linspace(0, 1, 5)[:, None]
+ d = np.zeros(5)
+ for kernel, _ in _NAME_TO_MIN_DEGREE.items():
+ self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)
+
+ def test_rank_error(self):
+ # An error should be raised when `kernel` is "thin_plate_spline" and
+ # observations are 2-D and collinear.
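+        # With every point on the x-axis, the monomial column for the second
+        # coordinate is identically zero, so the polynomial matrix cannot
+        # have full column rank.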
+ y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
+ d = np.array([0.0, 0.0, 0.0])
+ match = 'does not have full column rank'
+ with pytest.raises(LinAlgError, match=match):
+ self.build(y, d, kernel='thin_plate_spline')(y)
+
+ def test_single_point(self):
+ # Make sure interpolation still works with only one point (in 1, 2, and
+ # 3 dimensions).
+ for dim in [1, 2, 3]:
+ y = np.zeros((1, dim))
+ d = np.ones((1,))
+ f = self.build(y, d, kernel='linear')(y)
+ assert_allclose(d, f)
+
+ def test_pickleable(self):
+ # Make sure we can pickle and unpickle the interpolant without any
+ # changes in the behavior.
+ seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))
+
+ x = 3*seq.random(50)
+ xitp = 3*seq.random(50)
+
+ y = _1d_test_function(x)
+
+ interp = self.build(x, y)
+
+ yitp1 = interp(xitp)
+ yitp2 = pickle.loads(pickle.dumps(interp))(xitp)
+
+ assert_array_equal(yitp1, yitp2)
+
+
+class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
+ def build(self, *args, **kwargs):
+ return RBFInterpolator(*args, **kwargs)
+
+ def test_smoothing_limit_1d(self):
+ # For large smoothing parameters, the interpolant should approach a
+ # least squares fit of a polynomial with the specified degree.
+ seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+ degree = 3
+ smoothing = 1e8
+
+ x = 3*seq.random(50)
+ xitp = 3*seq.random(50)
+
+ y = _1d_test_function(x)
+
+ yitp1 = self.build(
+ x, y,
+ degree=degree,
+ smoothing=smoothing
+ )(xitp)
+
+ P = _vandermonde(x, degree)
+ Pitp = _vandermonde(xitp, degree)
+ yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+ def test_smoothing_limit_2d(self):
+ # For large smoothing parameters, the interpolant should approach a
+ # least squares fit of a polynomial with the specified degree.
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+ degree = 3
+ smoothing = 1e8
+
+ x = seq.random(100)
+ xitp = seq.random(100)
+
+ y = _2d_test_function(x)
+
+ yitp1 = self.build(
+ x, y,
+ degree=degree,
+ smoothing=smoothing
+ )(xitp)
+
+ P = _vandermonde(x, degree)
+ Pitp = _vandermonde(xitp, degree)
+ yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
+class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
+ # RBFInterpolator using 20 nearest neighbors.
+ def build(self, *args, **kwargs):
+ return RBFInterpolator(*args, **kwargs, neighbors=20)
+
+ def test_equivalent_to_rbf_interpolator(self):
+ seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+ x = seq.random(100)
+ xitp = seq.random(100)
+
+ y = _2d_test_function(x)
+
+ yitp1 = self.build(x, y)(xitp)
+
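+        # Reference computation: for each evaluation point, build a full
+        # RBFInterpolator from only its 20 nearest observation points.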
+ yitp2 = []
+ tree = cKDTree(x)
+ for xi in xitp:
+ _, nbr = tree.query(xi, 20)
+ yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
+class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
+ # RBFInterpolator using neighbors=np.inf. This should give exactly the same
+ # results as neighbors=None, but it will be slower.
+ def build(self, *args, **kwargs):
+ return RBFInterpolator(*args, **kwargs, neighbors=np.inf)
+
+ def test_equivalent_to_rbf_interpolator(self):
+ seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+ x = 3*seq.random(50)
+ xitp = 3*seq.random(50)
+
+ y = _1d_test_function(x)
+ yitp1 = self.build(x, y)(xitp)
+ yitp2 = RBFInterpolator(x, y)(xitp)
+
+ assert_allclose(yitp1, yitp2, atol=1e-8)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..5503b39dc67eb3c1d50e338ac26708095aff55d6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py
@@ -0,0 +1,1111 @@
+import itertools
+
+import pytest
+import numpy as np
+
+from numpy.testing import (assert_allclose, assert_equal, assert_warns,
+ assert_array_almost_equal, assert_array_equal)
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (RegularGridInterpolator, interpn,
+ RectBivariateSpline,
+ NearestNDInterpolator, LinearNDInterpolator)
+
+from scipy.sparse._sputils import matrix
+from scipy._lib._util import ComplexWarning
+
+
+parametrize_rgi_interp_methods = pytest.mark.parametrize(
+ "method", RegularGridInterpolator._ALL_METHODS
+)
+
+class TestRegularGridInterpolator:
+ def _get_sample_4d(self):
+ # create a 4-D grid of 3 points in each dimension
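+        # The value at grid point (p0, p1, p2, p3) is p0 + 10*p1 + 100*p2 +
+        # 1000*p3, so e.g. the corner (1., 1., 1., 1.) maps to 1111.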
+ points = [(0., .5, 1.)] * 4
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_2(self):
+ # create another 4-D grid of 3 points in each dimension
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_3(self):
+ # create another 4-D grid of 7 points in each dimension
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0)] * 4
+ values = np.asarray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_4(self):
+ # create another 4-D grid of 2 points in each dimension
+ points = [(0.0, 1.0)] * 4
+ values = np.asarray([0.0, 1.0])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ @parametrize_rgi_interp_methods
+ def test_list_input(self, method):
+ points, values = self._get_sample_4d_3()
+
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+
+ interp = RegularGridInterpolator(points,
+ values.tolist(),
+ method=method)
+ v1 = interp(sample.tolist())
+ interp = RegularGridInterpolator(points,
+ values,
+ method=method)
+ v2 = interp(sample)
+ assert_allclose(v1, v2)
+
+ @pytest.mark.parametrize('method', ['cubic', 'quintic', 'pchip'])
+ def test_spline_dim_error(self, method):
+ points, values = self._get_sample_4d_4()
+ match = "points in dimension"
+
+        # Check that the error is raised when creating the interpolator
+ with pytest.raises(ValueError, match=match):
+ RegularGridInterpolator(points, values, method=method)
+
+        # Check that the error is also raised when evaluating with the
+        # `method` keyword
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ with pytest.raises(ValueError, match=match):
+ interp(sample, method=method)
+
+ @pytest.mark.parametrize(
+ "points_values, sample",
+ [
+ (
+ _get_sample_4d,
+ np.asarray(
+ [[0.1, 0.1, 1.0, 0.9],
+ [0.2, 0.1, 0.45, 0.8],
+ [0.5, 0.5, 0.5, 0.5]]
+ ),
+ ),
+ (_get_sample_4d_2, np.asarray([0.1, 0.1, 10.0, 9.0])),
+ ],
+ )
+ def test_linear_and_slinear_close(self, points_values, sample):
+ points, values = points_values(self)
+ interp = RegularGridInterpolator(points, values, method="linear")
+ v1 = interp(sample)
+ interp = RegularGridInterpolator(points, values, method="slinear")
+ v2 = interp(sample)
+ assert_allclose(v1, v2)
+
+ def test_derivatives(self):
+ points, values = self._get_sample_4d()
+ sample = np.array([[0.1 , 0.1 , 1. , 0.9 ],
+ [0.2 , 0.1 , 0.45, 0.8 ],
+ [0.5 , 0.5 , 0.5 , 0.5 ]])
+ interp = RegularGridInterpolator(points, values, method="slinear")
+
+ with assert_raises(ValueError):
+ # wrong number of derivatives (need 4)
+ interp(sample, nu=1)
+
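+        # The grid samples the linear function f(x) = x0 + 10*x1 + 100*x2 +
+        # 1000*x3, so the first partials are the constants asserted below.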
+ assert_allclose(interp(sample, nu=(1, 0, 0, 0)),
+ [1, 1, 1], atol=1e-15)
+ assert_allclose(interp(sample, nu=(0, 1, 0, 0)),
+ [10, 10, 10], atol=1e-15)
+
+ # 2nd derivatives of a linear function are zero
+ assert_allclose(interp(sample, nu=(0, 1, 1, 0)),
+ [0, 0, 0], atol=1e-12)
+
+ @parametrize_rgi_interp_methods
+ def test_complex(self, method):
+ if method == "pchip":
+ pytest.skip("pchip does not make sense for complex data")
+ points, values = self._get_sample_4d_3()
+ values = values - 2j*values
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+
+ interp = RegularGridInterpolator(points, values, method=method)
+ rinterp = RegularGridInterpolator(points, values.real, method=method)
+ iinterp = RegularGridInterpolator(points, values.imag, method=method)
+
+ v1 = interp(sample)
+ v2 = rinterp(sample) + 1j*iinterp(sample)
+ assert_allclose(v1, v2)
+
+ def test_cubic_vs_pchip(self):
+ x, y = [1, 2, 3, 4], [1, 2, 3, 4]
+ xg, yg = np.meshgrid(x, y, indexing='ij')
+
+ values = (lambda x, y: x**4 * y**4)(xg, yg)
+ cubic = RegularGridInterpolator((x, y), values, method='cubic')
+ pchip = RegularGridInterpolator((x, y), values, method='pchip')
+
+ vals_cubic = cubic([1.5, 2])
+ vals_pchip = pchip([1.5, 2])
+ assert not np.allclose(vals_cubic, vals_pchip, atol=1e-14, rtol=0)
+
+ def test_linear_xi1d(self):
+ points, values = self._get_sample_4d_2()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([0.1, 0.1, 10., 9.])
+ wanted = 1001.1
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_linear_xi3d(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ wanted = np.asarray([1001.1, 846.2, 555.5])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ @pytest.mark.parametrize(
+ "sample, wanted",
+ [
+ (np.asarray([0.1, 0.1, 0.9, 0.9]), 1100.0),
+ (np.asarray([0.1, 0.1, 0.1, 0.1]), 0.0),
+ (np.asarray([0.0, 0.0, 0.0, 0.0]), 0.0),
+ (np.asarray([1.0, 1.0, 1.0, 1.0]), 1111.0),
+ (np.asarray([0.1, 0.4, 0.6, 0.9]), 1055.0),
+ ],
+ )
+ def test_nearest(self, sample, wanted):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, method="nearest")
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_linear_edges(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+ wanted = np.asarray([0., 1111.])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_valid_create(self):
+ # create a 2-D grid of 3 points in each dimension
+ points = [(0., .5, 1.), (0., 1., .5)]
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis]
+ values1 = values[np.newaxis, :]
+ values = (values0 + values1 * 10)
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [((0., .5, 1.), ), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, .75, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values)
+ points = [(0., .5, 1.), (0., .5, 1.)]
+ assert_raises(ValueError, RegularGridInterpolator, points, values,
+ method="undefmethod")
+
+ def test_valid_call(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+ assert_raises(ValueError, interp, sample, "undefmethod")
+ sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
+ assert_raises(ValueError, interp, sample)
+ sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
+ assert_raises(ValueError, interp, sample)
+
+ def test_out_of_bounds_extrap(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=None)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([0., 1111., 11., 11.])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_extrap2(self):
+ points, values = self._get_sample_4d_2()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=None)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([0., 11., 11., 11.])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_fill(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=np.nan)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([np.nan, np.nan, np.nan])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ wanted = np.asarray([1001.1, 846.2, 555.5])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_nearest_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, method="nearest")
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+ def test_linear_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+ @pytest.mark.parametrize("method", ["nearest", "linear"])
+ def test_duck_typed_values(self, method):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = MyValue((5, 7))
+
+ interp = RegularGridInterpolator((x, y), values, method=method)
+ v1 = interp([0.4, 0.7])
+
+ interp = RegularGridInterpolator((x, y), values._v, method=method)
+ v2 = interp([0.4, 0.7])
+ assert_allclose(v1, v2)
+
+ def test_invalid_fill_value(self):
+ np.random.seed(1234)
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+ values = np.random.rand(5, 7)
+
+ # integers can be cast to floats
+ RegularGridInterpolator((x, y), values, fill_value=1)
+
+ # complex values cannot
+ assert_raises(ValueError, RegularGridInterpolator,
+ (x, y), values, fill_value=1+2j)
+
+ def test_fillvalue_type(self):
+ # from #3703; test that interpolator object construction succeeds
+ values = np.ones((10, 20, 30), dtype='>f4')
+ points = [np.arange(n) for n in values.shape]
+ # xi = [(1, 1, 1)]
+ RegularGridInterpolator(points, values)
+ RegularGridInterpolator(points, values, fill_value=0.)
+
+ def test_length_one_axis(self):
+ # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
+ # Along the axis it's linear interpolation; away from the length-1
+ # axis, it's an extrapolation, so fill_value should be used.
+ def f(x, y):
+ return x + y
+ x = np.linspace(1, 1, 1)
+ y = np.linspace(1, 10, 10)
+ data = f(*np.meshgrid(x, y, indexing="ij", sparse=True))
+
+ interp = RegularGridInterpolator((x, y), data, method="linear",
+ bounds_error=False, fill_value=101)
+
+ # check values at the grid
+ assert_allclose(interp(np.array([[1, 1], [1, 5], [1, 10]])),
+ [2, 6, 11],
+ atol=1e-14)
+
+ # check off-grid interpolation is indeed linear
+ assert_allclose(interp(np.array([[1, 1.4], [1, 5.3], [1, 10]])),
+ [2.4, 6.3, 11],
+ atol=1e-14)
+
+ # check exrapolation w/ fill_value
+ assert_allclose(interp(np.array([1.1, 2.4])),
+ interp.fill_value,
+ atol=1e-14)
+
+ # check extrapolation: linear along the `y` axis, const along `x`
+ interp.fill_value = None
+ assert_allclose(interp([[1, 0.3], [1, 11.5]]),
+ [1.3, 12.5], atol=1e-15)
+
+ assert_allclose(interp([[1.5, 0.3], [1.9, 11.5]]),
+ [1.3, 12.5], atol=1e-15)
+
+ # extrapolation with method='nearest'
+ interp = RegularGridInterpolator((x, y), data, method="nearest",
+ bounds_error=False, fill_value=None)
+ assert_allclose(interp([[1.5, 1.8], [-4, 5.1]]),
+ [3, 6],
+ atol=1e-15)
+
+ @pytest.mark.parametrize("fill_value", [None, np.nan, np.pi])
+ @pytest.mark.parametrize("method", ['linear', 'nearest'])
+ def test_length_one_axis2(self, fill_value, method):
+ options = {"fill_value": fill_value, "bounds_error": False,
+ "method": method}
+
+ x = np.linspace(0, 2*np.pi, 20)
+ z = np.sin(x)
+
+ fa = RegularGridInterpolator((x,), z[:], **options)
+ fb = RegularGridInterpolator((x, [0]), z[:, None], **options)
+
+ x1a = np.linspace(-1, 2*np.pi+1, 100)
+ za = fa(x1a)
+
+ # evaluated at provided y-value, fb should behave exactly as fa
+ y1b = np.zeros(100)
+ zb = fb(np.vstack([x1a, y1b]).T)
+ assert_allclose(zb, za)
+
+ # evaluated at a different y-value, fb should return fill value
+ y1b = np.ones(100)
+ zb = fb(np.vstack([x1a, y1b]).T)
+ if fill_value is None:
+ assert_allclose(zb, za)
+ else:
+ assert_allclose(zb, fill_value)
+
+ @pytest.mark.parametrize("method", ['nearest', 'linear'])
+ def test_nan_x_1d(self, method):
+ # gh-6624 : if x is nan, result should be nan
+ f = RegularGridInterpolator(([1, 2, 3],), [10, 20, 30], fill_value=1,
+ bounds_error=False, method=method)
+ assert np.isnan(f([np.nan]))
+
+ # test arbitrary nan pattern
+ rng = np.random.default_rng(8143215468)
+ x = rng.random(size=100)*4
+ i = rng.random(size=100) > 0.5
+ x[i] = np.nan
+ with np.errstate(invalid='ignore'):
+ # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
+ # generate numpy warnings if `x` contains nans.
+            # These warnings should propagate to the user (since `x` is user
+            # input), so the test simply filters them out here.
+ res = f(x)
+
+ assert_equal(res[i], np.nan)
+ assert_equal(res[~i], f(x[~i]))
+
+ # also test the length-one axis f(nan)
+ x = [1, 2, 3]
+ y = [1, ]
+ data = np.ones((3, 1))
+ f = RegularGridInterpolator((x, y), data, fill_value=1,
+ bounds_error=False, method=method)
+ assert np.isnan(f([np.nan, 1]))
+ assert np.isnan(f([1, np.nan]))
+
+ @pytest.mark.parametrize("method", ['nearest', 'linear'])
+ def test_nan_x_2d(self, method):
+ x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
+
+ def f(x, y):
+ return x**2 + y**2
+
+ xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
+ data = f(xg, yg)
+ interp = RegularGridInterpolator((x, y), data,
+ method=method, bounds_error=False)
+
+ with np.errstate(invalid='ignore'):
+ res = interp([[1.5, np.nan], [1, 1]])
+ assert_allclose(res[1], 2, atol=1e-14)
+ assert np.isnan(res[0])
+
+ # test arbitrary nan pattern
+ rng = np.random.default_rng(8143215468)
+ x = rng.random(size=100)*4-1
+ y = rng.random(size=100)*8
+ i1 = rng.random(size=100) > 0.5
+ i2 = rng.random(size=100) > 0.5
+ i = i1 | i2
+ x[i1] = np.nan
+ y[i2] = np.nan
+ z = np.array([x, y]).T
+ with np.errstate(invalid='ignore'):
+ # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
+ # generate numpy warnings if `x` contains nans.
+            # These warnings should propagate to the user (since `x` is user
+            # input), so the test simply filters them out here.
+ res = interp(z)
+
+ assert_equal(res[i], np.nan)
+ assert_equal(res[~i], interp(z[~i]))
+
+ @parametrize_rgi_interp_methods
+ @pytest.mark.parametrize(("ndims", "func"), [
+ (2, lambda x, y: 2 * x ** 3 + 3 * y ** 2),
+ (3, lambda x, y, z: 2 * x ** 3 + 3 * y ** 2 - z),
+ (4, lambda x, y, z, a: 2 * x ** 3 + 3 * y ** 2 - z + a),
+ (5, lambda x, y, z, a, b: 2 * x ** 3 + 3 * y ** 2 - z + a * b),
+ ])
+ def test_descending_points_nd(self, method, ndims, func):
+
+ if ndims == 5 and method in {"cubic", "quintic"}:
+ pytest.skip("too slow; OOM (quintic); or nearly so (cubic)")
+
+ rng = np.random.default_rng(42)
+ sample_low = 1
+ sample_high = 5
+ test_points = rng.uniform(sample_low, sample_high, size=(2, ndims))
+
+ ascending_points = [np.linspace(sample_low, sample_high, 12)
+ for _ in range(ndims)]
+
+ ascending_values = func(*np.meshgrid(*ascending_points,
+ indexing="ij",
+ sparse=True))
+
+ ascending_interp = RegularGridInterpolator(ascending_points,
+ ascending_values,
+ method=method)
+ ascending_result = ascending_interp(test_points)
+
+ descending_points = [xi[::-1] for xi in ascending_points]
+ descending_values = func(*np.meshgrid(*descending_points,
+ indexing="ij",
+ sparse=True))
+ descending_interp = RegularGridInterpolator(descending_points,
+ descending_values,
+ method=method)
+ descending_result = descending_interp(test_points)
+
+ assert_array_equal(ascending_result, descending_result)
+
+ def test_invalid_points_order(self):
+ def val_func_2d(x, y):
+ return 2 * x ** 3 + 3 * y ** 2
+
+ x = np.array([.5, 2., 0., 4., 5.5]) # not ascending or descending
+ y = np.array([.5, 2., 3., 4., 5.5])
+ points = (x, y)
+ values = val_func_2d(*np.meshgrid(*points, indexing='ij',
+ sparse=True))
+ match = "must be strictly ascending or descending"
+ with pytest.raises(ValueError, match=match):
+ RegularGridInterpolator(points, values)
+
+ @parametrize_rgi_interp_methods
+ def test_fill_value(self, method):
+ interp = RegularGridInterpolator([np.arange(6)], np.ones(6),
+ method=method, bounds_error=False)
+ assert np.isnan(interp([10]))
+
+ @parametrize_rgi_interp_methods
+ def test_nonscalar_values(self, method):
+
+ if method == "quintic":
+ pytest.skip("Way too slow.")
+
+        # Verify that non-scalar values also work
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0)
+ ] * 2
+
+ rng = np.random.default_rng(1234)
+ values = rng.random((6, 6, 6, 6, 8))
+ sample = rng.random((7, 3, 4))
+
+ interp = RegularGridInterpolator(points, values, method=method,
+ bounds_error=False)
+ v = interp(sample)
+ assert_equal(v.shape, (7, 3, 8), err_msg=method)
+
+ vs = []
+ for j in range(8):
+ interp = RegularGridInterpolator(points, values[..., j],
+ method=method,
+ bounds_error=False)
+ vs.append(interp(sample))
+ v2 = np.array(vs).transpose(1, 2, 0)
+
+ assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+ @parametrize_rgi_interp_methods
+ @pytest.mark.parametrize("flip_points", [False, True])
+ def test_nonscalar_values_2(self, method, flip_points):
+
+ if method in {"cubic", "quintic"}:
+ pytest.skip("Way too slow.")
+
+        # Verify that non-scalar values also work: use different axis
+        # lengths to simplify tracing the internals
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+ (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]
+
+        # verify that strictly decreasing point axes also work
+ if flip_points:
+ points = [tuple(reversed(p)) for p in points]
+
+ rng = np.random.default_rng(1234)
+
+ trailing_points = (3, 2)
+        # NB: values has `trailing_points` as extra trailing dimensions
+        values = rng.random((6, 7, 8, 9, *trailing_points))
+        sample = rng.random(4)  # a single sample point!
+
+ interp = RegularGridInterpolator(points, values, method=method,
+ bounds_error=False)
+ v = interp(sample)
+
+ # v has a single sample point *per entry in the trailing dimensions*
+ assert v.shape == (1, *trailing_points)
+
+        # check the values too: manually loop over the trailing dimensions
+ vs = np.empty(values.shape[-2:])
+ for i in range(values.shape[-2]):
+ for j in range(values.shape[-1]):
+ interp = RegularGridInterpolator(points, values[..., i, j],
+ method=method,
+ bounds_error=False)
+ vs[i, j] = interp(sample).item()
+ v2 = np.expand_dims(vs, axis=0)
+ assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+ def test_nonscalar_values_linear_2D(self):
+ # Verify that non-scalar values work in the 2D fast path
+ method = 'linear'
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+ (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0), ]
+
+ rng = np.random.default_rng(1234)
+
+ trailing_points = (3, 4)
+        # NB: values has `trailing_points` as extra trailing dimensions
+        values = rng.random((6, 7, *trailing_points))
+        sample = rng.random(2)  # a single sample point!
+
+ interp = RegularGridInterpolator(points, values, method=method,
+ bounds_error=False)
+ v = interp(sample)
+
+ # v has a single sample point *per entry in the trailing dimensions*
+ assert v.shape == (1, *trailing_points)
+
+        # check the values too: manually loop over the trailing dimensions
+ vs = np.empty(values.shape[-2:])
+ for i in range(values.shape[-2]):
+ for j in range(values.shape[-1]):
+ interp = RegularGridInterpolator(points, values[..., i, j],
+ method=method,
+ bounds_error=False)
+ vs[i, j] = interp(sample).item()
+ v2 = np.expand_dims(vs, axis=0)
+ assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ [np.float32, np.float64, np.complex64, np.complex128]
+ )
+ @pytest.mark.parametrize("xi_dtype", [np.float32, np.float64])
+ def test_float32_values(self, dtype, xi_dtype):
+ # regression test for gh-17718: values.dtype=float32 fails
+ def f(x, y):
+ return 2 * x**3 + 3 * y**2
+
+ x = np.linspace(1, 4, 11)
+ y = np.linspace(4, 7, 22)
+
+ xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
+ data = f(xg, yg)
+
+ data = data.astype(dtype)
+
+ interp = RegularGridInterpolator((x, y), data)
+
+ pts = np.array([[2.1, 6.2],
+ [3.3, 5.2]], dtype=xi_dtype)
+
+        # the values here are just what the call returns; the test checks
+        # that the call succeeds at all, instead of failing because cython
+        # lacks a float32 kernel
+ assert_allclose(interp(pts), [134.10469388, 153.40069388], atol=1e-7)
+
+ def test_bad_solver(self):
+ x = np.linspace(0, 3, 7)
+ y = np.linspace(0, 3, 7)
+ xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
+ data = xg + yg
+
+ # default method 'linear' does not accept 'solver'
+ with assert_raises(ValueError):
+ RegularGridInterpolator((x, y), data, solver=lambda x: x)
+
+ with assert_raises(TypeError):
+ # wrong solver interface
+ RegularGridInterpolator(
+ (x, y), data, method='slinear', solver=lambda x: x
+ )
+
+ with assert_raises(TypeError):
+ # unknown argument
+ RegularGridInterpolator(
+ (x, y), data, method='slinear', solver=lambda x: x, woof='woof'
+ )
+
+ with assert_raises(TypeError):
+ # unknown argument
+ RegularGridInterpolator(
+ (x, y), data, method='slinear', solver_args={'woof': 42}
+ )
+
+
+class MyValue:
+ """
+ Minimal indexable object
+ """
+
+ def __init__(self, shape):
+ self.ndim = 2
+ self.shape = shape
+ self._v = np.arange(np.prod(shape)).reshape(shape)
+
+ def __getitem__(self, idx):
+ return self._v[idx]
+
+ def __array_interface__(self):
+ return None
+
+ def __array__(self, dtype=None, copy=None):
+ raise RuntimeError("No array representation")
+
+
+class TestInterpN:
+ def _sample_2d_data(self):
+ x = np.array([.5, 2., 3., 4., 5.5, 6.])
+ y = np.array([.5, 2., 3., 4., 5.5, 6.])
+ z = np.array(
+ [
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 3, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ ]
+ )
+ return x, y, z
+
+ def test_spline_2d(self):
+ x, y, z = self._sample_2d_data()
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
+ lut.ev(xi[:, 0], xi[:, 1]))
+
+ @parametrize_rgi_interp_methods
+ def test_list_input(self, method):
+ x, y, z = self._sample_2d_data()
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ v1 = interpn((x, y), z, xi, method=method)
+ v2 = interpn(
+ (x.tolist(), y.tolist()), z.tolist(), xi.tolist(), method=method
+ )
+ assert_allclose(v1, v2, err_msg=method)
+
+ def test_spline_2d_outofbounds(self):
+ x = np.array([.5, 2., 3., 4., 5.5])
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+ actual = interpn((x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=999.99)
+ expected = lut.ev(xi[:, 0], xi[:, 1])
+ expected[2:4] = 999.99
+ assert_array_almost_equal(actual, expected)
+
+ # no extrapolation for splinef2d
+ assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=None)
+
+ def _sample_4d_data(self):
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def test_linear_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="linear")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_linear_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="linear",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_nearest_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values, method="nearest")
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="nearest")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_nearest_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="nearest",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_xi_1d(self):
+ # verify that 1-D xi works as expected
+ points, values = self._sample_4d_data()
+ sample = np.asarray([0.1, 0.1, 10., 9.])
+ v1 = interpn(points, values, sample, bounds_error=False)
+ v2 = interpn(points, values, sample[None,:], bounds_error=False)
+ assert_allclose(v1, v2)
+
+ def test_xi_nd(self):
+ # verify that higher-d xi works as expected
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ sample = np.random.rand(2, 3, 4)
+
+ v1 = interpn(points, values, sample, method='nearest',
+ bounds_error=False)
+ assert_equal(v1.shape, (2, 3))
+
+ v2 = interpn(points, values, sample.reshape(-1, 4),
+ method='nearest', bounds_error=False)
+ assert_allclose(v1, v2.reshape(v1.shape))
+
+ @parametrize_rgi_interp_methods
+ def test_xi_broadcast(self, method):
+ # verify that the interpolators broadcast xi
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+
+ xi = np.linspace(0, 1, 2)
+ yi = np.linspace(0, 3, 3)
+
+ sample = (xi[:, None], yi[None, :])
+ v1 = interpn(points, values, sample, method=method, bounds_error=False)
+ assert_equal(v1.shape, (2, 3))
+
+ xx, yy = np.meshgrid(xi, yi)
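+        # np.meshgrid defaults to 'xy' indexing, so transpose before
+        # raveling to recover the 'ij' ordering of the broadcast sample.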
+ sample = np.c_[xx.T.ravel(), yy.T.ravel()]
+
+ v2 = interpn(points, values, sample,
+ method=method, bounds_error=False)
+ assert_allclose(v1, v2.reshape(v1.shape))
+
+ @parametrize_rgi_interp_methods
+ def test_nonscalar_values(self, method):
+
+ if method == "quintic":
+ pytest.skip("Way too slow.")
+
+        # Verify that non-scalar values also work
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0)
+ ] * 2
+
+ rng = np.random.default_rng(1234)
+ values = rng.random((6, 6, 6, 6, 8))
+ sample = rng.random((7, 3, 4))
+
+ v = interpn(points, values, sample, method=method,
+ bounds_error=False)
+ assert_equal(v.shape, (7, 3, 8), err_msg=method)
+
+ vs = [interpn(points, values[..., j], sample, method=method,
+ bounds_error=False) for j in range(8)]
+ v2 = np.array(vs).transpose(1, 2, 0)
+
+ assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+ @parametrize_rgi_interp_methods
+ def test_nonscalar_values_2(self, method):
+
+ if method in {"cubic", "quintic"}:
+ pytest.skip("Way too slow.")
+
+        # Verify that non-scalar values also work: use different axis
+        # lengths to simplify tracing the internals
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+ (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
+ (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]
+
+ rng = np.random.default_rng(1234)
+
+ trailing_points = (3, 2)
+        # NB: values has `trailing_points` as extra trailing dimensions
+        values = rng.random((6, 7, 8, 9, *trailing_points))
+        sample = rng.random(4)  # a single sample point!
+
+ v = interpn(points, values, sample, method=method, bounds_error=False)
+
+ # v has a single sample point *per entry in the trailing dimensions*
+ assert v.shape == (1, *trailing_points)
+
+        # check the values too: manually loop over the trailing dimensions
+ vs = [[
+ interpn(points, values[..., i, j], sample, method=method,
+ bounds_error=False) for i in range(values.shape[-2])
+ ] for j in range(values.shape[-1])]
+
+ assert_allclose(v, np.asarray(vs).T, atol=1e-14, err_msg=method)
+
+ def test_non_scalar_values_splinef2d(self):
+        # Vector-valued data is not supported by the fitpack-based
+        # splinef2d method, so a ValueError is expected.
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ values = np.random.rand(3, 3, 3, 3, 6)
+ sample = np.random.rand(7, 11, 4)
+ assert_raises(ValueError, interpn, points, values, sample,
+ method='splinef2d')
+
+ @parametrize_rgi_interp_methods
+ def test_complex(self, method):
+ if method == "pchip":
+ pytest.skip("pchip does not make sense for complex data")
+
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ v1 = interpn(points, values, sample, method=method)
+ v2r = interpn(points, values.real, sample, method=method)
+ v2i = interpn(points, values.imag, sample, method=method)
+ v2 = v2r + 1j*v2i
+
+ assert_allclose(v1, v2)
+
+ def test_complex_pchip(self):
+ # Complex-valued data deprecated for pchip
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ with pytest.deprecated_call(match='complex'):
+ interpn(points, values, sample, method='pchip')
+
+ def test_complex_spline2fd(self):
+        # Complex-valued data is not supported by splinef2d
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ with assert_warns(ComplexWarning):
+ interpn(points, values, sample, method='splinef2d')
+
+ @pytest.mark.parametrize(
+ "method",
+ ["linear", "nearest"]
+ )
+ def test_duck_typed_values(self, method):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = MyValue((5, 7))
+
+ v1 = interpn((x, y), values, [0.4, 0.7], method=method)
+ v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
+ assert_allclose(v1, v2)
+
+ @parametrize_rgi_interp_methods
+ def test_matrix_input(self, method):
+ x = np.linspace(0, 2, 6)
+ y = np.linspace(0, 1, 7)
+
+ values = matrix(np.random.rand(6, 7))
+
+ sample = np.random.rand(3, 7, 2)
+
+ v1 = interpn((x, y), values, sample, method=method)
+ v2 = interpn((x, y), np.asarray(values), sample, method=method)
+ assert_allclose(v1, v2)
+
+ def test_length_one_axis(self):
+ # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
+ # Along the axis it's linear interpolation; away from the length-1
+ # axis, it's an extrapolation, so fill_value should be used.
+
+ values = np.array([[0.1, 1, 10]])
+ xi = np.array([[1, 2.2], [1, 3.2], [1, 3.8]])
+
+ res = interpn(([1], [2, 3, 4]), values, xi)
+ wanted = [0.9*0.2 + 0.1, # on [2, 3) it's 0.9*(x-2) + 0.1
+ 9*0.2 + 1, # on [3, 4] it's 9*(x-3) + 1
+ 9*0.8 + 1]
+
+ assert_allclose(res, wanted, atol=1e-15)
+
+ # check extrapolation
+ xi = np.array([[1.1, 2.2], [1.5, 3.2], [-2.3, 3.8]])
+ res = interpn(([1], [2, 3, 4]), values, xi,
+ bounds_error=False, fill_value=None)
+
+ assert_allclose(res, wanted, atol=1e-15)
+
+ def test_descending_points(self):
+ def value_func_4d(x, y, z, a):
+ return 2 * x ** 3 + 3 * y ** 2 - z - a
+
+ x1 = np.array([0, 1, 2, 3])
+ x2 = np.array([0, 10, 20, 30])
+ x3 = np.array([0, 10, 20, 30])
+ x4 = np.array([0, .1, .2, .30])
+ points = (x1, x2, x3, x4)
+ values = value_func_4d(
+ *np.meshgrid(*points, indexing='ij', sparse=True))
+ pts = (0.1, 0.3, np.transpose(np.linspace(0, 30, 4)),
+ np.linspace(0, 0.3, 4))
+ correct_result = interpn(points, values, pts)
+
+ x1_descend = x1[::-1]
+ x2_descend = x2[::-1]
+ x3_descend = x3[::-1]
+ x4_descend = x4[::-1]
+ points_shuffled = (x1_descend, x2_descend, x3_descend, x4_descend)
+ values_shuffled = value_func_4d(
+ *np.meshgrid(*points_shuffled, indexing='ij', sparse=True))
+ test_result = interpn(points_shuffled, values_shuffled, pts)
+
+ assert_array_equal(correct_result, test_result)
+
+ def test_invalid_points_order(self):
+ x = np.array([.5, 2., 0., 4., 5.5]) # not ascending or descending
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+
+ match = "must be strictly ascending or descending"
+ with pytest.raises(ValueError, match=match):
+ interpn((x, y), z, xi)
+
+ def test_invalid_xi_dimensions(self):
+ # https://github.com/scipy/scipy/issues/16519
+ points = [(0, 1)]
+ values = [0, 1]
+ xi = np.ones((1, 1, 3))
+ msg = ("The requested sample points xi have dimension 3, but this "
+ "RegularGridInterpolator has dimension 1")
+ with assert_raises(ValueError, match=msg):
+ interpn(points, values, xi)
+
+ def test_readonly_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ x = np.linspace(0, 4, 5)
+ y = np.linspace(0, 5, 6)
+ z = np.linspace(0, 6, 7)
+ points = (x, y, z)
+ values = np.ones((5, 6, 7))
+ point = np.array([2.21, 3.12, 1.15])
+ for d in points:
+ d.flags.writeable = False
+ values.flags.writeable = False
+ point.flags.writeable = False
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ def test_2d_readonly_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ # test special 2d case
+ x = np.linspace(0, 4, 5)
+ y = np.linspace(0, 5, 6)
+ points = (x, y)
+ values = np.ones((5, 6))
+ point = np.array([2.21, 3.12])
+ for d in points:
+ d.flags.writeable = False
+ values.flags.writeable = False
+ point.flags.writeable = False
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ def test_non_c_contiguous_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ x = np.linspace(0, 4, 5)
+ x = np.vstack((x, np.empty_like(x))).T.copy()[:, 0]
+ assert not x.flags.c_contiguous
+ y = np.linspace(0, 5, 6)
+ z = np.linspace(0, 6, 7)
+ points = (x, y, z)
+ values = np.ones((5, 6, 7))
+ point = np.array([2.21, 3.12, 1.15])
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ @pytest.mark.parametrize("dtype", ['>f8', ' int:
+ return len(self._shape)
+
+ @property
+ def _shape_as_2d(self):
+ s = self._shape
+ return (1, s[-1]) if len(s) == 1 else s
+
+ @property
+ def _bsr_container(self):
+ from ._bsr import bsr_array
+ return bsr_array
+
+ @property
+ def _coo_container(self):
+ from ._coo import coo_array
+ return coo_array
+
+ @property
+ def _csc_container(self):
+ from ._csc import csc_array
+ return csc_array
+
+ @property
+ def _csr_container(self):
+ from ._csr import csr_array
+ return csr_array
+
+ @property
+ def _dia_container(self):
+ from ._dia import dia_array
+ return dia_array
+
+ @property
+ def _dok_container(self):
+ from ._dok import dok_array
+ return dok_array
+
+ @property
+ def _lil_container(self):
+ from ._lil import lil_array
+ return lil_array
+
+ def __init__(self, maxprint=MAXPRINT):
+ self._shape = None
+ if self.__class__.__name__ == '_spbase':
+ raise ValueError("This class is not intended"
+ " to be instantiated directly.")
+ self.maxprint = maxprint
+
+ # Use this in 1.14.0 and later:
+ #
+ # @property
+ # def shape(self):
+ # return self._shape
+
+ def reshape(self, *args, **kwargs):
+ """reshape(self, shape, order='C', copy=False)
+
+ Gives a new shape to a sparse array/matrix without changing its data.
+
+ Parameters
+ ----------
+ shape : length-2 tuple of ints
+ The new shape should be compatible with the original shape.
+ order : {'C', 'F'}, optional
+ Read the elements using this index order. 'C' means to read and
+ write the elements using C-like index order; e.g., read entire first
+ row, then second row, etc. 'F' means to read and write the elements
+ using Fortran-like index order; e.g., read entire first column, then
+ second column, etc.
+ copy : bool, optional
+ Indicates whether or not attributes of self should be copied
+ whenever possible. The degree to which attributes are copied varies
+ depending on the type of sparse array being used.
+
+ Returns
+ -------
+ reshaped : sparse array/matrix
+ A sparse array/matrix with the given `shape`, not necessarily of the same
+ format as the current object.
+
+ See Also
+ --------
+ numpy.reshape : NumPy's implementation of 'reshape' for ndarrays
+ """
+ # If the shape already matches, don't bother doing an actual reshape
+ # Otherwise, the default is to convert to COO and use its reshape
+ is_array = isinstance(self, sparray)
+ shape = check_shape(args, self.shape, allow_1d=is_array)
+ order, copy = check_reshape_kwargs(kwargs)
+ if shape == self.shape:
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
+
+ def resize(self, shape):
+ """Resize the array/matrix in-place to dimensions given by ``shape``
+
+ Any elements that lie within the new shape will remain at the same
+ indices, while non-zero elements lying outside the new shape are
+ removed.
+
+ Parameters
+ ----------
+ shape : (int, int)
+ number of rows and columns in the new array/matrix
+
+ Notes
+ -----
+ The semantics are not identical to `numpy.ndarray.resize` or
+ `numpy.resize`. Here, the same data will be maintained at each index
+ before and after reshape, if that index is within the new bounds. In
+ numpy, resizing maintains contiguity of the array, moving elements
+ around in the logical array but not within a flattened representation.
+
+ We give no guarantees about whether the underlying data attributes
+ (arrays, etc.) will be modified in place or replaced with new objects.
+ """
+ # As an inplace operation, this requires implementation in each format.
+ raise NotImplementedError(
+ f'{type(self).__name__}.resize is not implemented')
+
+ def astype(self, dtype, casting='unsafe', copy=True):
+ """Cast the array/matrix elements to a specified type.
+
+ Parameters
+ ----------
+ dtype : string or numpy dtype
+ Typecode or data-type to which to cast the data.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+ Defaults to 'unsafe' for backwards compatibility.
+ 'no' means the data types should not be cast at all.
+ 'equiv' means only byte-order changes are allowed.
+ 'safe' means only casts which can preserve values are allowed.
+ 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ 'unsafe' means any data conversions may be done.
+ copy : bool, optional
+ If `copy` is `False`, the result might share some memory with this
+ array/matrix. If `copy` is `True`, it is guaranteed that the result and
+ this array/matrix do not share any memory.
+ """
+
+ dtype = np.dtype(dtype)
+ if self.dtype != dtype:
+ return self.tocsr().astype(
+ dtype, casting=casting, copy=copy).asformat(self.format)
+ elif copy:
+ return self.copy()
+ else:
+ return self
+
+ @classmethod
+ def _ascontainer(cls, X, **kwargs):
+ if issubclass(cls, sparray):
+ return np.asarray(X, **kwargs)
+ else:
+ return asmatrix(X, **kwargs)
+
+ @classmethod
+ def _container(cls, X, **kwargs):
+ if issubclass(cls, sparray):
+ return np.array(X, **kwargs)
+ else:
+ return matrix(X, **kwargs)
+
+ def _asfptype(self):
+ """Upcast array to a floating point format (if necessary)"""
+
+ fp_types = ['f', 'd', 'F', 'D']
+
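+        # For example, float32 ('f') data is returned as-is by the check
+        # below, while an int32 array upcasts to 'd' (float64), the first
+        # type in fp_types it can be safely cast to.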
+ if self.dtype.char in fp_types:
+ return self
+ else:
+ for fp_type in fp_types:
+ if self.dtype <= np.dtype(fp_type):
+ return self.astype(fp_type)
+
+ raise TypeError('cannot upcast [%s] to a floating '
+ 'point format' % self.dtype.name)
+
+ def __iter__(self):
+ for r in range(self.shape[0]):
+ yield self[r]
+
+ def _getmaxprint(self):
+ """Maximum number of elements to display when printed."""
+ return self.maxprint
+
+ def count_nonzero(self):
+ """Number of non-zero entries, equivalent to
+
+ np.count_nonzero(a.toarray())
+
+        Unlike the nnz property, which returns the number of stored
+ entries (the length of the data attribute), this method counts the
+ actual number of non-zero entries in data.
+ """
+ raise NotImplementedError("count_nonzero not implemented for %s." %
+ self.__class__.__name__)
+
+ def _getnnz(self, axis=None):
+ """Number of stored values, including explicit zeros.
+
+ Parameters
+ ----------
+ axis : None, 0, or 1
+ Select between the number of values across the whole array, in
+ each column, or in each row.
+
+ See also
+ --------
+ count_nonzero : Number of non-zero entries
+ """
+ raise NotImplementedError("getnnz not implemented for %s." %
+ self.__class__.__name__)
+
+ @property
+ def nnz(self) -> int:
+ """Number of stored values, including explicit zeros.
+
+ See also
+ --------
+ count_nonzero : Number of non-zero entries
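+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 0, 0], [0, 2, 3]])
+        >>> A.nnz
+        3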
+ """
+ return self._getnnz()
+
+ @property
+ def size(self) -> int:
+ """Number of stored values.
+
+ See also
+ --------
+ count_nonzero : Number of non-zero values.
+ """
+ return self._getnnz()
+
+ @property
+ def format(self) -> str:
+        """Format string for the array/matrix."""
+ return self._format
+
+ @property
+ def A(self) -> np.ndarray:
+ """DEPRECATED: Return a dense array.
+
+ .. deprecated:: 1.11.0
+
+ `.A` is deprecated and will be removed in v1.14.0.
+ Use `.toarray()` instead.
+ """
+ if isinstance(self, sparray):
+ message = ("`.A` is deprecated and will be removed in v1.14.0. "
+ "Use `.toarray()` instead.")
+ warn(VisibleDeprecationWarning(message), stacklevel=2)
+ return self.toarray()
+
+ @property
+ def T(self):
+ """Transpose."""
+ return self.transpose()
+
+ @property
+ def H(self):
+ """DEPRECATED: Returns the (complex) conjugate transpose.
+
+ .. deprecated:: 1.11.0
+
+ `.H` is deprecated and will be removed in v1.14.0.
+ Please use `.T.conjugate()` instead.
+ """
+ if isinstance(self, sparray):
+ message = ("`.H` is deprecated and will be removed in v1.14.0. "
+ "Please use `.T.conjugate()` instead.")
+ warn(VisibleDeprecationWarning(message), stacklevel=2)
+ return self.T.conjugate()
+
+ @property
+ def real(self):
+ return self._real()
+
+ @property
+ def imag(self):
+ return self._imag()
+
+ def __repr__(self):
+ _, format_name = _formats[self.format]
+ sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
+ shape_str = 'x'.join(str(x) for x in self.shape)
+ return (
+ f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
+ f"\twith {self.nnz} stored elements in {format_name} format>"
+ )
+
+ def __str__(self):
+ maxprint = self._getmaxprint()
+
+ A = self.tocoo()
+
+ # helper function, outputs "(i,j) v"
+ def tostr(row, col, data):
+ triples = zip(list(zip(row, col)), data)
+ return '\n'.join([(' {}\t{}'.format(*t)) for t in triples])
+
+ if self.nnz > maxprint:
+ half = maxprint // 2
+ out = tostr(A.row[:half], A.col[:half], A.data[:half])
+ out += "\n :\t:\n"
+ half = maxprint - maxprint//2
+ out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
+ else:
+ out = tostr(A.row, A.col, A.data)
+
+ return out
+
+ def __bool__(self): # Simple -- other ideas?
+ if self.shape == (1, 1):
+ return self.nnz != 0
+ else:
+ raise ValueError("The truth value of an array with more than one "
+ "element is ambiguous. Use a.any() or a.all().")
+ __nonzero__ = __bool__
+
+ # What should len(sparse) return? For consistency with dense matrices,
+ # perhaps it should be the number of rows? But for some uses the number of
+ # non-zeros is more important. For now, raise an exception!
+ def __len__(self):
+ raise TypeError("sparse array length is ambiguous; use getnnz()"
+ " or shape[0]")
+
+ def asformat(self, format, copy=False):
+ """Return this array/matrix in the passed format.
+
+ Parameters
+ ----------
+ format : {str, None}
+ The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
+ or None for no conversion.
+ copy : bool, optional
+ If True, the result is guaranteed to not share data with self.
+
+ Returns
+ -------
+ A : This array/matrix in the passed format.
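+
+        Examples
+        --------
+        Converting a CSR array to COO format:
+
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 0], [0, 2]])
+        >>> A.asformat('coo').format
+        'coo'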
+ """
+ if format is None or format == self.format:
+ if copy:
+ return self.copy()
+ else:
+ return self
+ else:
+ try:
+ convert_method = getattr(self, 'to' + format)
+ except AttributeError as e:
+ raise ValueError(f'Format {format} is unknown.') from e
+
+ # Forward the copy kwarg, if it's accepted.
+ try:
+ return convert_method(copy=copy)
+ except TypeError:
+ return convert_method()
+
+ ###################################################################
+ # NOTE: All arithmetic operations use csr_matrix by default.
+ # Therefore a new sparse array format just needs to define a
+ # .tocsr() method to provide arithmetic support. Any of these
+ # methods can be overridden for efficiency.
+ ####################################################################
+
+ def multiply(self, other):
+ """Point-wise multiplication by another array/matrix."""
+ return self.tocsr().multiply(other)
+
+ def maximum(self, other):
+ """Element-wise maximum between this and another array/matrix."""
+ return self.tocsr().maximum(other)
+
+ def minimum(self, other):
+ """Element-wise minimum between this and another array/matrix."""
+ return self.tocsr().minimum(other)
+
+ def dot(self, other):
+        """Ordinary dot product.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.sparse import csr_array
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+ >>> v = np.array([1, 0, -1])
+ >>> A.dot(v)
+ array([ 1, -3, -1], dtype=int64)
+
+ """
+ if np.isscalar(other):
+ return self * other
+ else:
+ return self @ other
+
+ def power(self, n, dtype=None):
+ """Element-wise power."""
+ return self.tocsr().power(n, dtype=dtype)
+
+ def __eq__(self, other):
+ return self.tocsr().__eq__(other)
+
+ def __ne__(self, other):
+ return self.tocsr().__ne__(other)
+
+ def __lt__(self, other):
+ return self.tocsr().__lt__(other)
+
+ def __gt__(self, other):
+ return self.tocsr().__gt__(other)
+
+ def __le__(self, other):
+ return self.tocsr().__le__(other)
+
+ def __ge__(self, other):
+ return self.tocsr().__ge__(other)
+
+ def __abs__(self):
+ return abs(self.tocsr())
+
+ def __round__(self, ndigits=0):
+ return round(self.tocsr(), ndigits=ndigits)
+
+ def _add_sparse(self, other):
+ return self.tocsr()._add_sparse(other)
+
+ def _add_dense(self, other):
+ return self.tocoo()._add_dense(other)
+
+ def _sub_sparse(self, other):
+ return self.tocsr()._sub_sparse(other)
+
+ def _sub_dense(self, other):
+ return self.todense() - other
+
+ def _rsub_dense(self, other):
+ # note: this can't be replaced by other + (-self) for unsigned types
+ return other - self.todense()
+
+ def __add__(self, other): # self + other
+ if isscalarlike(other):
+ if other == 0:
+ return self.copy()
+ # Now we would add this scalar to every element.
+ raise NotImplementedError('adding a nonzero scalar to a '
+ 'sparse array is not supported')
+ elif issparse(other):
+ if other.shape != self.shape:
+ raise ValueError("inconsistent shapes")
+ return self._add_sparse(other)
+ elif isdense(other):
+ other = np.broadcast_to(other, self.shape)
+ return self._add_dense(other)
+ else:
+ return NotImplemented
+
+ def __radd__(self,other): # other + self
+ return self.__add__(other)
+
+ def __sub__(self, other): # self - other
+ if isscalarlike(other):
+ if other == 0:
+ return self.copy()
+ raise NotImplementedError('subtracting a nonzero scalar from a '
+ 'sparse array is not supported')
+ elif issparse(other):
+ if other.shape != self.shape:
+ raise ValueError("inconsistent shapes")
+ return self._sub_sparse(other)
+ elif isdense(other):
+ other = np.broadcast_to(other, self.shape)
+ return self._sub_dense(other)
+ else:
+ return NotImplemented
+
+ def __rsub__(self,other): # other - self
+ if isscalarlike(other):
+ if other == 0:
+ return -self.copy()
+ raise NotImplementedError('subtracting a sparse array from a '
+ 'nonzero scalar is not supported')
+ elif isdense(other):
+ other = np.broadcast_to(other, self.shape)
+ return self._rsub_dense(other)
+ else:
+ return NotImplemented
+
+ def _matmul_dispatch(self, other):
+ """np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`
+
+ interpret other and call one of the following
+ self._mul_scalar()
+ self._matmul_vector()
+ self._matmul_multivector()
+ self._matmul_sparse()
+ """
+ # This method has to be different from `__matmul__` because it is also
+ # called by sparse matrix classes.
+
+        # Matrix multiplication is currently only supported for 2D
+        # arrays, so we unpack and use only the lengths of the last
+        # two axes.
+ M, N = self._shape_as_2d
+
+ if other.__class__ is np.ndarray:
+ # Fast path for the most common case
+ if other.shape == (N,):
+ return self._matmul_vector(other)
+ elif other.shape == (N, 1):
+ result = self._matmul_vector(other.ravel())
+ if self.ndim == 1:
+ return result
+ return result.reshape(M, 1)
+ elif other.ndim == 2 and other.shape[0] == N:
+ return self._matmul_multivector(other)
+
+ if isscalarlike(other):
+ # scalar value
+ return self._mul_scalar(other)
+
+ if issparse(other):
+ if self.shape[-1] != other.shape[0]:
+ raise ValueError('dimension mismatch')
+ if other.ndim == 1:
+ raise ValueError('Cannot yet multiply a 1d sparse array')
+ return self._matmul_sparse(other)
+
+ # If it's a list or whatever, treat it like an array
+ other_a = np.asanyarray(other)
+
+ if other_a.ndim == 0 and other_a.dtype == np.object_:
+ # Not interpretable as an array; return NotImplemented so that
+ # other's __rmatmul__ can kick in if that's implemented.
+ return NotImplemented
+
+ try:
+ other.shape
+ except AttributeError:
+ other = other_a
+
+ if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
+ # dense row or column vector
+ if other.shape != (N,) and other.shape != (N, 1):
+ raise ValueError('dimension mismatch')
+
+ result = self._matmul_vector(np.ravel(other))
+
+ if isinstance(other, np.matrix):
+ result = self._ascontainer(result)
+
+ if other.ndim == 2 and other.shape[1] == 1:
+ # If 'other' was an (nx1) column vector, reshape the result
+ result = result.reshape(-1, 1)
+
+ return result
+
+ elif other.ndim == 2:
+            # dense 2D array or matrix ("multivector")
+
+ if other.shape[0] != N:
+ raise ValueError('dimension mismatch')
+
+ result = self._matmul_multivector(np.asarray(other))
+
+ if isinstance(other, np.matrix):
+ result = self._ascontainer(result)
+
+ return result
+
+ else:
+ raise ValueError('could not interpret dimensions')
+
+ def __mul__(self, *args, **kwargs):
+ return self.multiply(*args, **kwargs)
+
+ def __rmul__(self, *args, **kwargs): # other * self
+ return self.multiply(*args, **kwargs)
+
+ # by default, use CSR for __mul__ handlers
+ def _mul_scalar(self, other):
+ return self.tocsr()._mul_scalar(other)
+
+ def _matmul_vector(self, other):
+ return self.tocsr()._matmul_vector(other)
+
+ def _matmul_multivector(self, other):
+ return self.tocsr()._matmul_multivector(other)
+
+ def _matmul_sparse(self, other):
+ return self.tocsr()._matmul_sparse(other)
+
+ def _rmatmul_dispatch(self, other):
+ if isscalarlike(other):
+ return self._mul_scalar(other)
+ else:
+ # Don't use asarray unless we have to
+ try:
+ tr = other.transpose()
+ except AttributeError:
+ tr = np.asarray(other).transpose()
+ ret = self.transpose()._matmul_dispatch(tr)
+ if ret is NotImplemented:
+ return NotImplemented
+ return ret.transpose()
+
+ #######################
+ # matmul (@) operator #
+ #######################
+
+ def __matmul__(self, other):
+ if isscalarlike(other):
+ raise ValueError("Scalar operands are not allowed, "
+ "use '*' instead")
+ return self._matmul_dispatch(other)
+
+ def __rmatmul__(self, other):
+ if isscalarlike(other):
+ raise ValueError("Scalar operands are not allowed, "
+ "use '*' instead")
+ return self._rmatmul_dispatch(other)
+
+ ####################
+ # Other Arithmetic #
+ ####################
+
+ def _divide(self, other, true_divide=False, rdivide=False):
+ if isscalarlike(other):
+ if rdivide:
+ if true_divide:
+ return np.true_divide(other, self.todense())
+ else:
+ return np.divide(other, self.todense())
+
+ if true_divide and np.can_cast(self.dtype, np.float64):
+ return self.astype(np.float64)._mul_scalar(1./other)
+ else:
+ r = self._mul_scalar(1./other)
+
+ scalar_dtype = np.asarray(other).dtype
+ if (np.issubdtype(self.dtype, np.integer) and
+ np.issubdtype(scalar_dtype, np.integer)):
+ return r.astype(self.dtype)
+ else:
+ return r
+
+ elif isdense(other):
+ if not rdivide:
+ if true_divide:
+ recip = np.true_divide(1., other)
+ else:
+ recip = np.divide(1., other)
+ return self.multiply(recip)
+ else:
+ if true_divide:
+ return np.true_divide(other, self.todense())
+ else:
+ return np.divide(other, self.todense())
+ elif issparse(other):
+ if rdivide:
+ return other._divide(self, true_divide, rdivide=False)
+
+ self_csr = self.tocsr()
+ if true_divide and np.can_cast(self.dtype, np.float64):
+ return self_csr.astype(np.float64)._divide_sparse(other)
+ else:
+ return self_csr._divide_sparse(other)
+ else:
+ return NotImplemented
+
+ def __truediv__(self, other):
+ return self._divide(other, true_divide=True)
+
+ def __div__(self, other):
+ # Always do true division
+ return self._divide(other, true_divide=True)
+
+ def __rtruediv__(self, other):
+ # Implementing this as the inverse would be too magical -- bail out
+ return NotImplemented
+
+ def __rdiv__(self, other):
+ # Implementing this as the inverse would be too magical -- bail out
+ return NotImplemented
+
+ def __neg__(self):
+ return -self.tocsr()
+
+ def __iadd__(self, other):
+ return NotImplemented
+
+ def __isub__(self, other):
+ return NotImplemented
+
+ def __imul__(self, other):
+ return NotImplemented
+
+ def __idiv__(self, other):
+ return self.__itruediv__(other)
+
+ def __itruediv__(self, other):
+ return NotImplemented
+
+ def __pow__(self, *args, **kwargs):
+ return self.power(*args, **kwargs)
+
+ def transpose(self, axes=None, copy=False):
+ """
+ Reverses the dimensions of the sparse array/matrix.
+
+ Parameters
+ ----------
+ axes : None, optional
+ This argument is in the signature *solely* for NumPy
+ compatibility reasons. Do not pass in anything except
+ for the default value.
+ copy : bool, optional
+ Indicates whether or not attributes of `self` should be
+ copied whenever possible. The degree to which attributes
+ are copied varies depending on the type of sparse array/matrix
+ being used.
+
+ Returns
+ -------
+ p : `self` with the dimensions reversed.
+
+ Notes
+ -----
+ If `self` is a `csr_array` or a `csc_array`, then this will return a
+ `csc_array` or a `csr_array`, respectively.
+
+ See Also
+ --------
+ numpy.transpose : NumPy's implementation of 'transpose' for ndarrays
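+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 2], [3, 4], [5, 6]])
+        >>> A.transpose().toarray()
+        array([[1, 3, 5],
+               [2, 4, 6]])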
+ """
+ return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
+
+ def conjugate(self, copy=True):
+ """Element-wise complex conjugation.
+
+ If the array/matrix is of non-complex data type and `copy` is False,
+ this method does nothing and the data is not copied.
+
+ Parameters
+ ----------
+ copy : bool, optional
+ If True, the result is guaranteed to not share data with self.
+
+ Returns
+ -------
+ A : The element-wise complex conjugate.
+
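+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1 + 2j, 0], [0, 3 - 1j]])
+        >>> A.conjugate().toarray()
+        array([[1.-2.j, 0.+0.j],
+               [0.+0.j, 3.+1.j]])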
+ """
+ if np.issubdtype(self.dtype, np.complexfloating):
+ return self.tocsr(copy=copy).conjugate(copy=False)
+ elif copy:
+ return self.copy()
+ else:
+ return self
+
+ def conj(self, copy=True):
+ return self.conjugate(copy=copy)
+
+ conj.__doc__ = conjugate.__doc__
+
+ def _real(self):
+ return self.tocsr()._real()
+
+ def _imag(self):
+ return self.tocsr()._imag()
+
+ def nonzero(self):
+ """Nonzero indices of the array/matrix.
+
+ Returns a tuple of arrays (row,col) containing the indices
+ of the non-zero elements of the array.
+
+ Examples
+ --------
+ >>> from scipy.sparse import csr_array
+ >>> A = csr_array([[1,2,0],[0,0,3],[4,0,5]])
+ >>> A.nonzero()
+ (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
+
+ """
+
+ # convert to COOrdinate format
+ A = self.tocoo()
+ nz_mask = A.data != 0
+ return (A.row[nz_mask], A.col[nz_mask])
+
+ def _getcol(self, j):
+ """Returns a copy of column j of the array, as an (m x 1) sparse
+ array (column vector).
+ """
+ if self.ndim == 1:
+ raise ValueError("getcol not provided for 1d arrays. Use indexing A[j]")
+ # Subclasses should override this method for efficiency.
+ # Post-multiply by a (n x 1) column vector 'a' containing all zeros
+ # except for a_j = 1
+ N = self.shape[-1]
+ if j < 0:
+ j += N
+ if j < 0 or j >= N:
+ raise IndexError("index out of bounds")
+ col_selector = self._csc_container(([1], [[j], [0]]),
+ shape=(N, 1), dtype=self.dtype)
+ result = self @ col_selector
+ return result
+
+ def _getrow(self, i):
+ """Returns a copy of row i of the array, as a (1 x n) sparse
+ array (row vector).
+ """
+ if self.ndim == 1:
+ raise ValueError("getrow not meaningful for a 1d array")
+ # Subclasses should override this method for efficiency.
+ # Pre-multiply by a (1 x m) row vector 'a' containing all zeros
+ # except for a_i = 1
+ M = self.shape[0]
+ if i < 0:
+ i += M
+ if i < 0 or i >= M:
+ raise IndexError("index out of bounds")
+ row_selector = self._csr_container(([1], [[0], [i]]),
+ shape=(1, M), dtype=self.dtype)
+ return row_selector @ self
+
+ # The following dunder methods cannot be implemented.
+ #
+ # def __array__(self):
+ # # Sparse matrices rely on NumPy wrapping them in object arrays under
+ # # the hood to make unary ufuncs work on them. So we cannot raise
+ # # TypeError here - which would be handy to not give users object
+ # # arrays they probably don't want (they're looking for `.toarray()`).
+ # #
+ # # Conversion with `toarray()` would also break things because of the
+ # # behavior discussed above, plus we want to avoid densification by
+ # # accident because that can too easily blow up memory.
+ #
+ # def __array_ufunc__(self):
+ # # We cannot implement __array_ufunc__ due to mismatching semantics.
+ # # See gh-7707 and gh-7349 for details.
+ #
+ # def __array_function__(self):
+ # # We cannot implement __array_function__ due to mismatching semantics.
+ # # See gh-10362 for details.
+
+ def todense(self, order=None, out=None):
+ """
+ Return a dense representation of this sparse array/matrix.
+
+ Parameters
+ ----------
+ order : {'C', 'F'}, optional
+ Whether to store multi-dimensional data in C (row-major)
+ or Fortran (column-major) order in memory. The default
+ is 'None', which provides no ordering guarantees.
+ Cannot be specified in conjunction with the `out`
+ argument.
+
+ out : ndarray, 2-D, optional
+ If specified, uses this array (or `numpy.matrix`) as the
+ output buffer instead of allocating a new array to
+ return. The provided array must have the same shape and
+ dtype as the sparse array/matrix on which you are calling the
+ method.
+
+ Returns
+ -------
+        arr : numpy.matrix or numpy.ndarray, 2-D
+            A `numpy.matrix` (for sparse matrices) or `numpy.ndarray` (for
+            sparse arrays) with the same shape and containing the same data
+            represented by the sparse array/matrix, with the requested
+            memory order. If `out` was passed and was an array (rather
+            than a `numpy.matrix`), it will be filled with the appropriate
+            values and, for sparse matrices, returned wrapped in a
+            `numpy.matrix` object that shares the same memory.
+ """
+ return self._ascontainer(self.toarray(order=order, out=out))
+
+ def toarray(self, order=None, out=None):
+ """
+ Return a dense ndarray representation of this sparse array/matrix.
+
+ Parameters
+ ----------
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C (row-major)
+ or Fortran (column-major) order in memory. The default
+ is 'None', which provides no ordering guarantees.
+ Cannot be specified in conjunction with the `out`
+ argument.
+
+ out : ndarray, 2-D, optional
+ If specified, uses this array as the output buffer
+ instead of allocating a new array to return. The provided
+ array must have the same shape and dtype as the sparse
+ array/matrix on which you are calling the method. For most
+ sparse types, `out` is required to be memory contiguous
+ (either C or Fortran ordered).
+
+ Returns
+ -------
+ arr : ndarray, 2-D
+ An array with the same shape and containing the same
+ data represented by the sparse array/matrix, with the requested
+ memory order. If `out` was passed, the same object is
+ returned after being modified in-place to contain the
+ appropriate values.
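+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 0], [0, 2]])
+        >>> A.toarray()
+        array([[1, 0],
+               [0, 2]])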
+ """
+ return self.tocoo(copy=False).toarray(order=order, out=out)
+
+ # Any sparse array format deriving from _spbase must define one of
+ # tocsr or tocoo. The other conversion methods may be implemented for
+ # efficiency, but are not required.
+ def tocsr(self, copy=False):
+ """Convert this array/matrix to Compressed Sparse Row format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant csr_array/matrix.
+ """
+ return self.tocoo(copy=copy).tocsr(copy=False)
+
+ def todok(self, copy=False):
+ """Convert this array/matrix to Dictionary Of Keys format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant dok_array/matrix.
+ """
+ return self.tocoo(copy=copy).todok(copy=False)
+
+ def tocoo(self, copy=False):
+ """Convert this array/matrix to COOrdinate format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant coo_array/matrix.
+ """
+ return self.tocsr(copy=False).tocoo(copy=copy)
+
+ def tolil(self, copy=False):
+ """Convert this array/matrix to List of Lists format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant lil_array/matrix.
+ """
+ return self.tocsr(copy=False).tolil(copy=copy)
+
+ def todia(self, copy=False):
+ """Convert this array/matrix to sparse DIAgonal format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant dia_array/matrix.
+ """
+ return self.tocoo(copy=copy).todia(copy=False)
+
+ def tobsr(self, blocksize=None, copy=False):
+ """Convert this array/matrix to Block Sparse Row format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant bsr_array/matrix.
+
+ When blocksize=(R, C) is provided, it will be used for construction of
+ the bsr_array/matrix.
+ """
+ return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
+
+ def tocsc(self, copy=False):
+ """Convert this array/matrix to Compressed Sparse Column format.
+
+ With copy=False, the data/indices may be shared between this array/matrix and
+ the resultant csc_array/matrix.
+ """
+ return self.tocsr(copy=copy).tocsc(copy=False)
+
+ def copy(self):
+ """Returns a copy of this array/matrix.
+
+ No data/indices will be shared between the returned value and current
+ array/matrix.
+ """
+ return self.__class__(self, copy=True)
+
+ def sum(self, axis=None, dtype=None, out=None):
+ """
+ Sum the array/matrix elements over a given axis.
+
+ Parameters
+ ----------
+        axis : {-2, -1, 0, 1, None}, optional
+ Axis along which the sum is computed. The default is to
+ compute the sum of all the array/matrix elements, returning a scalar
+ (i.e., `axis` = `None`).
+ dtype : dtype, optional
+ The type of the returned array/matrix and of the accumulator in which
+ the elements are summed. The dtype of `a` is used by default
+ unless `a` has an integer dtype of less precision than the default
+ platform integer. In that case, if `a` is signed then the platform
+ integer is used while if `a` is unsigned then an unsigned integer
+ of the same precision as the platform integer is used.
+
+ .. versionadded:: 0.18.0
+
+ out : np.matrix, optional
+ Alternative output matrix in which to place the result. It must
+ have the same shape as the expected output, but the type of the
+ output values will be cast if necessary.
+
+ .. versionadded:: 0.18.0
+
+ Returns
+ -------
+ sum_along_axis : np.matrix
+ A matrix with the same shape as `self`, with the specified
+ axis removed.
+
+ See Also
+ --------
+ numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
+
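+        Examples
+        --------
+        Summing all elements, then summing each column:
+
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 2], [3, 4]])
+        >>> int(A.sum())
+        10
+        >>> A.sum(axis=0)
+        array([4, 6])
+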
+ """
+ validateaxis(axis)
+
+ # Mimic numpy's casting.
+ res_dtype = get_sum_dtype(self.dtype)
+
+ if self.ndim == 1:
+ if axis not in (None, -1, 0):
+ raise ValueError("axis must be None, -1 or 0")
+ ret = (self @ np.ones(self.shape, dtype=res_dtype)).astype(dtype)
+
+ if out is not None:
+ if any(dim != 1 for dim in out.shape):
+ raise ValueError("dimensions do not match")
+ out[...] = ret
+ return ret
+
+ # We use multiplication by a matrix of ones to achieve this.
+ # For some sparse array formats more efficient methods are
+ # possible -- these should override this function.
+ M, N = self.shape
+
+ if axis is None:
+ # sum over rows and columns
+ return (
+ self @ self._ascontainer(np.ones((N, 1), dtype=res_dtype))
+ ).sum(dtype=dtype, out=out)
+
+ if axis < 0:
+ axis += 2
+
+ # axis = 0 or 1 now
+ if axis == 0:
+ # sum over columns
+ ret = self._ascontainer(
+ np.ones((1, M), dtype=res_dtype)
+ ) @ self
+ else:
+ # sum over rows
+ ret = self @ self._ascontainer(
+ np.ones((N, 1), dtype=res_dtype)
+ )
+
+ if out is not None and out.shape != ret.shape:
+ raise ValueError("dimensions do not match")
+
+ return ret.sum(axis=axis, dtype=dtype, out=out)
+
+ def mean(self, axis=None, dtype=None, out=None):
+ """
+ Compute the arithmetic mean along the specified axis.
+
+ Returns the average of the array/matrix elements. The average is taken
+ over all elements in the array/matrix by default, otherwise over the
+ specified axis. `float64` intermediate and return values are used
+ for integer inputs.
+
+ Parameters
+ ----------
+        axis : {-2, -1, 0, 1, None}, optional
+ Axis along which the mean is computed. The default is to compute
+ the mean of all elements in the array/matrix (i.e., `axis` = `None`).
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for floating point inputs, it is the same as the
+ input dtype.
+
+ .. versionadded:: 0.18.0
+
+ out : np.matrix, optional
+ Alternative output matrix in which to place the result. It must
+ have the same shape as the expected output, but the type of the
+ output values will be cast if necessary.
+
+ .. versionadded:: 0.18.0
+
+ Returns
+ -------
+ m : np.matrix
+
+ See Also
+ --------
+ numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
+
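+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 2], [3, 4]])
+        >>> float(A.mean())
+        2.5
+        >>> A.mean(axis=0)
+        array([2., 3.])
+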
+ """
+ validateaxis(axis)
+
+ res_dtype = self.dtype.type
+ integral = (np.issubdtype(self.dtype, np.integer) or
+ np.issubdtype(self.dtype, np.bool_))
+
+ # output dtype
+ if dtype is None:
+ if integral:
+ res_dtype = np.float64
+ else:
+ res_dtype = np.dtype(dtype).type
+
+ # intermediate dtype for summation
+ inter_dtype = np.float64 if integral else res_dtype
+ inter_self = self.astype(inter_dtype)
+
+ if self.ndim == 1:
+ if axis not in (None, -1, 0):
+ raise ValueError("axis must be None, -1 or 0")
+ res = inter_self / self.shape[-1]
+ return res.sum(dtype=res_dtype, out=out)
+
+ if axis is None:
+ return (inter_self / (self.shape[0] * self.shape[1]))\
+ .sum(dtype=res_dtype, out=out)
+
+ if axis < 0:
+ axis += 2
+
+ # axis = 0 or 1 now
+ if axis == 0:
+ return (inter_self * (1.0 / self.shape[0])).sum(
+ axis=0, dtype=res_dtype, out=out)
+ else:
+ return (inter_self * (1.0 / self.shape[1])).sum(
+ axis=1, dtype=res_dtype, out=out)
+
+ def diagonal(self, k=0):
+ """Returns the kth diagonal of the array/matrix.
+
+ Parameters
+ ----------
+ k : int, optional
+ Which diagonal to get, corresponding to elements a[i, i+k].
+ Default: 0 (the main diagonal).
+
+ .. versionadded:: 1.0
+
+ See also
+ --------
+ numpy.diagonal : Equivalent numpy function.
+
+ Examples
+ --------
+ >>> from scipy.sparse import csr_array
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+ >>> A.diagonal()
+ array([1, 0, 5])
+ >>> A.diagonal(k=1)
+ array([2, 3])
+ """
+ return self.tocsr().diagonal(k=k)
+
+ def trace(self, offset=0):
+ """Returns the sum along diagonals of the sparse array/matrix.
+
+ Parameters
+ ----------
+ offset : int, optional
+ Which diagonal to get, corresponding to elements a[i, i+offset].
+ Default: 0 (the main diagonal).
+
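+        Examples
+        --------
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1, 2], [3, 4]])
+        >>> int(A.trace())
+        5
+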
+ """
+ return self.diagonal(k=offset).sum()
+
+ def setdiag(self, values, k=0):
+ """
+ Set diagonal or off-diagonal elements of the array/matrix.
+
+ Parameters
+ ----------
+ values : array_like
+ New values of the diagonal elements.
+
+ Values may have any length. If the diagonal is longer than values,
+ then the remaining diagonal entries will not be set. If values are
+ longer than the diagonal, then the remaining values are ignored.
+
+ If a scalar value is given, all of the diagonal is set to it.
+
+ k : int, optional
+ Which off-diagonal to set, corresponding to elements a[i,i+k].
+ Default: 0 (the main diagonal).
+
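+        Examples
+        --------
+        Setting the main diagonal of an initially empty LIL array:
+
+        >>> from scipy.sparse import lil_array
+        >>> A = lil_array((3, 3))
+        >>> A.setdiag([1, 2, 3])
+        >>> A.toarray()
+        array([[1., 0., 0.],
+               [0., 2., 0.],
+               [0., 0., 3.]])
+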
+ """
+ M, N = self.shape
+ if (k > 0 and k >= N) or (k < 0 and -k >= M):
+ raise ValueError("k exceeds array dimensions")
+ self._setdiag(np.asarray(values), k)
+
+ def _setdiag(self, values, k):
+ """This part of the implementation gets overridden by the
+ different formats.
+ """
+ M, N = self.shape
+ if k < 0:
+ if values.ndim == 0:
+ # broadcast
+ max_index = min(M+k, N)
+ for i in range(max_index):
+ self[i - k, i] = values
+ else:
+ max_index = min(M+k, N, len(values))
+ if max_index <= 0:
+ return
+ for i, v in enumerate(values[:max_index]):
+ self[i - k, i] = v
+ else:
+ if values.ndim == 0:
+ # broadcast
+ max_index = min(M, N-k)
+ for i in range(max_index):
+ self[i, i + k] = values
+ else:
+ max_index = min(M, N-k, len(values))
+ if max_index <= 0:
+ return
+ for i, v in enumerate(values[:max_index]):
+ self[i, i + k] = v
+
+ def _process_toarray_args(self, order, out):
+ if out is not None:
+ if order is not None:
+ raise ValueError('order cannot be specified if out '
+ 'is not None')
+ if out.shape != self.shape or out.dtype != self.dtype:
+ raise ValueError('out array must be same dtype and shape as '
+ 'sparse array')
+ out[...] = 0.
+ return out
+ else:
+ return np.zeros(self.shape, dtype=self.dtype, order=order)
+
+ def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False):
+ """
+ Determine index dtype for array.
+
+ This wraps _sputils.get_index_dtype, providing compatibility for both
+ array and matrix API sparse matrices. Matrix API sparse matrices would
+        attempt to downcast the indices, which can be computationally
+ expensive and undesirable for users. The array API changes this
+ behaviour.
+
+ See discussion: https://github.com/scipy/scipy/issues/16774
+
+ The get_index_dtype import is due to implementation details of the test
+ suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a
+ lower int32 max value for checks on the matrix API's downcasting
+ behaviour.
+ """
+ from ._sputils import get_index_dtype
+
+ # Don't check contents for array API
+ return get_index_dtype(arrays,
+ maxval,
+ (check_contents and not isinstance(self, sparray)))
+
+
+ ## All methods below are deprecated and should be removed in
+ ## scipy 1.14.0
+ ##
+ ## Also uncomment the definition of shape above.
+
+ def get_shape(self):
+ """Get shape of a sparse array/matrix.
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use `X.shape` instead.
+ """
+ msg = (
+ "`get_shape` is deprecated and will be removed in v1.14.0; "
+ "use `X.shape` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+
+ return self._shape
+
+ def set_shape(self, shape):
+ """See `reshape`.
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use `X.reshape` instead.
+ """
+ msg = (
+ "Shape assignment is deprecated and will be removed in v1.14.0; "
+ "use `reshape` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+
+ # Make sure copy is False since this is in place
+ # Make sure format is unchanged because we are doing a __dict__ swap
+ new_self = self.reshape(shape, copy=False).asformat(self.format)
+ self.__dict__ = new_self.__dict__
+
+ shape = property(
+ fget=lambda self: self._shape,
+ fset=set_shape,
+ doc="""The shape of the array.
+
+Note that, starting in SciPy 1.14.0, this property will no longer be
+settable. To change the array shape, use `X.reshape` instead.
+"""
+ )
+
+ def asfptype(self):
+ """Upcast array/matrix to a floating point format (if necessary)
+
+ .. deprecated:: 1.11.0
+ This method is for internal use only, and will be removed from the
+ public API in SciPy 1.14.0.
+ """
+ msg = (
+ "`asfptype` is an internal function, and is deprecated "
+ "as part of the public API. It will be removed in v1.14.0."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self._asfptype()
+
+ def getmaxprint(self):
+ """Maximum number of elements to display when printed.
+
+ .. deprecated:: 1.11.0
+ This method is for internal use only, and will be removed from the
+ public API in SciPy 1.14.0.
+ """
+ msg = (
+ "`getmaxprint` is an internal function, and is deprecated "
+ "as part of the public API. It will be removed in v1.14.0."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self._getmaxprint()
+
+ def getformat(self):
+ """Sparse array/matrix storage format.
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use `X.format` instead.
+ """
+ msg = (
+ "`getformat` is deprecated and will be removed in v1.14.0; "
+ "use `X.format` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self.format
+
+ def getnnz(self, axis=None):
+ """Number of stored values, including explicit zeros.
+
+ Parameters
+ ----------
+ axis : None, 0, or 1
+ Select between the number of values across the whole array/matrix, in
+ each column, or in each row.
+
+ See also
+ --------
+ count_nonzero : Number of non-zero entries
+ """
+ return self._getnnz(axis=axis)
+
+ def getH(self):
+ """Return the Hermitian transpose of this array/matrix.
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use `X.conj().T` instead.
+ """
+ msg = (
+ "`getH` is deprecated and will be removed in v1.14.0; "
+ "use `X.conj().T` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self.conjugate().transpose()
+
+ def getcol(self, j):
+ """Returns a copy of column j of the array/matrix, as an (m x 1) sparse
+ array/matrix (column vector).
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use array/matrix indexing instead.
+ """
+ msg = (
+ "`getcol` is deprecated and will be removed in v1.14.0; "
+ f"use `X[:, [{j}]]` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self._getcol(j)
+
+ def getrow(self, i):
+ """Returns a copy of row i of the array/matrix, as a (1 x n) sparse
+ array/matrix (row vector).
+
+ .. deprecated:: 1.11.0
+ This method will be removed in SciPy 1.14.0.
+ Use array/matrix indexing instead.
+ """
+ msg = (
+ "`getrow` is deprecated and will be removed in v1.14.0; "
+ f"use `X[[{i}]]` instead."
+ )
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return self._getrow(i)
+
+ ## End 1.14.0 deprecated methods
+
+
+class sparray:
+ """A namespace class to separate sparray from spmatrix"""
+
+
+sparray.__doc__ = _spbase.__doc__
+
+
+def issparse(x):
+ """Is `x` of a sparse array or sparse matrix type?
+
+ Parameters
+ ----------
+ x
+ object to check for being a sparse array or sparse matrix
+
+ Returns
+ -------
+ bool
+ True if `x` is a sparse array or a sparse matrix, False otherwise
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.sparse import csr_array, csr_matrix, issparse
+ >>> issparse(csr_matrix([[5]]))
+ True
+ >>> issparse(csr_array([[5]]))
+ True
+ >>> issparse(np.array([[5]]))
+ False
+ >>> issparse(5)
+ False
+ """
+ return isinstance(x, _spbase)
+
+
+def isspmatrix(x):
+ """Is `x` of a sparse matrix type?
+
+ Parameters
+ ----------
+ x
+ object to check for being a sparse matrix
+
+ Returns
+ -------
+ bool
+ True if `x` is a sparse matrix, False otherwise
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix
+ >>> isspmatrix(csr_matrix([[5]]))
+ True
+ >>> isspmatrix(csr_array([[5]]))
+ False
+ >>> isspmatrix(np.array([[5]]))
+ False
+ >>> isspmatrix(5)
+ False
+ """
+ return isinstance(x, spmatrix)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_coo.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_coo.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8c5039f94ed939cd149dbe1bc7e258153d0b32d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_coo.py
@@ -0,0 +1,858 @@
+"""A sparse matrix in COOrdinate or 'triplet' format."""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['coo_array', 'coo_matrix', 'isspmatrix_coo']
+
+import math
+from warnings import warn
+
+import numpy as np
+
+from .._lib._util import copy_if_needed
+from ._matrix import spmatrix
+from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
+from ._base import issparse, SparseEfficiencyWarning, _spbase, sparray
+from ._data import _data_matrix, _minmax_mixin
+from ._sputils import (upcast_char, to_native, isshape, getdtype,
+ getdata, downcast_intp_index, get_index_dtype,
+ check_shape, check_reshape_kwargs)
+
+import operator
+
+
+class _coo_base(_data_matrix, _minmax_mixin):
+ _format = 'coo'
+
+ def __init__(self, arg1, shape=None, dtype=None, copy=False):
+ _data_matrix.__init__(self)
+ is_array = isinstance(self, sparray)
+ if not copy:
+ copy = copy_if_needed
+
+ if isinstance(arg1, tuple):
+ if isshape(arg1, allow_1d=is_array):
+ self._shape = check_shape(arg1, allow_1d=is_array)
+ idx_dtype = self._get_index_dtype(maxval=max(self._shape))
+ data_dtype = getdtype(dtype, default=float)
+ self.coords = tuple(np.array([], dtype=idx_dtype)
+ for _ in range(len(self._shape)))
+ self.data = np.array([], dtype=data_dtype)
+ self.has_canonical_format = True
+ else:
+ try:
+ obj, coords = arg1
+ except (TypeError, ValueError) as e:
+ raise TypeError('invalid input format') from e
+
+ if shape is None:
+ if any(len(idx) == 0 for idx in coords):
+ raise ValueError('cannot infer dimensions from zero '
+ 'sized index arrays')
+ shape = tuple(operator.index(np.max(idx)) + 1
+ for idx in coords)
+ self._shape = check_shape(shape, allow_1d=is_array)
+
+ idx_dtype = self._get_index_dtype(coords,
+ maxval=max(self.shape),
+ check_contents=True)
+ self.coords = tuple(np.array(idx, copy=copy, dtype=idx_dtype)
+ for idx in coords)
+ self.data = getdata(obj, copy=copy, dtype=dtype)
+ self.has_canonical_format = False
+ else:
+ if issparse(arg1):
+ if arg1.format == self.format and copy:
+ self.coords = tuple(idx.copy() for idx in arg1.coords)
+ self.data = arg1.data.copy()
+ self._shape = check_shape(arg1.shape, allow_1d=is_array)
+ self.has_canonical_format = arg1.has_canonical_format
+ else:
+ coo = arg1.tocoo()
+ self.coords = tuple(coo.coords)
+ self.data = coo.data
+ self._shape = check_shape(coo.shape, allow_1d=is_array)
+ self.has_canonical_format = False
+ else:
+ # dense argument
+ M = np.asarray(arg1)
+ if not is_array:
+ M = np.atleast_2d(M)
+ if M.ndim != 2:
+ raise TypeError('expected dimension <= 2 array or matrix')
+
+ self._shape = check_shape(M.shape, allow_1d=is_array)
+ if shape is not None:
+ if check_shape(shape, allow_1d=is_array) != self._shape:
+ message = f'inconsistent shapes: {shape} != {self._shape}'
+ raise ValueError(message)
+ index_dtype = self._get_index_dtype(maxval=max(self._shape))
+ coords = M.nonzero()
+ self.coords = tuple(idx.astype(index_dtype, copy=False)
+ for idx in coords)
+ self.data = M[coords]
+ self.has_canonical_format = True
+
+ if dtype is not None:
+ self.data = self.data.astype(dtype, copy=False)
+
+ self._check()
+
+ @property
+ def row(self):
+ if self.ndim > 1:
+ return self.coords[-2]
+ result = np.zeros_like(self.col)
+ result.setflags(write=False)
+ return result
+
+ @row.setter
+ def row(self, new_row):
+ if self.ndim < 2:
+ raise ValueError('cannot set row attribute of a 1-dimensional sparse array')
+ new_row = np.asarray(new_row, dtype=self.coords[-2].dtype)
+ self.coords = self.coords[:-2] + (new_row,) + self.coords[-1:]
+
+ @property
+ def col(self):
+ return self.coords[-1]
+
+ @col.setter
+ def col(self, new_col):
+ new_col = np.asarray(new_col, dtype=self.coords[-1].dtype)
+ self.coords = self.coords[:-1] + (new_col,)
+
+ def reshape(self, *args, **kwargs):
+ is_array = isinstance(self, sparray)
+ shape = check_shape(args, self.shape, allow_1d=is_array)
+ order, copy = check_reshape_kwargs(kwargs)
+
+ # Return early if reshape is not required
+ if shape == self.shape:
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ # When reducing the number of dimensions, we need to be careful about
+ # index overflow. This is why we can't simply call
+ # `np.ravel_multi_index()` followed by `np.unravel_index()` here.
+ flat_coords = _ravel_coords(self.coords, self.shape, order=order)
+ if len(shape) == 2:
+ if order == 'C':
+ new_coords = divmod(flat_coords, shape[1])
+ else:
+ new_coords = divmod(flat_coords, shape[0])[::-1]
+ else:
+ new_coords = np.unravel_index(flat_coords, shape, order=order)
+
+ # Handle copy here rather than passing on to the constructor so that no
+ # copy will be made of `new_coords` regardless.
+ if copy:
+ new_data = self.data.copy()
+ else:
+ new_data = self.data
+
+ return self.__class__((new_data, new_coords), shape=shape, copy=False)
+
+ reshape.__doc__ = _spbase.reshape.__doc__
+
+ def _getnnz(self, axis=None):
+ if axis is None or (axis == 0 and self.ndim == 1):
+ nnz = len(self.data)
+ if any(len(idx) != nnz for idx in self.coords):
+ raise ValueError('all index and data arrays must have the '
+ 'same length')
+
+ if self.data.ndim != 1 or any(idx.ndim != 1 for idx in self.coords):
+ raise ValueError('row, column, and data arrays must be 1-D')
+
+ return int(nnz)
+
+ if axis < 0:
+ axis += self.ndim
+ if axis >= self.ndim:
+ raise ValueError('axis out of bounds')
+ if self.ndim > 2:
+ raise NotImplementedError('per-axis nnz for COO arrays with >2 '
+ 'dimensions is not supported')
+ return np.bincount(downcast_intp_index(self.coords[1 - axis]),
+ minlength=self.shape[1 - axis])
+
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
+
+ def _check(self):
+ """ Checks data structure for consistency """
+ if self.ndim != len(self.coords):
+ raise ValueError('mismatching number of index arrays for shape; '
+ f'got {len(self.coords)}, expected {self.ndim}')
+
+ # index arrays should have integer data types
+ for i, idx in enumerate(self.coords):
+ if idx.dtype.kind != 'i':
+ warn(f'index array {i} has non-integer dtype ({idx.dtype.name})',
+ stacklevel=3)
+
+ idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.shape))
+ self.coords = tuple(np.asarray(idx, dtype=idx_dtype)
+ for idx in self.coords)
+ self.data = to_native(self.data)
+
+ if self.nnz > 0:
+ for i, idx in enumerate(self.coords):
+ if idx.max() >= self.shape[i]:
+ raise ValueError(f'axis {i} index {idx.max()} exceeds '
+ f'matrix dimension {self.shape[i]}')
+ if idx.min() < 0:
+ raise ValueError(f'negative axis {i} index: {idx.min()}')
+
+ def transpose(self, axes=None, copy=False):
+ if axes is None:
+ axes = range(self.ndim)[::-1]
+ elif isinstance(self, sparray):
+ if len(axes) != self.ndim:
+ raise ValueError("axes don't match matrix dimensions")
+ if len(set(axes)) != self.ndim:
+ raise ValueError("repeated axis in transpose")
+ elif axes != (1, 0):
+ raise ValueError("Sparse matrices do not support an 'axes' "
+ "parameter because swapping dimensions is the "
+ "only logical permutation.")
+
+ permuted_shape = tuple(self._shape[i] for i in axes)
+ permuted_coords = tuple(self.coords[i] for i in axes)
+ return self.__class__((self.data, permuted_coords),
+ shape=permuted_shape, copy=copy)
+
+ transpose.__doc__ = _spbase.transpose.__doc__
+
+ def resize(self, *shape) -> None:
+ is_array = isinstance(self, sparray)
+ shape = check_shape(shape, allow_1d=is_array)
+
+ # Check for added dimensions.
+ if len(shape) > self.ndim:
+ flat_coords = _ravel_coords(self.coords, self.shape)
+ max_size = math.prod(shape)
+ self.coords = np.unravel_index(flat_coords[:max_size], shape)
+ self.data = self.data[:max_size]
+ self._shape = shape
+ return
+
+ # Check for removed dimensions.
+ if len(shape) < self.ndim:
+ tmp_shape = (
+ self._shape[:len(shape) - 1] # Original shape without last axis
+ + (-1,) # Last axis is used to flatten the array
+ + (1,) * (self.ndim - len(shape)) # Pad with ones
+ )
+ tmp = self.reshape(tmp_shape)
+ self.coords = tmp.coords[:len(shape)]
+ self._shape = tmp.shape[:len(shape)]
+
+ # Handle truncation of existing dimensions.
+ is_truncating = any(old > new for old, new in zip(self.shape, shape))
+ if is_truncating:
+ mask = np.logical_and.reduce([
+ idx < size for idx, size in zip(self.coords, shape)
+ ])
+ if not mask.all():
+ self.coords = tuple(idx[mask] for idx in self.coords)
+ self.data = self.data[mask]
+
+ self._shape = shape
+
+ resize.__doc__ = _spbase.resize.__doc__
+
+ def toarray(self, order=None, out=None):
+ B = self._process_toarray_args(order, out)
+ fortran = int(B.flags.f_contiguous)
+ if not fortran and not B.flags.c_contiguous:
+ raise ValueError("Output array must be C or F contiguous")
+ if self.ndim > 2:
+ raise ValueError("Cannot densify higher-rank sparse array")
+ # This handles both 0D and 1D cases correctly regardless of the
+ # original shape.
+ M, N = self._shape_as_2d
+ coo_todense(M, N, self.nnz, self.row, self.col, self.data,
+ B.ravel('A'), fortran)
+ # Note: reshape() doesn't copy here, but does return a new array (view).
+ return B.reshape(self.shape)
+
+ toarray.__doc__ = _spbase.toarray.__doc__
+
+ def tocsc(self, copy=False):
+ """Convert this array/matrix to Compressed Sparse Column format
+
+ Duplicate entries will be summed together.
+
+ Examples
+ --------
+ >>> from numpy import array
+ >>> from scipy.sparse import coo_array
+ >>> row = array([0, 0, 1, 3, 1, 0, 0])
+ >>> col = array([0, 2, 1, 3, 1, 0, 0])
+ >>> data = array([1, 1, 1, 1, 1, 1, 1])
+ >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsc()
+ >>> A.toarray()
+ array([[3, 0, 1, 0],
+ [0, 2, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 1]])
+
+ """
+ if self.ndim != 2:
+ raise ValueError("Cannot convert a 1d sparse array to csc format")
+ if self.nnz == 0:
+ return self._csc_container(self.shape, dtype=self.dtype)
+ else:
+ from ._csc import csc_array
+ indptr, indices, data, shape = self._coo_to_compressed(csc_array._swap)
+
+ x = self._csc_container((data, indices, indptr), shape=shape)
+ if not self.has_canonical_format:
+ x.sum_duplicates()
+ return x
+
+ def tocsr(self, copy=False):
+ """Convert this array/matrix to Compressed Sparse Row format
+
+ Duplicate entries will be summed together.
+
+ Examples
+ --------
+ >>> from numpy import array
+ >>> from scipy.sparse import coo_array
+ >>> row = array([0, 0, 1, 3, 1, 0, 0])
+ >>> col = array([0, 2, 1, 3, 1, 0, 0])
+ >>> data = array([1, 1, 1, 1, 1, 1, 1])
+ >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsr()
+ >>> A.toarray()
+ array([[3, 0, 1, 0],
+ [0, 2, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 1]])
+
+ """
+ if self.ndim != 2:
+ raise ValueError("Cannot convert a 1d sparse array to csr format")
+ if self.nnz == 0:
+ return self._csr_container(self.shape, dtype=self.dtype)
+ else:
+ from ._csr import csr_array
+ indptr, indices, data, shape = self._coo_to_compressed(csr_array._swap)
+
+            x = self._csr_container((data, indices, indptr), shape=shape)
+ if not self.has_canonical_format:
+ x.sum_duplicates()
+ return x
+
+ def _coo_to_compressed(self, swap):
+ """convert (shape, coords, data) to (indptr, indices, data, shape)"""
+ M, N = swap(self.shape)
+ major, minor = swap(self.coords)
+ nnz = len(major)
+ # convert idx_dtype intc to int32 for pythran.
+ # tested in scipy/optimize/tests/test__numdiff.py::test_group_columns
+ idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.nnz, N))
+ major = major.astype(idx_dtype, copy=False)
+ minor = minor.astype(idx_dtype, copy=False)
+
+ indptr = np.empty(M + 1, dtype=idx_dtype)
+ indices = np.empty_like(minor, dtype=idx_dtype)
+ data = np.empty_like(self.data, dtype=self.dtype)
+
+ coo_tocsr(M, N, nnz, major, minor, self.data, indptr, indices, data)
+ return indptr, indices, data, self.shape
+
+ def tocoo(self, copy=False):
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ tocoo.__doc__ = _spbase.tocoo.__doc__
+
+ def todia(self, copy=False):
+ if self.ndim != 2:
+ raise ValueError("Cannot convert a 1d sparse array to dia format")
+ self.sum_duplicates()
+ ks = self.col - self.row # the diagonal for each nonzero
+ diags, diag_idx = np.unique(ks, return_inverse=True)
+
+ if len(diags) > 100:
+ # probably undesired, should todia() have a maxdiags parameter?
+            warn(f"Constructing a DIA matrix with {len(diags)} diagonals "
+                 "is inefficient",
+                 SparseEfficiencyWarning, stacklevel=2)
+
+        # initialize and fill in data array
+ if self.data.size == 0:
+ data = np.zeros((0, 0), dtype=self.dtype)
+ else:
+ data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
+ data[diag_idx, self.col] = self.data
+
+ return self._dia_container((data, diags), shape=self.shape)
+
+ todia.__doc__ = _spbase.todia.__doc__
+
+ def todok(self, copy=False):
+ self.sum_duplicates()
+ dok = self._dok_container(self.shape, dtype=self.dtype)
+ # ensure that 1d coordinates are not tuples
+ if self.ndim == 1:
+ coords = self.coords[0]
+ else:
+ coords = zip(*self.coords)
+
+ dok._dict = dict(zip(coords, self.data))
+ return dok
+
+ todok.__doc__ = _spbase.todok.__doc__
+
+ def diagonal(self, k=0):
+ if self.ndim != 2:
+ raise ValueError("diagonal requires two dimensions")
+ rows, cols = self.shape
+ if k <= -rows or k >= cols:
+ return np.empty(0, dtype=self.data.dtype)
+ diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
+ dtype=self.dtype)
+ diag_mask = (self.row + k) == self.col
+
+ if self.has_canonical_format:
+ row = self.row[diag_mask]
+ data = self.data[diag_mask]
+ else:
+ inds = tuple(idx[diag_mask] for idx in self.coords)
+ (row, _), data = self._sum_duplicates(inds, self.data[diag_mask])
+ diag[row + min(k, 0)] = data
+
+ return diag
+
+ diagonal.__doc__ = _data_matrix.diagonal.__doc__
+
+ def _setdiag(self, values, k):
+ if self.ndim != 2:
+ raise ValueError("setting a diagonal requires two dimensions")
+ M, N = self.shape
+ if values.ndim and not len(values):
+ return
+ idx_dtype = self.row.dtype
+
+ # Determine which triples to keep and where to put the new ones.
+ full_keep = self.col - self.row != k
+ if k < 0:
+ max_index = min(M+k, N)
+ if values.ndim:
+ max_index = min(max_index, len(values))
+ keep = np.logical_or(full_keep, self.col >= max_index)
+ new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
+ new_col = np.arange(max_index, dtype=idx_dtype)
+ else:
+ max_index = min(M, N-k)
+ if values.ndim:
+ max_index = min(max_index, len(values))
+ keep = np.logical_or(full_keep, self.row >= max_index)
+ new_row = np.arange(max_index, dtype=idx_dtype)
+ new_col = np.arange(k, k + max_index, dtype=idx_dtype)
+
+ # Define the array of data consisting of the entries to be added.
+ if values.ndim:
+ new_data = values[:max_index]
+ else:
+ new_data = np.empty(max_index, dtype=self.dtype)
+ new_data[:] = values
+
+ # Update the internal structure.
+ self.coords = (np.concatenate((self.row[keep], new_row)),
+ np.concatenate((self.col[keep], new_col)))
+ self.data = np.concatenate((self.data[keep], new_data))
+ self.has_canonical_format = False
+
+ # needed by _data_matrix
+ def _with_data(self, data, copy=True):
+ """Returns a matrix with the same sparsity structure as self,
+ but with different data. By default the index arrays are copied.
+ """
+ if copy:
+ coords = tuple(idx.copy() for idx in self.coords)
+ else:
+ coords = self.coords
+ return self.__class__((data, coords), shape=self.shape, dtype=data.dtype)
+
+ def sum_duplicates(self) -> None:
+ """Eliminate duplicate entries by adding them together
+
+ This is an *in place* operation
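+
+        Examples
+        --------
+        >>> from scipy.sparse import coo_array
+        >>> A = coo_array(([1, 1, 1], ([0, 0, 1], [0, 0, 1])))
+        >>> A.nnz
+        3
+        >>> A.sum_duplicates()
+        >>> A.nnz
+        2
+        >>> A.toarray()
+        array([[2, 0],
+               [0, 1]])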
+ """
+ if self.has_canonical_format:
+ return
+ summed = self._sum_duplicates(self.coords, self.data)
+ self.coords, self.data = summed
+ self.has_canonical_format = True
+
+ def _sum_duplicates(self, coords, data):
+ # Assumes coords not in canonical format.
+ if len(data) == 0:
+ return coords, data
+ # Sort coords w.r.t. rows, then cols. This corresponds to C-order,
+ # which we rely on for argmin/argmax to return the first index in the
+ # same way that numpy does (in the case of ties).
+ order = np.lexsort(coords[::-1])
+ coords = tuple(idx[order] for idx in coords)
+ data = data[order]
+ unique_mask = np.logical_or.reduce([
+ idx[1:] != idx[:-1] for idx in coords
+ ])
+ unique_mask = np.append(True, unique_mask)
+ coords = tuple(idx[unique_mask] for idx in coords)
+ unique_inds, = np.nonzero(unique_mask)
+ data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
+ return coords, data
+
+ def eliminate_zeros(self):
+ """Remove zero entries from the array/matrix
+
+ This is an *in place* operation
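+
+        Examples
+        --------
+        >>> from scipy.sparse import coo_array
+        >>> A = coo_array(([0, 1], ([0, 1], [0, 1])))
+        >>> A.nnz
+        2
+        >>> A.eliminate_zeros()
+        >>> A.nnz
+        1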
+ """
+ mask = self.data != 0
+ self.data = self.data[mask]
+ self.coords = tuple(idx[mask] for idx in self.coords)
+
+ #######################
+ # Arithmetic handlers #
+ #######################
+
+ def _add_dense(self, other):
+ if other.shape != self.shape:
+ raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
+ dtype = upcast_char(self.dtype.char, other.dtype.char)
+ result = np.array(other, dtype=dtype, copy=True)
+ fortran = int(result.flags.f_contiguous)
+ M, N = self._shape_as_2d
+ coo_todense(M, N, self.nnz, self.row, self.col, self.data,
+ result.ravel('A'), fortran)
+ return self._container(result, copy=False)
+
+ def _matmul_vector(self, other):
+ result_shape = self.shape[0] if self.ndim > 1 else 1
+ result = np.zeros(result_shape,
+ dtype=upcast_char(self.dtype.char, other.dtype.char))
+
+ if self.ndim == 2:
+ col = self.col
+ row = self.row
+ elif self.ndim == 1:
+ col = self.coords[0]
+ row = np.zeros_like(col)
+ else:
+ raise NotImplementedError(
+ f"coo_matvec not implemented for ndim={self.ndim}")
+
+ coo_matvec(self.nnz, row, col, self.data, other, result)
+ # Array semantics return a scalar here, not a single-element array.
+ if isinstance(self, sparray) and result_shape == 1:
+ return result[0]
+ return result
+
+ def _matmul_multivector(self, other):
+ result_dtype = upcast_char(self.dtype.char, other.dtype.char)
+ if self.ndim == 2:
+ result_shape = (other.shape[1], self.shape[0])
+ col = self.col
+ row = self.row
+ elif self.ndim == 1:
+ result_shape = (other.shape[1],)
+ col = self.coords[0]
+ row = np.zeros_like(col)
+ else:
+ raise NotImplementedError(
+ f"coo_matvec not implemented for ndim={self.ndim}")
+
+ result = np.zeros(result_shape, dtype=result_dtype)
+ for i, other_col in enumerate(other.T):
+ coo_matvec(self.nnz, row, col, self.data, other_col, result[i:i + 1])
+ return result.T.view(type=type(other))
+
+
+def _ravel_coords(coords, shape, order='C'):
+ """Like np.ravel_multi_index, but avoids some overflow issues."""
+ if len(coords) == 1:
+ return coords[0]
+ # Handle overflow as in https://github.com/scipy/scipy/pull/9132
+ if len(coords) == 2:
+ nrows, ncols = shape
+ row, col = coords
+ if order == 'C':
+ maxval = (ncols * max(0, nrows - 1) + max(0, ncols - 1))
+ idx_dtype = get_index_dtype(maxval=maxval)
+ return np.multiply(ncols, row, dtype=idx_dtype) + col
+ elif order == 'F':
+ maxval = (nrows * max(0, ncols - 1) + max(0, nrows - 1))
+ idx_dtype = get_index_dtype(maxval=maxval)
+ return np.multiply(nrows, col, dtype=idx_dtype) + row
+ else:
+ raise ValueError("'order' must be 'C' or 'F'")
+ return np.ravel_multi_index(coords, shape, order=order)
+
+
+def isspmatrix_coo(x):
+ """Is `x` of coo_matrix type?
+
+ Parameters
+ ----------
+ x
+ object to check for being a coo matrix
+
+ Returns
+ -------
+ bool
+ True if `x` is a coo matrix, False otherwise
+
+ Examples
+ --------
+ >>> from scipy.sparse import coo_array, coo_matrix, csr_matrix, isspmatrix_coo
+ >>> isspmatrix_coo(coo_matrix([[5]]))
+ True
+ >>> isspmatrix_coo(coo_array([[5]]))
+ False
+ >>> isspmatrix_coo(csr_matrix([[5]]))
+ False
+ """
+ return isinstance(x, coo_matrix)
+
+
+# This namespace class separates array from matrix with isinstance
+class coo_array(_coo_base, sparray):
+ """
+ A sparse array in COOrdinate format.
+
+ Also known as the 'ijv' or 'triplet' format.
+
+ This can be instantiated in several ways:
+ coo_array(D)
+ where D is an ndarray
+
+ coo_array(S)
+ with another sparse array or matrix S (equivalent to S.tocoo())
+
+ coo_array(shape, [dtype])
+ to construct an empty sparse array with shape `shape`
+ dtype is optional, defaulting to dtype='d'.
+
+ coo_array((data, coords), [shape])
+ to construct from existing data and index arrays:
+ 1. data[:] the entries of the sparse array, in any order
+ 2. coords[i][:] the axis-i coordinates of the data entries
+
+ Where ``A[coords] = data``, and coords is a tuple of index arrays.
+ When shape is not specified, it is inferred from the index arrays.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the sparse array
+ shape : tuple of integers
+ Shape of the sparse array
+ ndim : int
+ Number of dimensions of the sparse array
+ nnz
+ size
+ data
+ COO format data array of the sparse array
+ coords
+ COO format tuple of index arrays
+ has_canonical_format : bool
+ Whether the matrix has sorted coordinates and no duplicates
+ format
+ T
+
+ Notes
+ -----
+
+ Sparse arrays can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the COO format
+ - facilitates fast conversion among sparse formats
+ - permits duplicate entries (see example)
+ - very fast conversion to and from CSR/CSC formats
+
+ Disadvantages of the COO format
+ - does not directly support:
+ + arithmetic operations
+ + slicing
+
+ Intended Usage
+ - COO is a fast format for constructing sparse arrays
+ - Once a COO array has been constructed, convert to CSR or
+ CSC format for fast arithmetic and matrix vector operations
+ - By default when converting to CSR or CSC format, duplicate (i,j)
+ entries will be summed together. This facilitates efficient
+ construction of finite element matrices and the like. (see example)
+
+ Canonical format
+ - Entries and coordinates sorted by row, then column.
+ - There are no duplicate entries (i.e. duplicate (i,j) locations)
+ - Data arrays MAY have explicit zeros.
+
+ Examples
+ --------
+
+ >>> # Constructing an empty sparse array
+ >>> import numpy as np
+ >>> from scipy.sparse import coo_array
+ >>> coo_array((3, 4), dtype=np.int8).toarray()
+ array([[0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]], dtype=int8)
+
+ >>> # Constructing a sparse array using ijv format
+ >>> row = np.array([0, 3, 1, 0])
+ >>> col = np.array([0, 3, 1, 2])
+ >>> data = np.array([4, 5, 7, 9])
+ >>> coo_array((data, (row, col)), shape=(4, 4)).toarray()
+ array([[4, 0, 9, 0],
+ [0, 7, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 5]])
+
+ >>> # Constructing a sparse array with duplicate coordinates
+ >>> row = np.array([0, 0, 1, 3, 1, 0, 0])
+ >>> col = np.array([0, 2, 1, 3, 1, 0, 0])
+ >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
+ >>> coo = coo_array((data, (row, col)), shape=(4, 4))
+ >>> # Duplicate coordinates are maintained until implicitly or explicitly summed
+ >>> np.max(coo.data)
+ 1
+ >>> coo.toarray()
+ array([[3, 0, 1, 0],
+ [0, 2, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 1]])
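+
+    >>> # As noted under "Intended Usage", converting to CSR sums duplicates
+    >>> coo.tocsr().toarray()
+    array([[3, 0, 1, 0],
+           [0, 2, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 1]])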
+
+ """
+
+
+class coo_matrix(spmatrix, _coo_base):
+ """
+ A sparse matrix in COOrdinate format.
+
+ Also known as the 'ijv' or 'triplet' format.
+
+ This can be instantiated in several ways:
+ coo_matrix(D)
+ where D is a 2-D ndarray
+
+ coo_matrix(S)
+ with another sparse array or matrix S (equivalent to S.tocoo())
+
+ coo_matrix((M, N), [dtype])
+ to construct an empty matrix with shape (M, N)
+ dtype is optional, defaulting to dtype='d'.
+
+ coo_matrix((data, (i, j)), [shape=(M, N)])
+ to construct from three arrays:
+ 1. data[:] the entries of the matrix, in any order
+ 2. i[:] the row indices of the matrix entries
+ 3. j[:] the column indices of the matrix entries
+
+ Where ``A[i[k], j[k]] = data[k]``. When shape is not
+    specified, it is inferred from the index arrays.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the matrix
+ shape : 2-tuple
+ Shape of the matrix
+ ndim : int
+ Number of dimensions (this is always 2)
+ nnz
+ size
+ data
+ COO format data array of the matrix
+ row
+ COO format row index array of the matrix
+ col
+ COO format column index array of the matrix
+ has_canonical_format : bool
+ Whether the matrix has sorted indices and no duplicates
+ format
+ T
+
+ Notes
+ -----
+
+ Sparse matrices can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the COO format
+ - facilitates fast conversion among sparse formats
+ - permits duplicate entries (see example)
+ - very fast conversion to and from CSR/CSC formats
+
+ Disadvantages of the COO format
+ - does not directly support:
+ + arithmetic operations
+ + slicing
+
+ Intended Usage
+ - COO is a fast format for constructing sparse matrices
+ - Once a COO matrix has been constructed, convert to CSR or
+ CSC format for fast arithmetic and matrix vector operations
+ - By default when converting to CSR or CSC format, duplicate (i,j)
+ entries will be summed together. This facilitates efficient
+ construction of finite element matrices and the like. (see example)
+
+ Canonical format
+ - Entries and coordinates sorted by row, then column.
+ - There are no duplicate entries (i.e. duplicate (i,j) locations)
+ - Data arrays MAY have explicit zeros.
+
+ Examples
+ --------
+
+ >>> # Constructing an empty matrix
+ >>> import numpy as np
+ >>> from scipy.sparse import coo_matrix
+ >>> coo_matrix((3, 4), dtype=np.int8).toarray()
+ array([[0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]], dtype=int8)
+
+ >>> # Constructing a matrix using ijv format
+ >>> row = np.array([0, 3, 1, 0])
+ >>> col = np.array([0, 3, 1, 2])
+ >>> data = np.array([4, 5, 7, 9])
+ >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
+ array([[4, 0, 9, 0],
+ [0, 7, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 5]])
+
+ >>> # Constructing a matrix with duplicate coordinates
+ >>> row = np.array([0, 0, 1, 3, 1, 0, 0])
+ >>> col = np.array([0, 2, 1, 3, 1, 0, 0])
+ >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
+ >>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
+ >>> # Duplicate coordinates are maintained until implicitly or explicitly summed
+ >>> np.max(coo.data)
+ 1
+ >>> coo.toarray()
+ array([[3, 0, 1, 0],
+ [0, 2, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 1]])
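+
+    >>> # Duplicates can also be summed explicitly, in place
+    >>> coo.sum_duplicates()
+    >>> np.max(coo.data)
+    3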
+
+ """
+
+ def __setstate__(self, state):
+ if 'coords' not in state:
+            # For backward compatibility with older pickles, where the
+            # nnz coordinates of a 2-D COO matrix were stored as row/col.
+ state['coords'] = (state.pop('row'), state.pop('col'))
+ self.__dict__.update(state)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csc.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fcdeb49cc0a951b9a2df955b971d5148916f289
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csc.py
@@ -0,0 +1,364 @@
+"""Compressed Sparse Column matrix format"""
+__docformat__ = "restructuredtext en"
+
+__all__ = ['csc_array', 'csc_matrix', 'isspmatrix_csc']
+
+
+import numpy as np
+
+from ._matrix import spmatrix
+from ._base import _spbase, sparray
+from ._sparsetools import csc_tocsr, expandptr
+from ._sputils import upcast
+
+from ._compressed import _cs_matrix
+
+
+class _csc_base(_cs_matrix):
+ _format = 'csc'
+
+ def transpose(self, axes=None, copy=False):
+ if axes is not None and axes != (1, 0):
+ raise ValueError("Sparse arrays/matrices do not support "
+ "an 'axes' parameter because swapping "
+ "dimensions is the only logical permutation.")
+
+ M, N = self.shape
+
+ return self._csr_container((self.data, self.indices,
+ self.indptr), (N, M), copy=copy)
+
+ transpose.__doc__ = _spbase.transpose.__doc__
+
+ def __iter__(self):
+ yield from self.tocsr()
+
+ def tocsc(self, copy=False):
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ tocsc.__doc__ = _spbase.tocsc.__doc__
+
+ def tocsr(self, copy=False):
+        M, N = self.shape
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices),
+ maxval=max(self.nnz, N))
+ indptr = np.empty(M + 1, dtype=idx_dtype)
+ indices = np.empty(self.nnz, dtype=idx_dtype)
+ data = np.empty(self.nnz, dtype=upcast(self.dtype))
+
+ csc_tocsr(M, N,
+ self.indptr.astype(idx_dtype),
+ self.indices.astype(idx_dtype),
+ self.data,
+ indptr,
+ indices,
+ data)
+
+ A = self._csr_container(
+ (data, indices, indptr),
+ shape=self.shape, copy=False
+ )
+ A.has_sorted_indices = True
+ return A
+
+ tocsr.__doc__ = _spbase.tocsr.__doc__
+
+ def nonzero(self):
+ # CSC can't use _cs_matrix's .nonzero method because it
+ # returns the indices sorted for self transposed.
+
+ # Get row and col indices, from _cs_matrix.tocoo
+ major_dim, minor_dim = self._swap(self.shape)
+ minor_indices = self.indices
+ major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
+ expandptr(major_dim, self.indptr, major_indices)
+ row, col = self._swap((major_indices, minor_indices))
+
+ # Remove explicit zeros
+ nz_mask = self.data != 0
+ row = row[nz_mask]
+ col = col[nz_mask]
+
+ # Sort them to be in C-style order
+ ind = np.argsort(row, kind='mergesort')
+ row = row[ind]
+ col = col[ind]
+
+ return row, col
+
+ nonzero.__doc__ = _cs_matrix.nonzero.__doc__
+
+ def _getrow(self, i):
+ """Returns a copy of row i of the matrix, as a (1 x n)
+ CSR matrix (row vector).
+ """
+ M, N = self.shape
+ i = int(i)
+ if i < 0:
+ i += M
+ if i < 0 or i >= M:
+ raise IndexError('index (%d) out of range' % i)
+ return self._get_submatrix(minor=i).tocsr()
+
+ def _getcol(self, i):
+ """Returns a copy of column i of the matrix, as a (m x 1)
+ CSC matrix (column vector).
+ """
+ M, N = self.shape
+ i = int(i)
+ if i < 0:
+ i += N
+ if i < 0 or i >= N:
+ raise IndexError('index (%d) out of range' % i)
+ return self._get_submatrix(major=i, copy=True)
+
+ def _get_intXarray(self, row, col):
+ return self._major_index_fancy(col)._get_submatrix(minor=row)
+
+ def _get_intXslice(self, row, col):
+ if col.step in (1, None):
+ return self._get_submatrix(major=col, minor=row, copy=True)
+ return self._major_slice(col)._get_submatrix(minor=row)
+
+ def _get_sliceXint(self, row, col):
+ if row.step in (1, None):
+ return self._get_submatrix(major=col, minor=row, copy=True)
+ return self._get_submatrix(major=col)._minor_slice(row)
+
+ def _get_sliceXarray(self, row, col):
+ return self._major_index_fancy(col)._minor_slice(row)
+
+ def _get_arrayXint(self, row, col):
+ return self._get_submatrix(major=col)._minor_index_fancy(row)
+
+ def _get_arrayXslice(self, row, col):
+ return self._major_slice(col)._minor_index_fancy(row)
+
+ # these functions are used by the parent class (_cs_matrix)
+ # to remove redundancy between csc_array and csr_matrix
+ @staticmethod
+ def _swap(x):
+ """swap the members of x if this is a column-oriented matrix
+ """
+ return x[1], x[0]
+
+
+def isspmatrix_csc(x):
+ """Is `x` of csc_matrix type?
+
+ Parameters
+ ----------
+ x
+ object to check for being a csc matrix
+
+ Returns
+ -------
+ bool
+ True if `x` is a csc matrix, False otherwise
+
+ Examples
+ --------
+ >>> from scipy.sparse import csc_array, csc_matrix, coo_matrix, isspmatrix_csc
+ >>> isspmatrix_csc(csc_matrix([[5]]))
+ True
+ >>> isspmatrix_csc(csc_array([[5]]))
+ False
+ >>> isspmatrix_csc(coo_matrix([[5]]))
+ False
+ """
+ return isinstance(x, csc_matrix)
+
+
+# This namespace class separates array from matrix with isinstance
+class csc_array(_csc_base, sparray):
+ """
+ Compressed Sparse Column array.
+
+ This can be instantiated in several ways:
+ csc_array(D)
+ where D is a 2-D ndarray
+
+ csc_array(S)
+ with another sparse array or matrix S (equivalent to S.tocsc())
+
+ csc_array((M, N), [dtype])
+ to construct an empty array with shape (M, N)
+ dtype is optional, defaulting to dtype='d'.
+
+ csc_array((data, (row_ind, col_ind)), [shape=(M, N)])
+ where ``data``, ``row_ind`` and ``col_ind`` satisfy the
+ relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
+
+ csc_array((data, indices, indptr), [shape=(M, N)])
+ is the standard CSC representation where the row indices for
+ column i are stored in ``indices[indptr[i]:indptr[i+1]]``
+ and their corresponding values are stored in
+ ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
+ not supplied, the array dimensions are inferred from
+ the index arrays.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the array
+ shape : 2-tuple
+ Shape of the array
+ ndim : int
+ Number of dimensions (this is always 2)
+ nnz
+ size
+ data
+ CSC format data array of the array
+ indices
+ CSC format index array of the array
+ indptr
+ CSC format index pointer array of the array
+ has_sorted_indices
+ has_canonical_format
+ T
+
+ Notes
+ -----
+
+ Sparse arrays can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the CSC format
+ - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
+ - efficient column slicing
+ - fast matrix vector products (CSR, BSR may be faster)
+
+ Disadvantages of the CSC format
+ - slow row slicing operations (consider CSR)
+ - changes to the sparsity structure are expensive (consider LIL or DOK)
+
+ Canonical format
+ - Within each column, indices are sorted by row.
+ - There are no duplicate entries.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> from scipy.sparse import csc_array
+ >>> csc_array((3, 4), dtype=np.int8).toarray()
+ array([[0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]], dtype=int8)
+
+ >>> row = np.array([0, 2, 2, 0, 1, 2])
+ >>> col = np.array([0, 0, 1, 2, 2, 2])
+ >>> data = np.array([1, 2, 3, 4, 5, 6])
+ >>> csc_array((data, (row, col)), shape=(3, 3)).toarray()
+ array([[1, 0, 4],
+ [0, 0, 5],
+ [2, 3, 6]])
+
+ >>> indptr = np.array([0, 2, 3, 6])
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
+ >>> data = np.array([1, 2, 3, 4, 5, 6])
+ >>> csc_array((data, indices, indptr), shape=(3, 3)).toarray()
+ array([[1, 0, 4],
+ [0, 0, 5],
+ [2, 3, 6]])
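+
+    >>> # Reading column 2 directly from the CSC structure described above:
+    >>> # row indices in indices[indptr[2]:indptr[3]], values alongside in data
+    >>> indices[indptr[2]:indptr[3]]
+    array([0, 1, 2])
+    >>> data[indptr[2]:indptr[3]]
+    array([4, 5, 6])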
+
+ """
+
+
+class csc_matrix(spmatrix, _csc_base):
+ """
+ Compressed Sparse Column matrix.
+
+ This can be instantiated in several ways:
+ csc_matrix(D)
+ where D is a 2-D ndarray
+
+ csc_matrix(S)
+ with another sparse array or matrix S (equivalent to S.tocsc())
+
+ csc_matrix((M, N), [dtype])
+ to construct an empty matrix with shape (M, N)
+ dtype is optional, defaulting to dtype='d'.
+
+ csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
+ where ``data``, ``row_ind`` and ``col_ind`` satisfy the
+ relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
+
+ csc_matrix((data, indices, indptr), [shape=(M, N)])
+ is the standard CSC representation where the row indices for
+ column i are stored in ``indices[indptr[i]:indptr[i+1]]``
+ and their corresponding values are stored in
+ ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
+ not supplied, the matrix dimensions are inferred from
+ the index arrays.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the matrix
+ shape : 2-tuple
+ Shape of the matrix
+ ndim : int
+ Number of dimensions (this is always 2)
+ nnz
+ size
+ data
+ CSC format data array of the matrix
+ indices
+ CSC format index array of the matrix
+ indptr
+ CSC format index pointer array of the matrix
+ has_sorted_indices
+ has_canonical_format
+ T
+
+ Notes
+ -----
+
+ Sparse matrices can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the CSC format
+ - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
+ - efficient column slicing
+ - fast matrix vector products (CSR, BSR may be faster)
+
+ Disadvantages of the CSC format
+ - slow row slicing operations (consider CSR)
+ - changes to the sparsity structure are expensive (consider LIL or DOK)
+
+ Canonical format
+ - Within each column, indices are sorted by row.
+ - There are no duplicate entries.
+
+ Examples
+ --------
+
+ >>> import numpy as np
+ >>> from scipy.sparse import csc_matrix
+ >>> csc_matrix((3, 4), dtype=np.int8).toarray()
+ array([[0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]], dtype=int8)
+
+ >>> row = np.array([0, 2, 2, 0, 1, 2])
+ >>> col = np.array([0, 0, 1, 2, 2, 2])
+ >>> data = np.array([1, 2, 3, 4, 5, 6])
+ >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
+ array([[1, 0, 4],
+ [0, 0, 5],
+ [2, 3, 6]])
+
+ >>> indptr = np.array([0, 2, 3, 6])
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
+ >>> data = np.array([1, 2, 3, 4, 5, 6])
+ >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
+ array([[1, 0, 4],
+ [0, 0, 5],
+ [2, 3, 6]])
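+
+    >>> # Column slicing is efficient in CSC, per the notes above
+    >>> csc_matrix((data, indices, indptr), shape=(3, 3))[:, 1].toarray()
+    array([[0],
+           [0],
+           [3]])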
+
+ """
+
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_extract.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..349a056bd2f673b0a0b5379fa8b18d720eb5a86d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_extract.py
@@ -0,0 +1,178 @@
+"""Functions to extract parts of sparse matrices
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['find', 'tril', 'triu']
+
+
+from ._coo import coo_matrix, coo_array
+from ._base import sparray
+
+
+def find(A):
+ """Return the indices and values of the nonzero elements of a matrix
+
+ Parameters
+ ----------
+ A : dense or sparse array or matrix
+ Matrix whose nonzero elements are desired.
+
+ Returns
+ -------
+    (I, J, V) : tuple of arrays
+        I, J, and V contain the row indices, column indices, and values
+        of the nonzero entries.
+
+ Examples
+ --------
+ >>> from scipy.sparse import csr_array, find
+ >>> A = csr_array([[7.0, 8.0, 0],[0, 0, 9.0]])
+ >>> find(A)
+ (array([0, 0, 1], dtype=int32),
+ array([0, 1, 2], dtype=int32),
+ array([ 7., 8., 9.]))
+
+ """
+
+ A = coo_array(A, copy=True)
+ A.sum_duplicates()
+ # remove explicit zeros
+ nz_mask = A.data != 0
+ return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
+
+
+def tril(A, k=0, format=None):
+ """Return the lower triangular portion of a sparse array or matrix
+
+ Returns the elements on or below the k-th diagonal of A.
+ - k = 0 corresponds to the main diagonal
+ - k > 0 is above the main diagonal
+ - k < 0 is below the main diagonal
+
+ Parameters
+ ----------
+ A : dense or sparse array or matrix
+        Matrix whose lower triangular portion is desired.
+    k : int, optional
+        The top-most diagonal of the lower triangle.
+    format : str, optional
+        Sparse format of the result, e.g. format="csr", etc.
+
+ Returns
+ -------
+    L : sparse array or matrix
+        Lower triangular portion of A in sparse format.
+        Sparse array if A is a sparse array, otherwise matrix.
+
+ See Also
+ --------
+ triu : upper triangle in sparse format
+
+ Examples
+ --------
+ >>> from scipy.sparse import csr_array, tril
+ >>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+ ... dtype='int32')
+ >>> A.toarray()
+ array([[1, 2, 0, 0, 3],
+ [4, 5, 0, 6, 7],
+ [0, 0, 8, 9, 0]])
+ >>> tril(A).toarray()
+ array([[1, 0, 0, 0, 0],
+ [4, 5, 0, 0, 0],
+ [0, 0, 8, 0, 0]])
+ >>> tril(A).nnz
+ 4
+ >>> tril(A, k=1).toarray()
+ array([[1, 2, 0, 0, 0],
+ [4, 5, 0, 0, 0],
+ [0, 0, 8, 9, 0]])
+ >>> tril(A, k=-1).toarray()
+ array([[0, 0, 0, 0, 0],
+ [4, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ >>> tril(A, format='csc')
+    <3x5 sparse array of type '<class 'numpy.int32'>'
+            with 4 stored elements in Compressed Sparse Column format>
+
+ """
+ coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix
+
+ # convert to COOrdinate format where things are easy
+ A = coo_sparse(A, copy=False)
+ mask = A.row + k >= A.col
+
+ row = A.row[mask]
+ col = A.col[mask]
+ data = A.data[mask]
+ new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype)
+ return new_coo.asformat(format)
+
+
+def triu(A, k=0, format=None):
+ """Return the upper triangular portion of a sparse array or matrix
+
+ Returns the elements on or above the k-th diagonal of A.
+ - k = 0 corresponds to the main diagonal
+ - k > 0 is above the main diagonal
+ - k < 0 is below the main diagonal
+
+ Parameters
+ ----------
+ A : dense or sparse array or matrix
+        Matrix whose upper triangular portion is desired.
+    k : int, optional
+        The bottom-most diagonal of the upper triangle.
+    format : str, optional
+        Sparse format of the result, e.g. format="csr", etc.
+
+ Returns
+ -------
+ L : sparse array or matrix
+ Upper triangular portion of A in sparse format.
+ Sparse array if A is a sparse array, otherwise matrix.
+
+ See Also
+ --------
+ tril : lower triangle in sparse format
+
+ Examples
+ --------
+ >>> from scipy.sparse import csr_array, triu
+ >>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+ ... dtype='int32')
+ >>> A.toarray()
+ array([[1, 2, 0, 0, 3],
+ [4, 5, 0, 6, 7],
+ [0, 0, 8, 9, 0]])
+ >>> triu(A).toarray()
+ array([[1, 2, 0, 0, 3],
+ [0, 5, 0, 6, 7],
+ [0, 0, 8, 9, 0]])
+ >>> triu(A).nnz
+ 8
+ >>> triu(A, k=1).toarray()
+ array([[0, 2, 0, 0, 3],
+ [0, 0, 0, 6, 7],
+ [0, 0, 0, 9, 0]])
+ >>> triu(A, k=-1).toarray()
+ array([[1, 2, 0, 0, 3],
+ [4, 5, 0, 6, 7],
+ [0, 0, 8, 9, 0]])
+ >>> triu(A, format='csc')
+    <3x5 sparse array of type '<class 'numpy.int32'>'
+            with 8 stored elements in Compressed Sparse Column format>
+
+ """
+ coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix
+
+ # convert to COOrdinate format where things are easy
+ A = coo_sparse(A, copy=False)
+ mask = A.row + k <= A.col
+
+ row = A.row[mask]
+ col = A.col[mask]
+ data = A.data[mask]
+ new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype)
+ return new_coo.asformat(format)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_lil.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_lil.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5590010386190fa5fadcdb3e9fae3cc236a3b0e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_lil.py
@@ -0,0 +1,618 @@
+"""List of Lists sparse matrix class
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['lil_array', 'lil_matrix', 'isspmatrix_lil']
+
+from bisect import bisect_left
+
+import numpy as np
+
+from ._matrix import spmatrix
+from ._base import _spbase, sparray, issparse
+from ._index import IndexMixin, INT_TYPES, _broadcast_arrays
+from ._sputils import (getdtype, isshape, isscalarlike, upcast_scalar,
+ check_shape, check_reshape_kwargs)
+from . import _csparsetools
+
+
+class _lil_base(_spbase, IndexMixin):
+ _format = 'lil'
+
+ def __init__(self, arg1, shape=None, dtype=None, copy=False):
+ _spbase.__init__(self)
+ self.dtype = getdtype(dtype, arg1, default=float)
+
+ # First get the shape
+ if issparse(arg1):
+ if arg1.format == "lil" and copy:
+ A = arg1.copy()
+ else:
+ A = arg1.tolil()
+
+ if dtype is not None:
+ A = A.astype(dtype, copy=False)
+
+ self._shape = check_shape(A.shape)
+ self.dtype = A.dtype
+ self.rows = A.rows
+ self.data = A.data
+ elif isinstance(arg1,tuple):
+ if isshape(arg1):
+ if shape is not None:
+ raise ValueError('invalid use of shape parameter')
+ M, N = arg1
+ self._shape = check_shape((M, N))
+ self.rows = np.empty((M,), dtype=object)
+ self.data = np.empty((M,), dtype=object)
+ for i in range(M):
+ self.rows[i] = []
+ self.data[i] = []
+ else:
+ raise TypeError('unrecognized lil_array constructor usage')
+ else:
+ # assume A is dense
+ try:
+ A = self._ascontainer(arg1)
+ except TypeError as e:
+ raise TypeError('unsupported matrix type') from e
+ else:
+ A = self._csr_container(A, dtype=dtype).tolil()
+
+ self._shape = check_shape(A.shape)
+ self.dtype = A.dtype
+ self.rows = A.rows
+ self.data = A.data
+
+ def __iadd__(self,other):
+ self[:,:] = self + other
+ return self
+
+ def __isub__(self,other):
+ self[:,:] = self - other
+ return self
+
+ def __imul__(self,other):
+ if isscalarlike(other):
+ self[:,:] = self * other
+ return self
+ else:
+ return NotImplemented
+
+ def __itruediv__(self,other):
+ if isscalarlike(other):
+ self[:,:] = self / other
+ return self
+ else:
+ return NotImplemented
+
+ # Whenever the dimensions change, empty lists should be created for each
+ # row
+
+ def _getnnz(self, axis=None):
+ if axis is None:
+ return sum([len(rowvals) for rowvals in self.data])
+ if axis < 0:
+ axis += 2
+ if axis == 0:
+ out = np.zeros(self.shape[1], dtype=np.intp)
+ for row in self.rows:
+ out[row] += 1
+ return out
+ elif axis == 1:
+ return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
+ else:
+ raise ValueError('axis out of bounds')
+
+ def count_nonzero(self):
+ return sum(np.count_nonzero(rowvals) for rowvals in self.data)
+
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
+ count_nonzero.__doc__ = _spbase.count_nonzero.__doc__
+
+ def __str__(self):
+ val = ''
+ for i, row in enumerate(self.rows):
+ for pos, j in enumerate(row):
+ val += f" {str((i, j))}\t{str(self.data[i][pos])}\n"
+ return val[:-1]
+
+ def getrowview(self, i):
+ """Returns a view of the 'i'th row (without copying).
+ """
+ new = self._lil_container((1, self.shape[1]), dtype=self.dtype)
+ new.rows[0] = self.rows[i]
+ new.data[0] = self.data[i]
+ return new
+
+ def getrow(self, i):
+ """Returns a copy of the 'i'th row.
+ """
+ M, N = self.shape
+ if i < 0:
+ i += M
+ if i < 0 or i >= M:
+ raise IndexError('row index out of bounds')
+ new = self._lil_container((1, N), dtype=self.dtype)
+ new.rows[0] = self.rows[i][:]
+ new.data[0] = self.data[i][:]
+ return new
+
+ def __getitem__(self, key):
+ # Fast path for simple (int, int) indexing.
+ if (isinstance(key, tuple) and len(key) == 2 and
+ isinstance(key[0], INT_TYPES) and
+ isinstance(key[1], INT_TYPES)):
+ # lil_get1 handles validation for us.
+ return self._get_intXint(*key)
+ # Everything else takes the normal path.
+ return IndexMixin.__getitem__(self, key)
+
+ def _asindices(self, idx, N):
+ # LIL routines handle bounds-checking for us, so don't do it here.
+ try:
+ x = np.asarray(idx)
+ except (ValueError, TypeError, MemoryError) as e:
+ raise IndexError('invalid index') from e
+ if x.ndim not in (1, 2):
+ raise IndexError('Index dimension must be <= 2')
+ return x
+
+ def _get_intXint(self, row, col):
+ v = _csparsetools.lil_get1(self.shape[0], self.shape[1], self.rows,
+ self.data, row, col)
+ return self.dtype.type(v)
+
+ def _get_sliceXint(self, row, col):
+ row = range(*row.indices(self.shape[0]))
+ return self._get_row_ranges(row, slice(col, col+1))
+
+ def _get_arrayXint(self, row, col):
+ row = row.squeeze()
+ return self._get_row_ranges(row, slice(col, col+1))
+
+ def _get_intXslice(self, row, col):
+ return self._get_row_ranges((row,), col)
+
+ def _get_sliceXslice(self, row, col):
+ row = range(*row.indices(self.shape[0]))
+ return self._get_row_ranges(row, col)
+
+ def _get_arrayXslice(self, row, col):
+ return self._get_row_ranges(row, col)
+
+ def _get_intXarray(self, row, col):
+ row = np.array(row, dtype=col.dtype, ndmin=1)
+ return self._get_columnXarray(row, col)
+
+ def _get_sliceXarray(self, row, col):
+ row = np.arange(*row.indices(self.shape[0]))
+ return self._get_columnXarray(row, col)
+
+ def _get_columnXarray(self, row, col):
+ # outer indexing
+ row, col = _broadcast_arrays(row[:,None], col)
+ return self._get_arrayXarray(row, col)
+
+ def _get_arrayXarray(self, row, col):
+ # inner indexing
+ i, j = map(np.atleast_2d, _prepare_index_for_memoryview(row, col))
+ new = self._lil_container(i.shape, dtype=self.dtype)
+ _csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
+ self.rows, self.data,
+ new.rows, new.data,
+ i, j)
+ return new
+
+ def _get_row_ranges(self, rows, col_slice):
+ """
+ Fast path for indexing in the case where column index is slice.
+
+        This is faster than brute-force indexing because zeros are
+        skipped more efficiently, by accessing the elements
+        column-wise in order.
+
+ Parameters
+ ----------
+ rows : sequence or range
+ Rows indexed. If range, must be within valid bounds.
+ col_slice : slice
+ Columns indexed
+
+ """
+ j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
+ col_range = range(j_start, j_stop, j_stride)
+ nj = len(col_range)
+ new = self._lil_container((len(rows), nj), dtype=self.dtype)
+
+ _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
+ self.rows, self.data,
+ new.rows, new.data,
+ rows,
+ j_start, j_stop, j_stride, nj)
+
+ return new
+
+ def _set_intXint(self, row, col, x):
+ _csparsetools.lil_insert(self.shape[0], self.shape[1], self.rows,
+ self.data, row, col, x)
+
+ def _set_arrayXarray(self, row, col, x):
+ i, j, x = map(np.atleast_2d, _prepare_index_for_memoryview(row, col, x))
+ _csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
+ self.rows, self.data,
+ i, j, x)
+
+ def _set_arrayXarray_sparse(self, row, col, x):
+ # Fall back to densifying x
+ x = np.asarray(x.toarray(), dtype=self.dtype)
+ x, _ = _broadcast_arrays(x, row)
+ self._set_arrayXarray(row, col, x)
+
+ def __setitem__(self, key, x):
+ if isinstance(key, tuple) and len(key) == 2:
+ row, col = key
+ # Fast path for simple (int, int) indexing.
+ if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
+ x = self.dtype.type(x)
+ if x.size > 1:
+ raise ValueError("Trying to assign a sequence to an item")
+ return self._set_intXint(row, col, x)
+ # Fast path for full-matrix sparse assignment.
+ if (isinstance(row, slice) and isinstance(col, slice) and
+ row == slice(None) and col == slice(None) and
+ issparse(x) and x.shape == self.shape):
+ x = self._lil_container(x, dtype=self.dtype)
+ self.rows = x.rows
+ self.data = x.data
+ return
+ # Everything else takes the normal path.
+ IndexMixin.__setitem__(self, key, x)
+
+ def _mul_scalar(self, other):
+ if other == 0:
+ # Multiply by zero: return the zero matrix
+ new = self._lil_container(self.shape, dtype=self.dtype)
+ else:
+ res_dtype = upcast_scalar(self.dtype, other)
+
+ new = self.copy()
+ new = new.astype(res_dtype)
+ # Multiply this scalar by every element.
+ for j, rowvals in enumerate(new.data):
+ new.data[j] = [val*other for val in rowvals]
+ return new
+
+ def __truediv__(self, other): # self / other
+ if isscalarlike(other):
+ new = self.copy()
+ new.dtype = np.result_type(self, other)
+ # Divide every element by this scalar
+ for j, rowvals in enumerate(new.data):
+ new.data[j] = [val/other for val in rowvals]
+ return new
+ else:
+ return self.tocsr() / other
+
+ def copy(self):
+ M, N = self.shape
+ new = self._lil_container(self.shape, dtype=self.dtype)
+ # This is ~14x faster than calling deepcopy() on rows and data.
+ _csparsetools.lil_get_row_ranges(M, N, self.rows, self.data,
+ new.rows, new.data, range(M),
+ 0, N, 1, N)
+ return new
+
+ copy.__doc__ = _spbase.copy.__doc__
+
+ def reshape(self, *args, **kwargs):
+ shape = check_shape(args, self.shape)
+ order, copy = check_reshape_kwargs(kwargs)
+
+ # Return early if reshape is not required
+ if shape == self.shape:
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ new = self._lil_container(shape, dtype=self.dtype)
+
+ if order == 'C':
+ ncols = self.shape[1]
+ for i, row in enumerate(self.rows):
+ for col, j in enumerate(row):
+ new_r, new_c = np.unravel_index(i * ncols + j, shape)
+ new[new_r, new_c] = self[i, j]
+ elif order == 'F':
+ nrows = self.shape[0]
+ for i, row in enumerate(self.rows):
+ for col, j in enumerate(row):
+ new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
+ new[new_r, new_c] = self[i, j]
+ else:
+ raise ValueError("'order' must be 'C' or 'F'")
+
+ return new
+
+ reshape.__doc__ = _spbase.reshape.__doc__
+
+ def resize(self, *shape):
+ shape = check_shape(shape)
+ new_M, new_N = shape
+ M, N = self.shape
+
+ if new_M < M:
+ self.rows = self.rows[:new_M]
+ self.data = self.data[:new_M]
+ elif new_M > M:
+ self.rows = np.resize(self.rows, new_M)
+ self.data = np.resize(self.data, new_M)
+ for i in range(M, new_M):
+ self.rows[i] = []
+ self.data[i] = []
+
+ if new_N < N:
+ for row, data in zip(self.rows, self.data):
+ trunc = bisect_left(row, new_N)
+ del row[trunc:]
+ del data[trunc:]
+
+ self._shape = shape
+
+ resize.__doc__ = _spbase.resize.__doc__
+
+ def toarray(self, order=None, out=None):
+ d = self._process_toarray_args(order, out)
+ for i, row in enumerate(self.rows):
+ for pos, j in enumerate(row):
+ d[i, j] = self.data[i][pos]
+ return d
+
+ toarray.__doc__ = _spbase.toarray.__doc__
+
+ def transpose(self, axes=None, copy=False):
+ return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False)
+
+ transpose.__doc__ = _spbase.transpose.__doc__
+
+ def tolil(self, copy=False):
+ if copy:
+ return self.copy()
+ else:
+ return self
+
+ tolil.__doc__ = _spbase.tolil.__doc__
+
+ def tocsr(self, copy=False):
+ M, N = self.shape
+ if M == 0 or N == 0:
+ return self._csr_container((M, N), dtype=self.dtype)
+
+ # construct indptr array
+ if M*N <= np.iinfo(np.int32).max:
+ # fast path: it is known that 64-bit indexing will not be needed.
+ idx_dtype = np.int32
+ indptr = np.empty(M + 1, dtype=idx_dtype)
+ indptr[0] = 0
+ _csparsetools.lil_get_lengths(self.rows, indptr[1:])
+ np.cumsum(indptr, out=indptr)
+ nnz = indptr[-1]
+ else:
+ idx_dtype = self._get_index_dtype(maxval=N)
+ lengths = np.empty(M, dtype=idx_dtype)
+ _csparsetools.lil_get_lengths(self.rows, lengths)
+ nnz = lengths.sum(dtype=np.int64)
+ idx_dtype = self._get_index_dtype(maxval=max(N, nnz))
+ indptr = np.empty(M + 1, dtype=idx_dtype)
+ indptr[0] = 0
+ np.cumsum(lengths, dtype=idx_dtype, out=indptr[1:])
+
+ indices = np.empty(nnz, dtype=idx_dtype)
+ data = np.empty(nnz, dtype=self.dtype)
+ _csparsetools.lil_flatten_to_array(self.rows, indices)
+ _csparsetools.lil_flatten_to_array(self.data, data)
+
+ # init csr matrix
+ return self._csr_container((data, indices, indptr), shape=self.shape)
+
+ tocsr.__doc__ = _spbase.tocsr.__doc__
+
+
+def _prepare_index_for_memoryview(i, j, x=None):
+ """
+ Convert index and data arrays to form suitable for passing to the
+ Cython fancy getset routines.
+
+    The conversions are necessary to (i) ensure the integer index
+    arrays are of an accepted type, and (ii) ensure the arrays are
+    writable so that Cython memoryview support doesn't choke on them.
+
+ Parameters
+ ----------
+ i, j
+ Index arrays
+ x : optional
+ Data arrays
+
+ Returns
+ -------
+ i, j, x
+ Re-formatted arrays (x is omitted, if input was None)
+
+ """
+ if i.dtype > j.dtype:
+ j = j.astype(i.dtype)
+ elif i.dtype < j.dtype:
+ i = i.astype(j.dtype)
+
+ if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
+ i = i.astype(np.intp)
+ if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
+ j = j.astype(np.intp)
+
+ if x is not None:
+ if not x.flags.writeable:
+ x = x.copy()
+ return i, j, x
+ else:
+ return i, j
+
+
+def isspmatrix_lil(x):
+ """Is `x` of lil_matrix type?
+
+ Parameters
+ ----------
+ x
+ object to check for being a lil matrix
+
+ Returns
+ -------
+ bool
+ True if `x` is a lil matrix, False otherwise
+
+ Examples
+ --------
+ >>> from scipy.sparse import lil_array, lil_matrix, coo_matrix, isspmatrix_lil
+ >>> isspmatrix_lil(lil_matrix([[5]]))
+ True
+ >>> isspmatrix_lil(lil_array([[5]]))
+ False
+ >>> isspmatrix_lil(coo_matrix([[5]]))
+ False
+ """
+ return isinstance(x, lil_matrix)
+
+
+# This namespace class separates array from matrix with isinstance
+class lil_array(_lil_base, sparray):
+ """
+ Row-based LIst of Lists sparse array.
+
+ This is a structure for constructing sparse arrays incrementally.
+ Note that inserting a single item can take linear time in the worst case;
+ to construct the array efficiently, make sure the items are pre-sorted by
+ index, per row.
+
+ This can be instantiated in several ways:
+ lil_array(D)
+ where D is a 2-D ndarray
+
+ lil_array(S)
+ with another sparse array or matrix S (equivalent to S.tolil())
+
+ lil_array((M, N), [dtype])
+ to construct an empty array with shape (M, N)
+ dtype is optional, defaulting to dtype='d'.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the array
+ shape : 2-tuple
+ Shape of the array
+ ndim : int
+ Number of dimensions (this is always 2)
+ nnz
+ size
+ data
+ LIL format data array of the array
+ rows
+ LIL format row index array of the array
+ T
+
+ Notes
+ -----
+ Sparse arrays can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the LIL format
+ - supports flexible slicing
+ - changes to the array sparsity structure are efficient
+
+ Disadvantages of the LIL format
+ - arithmetic operations LIL + LIL are slow (consider CSR or CSC)
+ - slow column slicing (consider CSC)
+ - slow matrix vector products (consider CSR or CSC)
+
+ Intended Usage
+ - LIL is a convenient format for constructing sparse arrays
+ - once an array has been constructed, convert to CSR or
+ CSC format for fast arithmetic and matrix vector operations
+ - consider using the COO format when constructing large arrays
+
+ Data Structure
+ - An array (``self.rows``) of rows, each of which is a sorted
+ list of column indices of non-zero elements.
+ - The corresponding nonzero values are stored in similar
+ fashion in ``self.data``.
+
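+    Examples
+    --------
+    A short sketch of the ``rows``/``data`` structure described above:
+
+    >>> from scipy.sparse import lil_array
+    >>> A = lil_array((3, 3))
+    >>> A[0, 1] = 4
+    >>> A[2, 0] = 5
+    >>> A.rows
+    array([list([1]), list([]), list([0])], dtype=object)
+    >>> A.data
+    array([list([4.0]), list([]), list([5.0])], dtype=object)
+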
+ """
+
+
+class lil_matrix(spmatrix, _lil_base):
+ """
+ Row-based LIst of Lists sparse matrix.
+
+ This is a structure for constructing sparse matrices incrementally.
+ Note that inserting a single item can take linear time in the worst case;
+ to construct the matrix efficiently, make sure the items are pre-sorted by
+ index, per row.
+
+ This can be instantiated in several ways:
+ lil_matrix(D)
+ where D is a 2-D ndarray
+
+ lil_matrix(S)
+ with another sparse array or matrix S (equivalent to S.tolil())
+
+ lil_matrix((M, N), [dtype])
+ to construct an empty matrix with shape (M, N)
+ dtype is optional, defaulting to dtype='d'.
+
+ Attributes
+ ----------
+ dtype : dtype
+ Data type of the matrix
+ shape : 2-tuple
+ Shape of the matrix
+ ndim : int
+ Number of dimensions (this is always 2)
+ nnz
+ size
+ data
+ LIL format data array of the matrix
+ rows
+ LIL format row index array of the matrix
+ T
+
+ Notes
+ -----
+ Sparse matrices can be used in arithmetic operations: they support
+ addition, subtraction, multiplication, division, and matrix power.
+
+ Advantages of the LIL format
+ - supports flexible slicing
+ - changes to the matrix sparsity structure are efficient
+
+ Disadvantages of the LIL format
+ - arithmetic operations LIL + LIL are slow (consider CSR or CSC)
+ - slow column slicing (consider CSC)
+ - slow matrix vector products (consider CSR or CSC)
+
+ Intended Usage
+ - LIL is a convenient format for constructing sparse matrices
+ - once a matrix has been constructed, convert to CSR or
+ CSC format for fast arithmetic and matrix vector operations
+ - consider using the COO format when constructing large matrices
+
+ Data Structure
+ - An array (``self.rows``) of rows, each of which is a sorted
+ list of column indices of non-zero elements.
+ - The corresponding nonzero values are stored in similar
+ fashion in ``self.data``.
+
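+    Examples
+    --------
+    A short sketch of the build-then-convert workflow described above:
+
+    >>> from scipy.sparse import lil_matrix
+    >>> M = lil_matrix((2, 4))
+    >>> M[0, 0] = 1
+    >>> M[1, 3] = 2
+    >>> M.tocsr().toarray()
+    array([[1., 0., 0., 0.],
+           [0., 0., 0., 2.]])
+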
+ """
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e9b0abcede6387e74538baf839a303c6cc1b6be
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py
@@ -0,0 +1,76 @@
+""" Functions that operate on sparse matrices
+"""
+
+__all__ = ['count_blocks','estimate_blocksize']
+
+from ._base import issparse
+from ._csr import csr_array
+from ._sparsetools import csr_count_blocks
+
+
+def estimate_blocksize(A, efficiency=0.7):
+ """Attempt to determine the blocksize of a sparse matrix
+
+ Returns a blocksize=(r,c) such that
+ - A.nnz / A.tobsr( (r,c) ).nnz > efficiency
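+
+    A small sketch: a block-diagonal matrix assembled from dense 2x2 blocks
+    (via ``scipy.sparse.kron``) should be detected as such.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import kron, identity, csr_array
+    >>> A = csr_array(kron(identity(3), np.ones((2, 2))))
+    >>> estimate_blocksize(A)
+    (2, 2)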
+ """
+ if not (issparse(A) and A.format in ("csc", "csr")):
+ A = csr_array(A)
+
+ if A.nnz == 0:
+ return (1,1)
+
+ if not 0 < efficiency < 1.0:
+ raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
+
+ high_efficiency = (1.0 + efficiency) / 2.0
+ nnz = float(A.nnz)
+ M,N = A.shape
+
+ if M % 2 == 0 and N % 2 == 0:
+ e22 = nnz / (4 * count_blocks(A,(2,2)))
+ else:
+ e22 = 0.0
+
+ if M % 3 == 0 and N % 3 == 0:
+ e33 = nnz / (9 * count_blocks(A,(3,3)))
+ else:
+ e33 = 0.0
+
+ if e22 > high_efficiency and e33 > high_efficiency:
+ e66 = nnz / (36 * count_blocks(A,(6,6)))
+ if e66 > efficiency:
+ return (6,6)
+ else:
+ return (3,3)
+ else:
+ if M % 4 == 0 and N % 4 == 0:
+ e44 = nnz / (16 * count_blocks(A,(4,4)))
+ else:
+ e44 = 0.0
+
+ if e44 > efficiency:
+ return (4,4)
+ elif e33 > efficiency:
+ return (3,3)
+ elif e22 > efficiency:
+ return (2,2)
+ else:
+ return (1,1)
+
+
+def count_blocks(A, blocksize):
+ """For a given blocksize=(r,c) count the number of occupied
+ blocks in a sparse matrix A
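+
+    A minimal sketch: tiling a 4x4 matrix into 2x2 blocks, only the
+    two diagonal blocks contain nonzeros.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csr_array
+    >>> A = csr_array(np.array([[1, 1, 0, 0],
+    ...                         [1, 1, 0, 0],
+    ...                         [0, 0, 1, 0],
+    ...                         [0, 0, 0, 1]]))
+    >>> count_blocks(A, (2, 2))
+    2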
+ """
+ r,c = blocksize
+ if r < 1 or c < 1:
+ raise ValueError('r and c must be positive')
+
+ if issparse(A):
+ if A.format == "csr":
+ M,N = A.shape
+ return csr_count_blocks(M,N,r,c,A.indptr,A.indices)
+ elif A.format == "csc":
+ return count_blocks(A.T,(c,r))
+ return count_blocks(csr_array(A),blocksize)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/construct.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/construct.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3d34d2fd38887877980727bceaaa215129bf283
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/construct.py
@@ -0,0 +1,44 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'block_diag',
+ 'bmat',
+ 'bsr_matrix',
+ 'check_random_state',
+ 'coo_matrix',
+ 'csc_matrix',
+ 'csr_hstack',
+ 'csr_matrix',
+ 'dia_matrix',
+ 'diags',
+ 'eye',
+ 'get_index_dtype',
+ 'hstack',
+ 'identity',
+ 'isscalarlike',
+ 'issparse',
+ 'kron',
+ 'kronsum',
+ 'numbers',
+ 'rand',
+ 'random',
+ 'rng_integers',
+ 'spdiags',
+ 'upcast',
+ 'vstack',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="sparse", module="construct",
+ private_modules=["_construct"], all=__all__,
+ attribute=name)
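+
+
+# A note on the mechanism: attribute access on this module is routed through
+# `_sub_module_deprecation`, which emits a DeprecationWarning and then
+# forwards to the private `_construct` module, so e.g.
+# `scipy.sparse.construct.eye` still resolves during the deprecation period.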
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/lil.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/lil.py
new file mode 100644
index 0000000000000000000000000000000000000000..31e5f20e4887c4e163aa2d807c89fe0768e3afb0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/lil.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'INT_TYPES',
+ 'IndexMixin',
+ 'bisect_left',
+ 'check_reshape_kwargs',
+ 'check_shape',
+ 'getdtype',
+ 'isscalarlike',
+ 'isshape',
+ 'isspmatrix_lil',
+ 'lil_matrix',
+ 'spmatrix',
+ 'upcast_scalar',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="sparse", module="lil",
+ private_modules=["_lil"], all=__all__,
+ attribute=name)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sparsetools.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sparsetools.py
new file mode 100644
index 0000000000000000000000000000000000000000..47ac80adae7145a6192f9fb9b225a1762ff830ce
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sparsetools.py
@@ -0,0 +1,98 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'bsr_diagonal',
+ 'bsr_eldiv_bsr',
+ 'bsr_elmul_bsr',
+ 'bsr_ge_bsr',
+ 'bsr_gt_bsr',
+ 'bsr_le_bsr',
+ 'bsr_lt_bsr',
+ 'bsr_matmat',
+ 'bsr_matvec',
+ 'bsr_matvecs',
+ 'bsr_maximum_bsr',
+ 'bsr_minimum_bsr',
+ 'bsr_minus_bsr',
+ 'bsr_ne_bsr',
+ 'bsr_plus_bsr',
+ 'bsr_scale_columns',
+ 'bsr_scale_rows',
+ 'bsr_sort_indices',
+ 'bsr_tocsr',
+ 'bsr_transpose',
+ 'coo_matvec',
+ 'coo_tocsr',
+ 'coo_todense',
+ 'cs_graph_components',
+ 'csc_diagonal',
+ 'csc_eldiv_csc',
+ 'csc_elmul_csc',
+ 'csc_ge_csc',
+ 'csc_gt_csc',
+ 'csc_le_csc',
+ 'csc_lt_csc',
+ 'csc_matmat',
+ 'csc_matmat_maxnnz',
+ 'csc_matvec',
+ 'csc_matvecs',
+ 'csc_maximum_csc',
+ 'csc_minimum_csc',
+ 'csc_minus_csc',
+ 'csc_ne_csc',
+ 'csc_plus_csc',
+ 'csc_tocsr',
+ 'csr_column_index1',
+ 'csr_column_index2',
+ 'csr_count_blocks',
+ 'csr_diagonal',
+ 'csr_eldiv_csr',
+ 'csr_eliminate_zeros',
+ 'csr_elmul_csr',
+ 'csr_ge_csr',
+ 'csr_gt_csr',
+ 'csr_has_canonical_format',
+ 'csr_has_sorted_indices',
+ 'csr_hstack',
+ 'csr_le_csr',
+ 'csr_lt_csr',
+ 'csr_matmat',
+ 'csr_matmat_maxnnz',
+ 'csr_matvec',
+ 'csr_matvecs',
+ 'csr_maximum_csr',
+ 'csr_minimum_csr',
+ 'csr_minus_csr',
+ 'csr_ne_csr',
+ 'csr_plus_csr',
+ 'csr_row_index',
+ 'csr_row_slice',
+ 'csr_sample_offsets',
+ 'csr_sample_values',
+ 'csr_scale_columns',
+ 'csr_scale_rows',
+ 'csr_sort_indices',
+ 'csr_sum_duplicates',
+ 'csr_tobsr',
+ 'csr_tocsc',
+ 'csr_todense',
+ 'dia_matvec',
+ 'expandptr',
+ 'get_csr_submatrix',
+ 'test_throw_error',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="sparse", module="sparsetools",
+ private_modules=["_sparsetools"], all=__all__,
+ attribute=name)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sputils.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sputils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdacb42dd0fb23b956fb95f2eea913cfb933d029
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/sputils.py
@@ -0,0 +1,44 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'asmatrix',
+ 'check_reshape_kwargs',
+ 'check_shape',
+ 'downcast_intp_index',
+ 'get_index_dtype',
+ 'get_sum_dtype',
+ 'getdata',
+ 'getdtype',
+ 'is_pydata_spmatrix',
+ 'isdense',
+ 'isintlike',
+ 'ismatrix',
+ 'isscalarlike',
+ 'issequence',
+ 'isshape',
+ 'matrix',
+ 'operator',
+ 'prod',
+ 'supported_dtypes',
+ 'sys',
+ 'to_native',
+ 'upcast',
+ 'upcast_char',
+ 'upcast_scalar',
+ 'validateaxis',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="sparse", module="sputils",
+ private_modules=["_sputils"], all=__all__,
+ attribute=name)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/version.py b/env-llmeval/lib/python3.10/site-packages/scipy/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..db73d1943ef5d7a2fcdd510aea849c3b4901984b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/version.py
@@ -0,0 +1,12 @@
+# THIS FILE IS GENERATED DURING THE SCIPY BUILD
+# See tools/version_utils.py for details
+
+short_version = '1.13.0'
+version = '1.13.0'
+full_version = '1.13.0'
+git_revision = '7dcd8c5'
+commit_count = '1580'
+release = True
+
+if not release:
+ version = full_version