applied-ai-018 committed (verified)
Commit 04dfba6 · 1 Parent(s): a04104c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/scipy/conftest.py +238 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__init__.py +201 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_cubic.py +970 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py +805 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py +796 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py +358 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py +332 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_polyint.py +938 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbf.py +290 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py +550 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi.py +766 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/fitpack.py +32 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py +25 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/rbf.py +25 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/conftest.py ADDED
@@ -0,0 +1,238 @@
# Pytest customization
import json
import os
import warnings
import tempfile

import numpy as np
import numpy.testing as npt
import pytest
import hypothesis

from scipy._lib._fpumode import get_fpu_mode
from scipy._lib._testutils import FPUModeChangeWarning
from scipy._lib import _pep440
from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE


def pytest_configure(config):
    config.addinivalue_line("markers",
        "slow: Tests that are very slow.")
    config.addinivalue_line("markers",
        "xslow: mark test as extremely slow (not run unless explicitly requested)")
    config.addinivalue_line("markers",
        "xfail_on_32bit: mark test as failing on 32-bit platforms")
    try:
        import pytest_timeout  # noqa:F401
    except Exception:
        config.addinivalue_line(
            "markers", 'timeout: mark a test for a non-default timeout')
    config.addinivalue_line("markers",
        "skip_if_array_api(*backends, reasons=None, np_only=False, cpu_only=False): "
        "mark the desired skip configuration for the `skip_if_array_api` fixture.")


def _get_mark(item, name):
    if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"):
        mark = item.get_closest_marker(name)
    else:
        mark = item.get_marker(name)
    return mark


def pytest_runtest_setup(item):
    mark = _get_mark(item, "xslow")
    if mark is not None:
        try:
            v = int(os.environ.get('SCIPY_XSLOW', '0'))
        except ValueError:
            v = False
        if not v:
            pytest.skip("very slow test; "
                        "set environment variable SCIPY_XSLOW=1 to run it")
    mark = _get_mark(item, 'xfail_on_32bit')
    if mark is not None and np.intp(0).itemsize < 8:
        pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}')

    # Older versions of threadpoolctl have an issue that may lead to this
    # warning being emitted, see gh-14441
    with npt.suppress_warnings() as sup:
        sup.filter(pytest.PytestUnraisableExceptionWarning)

        try:
            from threadpoolctl import threadpool_limits

            HAS_THREADPOOLCTL = True
        except Exception:  # observed in gh-14441: (ImportError, AttributeError)
            # Optional dependency only. All exceptions are caught, for robustness
            HAS_THREADPOOLCTL = False

        if HAS_THREADPOOLCTL:
            # Set the number of openmp threads based on the number of workers
            # xdist is using to prevent oversubscription. Simplified version of what
            # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper
            # functions)
            try:
                xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
            except KeyError:
                # raises when pytest-xdist is not installed
                return

            if not os.getenv('OMP_NUM_THREADS'):
                max_openmp_threads = os.cpu_count() // 2  # use nr of physical cores
                threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1)
                try:
                    threadpool_limits(threads_per_worker, user_api='blas')
                except Exception:
                    # May raise AttributeError for older versions of OpenBLAS.
                    # Catch any error for robustness.
                    return


@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU mode was not changed during the test.
    """
    old_mode = get_fpu_mode()
    yield
    new_mode = get_fpu_mode()

    if old_mode != new_mode:
        warnings.warn(f"FPU mode changed from {old_mode:#x} to {new_mode:#x} during "
                      "the test",
                      category=FPUModeChangeWarning, stacklevel=0)


# Array API backend handling
xp_available_backends = {'numpy': np}

if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str):
    # fill the dict of backends with available libraries
    try:
        import array_api_strict
        xp_available_backends.update({'array_api_strict': array_api_strict})
    except ImportError:
        pass

    try:
        import torch  # type: ignore[import]
        xp_available_backends.update({'pytorch': torch})
        # can use `mps` or `cpu`
        torch.set_default_device(SCIPY_DEVICE)
    except ImportError:
        pass

    try:
        import cupy  # type: ignore[import]
        xp_available_backends.update({'cupy': cupy})
    except ImportError:
        pass

    # by default, use all available backends
    if SCIPY_ARRAY_API.lower() not in ("1", "true"):
        SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API)

        if 'all' in SCIPY_ARRAY_API_:
            pass  # same as True
        else:
            # only select a subset of backend by filtering out the dict
            try:
                xp_available_backends = {
                    backend: xp_available_backends[backend]
                    for backend in SCIPY_ARRAY_API_
                }
            except KeyError:
                msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}"
                raise ValueError(msg)

if 'cupy' in xp_available_backends:
    SCIPY_DEVICE = 'cuda'

array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values())


@pytest.fixture
def skip_if_array_api(xp, request):
    """
    Skip based on the ``skip_if_array_api`` marker.

    Parameters
    ----------
    *backends : tuple
        Backends to skip, e.g. ``("array_api_strict", "torch")``.
        These are overridden when ``np_only`` is ``True``, and are not
        necessary to provide for non-CPU backends when ``cpu_only`` is ``True``.
    reasons : list, optional
        A list of reasons for each skip. When ``np_only`` is ``True``,
        this should be a singleton list. Otherwise, this should be a list
        of reasons, one for each corresponding backend in ``backends``.
        If unprovided, default reasons are used. Note that it is not possible
        to specify a custom reason with ``cpu_only``. Default: ``None``.
    np_only : bool, optional
        When ``True``, the test is skipped for all backends other
        than the default NumPy backend. There is no need to provide
        any ``backends`` in this case. To specify a reason, pass a
        singleton list to ``reasons``. Default: ``False``.
    cpu_only : bool, optional
        When ``True``, the test is skipped on non-CPU devices.
        There is no need to provide any ``backends`` in this case,
        but any ``backends`` will also be skipped on the CPU.
        Default: ``False``.
    """
    if "skip_if_array_api" not in request.keywords:
        return
    backends = request.keywords["skip_if_array_api"].args
    kwargs = request.keywords["skip_if_array_api"].kwargs
    np_only = kwargs.get("np_only", False)
    cpu_only = kwargs.get("cpu_only", False)
    if np_only:
        reasons = kwargs.get("reasons", ["do not run with non-NumPy backends."])
        reason = reasons[0]
        if xp.__name__ != 'numpy':
            pytest.skip(reason=reason)
        return
    if cpu_only:
        reason = "do not run with `SCIPY_ARRAY_API` set and not on CPU"
        if SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu':
            if xp.__name__ == 'cupy':
                pytest.skip(reason=reason)
            elif xp.__name__ == 'torch':
                if 'cpu' not in torch.empty(0).device.type:
                    pytest.skip(reason=reason)
    if backends is not None:
        reasons = kwargs.get("reasons", False)
        for i, backend in enumerate(backends):
            if xp.__name__ == backend:
                if not reasons:
                    reason = f"do not run with array API backend: {backend}"
                else:
                    reason = reasons[i]
                pytest.skip(reason=reason)


# Following the approach of NumPy's conftest.py...
# Use a known and persistent tmpdir for hypothesis' caches, which
# can be automatically cleared by the OS or user.
hypothesis.configuration.set_hypothesis_home_dir(
    os.path.join(tempfile.gettempdir(), ".hypothesis")
)

# We register two custom profiles for SciPy - for details see
# https://hypothesis.readthedocs.io/en/latest/settings.html
# The first is designed for our own CI runs; the latter also
# forces determinism and is designed for use via scipy.test()
hypothesis.settings.register_profile(
    name="nondeterministic", deadline=None, print_blob=True,
)
hypothesis.settings.register_profile(
    name="deterministic",
    deadline=None, print_blob=True, database=None, derandomize=True,
    suppress_health_check=list(hypothesis.HealthCheck),
)

# Profile is currently set by environment variable `SCIPY_HYPOTHESIS_PROFILE`
# In the future, it would be good to work the choice into dev.py.
SCIPY_HYPOTHESIS_PROFILE = os.environ.get("SCIPY_HYPOTHESIS_PROFILE",
                                          "deterministic")
hypothesis.settings.load_profile(SCIPY_HYPOTHESIS_PROFILE)
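For orientation, the sketch below shows how a test module would typically consume the hooks this conftest.py registers: the `slow`/`xslow` markers (the latter gated by the SCIPY_XSLOW environment variable), the `array_api_compatible` parametrization over the available backends, and the `skip_if_array_api` marker/fixture pair. The module layout, test names, skipped backend, and assertions are invented for the example; only the marker names, the fixture, and the environment variables come from the file above.

# Hypothetical test module (not part of this commit) exercising the hooks
# registered in the conftest.py shown above.
import numpy as np
import pytest

from scipy.conftest import array_api_compatible


@pytest.mark.slow
def test_marked_slow():
    # deselected by `pytest -m "not slow"`
    assert np.eye(3).sum() == 3.0


@pytest.mark.xslow
def test_marked_extremely_slow():
    # skipped by pytest_runtest_setup() above unless SCIPY_XSLOW=1 is set
    assert sum(range(1_000_000)) == 499_999_500_000


@array_api_compatible                      # parametrizes `xp` over the backends
@pytest.mark.usefixtures("skip_if_array_api")
@pytest.mark.skip_if_array_api('array_api_strict',
                               reasons=["example reason: strict backend unsupported"])
def test_backend_aware_sum(xp):
    # `xp` is numpy, torch, cupy or array_api_strict depending on SCIPY_ARRAY_API
    a = xp.asarray([1.0, 2.0, 3.0])
    assert float(a.sum()) == 6.0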
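The two Hypothesis profiles registered at the bottom of the file are selected purely through the SCIPY_HYPOTHESIS_PROFILE environment variable, with "deterministic" as the default. A minimal sketch of a property-based test that would run under these profiles follows; the test function and strategy are invented, and only the profile mechanism comes from the conftest.py above.

# Minimal sketch of a property-based test governed by the Hypothesis profiles above.
import numpy as np
from hypothesis import given, strategies as st


@given(st.lists(st.floats(allow_nan=False, allow_infinity=False,
                          min_value=-1e6, max_value=1e6),
                min_size=1, max_size=50))
def test_sum_of_bounded_floats_is_finite(values):
    # Under the default "deterministic" profile (derandomize=True, database=None)
    # the same examples are generated on every run, which keeps CI and
    # `scipy.test()` reproducible; set SCIPY_HYPOTHESIS_PROFILE=nondeterministic
    # to restore randomized example generation.
    assert np.isfinite(np.sum(values))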
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__init__.py ADDED
@@ -0,0 +1,201 @@
"""
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================

.. currentmodule:: scipy.interpolate

Sub-package for objects used in interpolation.

As listed below, this sub-package contains spline functions and classes,
1-D and multidimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
and DFITPACK functions.

Univariate interpolation
========================

.. autosummary::
   :toctree: generated/

   interp1d
   BarycentricInterpolator
   KroghInterpolator
   barycentric_interpolate
   krogh_interpolate
   pchip_interpolate
   CubicHermiteSpline
   PchipInterpolator
   Akima1DInterpolator
   CubicSpline
   PPoly
   BPoly


Multivariate interpolation
==========================

Unstructured data:

.. autosummary::
   :toctree: generated/

   griddata
   LinearNDInterpolator
   NearestNDInterpolator
   CloughTocher2DInterpolator
   RBFInterpolator
   Rbf
   interp2d

For data on a grid:

.. autosummary::
   :toctree: generated/

   interpn
   RegularGridInterpolator
   RectBivariateSpline

.. seealso::

   `scipy.ndimage.map_coordinates`

Tensor product polynomials:

.. autosummary::
   :toctree: generated/

   NdPPoly
   NdBSpline

1-D Splines
===========

.. autosummary::
   :toctree: generated/

   BSpline
   make_interp_spline
   make_lsq_spline
   make_smoothing_spline

Functional interface to FITPACK routines:

.. autosummary::
   :toctree: generated/

   splrep
   splprep
   splev
   splint
   sproot
   spalde
   splder
   splantider
   insert

Object-oriented FITPACK interface:

.. autosummary::
   :toctree: generated/

   UnivariateSpline
   InterpolatedUnivariateSpline
   LSQUnivariateSpline



2-D Splines
===========

For data on a grid:

.. autosummary::
   :toctree: generated/

   RectBivariateSpline
   RectSphereBivariateSpline

For unstructured data:

.. autosummary::
   :toctree: generated/

   BivariateSpline
   SmoothBivariateSpline
   SmoothSphereBivariateSpline
   LSQBivariateSpline
   LSQSphereBivariateSpline

Low-level interface to FITPACK functions:

.. autosummary::
   :toctree: generated/

   bisplrep
   bisplev

Additional tools
================

.. autosummary::
   :toctree: generated/

   lagrange
   approximate_taylor_polynomial
   pade

.. seealso::

   `scipy.ndimage.map_coordinates`,
   `scipy.ndimage.spline_filter`,
   `scipy.signal.resample`,
   `scipy.signal.bspline`,
   `scipy.signal.gauss_spline`,
   `scipy.signal.qspline1d`,
   `scipy.signal.cspline1d`,
   `scipy.signal.qspline1d_eval`,
   `scipy.signal.cspline1d_eval`,
   `scipy.signal.qspline2d`,
   `scipy.signal.cspline2d`.

``pchip`` is an alias of `PchipInterpolator` for backward compatibility
(should not be used in new code).
"""
from ._interpolate import *
from ._fitpack_py import *

# New interface to fitpack library:
from ._fitpack2 import *

from ._rbf import Rbf

from ._rbfinterp import *

from ._polyint import *

from ._cubic import *

from ._ndgriddata import *

from ._bsplines import *

from ._pade import *

from ._rgi import *

from ._ndbspline import NdBSpline

# Deprecated namespaces, to be removed in v2.0.0
from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

# Backward compatibility
pchip = PchipInterpolator
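To make the catalogue in the module docstring concrete, here is a small, self-contained usage sketch touching a few of the listed entry points (`CubicSpline`, `PchipInterpolator`, the functional FITPACK pair `splrep`/`splev`, and the `pchip` alias). The sample data and printed diagnostics are invented for illustration.

# Illustrative use of a few scipy.interpolate entry points listed above.
import numpy as np
from scipy.interpolate import (CubicSpline, PchipInterpolator, pchip,
                               splrep, splev)

x = np.linspace(0.0, 2.0 * np.pi, 9)
y = np.sin(x)
xs = np.linspace(0.0, 2.0 * np.pi, 201)

cs = CubicSpline(x, y)           # twice continuously differentiable spline
pch = PchipInterpolator(x, y)    # monotonicity-preserving, no overshoot
tck = splrep(x, y, s=0)          # FITPACK B-spline representation

print(np.abs(cs(xs) - np.sin(xs)).max())        # small interpolation errors
print(np.abs(pch(xs) - np.sin(xs)).max())
print(np.abs(splev(xs, tck) - np.sin(xs)).max())

# `pchip` is only the backward-compatibility alias defined at the end of the file.
assert pchip is PchipInterpolator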
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.75 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc ADDED
Binary file (63.9 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc ADDED
Binary file (30.8 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc ADDED
Binary file (82.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc ADDED
Binary file (22.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc ADDED
Binary file (27.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc ADDED
Binary file (71.2 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc ADDED
Binary file (9.67 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc ADDED
Binary file (2.09 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc ADDED
Binary file (32.1 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc ADDED
Binary file (11.1 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc ADDED
Binary file (14.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc ADDED
Binary file (25.3 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc ADDED
Binary file (717 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc ADDED
Binary file (930 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc ADDED
Binary file (883 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc ADDED
Binary file (723 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc ADDED
Binary file (749 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc ADDED
Binary file (636 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_cubic.py ADDED
@@ -0,0 +1,970 @@
1
+ """Interpolation algorithms using piecewise cubic polynomials."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from scipy.linalg import solve, solve_banded
12
+
13
+ from . import PPoly
14
+ from ._polyint import _isscalar
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Literal
18
+
19
+ __all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
20
+ "Akima1DInterpolator", "CubicSpline"]
21
+
22
+
23
+ def prepare_input(x, y, axis, dydx=None):
24
+ """Prepare input for cubic spline interpolators.
25
+
26
+ All data are converted to numpy arrays and checked for correctness.
27
+ Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
28
+ axis. The value of `axis` is converted to lie in
29
+ [0, number of dimensions of `y`).
30
+ """
31
+
32
+ x, y = map(np.asarray, (x, y))
33
+ if np.issubdtype(x.dtype, np.complexfloating):
34
+ raise ValueError("`x` must contain real values.")
35
+ x = x.astype(float)
36
+
37
+ if np.issubdtype(y.dtype, np.complexfloating):
38
+ dtype = complex
39
+ else:
40
+ dtype = float
41
+
42
+ if dydx is not None:
43
+ dydx = np.asarray(dydx)
44
+ if y.shape != dydx.shape:
45
+ raise ValueError("The shapes of `y` and `dydx` must be identical.")
46
+ if np.issubdtype(dydx.dtype, np.complexfloating):
47
+ dtype = complex
48
+ dydx = dydx.astype(dtype, copy=False)
49
+
50
+ y = y.astype(dtype, copy=False)
51
+ axis = axis % y.ndim
52
+ if x.ndim != 1:
53
+ raise ValueError("`x` must be 1-dimensional.")
54
+ if x.shape[0] < 2:
55
+ raise ValueError("`x` must contain at least 2 elements.")
56
+ if x.shape[0] != y.shape[axis]:
57
+ raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
58
+ "match the length of `x`")
59
+
60
+ if not np.all(np.isfinite(x)):
61
+ raise ValueError("`x` must contain only finite values.")
62
+ if not np.all(np.isfinite(y)):
63
+ raise ValueError("`y` must contain only finite values.")
64
+
65
+ if dydx is not None and not np.all(np.isfinite(dydx)):
66
+ raise ValueError("`dydx` must contain only finite values.")
67
+
68
+ dx = np.diff(x)
69
+ if np.any(dx <= 0):
70
+ raise ValueError("`x` must be strictly increasing sequence.")
71
+
72
+ y = np.moveaxis(y, axis, 0)
73
+ if dydx is not None:
74
+ dydx = np.moveaxis(dydx, axis, 0)
75
+
76
+ return x, dx, y, axis, dydx
77
+
78
+
79
+ class CubicHermiteSpline(PPoly):
80
+ """Piecewise-cubic interpolator matching values and first derivatives.
81
+
82
+ The result is represented as a `PPoly` instance.
83
+
84
+ Parameters
85
+ ----------
86
+ x : array_like, shape (n,)
87
+ 1-D array containing values of the independent variable.
88
+ Values must be real, finite and in strictly increasing order.
89
+ y : array_like
90
+ Array containing values of the dependent variable. It can have
91
+ arbitrary number of dimensions, but the length along ``axis``
92
+ (see below) must match the length of ``x``. Values must be finite.
93
+ dydx : array_like
94
+ Array containing derivatives of the dependent variable. It can have
95
+ arbitrary number of dimensions, but the length along ``axis``
96
+ (see below) must match the length of ``x``. Values must be finite.
97
+ axis : int, optional
98
+ Axis along which `y` is assumed to be varying. Meaning that for
99
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
100
+ Default is 0.
101
+ extrapolate : {bool, 'periodic', None}, optional
102
+ If bool, determines whether to extrapolate to out-of-bounds points
103
+ based on first and last intervals, or to return NaNs. If 'periodic',
104
+ periodic extrapolation is used. If None (default), it is set to True.
105
+
106
+ Attributes
107
+ ----------
108
+ x : ndarray, shape (n,)
109
+ Breakpoints. The same ``x`` which was passed to the constructor.
110
+ c : ndarray, shape (4, n-1, ...)
111
+ Coefficients of the polynomials on each segment. The trailing
112
+ dimensions match the dimensions of `y`, excluding ``axis``.
113
+ For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
114
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
115
+ axis : int
116
+ Interpolation axis. The same axis which was passed to the
117
+ constructor.
118
+
119
+ Methods
120
+ -------
121
+ __call__
122
+ derivative
123
+ antiderivative
124
+ integrate
125
+ roots
126
+
127
+ See Also
128
+ --------
129
+ Akima1DInterpolator : Akima 1D interpolator.
130
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
131
+ CubicSpline : Cubic spline data interpolator.
132
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
133
+
134
+ Notes
135
+ -----
136
+ If you want to create a higher-order spline matching higher-order
137
+ derivatives, use `BPoly.from_derivatives`.
138
+
139
+ References
140
+ ----------
141
+ .. [1] `Cubic Hermite spline
142
+ <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
143
+ on Wikipedia.
144
+ """
145
+
146
+ def __init__(self, x, y, dydx, axis=0, extrapolate=None):
147
+ if extrapolate is None:
148
+ extrapolate = True
149
+
150
+ x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
151
+
152
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
153
+ slope = np.diff(y, axis=0) / dxr
154
+ t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
155
+
156
+ c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
157
+ c[0] = t / dxr
158
+ c[1] = (slope - dydx[:-1]) / dxr - t
159
+ c[2] = dydx[:-1]
160
+ c[3] = y[:-1]
161
+
162
+ super().__init__(c, x, extrapolate=extrapolate)
163
+ self.axis = axis
164
+
165
+
166
+ class PchipInterpolator(CubicHermiteSpline):
167
+ r"""PCHIP 1-D monotonic cubic interpolation.
168
+
169
+ ``x`` and ``y`` are arrays of values used to approximate some function f,
170
+ with ``y = f(x)``. The interpolant uses monotonic cubic splines
171
+ to find the value of new points. (PCHIP stands for Piecewise Cubic
172
+ Hermite Interpolating Polynomial).
173
+
174
+ Parameters
175
+ ----------
176
+ x : ndarray, shape (npoints, )
177
+ A 1-D array of monotonically increasing real values. ``x`` cannot
178
+ include duplicate values (otherwise f is overspecified)
179
+ y : ndarray, shape (..., npoints, ...)
180
+ A N-D array of real values. ``y``'s length along the interpolation
181
+ axis must be equal to the length of ``x``. Use the ``axis``
182
+ parameter to select the interpolation axis.
183
+
184
+ .. deprecated:: 1.13.0
185
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
186
+ If you are trying to use the real components of the passed array,
187
+ use ``np.real`` on ``y``.
188
+
189
+ axis : int, optional
190
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
191
+ to ``axis=0``.
192
+ extrapolate : bool, optional
193
+ Whether to extrapolate to out-of-bounds points based on first
194
+ and last intervals, or to return NaNs.
195
+
196
+ Methods
197
+ -------
198
+ __call__
199
+ derivative
200
+ antiderivative
201
+ roots
202
+
203
+ See Also
204
+ --------
205
+ CubicHermiteSpline : Piecewise-cubic interpolator.
206
+ Akima1DInterpolator : Akima 1D interpolator.
207
+ CubicSpline : Cubic spline data interpolator.
208
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
209
+
210
+ Notes
211
+ -----
212
+ The interpolator preserves monotonicity in the interpolation data and does
213
+ not overshoot if the data is not smooth.
214
+
215
+ The first derivatives are guaranteed to be continuous, but the second
216
+ derivatives may jump at :math:`x_k`.
217
+
218
+ Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
219
+ by using PCHIP algorithm [1]_.
220
+
221
+ Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
222
+ are the slopes at internal points :math:`x_k`.
223
+ If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
224
+ them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
225
+ weighted harmonic mean
226
+
227
+ .. math::
228
+
229
+ \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
230
+
231
+ where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
232
+
233
+ The end slopes are set using a one-sided scheme [2]_.
234
+
235
+
236
+ References
237
+ ----------
238
+ .. [1] F. N. Fritsch and J. Butland,
239
+ A method for constructing local
240
+ monotone piecewise cubic interpolants,
241
+ SIAM J. Sci. Comput., 5(2), 300-304 (1984).
242
+ :doi:`10.1137/0905021`.
243
+ .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
244
+ :doi:`10.1137/1.9780898717952`
245
+
246
+ """
247
+
248
+ def __init__(self, x, y, axis=0, extrapolate=None):
249
+ x, _, y, axis, _ = prepare_input(x, y, axis)
250
+ if np.iscomplexobj(y):
251
+ msg = ("`PchipInterpolator` only works with real values for `y`. "
252
+ "Passing an array with a complex dtype for `y` is deprecated "
253
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
254
+ "use the real components of the passed array, use `np.real` on "
255
+ "the array before passing to `PchipInterpolator`.")
256
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
257
+ xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
258
+ dk = self._find_derivatives(xp, y)
259
+ super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
260
+ self.axis = axis
261
+
262
+ @staticmethod
263
+ def _edge_case(h0, h1, m0, m1):
264
+ # one-sided three-point estimate for the derivative
265
+ d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
266
+
267
+ # try to preserve shape
268
+ mask = np.sign(d) != np.sign(m0)
269
+ mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
270
+ mmm = (~mask) & mask2
271
+
272
+ d[mask] = 0.
273
+ d[mmm] = 3.*m0[mmm]
274
+
275
+ return d
276
+
277
+ @staticmethod
278
+ def _find_derivatives(x, y):
279
+ # Determine the derivatives at the points y_k, d_k, by using
280
+ # PCHIP algorithm is:
281
+ # We choose the derivatives at the point x_k by
282
+ # Let m_k be the slope of the kth segment (between k and k+1)
283
+ # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
284
+ # else use weighted harmonic mean:
285
+ # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
286
+ # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
287
+ # where h_k is the spacing between x_k and x_{k+1}
288
+ y_shape = y.shape
289
+ if y.ndim == 1:
290
+ # So that _edge_case doesn't end up assigning to scalars
291
+ x = x[:, None]
292
+ y = y[:, None]
293
+
294
+ hk = x[1:] - x[:-1]
295
+ mk = (y[1:] - y[:-1]) / hk
296
+
297
+ if y.shape[0] == 2:
298
+ # edge case: only have two points, use linear interpolation
299
+ dk = np.zeros_like(y)
300
+ dk[0] = mk
301
+ dk[1] = mk
302
+ return dk.reshape(y_shape)
303
+
304
+ smk = np.sign(mk)
305
+ condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
306
+
307
+ w1 = 2*hk[1:] + hk[:-1]
308
+ w2 = hk[1:] + 2*hk[:-1]
309
+
310
+ # values where division by zero occurs will be excluded
311
+ # by 'condition' afterwards
312
+ with np.errstate(divide='ignore', invalid='ignore'):
313
+ whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
314
+
315
+ dk = np.zeros_like(y)
316
+ dk[1:-1][condition] = 0.0
317
+ dk[1:-1][~condition] = 1.0 / whmean[~condition]
318
+
319
+ # special case endpoints, as suggested in
320
+ # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
321
+ dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
322
+ dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
323
+
324
+ return dk.reshape(y_shape)
325
+
326
+
327
+ def pchip_interpolate(xi, yi, x, der=0, axis=0):
328
+ """
329
+ Convenience function for pchip interpolation.
330
+
331
+ xi and yi are arrays of values used to approximate some function f,
332
+ with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
333
+ to find the value of new points x and the derivatives there.
334
+
335
+ See `scipy.interpolate.PchipInterpolator` for details.
336
+
337
+ Parameters
338
+ ----------
339
+ xi : array_like
340
+ A sorted list of x-coordinates, of length N.
341
+ yi : array_like
342
+ A 1-D array of real values. `yi`'s length along the interpolation
343
+ axis must be equal to the length of `xi`. If N-D array, use axis
344
+ parameter to select correct axis.
345
+
346
+ .. deprecated:: 1.13.0
347
+ Complex data is deprecated and will raise an error in
348
+ SciPy 1.15.0. If you are trying to use the real components of
349
+ the passed array, use ``np.real`` on `yi`.
350
+
351
+ x : scalar or array_like
352
+ Of length M.
353
+ der : int or list, optional
354
+ Derivatives to extract. The 0th derivative can be included to
355
+ return the function value.
356
+ axis : int, optional
357
+ Axis in the yi array corresponding to the x-coordinate values.
358
+
359
+ Returns
360
+ -------
361
+ y : scalar or array_like
362
+ The result, of length R or length M or M by R.
363
+
364
+ See Also
365
+ --------
366
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
367
+
368
+ Examples
369
+ --------
370
+ We can interpolate 2D observed data using pchip interpolation:
371
+
372
+ >>> import numpy as np
373
+ >>> import matplotlib.pyplot as plt
374
+ >>> from scipy.interpolate import pchip_interpolate
375
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
376
+ >>> y_observed = np.sin(x_observed)
377
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
378
+ >>> y = pchip_interpolate(x_observed, y_observed, x)
379
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
380
+ >>> plt.plot(x, y, label="pchip interpolation")
381
+ >>> plt.legend()
382
+ >>> plt.show()
383
+
384
+ """
385
+ P = PchipInterpolator(xi, yi, axis=axis)
386
+
387
+ if der == 0:
388
+ return P(x)
389
+ elif _isscalar(der):
390
+ return P.derivative(der)(x)
391
+ else:
392
+ return [P.derivative(nu)(x) for nu in der]
393
+
394
+
395
+ class Akima1DInterpolator(CubicHermiteSpline):
396
+ r"""
397
+ Akima interpolator
398
+
399
+ Fit piecewise cubic polynomials, given vectors x and y. The interpolation
400
+ method by Akima uses a continuously differentiable sub-spline built from
401
+ piecewise cubic polynomials. The resultant curve passes through the given
402
+ data points and will appear smooth and natural.
403
+
404
+ Parameters
405
+ ----------
406
+ x : ndarray, shape (npoints, )
407
+ 1-D array of monotonically increasing real values.
408
+ y : ndarray, shape (..., npoints, ...)
409
+ N-D array of real values. The length of ``y`` along the interpolation axis
410
+ must be equal to the length of ``x``. Use the ``axis`` parameter to
411
+ select the interpolation axis.
412
+
413
+ .. deprecated:: 1.13.0
414
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
415
+ If you are trying to use the real components of the passed array,
416
+ use ``np.real`` on ``y``.
417
+
418
+ axis : int, optional
419
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
420
+ to ``axis=0``.
421
+ method : {'akima', 'makima'}, optional
422
+ If ``"makima"``, use the modified Akima interpolation [2]_.
423
+ Defaults to ``"akima"``, use the Akima interpolation [1]_.
424
+
425
+ .. versionadded:: 1.13.0
426
+
427
+ Methods
428
+ -------
429
+ __call__
430
+ derivative
431
+ antiderivative
432
+ roots
433
+
434
+ See Also
435
+ --------
436
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
437
+ CubicSpline : Cubic spline data interpolator.
438
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
439
+
440
+ Notes
441
+ -----
442
+ .. versionadded:: 0.14
443
+
444
+ Use only for precise data, as the fitted curve passes through the given
445
+ points exactly. This routine is useful for plotting a pleasingly smooth
446
+ curve through a few given points for purposes of plotting.
447
+
448
+ Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
449
+ the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
450
+ :math:`x_i` is defined as:
451
+
452
+ .. math::
453
+
454
+ d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
455
+
456
+ In the Akima interpolation [1]_ (``method="akima"``), the weights are:
457
+
458
+ .. math::
459
+
460
+ \begin{aligned}
461
+ w_1 &= |\delta_{i+1} - \delta_i| \\
462
+ w_2 &= |\delta_{i-1} - \delta_{i-2}|
463
+ \end{aligned}
464
+
465
+ In the modified Akima interpolation [2]_ (``method="makima"``),
466
+ to eliminate overshoot and avoid edge cases of both numerator and
467
+ denominator being equal to 0, the weights are modified as follows:
468
+
469
+ .. math::
470
+
471
+ \begin{align*}
472
+ w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
473
+ w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
474
+ \end{align*}
475
+
476
+ Examples
477
+ --------
478
+ Comparison of ``method="akima"`` and ``method="makima"``:
479
+
480
+ >>> import numpy as np
481
+ >>> from scipy.interpolate import Akima1DInterpolator
482
+ >>> import matplotlib.pyplot as plt
483
+ >>> x = np.linspace(1, 7, 7)
484
+ >>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
485
+ >>> xs = np.linspace(min(x), max(x), num=100)
486
+ >>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
487
+ >>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
488
+
489
+ >>> fig, ax = plt.subplots()
490
+ >>> ax.plot(x, y, "o", label="data")
491
+ >>> ax.plot(xs, y_akima, label="akima")
492
+ >>> ax.plot(xs, y_makima, label="makima")
493
+ >>> ax.legend()
494
+ >>> fig.show()
495
+
496
+ The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
497
+
498
+ References
499
+ ----------
500
+ .. [1] A new method of interpolation and smooth curve fitting based
501
+ on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
502
+ 589-602. :doi:`10.1145/321607.321609`
503
+ .. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
504
+ https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
505
+
506
+ """
507
+
508
+ def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima"):
509
+ if method not in {"akima", "makima"}:
510
+ raise NotImplementedError(f"`method`={method} is unsupported.")
511
+ # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
512
+ # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
513
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
514
+
515
+ if np.iscomplexobj(y):
516
+ msg = ("`Akima1DInterpolator` only works with real values for `y`. "
517
+ "Passing an array with a complex dtype for `y` is deprecated "
518
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
519
+ "use the real components of the passed array, use `np.real` on "
520
+ "the array before passing to `Akima1DInterpolator`.")
521
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
522
+
523
+ # determine slopes between breakpoints
524
+ m = np.empty((x.size + 3, ) + y.shape[1:])
525
+ dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
526
+ m[2:-2] = np.diff(y, axis=0) / dx
527
+
528
+ # add two additional points on the left ...
529
+ m[1] = 2. * m[2] - m[3]
530
+ m[0] = 2. * m[1] - m[2]
531
+ # ... and on the right
532
+ m[-2] = 2. * m[-3] - m[-4]
533
+ m[-1] = 2. * m[-2] - m[-3]
534
+
535
+ # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
536
+ # defined. This is the fill value:
537
+ t = .5 * (m[3:] + m[:-3])
538
+ # get the denominator of the slope t
539
+ dm = np.abs(np.diff(m, axis=0))
540
+ if method == "makima":
541
+ pm = np.abs(m[1:] + m[:-1])
542
+ f1 = dm[2:] + 0.5 * pm[2:]
543
+ f2 = dm[:-2] + 0.5 * pm[:-2]
544
+ else:
545
+ f1 = dm[2:]
546
+ f2 = dm[:-2]
547
+ f12 = f1 + f2
548
+ # These are the mask of where the slope at breakpoint is defined:
549
+ ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
550
+ x_ind, y_ind = ind[0], ind[1:]
551
+ # Set the slope at breakpoint
552
+ t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
553
+ f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
554
+
555
+ super().__init__(x, y, t, axis=0, extrapolate=False)
556
+ self.axis = axis
557
+
558
+ def extend(self, c, x, right=True):
559
+ raise NotImplementedError("Extending a 1-D Akima interpolator is not "
560
+ "yet implemented")
561
+
562
+ # These are inherited from PPoly, but they do not produce an Akima
563
+ # interpolator. Hence stub them out.
564
+ @classmethod
565
+ def from_spline(cls, tck, extrapolate=None):
566
+ raise NotImplementedError("This method does not make sense for "
567
+ "an Akima interpolator.")
568
+
569
+ @classmethod
570
+ def from_bernstein_basis(cls, bp, extrapolate=None):
571
+ raise NotImplementedError("This method does not make sense for "
572
+ "an Akima interpolator.")
573
+
574
+
575
+ class CubicSpline(CubicHermiteSpline):
576
+ """Cubic spline data interpolator.
577
+
578
+ Interpolate data with a piecewise cubic polynomial which is twice
579
+ continuously differentiable [1]_. The result is represented as a `PPoly`
580
+ instance with breakpoints matching the given data.
581
+
582
+ Parameters
583
+ ----------
584
+ x : array_like, shape (n,)
585
+ 1-D array containing values of the independent variable.
586
+ Values must be real, finite and in strictly increasing order.
587
+ y : array_like
588
+ Array containing values of the dependent variable. It can have
589
+ arbitrary number of dimensions, but the length along ``axis``
590
+ (see below) must match the length of ``x``. Values must be finite.
591
+ axis : int, optional
592
+ Axis along which `y` is assumed to be varying. Meaning that for
593
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
594
+ Default is 0.
595
+ bc_type : string or 2-tuple, optional
596
+ Boundary condition type. Two additional equations, given by the
597
+ boundary conditions, are required to determine all coefficients of
598
+ polynomials on each segment [2]_.
599
+
600
+ If `bc_type` is a string, then the specified condition will be applied
601
+ at both ends of a spline. Available conditions are:
602
+
603
+ * 'not-a-knot' (default): The first and second segment at a curve end
604
+ are the same polynomial. It is a good default when there is no
605
+ information on boundary conditions.
606
+ * 'periodic': The interpolated function is assumed to be periodic
607
+ of period ``x[-1] - x[0]``. The first and last value of `y` must be
608
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
609
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
610
+ * 'clamped': The first derivative at curve ends is zero. Assuming
611
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
612
+ * 'natural': The second derivative at curve ends is zero. Assuming
613
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
614
+
615
+ If `bc_type` is a 2-tuple, the first and the second value will be
616
+ applied at the curve start and end respectively. The tuple values can
617
+ be one of the previously mentioned strings (except 'periodic') or a
618
+ tuple `(order, deriv_values)` allowing to specify arbitrary
619
+ derivatives at curve ends:
620
+
621
+ * `order`: the derivative order, 1 or 2.
622
+ * `deriv_value`: array_like containing derivative values, shape must
623
+ be the same as `y`, excluding ``axis`` dimension. For example, if
624
+ `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
625
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
626
+ and have the shape (n0, n1).
627
+ extrapolate : {bool, 'periodic', None}, optional
628
+ If bool, determines whether to extrapolate to out-of-bounds points
629
+ based on first and last intervals, or to return NaNs. If 'periodic',
630
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
631
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
632
+
633
+ Attributes
634
+ ----------
635
+ x : ndarray, shape (n,)
636
+ Breakpoints. The same ``x`` which was passed to the constructor.
637
+ c : ndarray, shape (4, n-1, ...)
638
+ Coefficients of the polynomials on each segment. The trailing
639
+ dimensions match the dimensions of `y`, excluding ``axis``.
640
+ For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
641
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
642
+ axis : int
643
+ Interpolation axis. The same axis which was passed to the
644
+ constructor.
645
+
646
+ Methods
647
+ -------
648
+ __call__
649
+ derivative
650
+ antiderivative
651
+ integrate
652
+ roots
653
+
654
+ See Also
655
+ --------
656
+ Akima1DInterpolator : Akima 1D interpolator.
657
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
658
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
659
+
660
+ Notes
661
+ -----
662
+ Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
663
+ former controls only construction of a spline, and the latter only
664
+ evaluation.
665
+
666
+ When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
667
+ a condition that the first derivative is equal to the linear interpolant
668
+ slope. When both boundary conditions are 'not-a-knot' and n = 3, the
669
+ solution is sought as a parabola passing through given points.
670
+
671
+ When 'not-a-knot' boundary conditions are applied to both ends, the
672
+ resulting spline will be the same as returned by `splrep` (with ``s=0``)
673
+ and `InterpolatedUnivariateSpline`, but these two methods use a
674
+ representation in B-spline basis.
675
+
676
+ .. versionadded:: 0.18.0
677
+
678
+ Examples
679
+ --------
680
+ In this example the cubic spline is used to interpolate a sampled sinusoid.
681
+ You can see that the spline continuity property holds for the first and
682
+ second derivatives and is violated only for the third derivative.
683
+
684
+ >>> import numpy as np
685
+ >>> from scipy.interpolate import CubicSpline
686
+ >>> import matplotlib.pyplot as plt
687
+ >>> x = np.arange(10)
688
+ >>> y = np.sin(x)
689
+ >>> cs = CubicSpline(x, y)
690
+ >>> xs = np.arange(-0.5, 9.6, 0.1)
691
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
692
+ >>> ax.plot(x, y, 'o', label='data')
693
+ >>> ax.plot(xs, np.sin(xs), label='true')
694
+ >>> ax.plot(xs, cs(xs), label="S")
695
+ >>> ax.plot(xs, cs(xs, 1), label="S'")
696
+ >>> ax.plot(xs, cs(xs, 2), label="S''")
697
+ >>> ax.plot(xs, cs(xs, 3), label="S'''")
698
+ >>> ax.set_xlim(-0.5, 9.5)
699
+ >>> ax.legend(loc='lower left', ncol=2)
700
+ >>> plt.show()
701
+
702
+ In the second example, the unit circle is interpolated with a spline. A
703
+ periodic boundary condition is used. You can see that the first derivative
704
+ values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
705
+ computed. Note that a circle cannot be exactly represented by a cubic
706
+ spline. To increase precision, more breakpoints would be required.
707
+
708
+ >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
709
+ >>> y = np.c_[np.cos(theta), np.sin(theta)]
710
+ >>> cs = CubicSpline(theta, y, bc_type='periodic')
711
+ >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
712
+ ds/dx=0.0 ds/dy=1.0
713
+ >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
714
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
715
+ >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
716
+ >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
717
+ >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
718
+ >>> ax.axes.set_aspect('equal')
719
+ >>> ax.legend(loc='center')
720
+ >>> plt.show()
721
+
722
+ The third example is the interpolation of a polynomial y = x**3 on the
723
+ interval 0 <= x<= 1. A cubic spline can represent this function exactly.
724
+ To achieve that we need to specify values and first derivatives at
725
+ endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
726
+ y'(1) = 3.
727
+
728
+ >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
729
+ >>> x = np.linspace(0, 1)
730
+ >>> np.allclose(x**3, cs(x))
731
+ True
732
+
733
+ References
734
+ ----------
735
+ .. [1] `Cubic Spline Interpolation
736
+ <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
737
+ on Wikiversity.
738
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
739
+ """
740
+
741
+ def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
742
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
743
+ n = len(x)
744
+
745
+ bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
746
+
747
+ if extrapolate is None:
748
+ if bc[0] == 'periodic':
749
+ extrapolate = 'periodic'
750
+ else:
751
+ extrapolate = True
752
+
753
+ if y.size == 0:
754
+ # bail out early for zero-sized arrays
755
+ s = np.zeros_like(y)
756
+ else:
757
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
758
+ slope = np.diff(y, axis=0) / dxr
759
+
760
+ # If bc is 'not-a-knot' this change is just a convention.
761
+ # If bc is 'periodic' then we already checked that y[0] == y[-1],
762
+ # and the spline is just a constant, we handle this case in the
763
+ # same way by setting the first derivatives to slope, which is 0.
764
+ if n == 2:
765
+ if bc[0] in ['not-a-knot', 'periodic']:
766
+ bc[0] = (1, slope[0])
767
+ if bc[1] in ['not-a-knot', 'periodic']:
768
+ bc[1] = (1, slope[0])
769
+
770
+ # This is a special case, when both conditions are 'not-a-knot'
771
+ # and n == 3. In this case 'not-a-knot' can't be handled regularly
772
+ # as both conditions are identical. We handle this case by
773
+ # constructing a parabola passing through given points.
774
+ if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
775
+ A = np.zeros((3, 3)) # This is a standard matrix.
776
+ b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
777
+
778
+ A[0, 0] = 1
779
+ A[0, 1] = 1
780
+ A[1, 0] = dx[1]
781
+ A[1, 1] = 2 * (dx[0] + dx[1])
782
+ A[1, 2] = dx[0]
783
+ A[2, 1] = 1
784
+ A[2, 2] = 1
785
+
786
+ b[0] = 2 * slope[0]
787
+ b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
788
+ b[2] = 2 * slope[1]
789
+
790
+ s = solve(A, b, overwrite_a=True, overwrite_b=True,
791
+ check_finite=False)
792
+ elif n == 3 and bc[0] == 'periodic':
793
+ # In case when number of points is 3 we compute the derivatives
794
+ # manually
795
+ t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
796
+ s = np.broadcast_to(t, (n,) + y.shape[1:])
797
+ else:
798
+ # Find derivative values at each x[i] by solving a tridiagonal
799
+ # system.
800
+ A = np.zeros((3, n)) # This is a banded matrix representation.
801
+ b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
802
+
803
+ # Filling the system for i=1..n-2
804
+ # (x[i-1] - x[i]) * s[i-1] +\
805
+ # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
806
+ # (x[i] - x[i-1]) * s[i+1] =\
807
+ # 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
808
+ # (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
809
+
810
+ A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
811
+ A[0, 2:] = dx[:-1] # The upper diagonal
812
+ A[-1, :-2] = dx[1:] # The lower diagonal
813
+
814
+ b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
815
+
816
+ bc_start, bc_end = bc
817
+
818
+ if bc_start == 'periodic':
819
+ # Due to the periodicity, and because y[-1] = y[0], the
820
+ # linear system has (n-1) unknowns/equations instead of n:
821
+ A = A[:, 0:-1]
822
+ A[1, 0] = 2 * (dx[-1] + dx[0])
823
+ A[0, 1] = dx[-1]
824
+
825
+ b = b[:-1]
826
+
827
+ # Also, due to the periodicity, the system is not tri-diagonal.
828
+ # We need to compute a "condensed" matrix of shape (n-2, n-2).
829
+ # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
830
+ # for more explanations.
831
+ # The condensed matrix is obtained by removing the last column
832
+ # and last row of the (n-1, n-1) system matrix. The removed
833
+ # values are saved in scalar variables with the (n-1, n-1)
834
+ # system matrix indices forming their names:
835
+ a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
836
+ a_m1_m2 = dx[-1]
837
+ a_m1_m1 = 2 * (dx[-1] + dx[-2])
838
+ a_m2_m1 = dx[-3]
839
+ a_0_m1 = dx[0]
840
+
841
+ b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
842
+ b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
843
+
844
+ Ac = A[:, :-1]
845
+ b1 = b[:-1]
846
+ b2 = np.zeros_like(b1)
847
+ b2[0] = -a_0_m1
848
+ b2[-1] = -a_m2_m1
849
+
850
+ # s1 and s2 are the solutions of (n-2, n-2) system
851
+ s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
852
+ overwrite_b=False, check_finite=False)
853
+
854
+ s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
855
+ overwrite_b=False, check_finite=False)
856
+
857
+ # computing the s[n-2] solution:
858
+ s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
859
+ (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
860
+
861
+ # s is the solution of the (n, n) system:
862
+ s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
863
+ s[:-2] = s1 + s_m1 * s2
864
+ s[-2] = s_m1
865
+ s[-1] = s[0]
866
+ else:
867
+ if bc_start == 'not-a-knot':
868
+ A[1, 0] = dx[1]
869
+ A[0, 1] = x[2] - x[0]
870
+ d = x[2] - x[0]
871
+ b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
872
+ dxr[0]**2 * slope[1]) / d
873
+ elif bc_start[0] == 1:
874
+ A[1, 0] = 1
875
+ A[0, 1] = 0
876
+ b[0] = bc_start[1]
877
+ elif bc_start[0] == 2:
878
+ A[1, 0] = 2 * dx[0]
879
+ A[0, 1] = dx[0]
880
+ b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
881
+
882
+ if bc_end == 'not-a-knot':
883
+ A[1, -1] = dx[-2]
884
+ A[-1, -2] = x[-1] - x[-3]
885
+ d = x[-1] - x[-3]
886
+ b[-1] = ((dxr[-1]**2*slope[-2] +
887
+ (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
888
+ elif bc_end[0] == 1:
889
+ A[1, -1] = 1
890
+ A[-1, -2] = 0
891
+ b[-1] = bc_end[1]
892
+ elif bc_end[0] == 2:
893
+ A[1, -1] = 2 * dx[-1]
894
+ A[-1, -2] = dx[-1]
895
+ b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
896
+
897
+ s = solve_banded((1, 1), A, b, overwrite_ab=True,
898
+ overwrite_b=True, check_finite=False)
899
+
900
+ super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
901
+ self.axis = axis
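
The constructor above assembles the first-derivative equations in LAPACK banded storage and hands them to scipy.linalg.solve_banded. The following is a minimal standalone sketch of the same construction, not part of the committed file, using four hypothetical equally spaced points and 'natural' end conditions (second derivative zero at both ends):

import numpy as np
from scipy.linalg import solve_banded

x = np.array([0.0, 1.0, 2.0, 3.0])     # illustrative data, y is 1-D here
y = np.array([0.0, 1.0, 0.0, 1.0])
dx = np.diff(x)
slope = np.diff(y) / dx
n = len(x)

A = np.zeros((3, n))                   # banded storage: upper, main, lower diagonals
b = np.empty(n)

A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])    # main diagonal, interior rows
A[0, 2:] = dx[:-1]                     # upper diagonal
A[-1, :-2] = dx[1:]                    # lower diagonal
b[1:-1] = 3 * (dx[1:] * slope[:-1] + dx[:-1] * slope[1:])

# 'natural' ends, i.e. boundary condition (2, 0) at both ends
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = 3 * (y[1] - y[0])
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 3 * (y[-1] - y[-2])

s = solve_banded((1, 1), A, b)         # first-derivative values at the four knots
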
902
+
903
+ @staticmethod
904
+ def _validate_bc(bc_type, y, expected_deriv_shape, axis):
905
+ """Validate and prepare boundary conditions.
906
+
907
+ Returns
908
+ -------
909
+ validated_bc : 2-tuple
910
+ Boundary conditions for a curve start and end.
911
+ y : ndarray
912
+ y cast to complex dtype if one of the boundary conditions has
913
+ complex dtype.
914
+ """
915
+ if isinstance(bc_type, str):
916
+ if bc_type == 'periodic':
917
+ if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
918
+ raise ValueError(
919
+ f"The first and last `y` point along axis {axis} must "
920
+ "be identical (within machine precision) when "
921
+ "bc_type='periodic'.")
922
+
923
+ bc_type = (bc_type, bc_type)
924
+
925
+ else:
926
+ if len(bc_type) != 2:
927
+ raise ValueError("`bc_type` must contain 2 elements to "
928
+ "specify start and end conditions.")
929
+
930
+ if 'periodic' in bc_type:
931
+ raise ValueError("'periodic' `bc_type` is defined for both "
932
+ "curve ends and cannot be used with other "
933
+ "boundary conditions.")
934
+
935
+ validated_bc = []
936
+ for bc in bc_type:
937
+ if isinstance(bc, str):
938
+ if bc == 'clamped':
939
+ validated_bc.append((1, np.zeros(expected_deriv_shape)))
940
+ elif bc == 'natural':
941
+ validated_bc.append((2, np.zeros(expected_deriv_shape)))
942
+ elif bc in ['not-a-knot', 'periodic']:
943
+ validated_bc.append(bc)
944
+ else:
945
+ raise ValueError(f"bc_type={bc} is not allowed.")
946
+ else:
947
+ try:
948
+ deriv_order, deriv_value = bc
949
+ except Exception as e:
950
+ raise ValueError(
951
+ "A specified derivative value must be "
952
+ "given in the form (order, value)."
953
+ ) from e
954
+
955
+ if deriv_order not in [1, 2]:
956
+ raise ValueError("The specified derivative order must "
957
+ "be 1 or 2.")
958
+
959
+ deriv_value = np.asarray(deriv_value)
960
+ if deriv_value.shape != expected_deriv_shape:
961
+ raise ValueError(
962
+ "`deriv_value` shape {} is not the expected one {}."
963
+ .format(deriv_value.shape, expected_deriv_shape))
964
+
965
+ if np.issubdtype(deriv_value.dtype, np.complexfloating):
966
+ y = y.astype(complex, copy=False)
967
+
968
+ validated_bc.append((deriv_order, deriv_value))
969
+
970
+ return validated_bc, y
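
For reference, a short usage sketch of the boundary conditions accepted by `_validate_bc` as they are specified through the public `scipy.interpolate.CubicSpline` constructor (illustrative data only; not part of the committed file):

import numpy as np
from scipy.interpolate import CubicSpline

x = np.linspace(0, 2 * np.pi, 8)
y = np.sin(x)                                         # y[0] == y[-1] within machine precision

cs_default = CubicSpline(x, y)                        # 'not-a-knot' at both ends
cs_clamped = CubicSpline(x, y, bc_type='clamped')     # y'(x[0]) = y'(x[-1]) = 0
cs_natural = CubicSpline(x, y, bc_type='natural')     # y''(x[0]) = y''(x[-1]) = 0
cs_periodic = CubicSpline(x, y, bc_type='periodic')   # requires y[0] == y[-1]
cs_mixed = CubicSpline(x, y, bc_type=((1, 1.0), (2, 0.0)))  # y'(a) = 1, y''(b) = 0
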
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (91.4 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py ADDED
@@ -0,0 +1,805 @@
 
1
+ """
2
+ fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
3
+ FITPACK is a collection of FORTRAN programs for curve and surface
4
+ fitting with splines and tensor product splines.
5
+
6
+ See
7
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
8
+ or
9
+ http://www.netlib.org/dierckx/
10
+
11
+ Copyright 2002 Pearu Peterson all rights reserved,
12
+ Pearu Peterson <[email protected]>
13
+ Permission to use, modify, and distribute this software is given under the
14
+ terms of the SciPy (BSD style) license. See LICENSE.txt that came with
15
+ this distribution for specifics.
16
+
17
+ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
18
+
19
+ TODO: Make interfaces to the following fitpack functions:
20
+ For univariate splines: cocosp, concon, fourco, insert
21
+ For bivariate splines: profil, regrid, parsur, surev
22
+ """
23
+
24
+ __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
25
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
26
+
27
+ import warnings
28
+ import numpy as np
29
+ from . import _fitpack
30
+ from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
31
+ empty, iinfo, asarray)
32
+
33
+ # Try to replace _fitpack interface with
34
+ # f2py-generated version
35
+ from . import dfitpack
36
+
37
+
38
+ dfitpack_int = dfitpack.types.intvar.dtype
39
+
40
+
41
+ def _int_overflow(x, exception, msg=None):
42
+ """Cast the value to an dfitpack_int and raise an OverflowError if the value
43
+ cannot fit.
44
+ """
45
+ if x > iinfo(dfitpack_int).max:
46
+ if msg is None:
47
+ msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
48
+ raise exception(msg)
49
+ return dfitpack_int.type(x)
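
A small illustrative sketch of how this helper guards FITPACK workspace sizes (hypothetical values; not part of the committed file):

lwrk = _int_overflow(10_000, OverflowError)             # fits: returned as a dfitpack_int
# _int_overflow(2**63, OverflowError, msg="too big")    # would raise OverflowError
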
50
+
51
+
52
+ _iermess = {
53
+ 0: ["The spline has a residual sum of squares fp such that "
54
+ "abs(fp-s)/s<=0.001", None],
55
+ -1: ["The spline is an interpolating spline (fp=0)", None],
56
+ -2: ["The spline is weighted least-squares polynomial of degree k.\n"
57
+ "fp gives the upper bound fp0 for the smoothing factor s", None],
58
+ 1: ["The required storage space exceeds the available storage space.\n"
59
+ "Probable causes: data (x,y) size is too small or smoothing parameter"
60
+ "\ns is too small (fp>s).", ValueError],
61
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
62
+ "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
63
+ ValueError],
64
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
65
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
66
+ "(abs(fp-s)/s>0.001)", ValueError],
67
+ 10: ["Error on input data", ValueError],
68
+ 'unknown': ["An error occurred", TypeError]
69
+ }
70
+
71
+ _iermess2 = {
72
+ 0: ["The spline has a residual sum of squares fp such that "
73
+ "abs(fp-s)/s<=0.001", None],
74
+ -1: ["The spline is an interpolating spline (fp=0)", None],
75
+ -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
76
+ "\nfp gives the upper bound fp0 for the smoothing factor s", None],
77
+ -3: ["Warning. The coefficients of the spline have been computed as the\n"
78
+ "minimal norm least-squares solution of a rank deficient system.",
79
+ None],
80
+ 1: ["The required storage space exceeds the available storage space.\n"
81
+ "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
82
+ ValueError],
83
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
84
+ "with fp = s. Probable causes: s too small or badly chosen eps.\n"
85
+ "(abs(fp-s)/s>0.001)", ValueError],
86
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
87
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
88
+ "(abs(fp-s)/s>0.001)", ValueError],
89
+ 4: ["No more knots can be added because the number of B-spline\n"
90
+ "coefficients already exceeds the number of data points m.\n"
91
+ "Probable causes: either s or m too small. (fp>s)", ValueError],
92
+ 5: ["No more knots can be added because the additional knot would\n"
93
+ "coincide with an old one. Probable cause: s too small or too large\n"
94
+ "a weight to an inaccurate data point. (fp>s)", ValueError],
95
+ 10: ["Error on input data", ValueError],
96
+ 11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
97
+ "the minimal least-squares solution of a rank deficient system of\n"
98
+ "linear equations.", ValueError],
99
+ 'unknown': ["An error occurred", TypeError]
100
+ }
101
+
102
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
103
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
104
+ 'ub': 0, 'ue': 1}
105
+
106
+
107
+ def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
108
+ full_output=0, nest=None, per=0, quiet=1):
109
+ # see the docstring of `_fitpack_py/splprep`
110
+ if task <= 0:
111
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
112
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
113
+ 'ub': 0, 'ue': 1}
114
+ x = atleast_1d(x)
115
+ idim, m = x.shape
116
+ if per:
117
+ for i in range(idim):
118
+ if x[i][0] != x[i][-1]:
119
+ if not quiet:
120
+ warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
121
+ (i, m, i)),
122
+ stacklevel=2)
123
+ x[i][-1] = x[i][0]
124
+ if not 0 < idim < 11:
125
+ raise TypeError('0 < idim < 11 must hold')
126
+ if w is None:
127
+ w = ones(m, float)
128
+ else:
129
+ w = atleast_1d(w)
130
+ ipar = (u is not None)
131
+ if ipar:
132
+ _parcur_cache['u'] = u
133
+ if ub is None:
134
+ _parcur_cache['ub'] = u[0]
135
+ else:
136
+ _parcur_cache['ub'] = ub
137
+ if ue is None:
138
+ _parcur_cache['ue'] = u[-1]
139
+ else:
140
+ _parcur_cache['ue'] = ue
141
+ else:
142
+ _parcur_cache['u'] = zeros(m, float)
143
+ if not (1 <= k <= 5):
144
+ raise TypeError('1 <= k= %d <=5 must hold' % k)
145
+ if not (-1 <= task <= 1):
146
+ raise TypeError('task must be -1, 0 or 1')
147
+ if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
148
+ raise TypeError('Mismatch of input dimensions')
149
+ if s is None:
150
+ s = m - sqrt(2*m)
151
+ if t is None and task == -1:
152
+ raise TypeError('Knots must be given for task=-1')
153
+ if t is not None:
154
+ _parcur_cache['t'] = atleast_1d(t)
155
+ n = len(_parcur_cache['t'])
156
+ if task == -1 and n < 2*k + 2:
157
+ raise TypeError('There must be at least 2*k+2 knots for task=-1')
158
+ if m <= k:
159
+ raise TypeError('m > k must hold')
160
+ if nest is None:
161
+ nest = m + 2*k
162
+
163
+ if (task >= 0 and s == 0) or (nest < 0):
164
+ if per:
165
+ nest = m + 2*k
166
+ else:
167
+ nest = m + k + 1
168
+ nest = max(nest, 2*k + 3)
169
+ u = _parcur_cache['u']
170
+ ub = _parcur_cache['ub']
171
+ ue = _parcur_cache['ue']
172
+ t = _parcur_cache['t']
173
+ wrk = _parcur_cache['wrk']
174
+ iwrk = _parcur_cache['iwrk']
175
+ t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
176
+ task, ipar, s, t, nest, wrk, iwrk, per)
177
+ _parcur_cache['u'] = o['u']
178
+ _parcur_cache['ub'] = o['ub']
179
+ _parcur_cache['ue'] = o['ue']
180
+ _parcur_cache['t'] = t
181
+ _parcur_cache['wrk'] = o['wrk']
182
+ _parcur_cache['iwrk'] = o['iwrk']
183
+ ier = o['ier']
184
+ fp = o['fp']
185
+ n = len(t)
186
+ u = o['u']
187
+ c.shape = idim, n - k - 1
188
+ tcku = [t, list(c), k], u
189
+ if ier <= 0 and not quiet:
190
+ warnings.warn(RuntimeWarning(_iermess[ier][0] +
191
+ "\tk=%d n=%d m=%d fp=%f s=%f" %
192
+ (k, len(t), m, fp, s)),
193
+ stacklevel=2)
194
+ if ier > 0 and not full_output:
195
+ if ier in [1, 2, 3]:
196
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
197
+ else:
198
+ try:
199
+ raise _iermess[ier][1](_iermess[ier][0])
200
+ except KeyError as e:
201
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
202
+ if full_output:
203
+ try:
204
+ return tcku, fp, ier, _iermess[ier][0]
205
+ except KeyError:
206
+ return tcku, fp, ier, _iermess['unknown'][0]
207
+ else:
208
+ return tcku
209
+
210
+
211
+ _curfit_cache = {'t': array([], float), 'wrk': array([], float),
212
+ 'iwrk': array([], dfitpack_int)}
213
+
214
+
215
+ def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
216
+ full_output=0, per=0, quiet=1):
217
+ # see the docstring of `_fitpack_py/splrep`
218
+ if task <= 0:
219
+ _curfit_cache = {}
220
+ x, y = map(atleast_1d, [x, y])
221
+ m = len(x)
222
+ if w is None:
223
+ w = ones(m, float)
224
+ if s is None:
225
+ s = 0.0
226
+ else:
227
+ w = atleast_1d(w)
228
+ if s is None:
229
+ s = m - sqrt(2*m)
230
+ if not len(w) == m:
231
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
232
+ if (m != len(y)) or (m != len(w)):
233
+ raise TypeError('Lengths of the first three arguments (x,y,w) must '
234
+ 'be equal')
235
+ if not (1 <= k <= 5):
236
+ raise TypeError('Given degree of the spline (k=%d) is not supported. '
237
+ '(1<=k<=5)' % k)
238
+ if m <= k:
239
+ raise TypeError('m > k must hold')
240
+ if xb is None:
241
+ xb = x[0]
242
+ if xe is None:
243
+ xe = x[-1]
244
+ if not (-1 <= task <= 1):
245
+ raise TypeError('task must be -1, 0 or 1')
246
+ if t is not None:
247
+ task = -1
248
+ if task == -1:
249
+ if t is None:
250
+ raise TypeError('Knots must be given for task=-1')
251
+ numknots = len(t)
252
+ _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
253
+ _curfit_cache['t'][k+1:-k-1] = t
254
+ nest = len(_curfit_cache['t'])
255
+ elif task == 0:
256
+ if per:
257
+ nest = max(m + 2*k, 2*k + 3)
258
+ else:
259
+ nest = max(m + k + 1, 2*k + 3)
260
+ t = empty((nest,), float)
261
+ _curfit_cache['t'] = t
262
+ if task <= 0:
263
+ if per:
264
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
265
+ else:
266
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
267
+ _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
268
+ try:
269
+ t = _curfit_cache['t']
270
+ wrk = _curfit_cache['wrk']
271
+ iwrk = _curfit_cache['iwrk']
272
+ except KeyError as e:
273
+ raise TypeError("must call with task=1 only after"
274
+ " call with task=0,-1") from e
275
+ if not per:
276
+ n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
277
+ xb, xe, k, s)
278
+ else:
279
+ n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
280
+ tck = (t[:n], c[:n], k)
281
+ if ier <= 0 and not quiet:
282
+ _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
283
+ (k, len(t), m, fp, s))
284
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
285
+ if ier > 0 and not full_output:
286
+ if ier in [1, 2, 3]:
287
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
288
+ else:
289
+ try:
290
+ raise _iermess[ier][1](_iermess[ier][0])
291
+ except KeyError as e:
292
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
293
+ if full_output:
294
+ try:
295
+ return tck, fp, ier, _iermess[ier][0]
296
+ except KeyError:
297
+ return tck, fp, ier, _iermess['unknown'][0]
298
+ else:
299
+ return tck
300
+
301
+
302
+ def splev(x, tck, der=0, ext=0):
303
+ # see the docstring of `_fitpack_py/splev`
304
+ t, c, k = tck
305
+ try:
306
+ c[0][0]
307
+ parametric = True
308
+ except Exception:
309
+ parametric = False
310
+ if parametric:
311
+ return list(map(lambda c, x=x, t=t, k=k, der=der:
312
+ splev(x, [t, c, k], der, ext), c))
313
+ else:
314
+ if not (0 <= der <= k):
315
+ raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
316
+ if ext not in (0, 1, 2, 3):
317
+ raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
318
+
319
+ x = asarray(x)
320
+ shape = x.shape
321
+ x = atleast_1d(x).ravel()
322
+ if der == 0:
323
+ y, ier = dfitpack.splev(t, c, k, x, ext)
324
+ else:
325
+ y, ier = dfitpack.splder(t, c, k, x, der, ext)
326
+
327
+ if ier == 10:
328
+ raise ValueError("Invalid input data")
329
+ if ier == 1:
330
+ raise ValueError("Found x value not in the domain")
331
+ if ier:
332
+ raise TypeError("An error occurred")
333
+
334
+ return y.reshape(shape)
335
+
336
+
337
+ def splint(a, b, tck, full_output=0):
338
+ # see the docstring of `_fitpack_py/splint`
339
+ t, c, k = tck
340
+ try:
341
+ c[0][0]
342
+ parametric = True
343
+ except Exception:
344
+ parametric = False
345
+ if parametric:
346
+ return list(map(lambda c, a=a, b=b, t=t, k=k:
347
+ splint(a, b, [t, c, k]), c))
348
+ else:
349
+ aint, wrk = dfitpack.splint(t, c, k, a, b)
350
+ if full_output:
351
+ return aint, wrk
352
+ else:
353
+ return aint
354
+
355
+
356
+ def sproot(tck, mest=10):
357
+ # see the docstring of `_fitpack_py/sproot`
358
+ t, c, k = tck
359
+ if k != 3:
360
+ raise ValueError("sproot works only for cubic (k=3) splines")
361
+ try:
362
+ c[0][0]
363
+ parametric = True
364
+ except Exception:
365
+ parametric = False
366
+ if parametric:
367
+ return list(map(lambda c, t=t, k=k, mest=mest:
368
+ sproot([t, c, k], mest), c))
369
+ else:
370
+ if len(t) < 8:
371
+ raise TypeError("The number of knots %d>=8" % len(t))
372
+ z, m, ier = dfitpack.sproot(t, c, mest)
373
+ if ier == 10:
374
+ raise TypeError("Invalid input data. "
375
+ "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
376
+ if ier == 0:
377
+ return z[:m]
378
+ if ier == 1:
379
+ warnings.warn(RuntimeWarning("The number of zeros exceeds mest"),
380
+ stacklevel=2)
381
+ return z[:m]
382
+ raise TypeError("Unknown error")
383
+
384
+
385
+ def spalde(x, tck):
386
+ # see the docstring of `_fitpack_py/spalde`
387
+ t, c, k = tck
388
+ try:
389
+ c[0][0]
390
+ parametric = True
391
+ except Exception:
392
+ parametric = False
393
+ if parametric:
394
+ return list(map(lambda c, x=x, t=t, k=k:
395
+ spalde(x, [t, c, k]), c))
396
+ else:
397
+ x = atleast_1d(x)
398
+ if len(x) > 1:
399
+ return list(map(lambda x, tck=tck: spalde(x, tck), x))
400
+ d, ier = dfitpack.spalde(t, c, k+1, x[0])
401
+ if ier == 0:
402
+ return d
403
+ if ier == 10:
404
+ raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
405
+ raise TypeError("Unknown error")
406
+
407
+ # def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
408
+ # full_output=0,nest=None,per=0,quiet=1):
409
+
410
+
411
+ _surfit_cache = {'tx': array([], float), 'ty': array([], float),
412
+ 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
413
+
414
+
415
+ def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
416
+ kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
417
+ full_output=0, nxest=None, nyest=None, quiet=1):
418
+ """
419
+ Find a bivariate B-spline representation of a surface.
420
+
421
+ Given a set of data points (x[i], y[i], z[i]) representing a surface
422
+ z=f(x,y), compute a B-spline representation of the surface. Based on
423
+ the routine SURFIT from FITPACK.
424
+
425
+ Parameters
426
+ ----------
427
+ x, y, z : ndarray
428
+ Rank-1 arrays of data points.
429
+ w : ndarray, optional
430
+ Rank-1 array of weights. By default ``w=np.ones(len(x))``.
431
+ xb, xe : float, optional
432
+ End points of approximation interval in `x`.
433
+ By default ``xb = x.min(), xe=x.max()``.
434
+ yb, ye : float, optional
435
+ End points of approximation interval in `y`.
436
+ By default ``yb=y.min(), ye = y.max()``.
437
+ kx, ky : int, optional
438
+ The degrees of the spline (1 <= kx, ky <= 5).
439
+ Third order (kx=ky=3) is recommended.
440
+ task : int, optional
441
+ If task=0, find knots in x and y and coefficients for a given
442
+ smoothing factor, s.
443
+ If task=1, find knots and coefficients for another value of the
444
+ smoothing factor, s. bisplrep must have been previously called
445
+ with task=0 or task=1.
446
+ If task=-1, find coefficients for a given set of knots tx, ty.
447
+ s : float, optional
448
+ A non-negative smoothing factor. If weights correspond
449
+ to the inverse of the standard-deviation of the errors in z,
450
+ then a good s-value should be found in the range
451
+ ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
452
+ eps : float, optional
453
+ A threshold for determining the effective rank of an
454
+ over-determined linear system of equations (0 < eps < 1).
455
+ `eps` is not likely to need changing.
456
+ tx, ty : ndarray, optional
457
+ Rank-1 arrays of the knots of the spline for task=-1
458
+ full_output : int, optional
459
+ Non-zero to return optional outputs.
460
+ nxest, nyest : int, optional
461
+ Over-estimates of the total number of knots. If None then
462
+ ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
463
+ ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
464
+ quiet : int, optional
465
+ Non-zero to suppress printing of messages.
466
+
467
+ Returns
468
+ -------
469
+ tck : array_like
470
+ A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
471
+ coefficients (c) of the bivariate B-spline representation of the
472
+ surface along with the degree of the spline.
473
+ fp : ndarray
474
+ The weighted sum of squared residuals of the spline approximation.
475
+ ier : int
476
+ An integer flag about splrep success. Success is indicated if
477
+ ier<=0. If ier in [1,2,3] an error occurred but was not raised.
478
+ Otherwise an error is raised.
479
+ msg : str
480
+ A message corresponding to the integer flag, ier.
481
+
482
+ See Also
483
+ --------
484
+ splprep, splrep, splint, sproot, splev
485
+ UnivariateSpline, BivariateSpline
486
+
487
+ Notes
488
+ -----
489
+ See `bisplev` to evaluate the value of the B-spline given its tck
490
+ representation.
491
+
492
+ If the input data is such that input dimensions have incommensurate
493
+ units and differ by many orders of magnitude, the interpolant may have
494
+ numerical artifacts. Consider rescaling the data before interpolation.
495
+
496
+ References
497
+ ----------
498
+ .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
499
+ Ima J. Numer. Anal. 1 (1981) 267-283.
500
+ .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
501
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
502
+ .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
503
+ Numerical Analysis, Oxford University Press, 1993.
504
+
505
+ Examples
506
+ --------
507
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
508
+
509
+ """
510
+ x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
511
+ m = len(x)
512
+ if not (m == len(y) == len(z)):
513
+ raise TypeError('len(x)==len(y)==len(z) must hold.')
514
+ if w is None:
515
+ w = ones(m, float)
516
+ else:
517
+ w = atleast_1d(w)
518
+ if not len(w) == m:
519
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
520
+ if xb is None:
521
+ xb = x.min()
522
+ if xe is None:
523
+ xe = x.max()
524
+ if yb is None:
525
+ yb = y.min()
526
+ if ye is None:
527
+ ye = y.max()
528
+ if not (-1 <= task <= 1):
529
+ raise TypeError('task must be -1, 0 or 1')
530
+ if s is None:
531
+ s = m - sqrt(2*m)
532
+ if tx is None and task == -1:
533
+ raise TypeError('Knots_x must be given for task=-1')
534
+ if tx is not None:
535
+ _surfit_cache['tx'] = atleast_1d(tx)
536
+ nx = len(_surfit_cache['tx'])
537
+ if ty is None and task == -1:
538
+ raise TypeError('Knots_y must be given for task=-1')
539
+ if ty is not None:
540
+ _surfit_cache['ty'] = atleast_1d(ty)
541
+ ny = len(_surfit_cache['ty'])
542
+ if task == -1 and nx < 2*kx+2:
543
+ raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
544
+ if task == -1 and ny < 2*ky+2:
545
+ raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
546
+ if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
547
+ raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
548
+ 'supported. (1<=k<=5)' % (kx, ky))
549
+ if m < (kx + 1)*(ky + 1):
550
+ raise TypeError('m >= (kx+1)(ky+1) must hold')
551
+ if nxest is None:
552
+ nxest = int(kx + sqrt(m/2))
553
+ if nyest is None:
554
+ nyest = int(ky + sqrt(m/2))
555
+ nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
556
+ if task >= 0 and s == 0:
557
+ nxest = int(kx + sqrt(3*m))
558
+ nyest = int(ky + sqrt(3*m))
559
+ if task == -1:
560
+ _surfit_cache['tx'] = atleast_1d(tx)
561
+ _surfit_cache['ty'] = atleast_1d(ty)
562
+ tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
563
+ wrk = _surfit_cache['wrk']
564
+ u = nxest - kx - 1
565
+ v = nyest - ky - 1
566
+ km = max(kx, ky) + 1
567
+ ne = max(nxest, nyest)
568
+ bx, by = kx*v + ky + 1, ky*u + kx + 1
569
+ b1, b2 = bx, bx + v - ky
570
+ if bx > by:
571
+ b1, b2 = by, by + u - kx
572
+ msg = "Too many data points to interpolate"
573
+ lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
574
+ 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
575
+ OverflowError,
576
+ msg=msg)
577
+ lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
578
+ tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
579
+ task, s, eps, tx, ty, nxest, nyest,
580
+ wrk, lwrk1, lwrk2)
581
+ _curfit_cache['tx'] = tx
582
+ _curfit_cache['ty'] = ty
583
+ _curfit_cache['wrk'] = o['wrk']
584
+ ier, fp = o['ier'], o['fp']
585
+ tck = [tx, ty, c, kx, ky]
586
+
587
+ ierm = min(11, max(-3, ier))
588
+ if ierm <= 0 and not quiet:
589
+ _mess = (_iermess2[ierm][0] +
590
+ "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
591
+ (kx, ky, len(tx), len(ty), m, fp, s))
592
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
593
+ if ierm > 0 and not full_output:
594
+ if ier in [1, 2, 3, 4, 5]:
595
+ _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
596
+ (kx, ky, len(tx), len(ty), m, fp, s))
597
+ warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
598
+ else:
599
+ try:
600
+ raise _iermess2[ierm][1](_iermess2[ierm][0])
601
+ except KeyError as e:
602
+ raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
603
+ if full_output:
604
+ try:
605
+ return tck, fp, ier, _iermess2[ierm][0]
606
+ except KeyError:
607
+ return tck, fp, ier, _iermess2['unknown'][0]
608
+ else:
609
+ return tck
610
+
611
+
612
+ def bisplev(x, y, tck, dx=0, dy=0):
613
+ """
614
+ Evaluate a bivariate B-spline and its derivatives.
615
+
616
+ Return a rank-2 array of spline function values (or spline derivative
617
+ values) at points given by the cross-product of the rank-1 arrays `x` and
618
+ `y`. In special cases, return an array or just a float if either `x` or
619
+ `y` or both are floats. Based on BISPEV and PARDER from FITPACK.
620
+
621
+ Parameters
622
+ ----------
623
+ x, y : ndarray
624
+ Rank-1 arrays specifying the domain over which to evaluate the
625
+ spline or its derivative.
626
+ tck : tuple
627
+ A sequence of length 5 returned by `bisplrep` containing the knot
628
+ locations, the coefficients, and the degree of the spline:
629
+ [tx, ty, c, kx, ky].
630
+ dx, dy : int, optional
631
+ The orders of the partial derivatives in `x` and `y` respectively.
632
+
633
+ Returns
634
+ -------
635
+ vals : ndarray
636
+ The B-spline or its derivative evaluated over the set formed by
637
+ the cross-product of `x` and `y`.
638
+
639
+ See Also
640
+ --------
641
+ splprep, splrep, splint, sproot, splev
642
+ UnivariateSpline, BivariateSpline
643
+
644
+ Notes
645
+ -----
646
+ See `bisplrep` to generate the `tck` representation.
647
+
648
+ References
649
+ ----------
650
+ .. [1] Dierckx P. : An algorithm for surface fitting
651
+ with spline functions
652
+ Ima J. Numer. Anal. 1 (1981) 267-283.
653
+ .. [2] Dierckx P. : An algorithm for surface fitting
654
+ with spline functions
655
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
656
+ .. [3] Dierckx P. : Curve and surface fitting with splines,
657
+ Monographs on Numerical Analysis, Oxford University Press, 1993.
658
+
659
+ Examples
660
+ --------
661
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
662
+
663
+ """
664
+ tx, ty, c, kx, ky = tck
665
+ if not (0 <= dx < kx):
666
+ raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
667
+ if not (0 <= dy < ky):
668
+ raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
669
+ x, y = map(atleast_1d, [x, y])
670
+ if (len(x.shape) != 1) or (len(y.shape) != 1):
671
+ raise ValueError("First two entries should be rank-1 arrays.")
672
+
673
+ msg = "Too many data points to interpolate."
674
+
675
+ _int_overflow(x.size * y.size, MemoryError, msg=msg)
676
+
677
+ if dx != 0 or dy != 0:
678
+ _int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
679
+ MemoryError, msg=msg)
680
+ z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
681
+ else:
682
+ z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
683
+
684
+ if ier == 10:
685
+ raise ValueError("Invalid input data")
686
+ if ier:
687
+ raise TypeError("An error occurred")
688
+ z.shape = len(x), len(y)
689
+ if len(z) > 1:
690
+ return z
691
+ if len(z[0]) > 1:
692
+ return z[0]
693
+ return z[0][0]
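
A brief usage sketch tying `bisplrep` and `bisplev` together (illustrative data and smoothing value; not part of the committed file):

import numpy as np

rng = np.random.default_rng(0)
xs = rng.uniform(-1, 1, 200)
ys = rng.uniform(-1, 1, 200)
zs = np.exp(-(xs**2 + ys**2))

tck = bisplrep(xs, ys, zs, s=0.1)      # smoothing bicubic surface fit
xg = np.linspace(-1, 1, 25)
yg = np.linspace(-1, 1, 25)
zg = bisplev(xg, yg, tck)              # values on the 25 x 25 grid
dzdx = bisplev(xg, yg, tck, dx=1)      # first partial derivative in x
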
694
+
695
+
696
+ def dblint(xa, xb, ya, yb, tck):
697
+ """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
698
+
699
+ Parameters
700
+ ----------
701
+ xa, xb : float
702
+ The end-points of the x integration interval.
703
+ ya, yb : float
704
+ The end-points of the y integration interval.
705
+ tck : list [tx, ty, c, kx, ky]
706
+ A sequence of length 5 returned by bisplrep containing the knot
707
+ locations tx, ty, the coefficients c, and the degrees kx, ky
708
+ of the spline.
709
+
710
+ Returns
711
+ -------
712
+ integ : float
713
+ The value of the resulting integral.
714
+ """
715
+ tx, ty, c, kx, ky = tck
716
+ return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
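
Continuing the sketch above, the fitted surface can be integrated over a rectangle; for exp(-(x**2 + y**2)) on [-1, 1] x [-1, 1] the exact value is (sqrt(pi) * erf(1))**2, roughly 2.23 (illustrative only; not part of the committed file):

approx = dblint(-1.0, 1.0, -1.0, 1.0, tck)   # should be close to 2.23
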
717
+
718
+
719
+ def insert(x, tck, m=1, per=0):
720
+ # see the docstring of `_fitpack_py/insert`
721
+ t, c, k = tck
722
+ try:
723
+ c[0][0]
724
+ parametric = True
725
+ except Exception:
726
+ parametric = False
727
+ if parametric:
728
+ cc = []
729
+ for c_vals in c:
730
+ tt, cc_val, kk = insert(x, [t, c_vals, k], m)
731
+ cc.append(cc_val)
732
+ return (tt, cc, kk)
733
+ else:
734
+ tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
735
+ if ier == 10:
736
+ raise ValueError("Invalid input data")
737
+ if ier:
738
+ raise TypeError("An error occurred")
739
+ return (tt, cc, k)
740
+
741
+
742
+ def splder(tck, n=1):
743
+ # see the docstring of `_fitpack_py/splder`
744
+ if n < 0:
745
+ return splantider(tck, -n)
746
+
747
+ t, c, k = tck
748
+
749
+ if n > k:
750
+ raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
751
+ f"order of spline (k = {tck[2]!r})")
752
+
753
+ # Extra axes for the trailing dims of the `c` array:
754
+ sh = (slice(None),) + ((None,)*len(c.shape[1:]))
755
+
756
+ with np.errstate(invalid='raise', divide='raise'):
757
+ try:
758
+ for j in range(n):
759
+ # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
760
+
761
+ # Compute the denominator in the differentiation formula.
762
+ # (and append trailing dims, if necessary)
763
+ dt = t[k+1:-1] - t[1:-k-1]
764
+ dt = dt[sh]
765
+ # Compute the new coefficients
766
+ c = (c[1:-1-k] - c[:-2-k]) * k / dt
767
+ # Pad coefficient array to same size as knots (FITPACK
768
+ # convention)
769
+ c = np.r_[c, np.zeros((k,) + c.shape[1:])]
770
+ # Adjust knots
771
+ t = t[1:-1]
772
+ k -= 1
773
+ except FloatingPointError as e:
774
+ raise ValueError(("The spline has internal repeated knots "
775
+ "and is not differentiable %d times") % n) from e
776
+
777
+ return t, c, k
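
A small cross-check sketch for `splder` (illustrative data; not part of the committed file): evaluating the derivative spline agrees with `splev(..., der=1)`.

import numpy as np

xk = np.linspace(0, 10, 30)
tck = splrep(xk, np.sin(xk))
dtck = splder(tck)                      # knots/coefficients of the derivative spline
xe = np.linspace(0.5, 9.5, 7)
assert np.allclose(splev(xe, dtck), splev(xe, tck, der=1))
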
778
+
779
+
780
+ def splantider(tck, n=1):
781
+ # see the docstring of `_fitpack_py/splantider`
782
+ if n < 0:
783
+ return splder(tck, -n)
784
+
785
+ t, c, k = tck
786
+
787
+ # Extra axes for the trailing dims of the `c` array:
788
+ sh = (slice(None),) + (None,)*len(c.shape[1:])
789
+
790
+ for j in range(n):
791
+ # This is the inverse set of operations to splder.
792
+
793
+ # Compute the multiplier in the antiderivative formula.
794
+ dt = t[k+1:] - t[:-k-1]
795
+ dt = dt[sh]
796
+ # Compute the new coefficients
797
+ c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
798
+ c = np.r_[np.zeros((1,) + c.shape[1:]),
799
+ c,
800
+ [c[-1]] * (k+2)]
801
+ # New knots
802
+ t = np.r_[t[0], t, t[-1]]
803
+ k += 1
804
+
805
+ return t, c, k
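
And the inverse check for `splantider` (illustrative data; not part of the committed file): differences of the antiderivative reproduce the definite integral computed by `splint`.

import numpy as np

xk = np.linspace(0, 10, 30)
tck = splrep(xk, np.sin(xk))
itck = splantider(tck)
a, b = 1.0, 7.0
assert np.allclose(splev(b, itck) - splev(a, itck), splint(a, b, tck))
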
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py ADDED
@@ -0,0 +1,796 @@
 
 
1
+ __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
2
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
3
+
4
+
5
+ import numpy as np
6
+
7
+ # These are in the API for fitpack even if not used in fitpack.py itself.
8
+ from ._fitpack_impl import bisplrep, bisplev, dblint # noqa: F401
9
+ from . import _fitpack_impl as _impl
10
+ from ._bsplines import BSpline
11
+
12
+
13
+ def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
14
+ full_output=0, nest=None, per=0, quiet=1):
15
+ """
16
+ Find the B-spline representation of an N-D curve.
17
+
18
+ Given a list of N rank-1 arrays, `x`, which represent a curve in
19
+ N-dimensional space parametrized by `u`, find a smooth approximating
20
+ spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
21
+
22
+ Parameters
23
+ ----------
24
+ x : array_like
25
+ A list of sample vector arrays representing the curve.
26
+ w : array_like, optional
27
+ Strictly positive rank-1 array of weights the same length as `x[0]`.
28
+ The weights are used in computing the weighted least-squares spline
29
+ fit. If the errors in the `x` values have standard-deviation given by
30
+ the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
31
+ u : array_like, optional
32
+ An array of parameter values. If not given, these values are
33
+ calculated automatically as ``M = len(x[0])``, where
34
+
35
+ v[0] = 0
36
+
37
+ v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
38
+
39
+ u[i] = v[i] / v[M-1]
40
+
41
+ ub, ue : int, optional
42
+ The end-points of the parameters interval. Defaults to
43
+ u[0] and u[-1].
44
+ k : int, optional
45
+ Degree of the spline. Cubic splines are recommended.
46
+ Even values of `k` should be avoided especially with a small s-value.
47
+ ``1 <= k <= 5``, default is 3.
48
+ task : int, optional
49
+ If task==0 (default), find t and c for a given smoothing factor, s.
50
+ If task==1, find t and c for another value of the smoothing factor, s.
51
+ There must have been a previous call with task=0 or task=1
52
+ for the same set of data.
53
+ If task=-1 find the weighted least square spline for a given set of
54
+ knots, t.
55
+ s : float, optional
56
+ A smoothing condition. The amount of smoothness is determined by
57
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
58
+ where g(x) is the smoothed interpolation of (x,y). The user can
59
+ use `s` to control the trade-off between closeness and smoothness
60
+ of fit. Larger `s` means more smoothing while smaller values of `s`
61
+ indicate less smoothing. Recommended values of `s` depend on the
62
+ weights, w. If the weights represent the inverse of the
63
+ standard-deviation of y, then a good `s` value should be found in
64
+ the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
65
+ data points in x, y, and w.
66
+ t : array, optional
67
+ The knots needed for ``task=-1``.
68
+ There must be at least ``2*k+2`` knots.
69
+ full_output : int, optional
70
+ If non-zero, then return optional outputs.
71
+ nest : int, optional
72
+ An over-estimate of the total number of knots of the spline to
73
+ help in determining the storage space. By default nest=m/2.
74
+ nest=m+k+1 is always large enough.
75
+ per : int, optional
76
+ If non-zero, data points are considered periodic with period
77
+ ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
78
+ returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
79
+ quiet : int, optional
80
+ Non-zero to suppress messages.
81
+
82
+ Returns
83
+ -------
84
+ tck : tuple
85
+ A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
86
+ coefficients, and the degree of the spline.
87
+ u : array
88
+ An array of the values of the parameter.
89
+ fp : float
90
+ The weighted sum of squared residuals of the spline approximation.
91
+ ier : int
92
+ An integer flag about splrep success. Success is indicated
93
+ if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
94
+ Otherwise an error is raised.
95
+ msg : str
96
+ A message corresponding to the integer flag, ier.
97
+
98
+ See Also
99
+ --------
100
+ splrep, splev, sproot, spalde, splint,
101
+ bisplrep, bisplev
102
+ UnivariateSpline, BivariateSpline
103
+ BSpline
104
+ make_interp_spline
105
+
106
+ Notes
107
+ -----
108
+ See `splev` for evaluation of the spline and its derivatives.
109
+ The number of dimensions N must be smaller than 11.
110
+
111
+ The number of coefficients in the `c` array is ``k+1`` less than the number
112
+ of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
113
+ the array of coefficients to have the same length as the array of knots.
114
+ These additional coefficients are ignored by evaluation routines, `splev`
115
+ and `BSpline`.
116
+
117
+ References
118
+ ----------
119
+ .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
120
+ parametric splines, Computer Graphics and Image Processing",
121
+ 20 (1982) 171-184.
122
+ .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
123
+ parametric splines", report tw55, Dept. Computer Science,
124
+ K.U.Leuven, 1981.
125
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
126
+ Numerical Analysis, Oxford University Press, 1993.
127
+
128
+ Examples
129
+ --------
130
+ Generate a discretization of a limacon curve in the polar coordinates:
131
+
132
+ >>> import numpy as np
133
+ >>> phi = np.linspace(0, 2.*np.pi, 40)
134
+ >>> r = 0.5 + np.cos(phi) # polar coords
135
+ >>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
136
+
137
+ And interpolate:
138
+
139
+ >>> from scipy.interpolate import splprep, splev
140
+ >>> tck, u = splprep([x, y], s=0)
141
+ >>> new_points = splev(u, tck)
142
+
143
+ Notice that (i) we force interpolation by using `s=0`,
144
+ (ii) the parameterization, ``u``, is generated automatically.
145
+ Now plot the result:
146
+
147
+ >>> import matplotlib.pyplot as plt
148
+ >>> fig, ax = plt.subplots()
149
+ >>> ax.plot(x, y, 'ro')
150
+ >>> ax.plot(new_points[0], new_points[1], 'r-')
151
+ >>> plt.show()
152
+
153
+ """
154
+
155
+ res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
156
+ quiet)
157
+ return res
158
+
159
+
160
+ def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
161
+ full_output=0, per=0, quiet=1):
162
+ """
163
+ Find the B-spline representation of a 1-D curve.
164
+
165
+ Given the set of data points ``(x[i], y[i])`` determine a smooth spline
166
+ approximation of degree k on the interval ``xb <= x <= xe``.
167
+
168
+ Parameters
169
+ ----------
170
+ x, y : array_like
171
+ The data points defining a curve ``y = f(x)``.
172
+ w : array_like, optional
173
+ Strictly positive rank-1 array of weights the same length as `x` and `y`.
174
+ The weights are used in computing the weighted least-squares spline
175
+ fit. If the errors in the `y` values have standard-deviation given by the
176
+ vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
177
+ xb, xe : float, optional
178
+ The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
179
+ respectively.
180
+ k : int, optional
181
+ The degree of the spline fit. It is recommended to use cubic splines.
182
+ Even values of `k` should be avoided especially with small `s` values.
183
+ ``1 <= k <= 5``.
184
+ task : {1, 0, -1}, optional
185
+ If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
186
+
187
+ If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
188
+ `s`. There must have been a previous call with ``task=0`` or ``task=1`` for
189
+ the same set of data (``t`` will be stored and used internally).
190
+
191
+ If ``task=-1`` find the weighted least square spline for a given set of
192
+ knots, ``t``. These should be interior knots as knots on the ends will be
193
+ added automatically.
194
+ s : float, optional
195
+ A smoothing condition. The amount of smoothness is determined by
196
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
197
+ is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
198
+ the tradeoff between closeness and smoothness of fit. Larger `s` means
199
+ more smoothing while smaller values of `s` indicate less smoothing.
200
+ Recommended values of `s` depend on the weights, `w`. If the weights
201
+ represent the inverse of the standard-deviation of `y`, then a good `s`
202
+ value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
203
+ the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
204
+ weights are supplied. ``s = 0.0`` (interpolating) if no weights are
205
+ supplied.
206
+ t : array_like, optional
207
+ The knots needed for ``task=-1``. If given then task is automatically set
208
+ to ``-1``.
209
+ full_output : bool, optional
210
+ If non-zero, then return optional outputs.
211
+ per : bool, optional
212
+ If non-zero, data points are considered periodic with period ``x[m-1]`` -
213
+ ``x[0]`` and a smooth periodic spline approximation is returned. Values of
214
+ ``y[m-1]`` and ``w[m-1]`` are not used.
215
+ The default is zero, corresponding to boundary condition 'not-a-knot'.
216
+ quiet : bool, optional
217
+ Non-zero to suppress messages.
218
+
219
+ Returns
220
+ -------
221
+ tck : tuple
222
+ A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
223
+ coefficients, and the degree of the spline.
224
+ fp : array, optional
225
+ The weighted sum of squared residuals of the spline approximation.
226
+ ier : int, optional
227
+ An integer flag about splrep success. Success is indicated if ``ier<=0``.
228
+ If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
229
+ error is raised.
230
+ msg : str, optional
231
+ A message corresponding to the integer flag, `ier`.
232
+
233
+ See Also
234
+ --------
235
+ UnivariateSpline, BivariateSpline
236
+ splprep, splev, sproot, spalde, splint
237
+ bisplrep, bisplev
238
+ BSpline
239
+ make_interp_spline
240
+
241
+ Notes
242
+ -----
243
+ See `splev` for evaluation of the spline and its derivatives. Uses the
244
+ FORTRAN routine ``curfit`` from FITPACK.
245
+
246
+ The user is responsible for assuring that the values of `x` are unique.
247
+ Otherwise, `splrep` will not return sensible results.
248
+
249
+ If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
250
+ i.e., there must be a subset of data points ``x[j]`` such that
251
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
252
+
253
+ This routine zero-pads the coefficients array ``c`` to have the same length
254
+ as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
255
+ by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
256
+ `splprep`, which does not zero-pad the coefficients.
257
+
258
+ The default boundary condition is 'not-a-knot', i.e. the first and second
259
+ segment at a curve end are the same polynomial. More boundary conditions are
260
+ available in `CubicSpline`.
261
+
262
+ References
263
+ ----------
264
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
265
+
266
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
267
+ integration of experimental data using spline functions",
268
+ J.Comp.Appl.Maths 1 (1975) 165-184.
269
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
270
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
271
+ 1286-1304.
272
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
273
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
274
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
275
+ Numerical Analysis, Oxford University Press, 1993.
276
+
277
+ Examples
278
+ --------
279
+ You can interpolate 1-D points with a B-spline curve.
280
+ Further examples are given in
281
+ :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
282
+
283
+ >>> import numpy as np
284
+ >>> import matplotlib.pyplot as plt
285
+ >>> from scipy.interpolate import splev, splrep
286
+ >>> x = np.linspace(0, 10, 10)
287
+ >>> y = np.sin(x)
288
+ >>> spl = splrep(x, y)
289
+ >>> x2 = np.linspace(0, 10, 200)
290
+ >>> y2 = splev(x2, spl)
291
+ >>> plt.plot(x, y, 'o', x2, y2)
292
+ >>> plt.show()
293
+
294
+ """
295
+ res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
296
+ return res
297
+
298
+
299
+ def splev(x, tck, der=0, ext=0):
300
+ """
301
+ Evaluate a B-spline or its derivatives.
302
+
303
+ Given the knots and coefficients of a B-spline representation, evaluate
304
+ the value of the smoothing polynomial and its derivatives. This is a
305
+ wrapper around the FORTRAN routines splev and splder of FITPACK.
306
+
307
+ Parameters
308
+ ----------
309
+ x : array_like
310
+ An array of points at which to return the value of the smoothed
311
+ spline or its derivatives. If `tck` was returned from `splprep`,
312
+ then the parameter values, u should be given.
313
+ tck : 3-tuple or a BSpline object
314
+ If a tuple, then it should be a sequence of length 3 returned by
315
+ `splrep` or `splprep` containing the knots, coefficients, and degree
316
+ of the spline. (Also see Notes.)
317
+ der : int, optional
318
+ The order of derivative of the spline to compute (must be less than
319
+ or equal to k, the degree of the spline).
320
+ ext : int, optional
321
+ Controls the value returned for elements of ``x`` not in the
322
+ interval defined by the knot sequence.
323
+
324
+ * if ext=0, return the extrapolated value.
325
+ * if ext=1, return 0
326
+ * if ext=2, raise a ValueError
327
+ * if ext=3, return the boundary value.
328
+
329
+ The default value is 0.
330
+
331
+ Returns
332
+ -------
333
+ y : ndarray or list of ndarrays
334
+ An array of values representing the spline function evaluated at
335
+ the points in `x`. If `tck` was returned from `splprep`, then this
336
+ is a list of arrays representing the curve in an N-D space.
337
+
338
+ See Also
339
+ --------
340
+ splprep, splrep, sproot, spalde, splint
341
+ bisplrep, bisplev
342
+ BSpline
343
+
344
+ Notes
345
+ -----
346
+ Manipulating the tck-tuples directly is not recommended. In new code,
347
+ prefer using `BSpline` objects.
348
+
349
+ References
350
+ ----------
351
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
352
+ Theory, 6, p.50-62, 1972.
353
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
354
+ Applics, 10, p.134-149, 1972.
355
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
356
+ on Numerical Analysis, Oxford University Press, 1993.
357
+
358
+ Examples
359
+ --------
360
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
361
+
362
+ """
363
+ if isinstance(tck, BSpline):
364
+ if tck.c.ndim > 1:
365
+ mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
366
+ "not allowed. Use BSpline.__call__(x) instead.")
367
+ raise ValueError(mesg)
368
+
369
+ # remap the out-of-bounds behavior
370
+ try:
371
+ extrapolate = {0: True, }[ext]
372
+ except KeyError as e:
373
+ raise ValueError("Extrapolation mode %s is not supported "
374
+ "by BSpline." % ext) from e
375
+
376
+ return tck(x, der, extrapolate=extrapolate)
377
+ else:
378
+ return _impl.splev(x, tck, der, ext)
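
A short sketch of the `ext` modes for points outside the knot interval (illustrative data; a plain tck tuple rather than a BSpline; not part of the committed file):

import numpy as np

xk = np.linspace(0, 10, 20)
tck = splrep(xk, np.cos(xk))
xo = np.array([-1.0, 11.0])            # both outside [0, 10]
splev(xo, tck, ext=0)                  # extrapolate beyond the boundary
splev(xo, tck, ext=1)                  # returns array([0., 0.])
splev(xo, tck, ext=3)                  # clamps to the boundary values
# splev(xo, tck, ext=2) would raise ValueError
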
379
+
380
+
381
+ def splint(a, b, tck, full_output=0):
382
+ """
383
+ Evaluate the definite integral of a B-spline between two given points.
384
+
385
+ Parameters
386
+ ----------
387
+ a, b : float
388
+ The end-points of the integration interval.
389
+ tck : tuple or a BSpline instance
390
+ If a tuple, then it should be a sequence of length 3, containing the
391
+ vector of knots, the B-spline coefficients, and the degree of the
392
+ spline (see `splev`).
393
+ full_output : int, optional
394
+ Non-zero to return optional output.
395
+
396
+ Returns
397
+ -------
398
+ integral : float
399
+ The resulting integral.
400
+ wrk : ndarray
401
+ An array containing the integrals of the normalized B-splines
402
+ defined on the set of knots.
403
+ (Only returned if `full_output` is non-zero)
404
+
405
+ See Also
406
+ --------
407
+ splprep, splrep, sproot, spalde, splev
408
+ bisplrep, bisplev
409
+ BSpline
410
+
411
+ Notes
412
+ -----
413
+ `splint` silently assumes that the spline function is zero outside the data
414
+ interval (`a`, `b`).
415
+
416
+ Manipulating the tck-tuples directly is not recommended. In new code,
417
+ prefer using the `BSpline` objects.
418
+
419
+ References
420
+ ----------
421
+ .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
422
+ J. Inst. Maths Applics, 17, p.37-41, 1976.
423
+ .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
424
+ on Numerical Analysis, Oxford University Press, 1993.
425
+
426
+ Examples
427
+ --------
428
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
429
+
430
+ """
431
+ if isinstance(tck, BSpline):
432
+ if tck.c.ndim > 1:
433
+ mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
434
+ "not allowed. Use BSpline.integrate() instead.")
435
+ raise ValueError(mesg)
436
+
437
+ if full_output != 0:
438
+ mesg = ("full_output = %s is not supported. Proceeding as if "
439
+ "full_output = 0" % full_output)
440
+
441
+ return tck.integrate(a, b, extrapolate=False)
442
+ else:
443
+ return _impl.splint(a, b, tck, full_output)
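
A minimal sketch for `splint` (illustrative data; not part of the committed file): the integral of an interpolating spline of sin(x) over [0, pi] is very close to the exact value 2.

import numpy as np

xk = np.linspace(0, np.pi, 50)
tck = splrep(xk, np.sin(xk))
val = splint(0, np.pi, tck)            # approximately 2.0
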
444
+
445
+
446
+ def sproot(tck, mest=10):
447
+ """
448
+ Find the roots of a cubic B-spline.
449
+
450
+ Given the knots (>=8) and coefficients of a cubic B-spline return the
451
+ roots of the spline.
452
+
453
+ Parameters
454
+ ----------
455
+ tck : tuple or a BSpline object
456
+ If a tuple, then it should be a sequence of length 3, containing the
457
+ vector of knots, the B-spline coefficients, and the degree of the
458
+ spline.
459
+ The number of knots must be >= 8, and the degree must be 3.
460
+ The knots must be a monotonically increasing sequence.
461
+ mest : int, optional
462
+ An estimate of the number of zeros (Default is 10).
463
+
464
+ Returns
465
+ -------
466
+ zeros : ndarray
467
+ An array giving the roots of the spline.
468
+
469
+ See Also
470
+ --------
471
+ splprep, splrep, splint, spalde, splev
472
+ bisplrep, bisplev
473
+ BSpline
474
+
475
+ Notes
476
+ -----
477
+ Manipulating the tck-tuples directly is not recommended. In new code,
478
+ prefer using the `BSpline` objects.
479
+
480
+ References
481
+ ----------
482
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
483
+ Theory, 6, p.50-62, 1972.
484
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
485
+ Applics, 10, p.134-149, 1972.
486
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
487
+ on Numerical Analysis, Oxford University Press, 1993.
488
+
489
+ Examples
490
+ --------
491
+
492
+ For some data, this method may miss a root. This happens when one of
493
+ the spline knots (which FITPACK places automatically) happens to
494
+ coincide with the true root. A workaround is to convert to `PPoly`,
495
+ which uses a different root-finding algorithm.
496
+
497
+ For example,
498
+
499
+ >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
500
+ >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
501
+ ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
502
+ ... 6.520430e-03, 8.170770e-03]
503
+ >>> from scipy.interpolate import splrep, sproot, PPoly
504
+ >>> tck = splrep(x, y, s=0)
505
+ >>> sproot(tck)
506
+ array([], dtype=float64)
507
+
508
+ Converting to a PPoly object does find the roots at `x=2`:
509
+
510
+ >>> ppoly = PPoly.from_spline(tck)
511
+ >>> ppoly.roots(extrapolate=False)
512
+ array([2.])
513
+
514
+
515
+ Further examples are given :ref:`in the tutorial
516
+ <tutorial-interpolate_splXXX>`.
517
+
518
+ """
519
+ if isinstance(tck, BSpline):
520
+ if tck.c.ndim > 1:
521
+ mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
522
+ "not allowed.")
523
+ raise ValueError(mesg)
524
+
525
+ t, c, k = tck.tck
526
+
527
+ # _impl.sproot expects the interpolation axis to be last, so roll it.
528
+ # NB: This transpose is a no-op if c is 1D.
529
+ sh = tuple(range(c.ndim))
530
+ c = c.transpose(sh[1:] + (0,))
531
+ return _impl.sproot((t, c, k), mest)
532
+ else:
533
+ return _impl.sproot(tck, mest)
534
+
535
+
536
+ def spalde(x, tck):
537
+ """
538
+ Evaluate all derivatives of a B-spline.
539
+
540
+ Given the knots and coefficients of a cubic B-spline compute all
541
+ derivatives up to order k at a point (or set of points).
542
+
543
+ Parameters
544
+ ----------
545
+ x : array_like
546
+ A point or a set of points at which to evaluate the derivatives.
547
+ Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
548
+ tck : tuple
549
+ A tuple (t,c,k) containing the vector of knots,
550
+ the B-spline coefficients, and the degree of the spline.
551
+
552
+ Returns
553
+ -------
554
+ results : {ndarray, list of ndarrays}
555
+ An array (or a list of arrays) containing all derivatives
556
+ up to order k inclusive for each point `x`.
557
+
558
+ See Also
559
+ --------
560
+ splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
561
+ UnivariateSpline, BivariateSpline
562
+
563
+ References
564
+ ----------
565
+ .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
566
+ 6 (1972) 50-62.
567
+ .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
568
+ applics 10 (1972) 134-149.
569
+ .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
570
+ Numerical Analysis, Oxford University Press, 1993.
571
+
572
+ """
573
+ if isinstance(tck, BSpline):
574
+ raise TypeError("spalde does not accept BSpline instances.")
575
+ else:
576
+ return _impl.spalde(x, tck)
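A minimal usage sketch (editor's illustration, not part of the library source): `spalde` takes a plain ``(t, c, k)`` tuple, for example from `splrep`, and returns the value together with all derivatives up to order ``k`` at each point; the data here are made up.

>>> import numpy as np
>>> from scipy.interpolate import splrep, spalde
>>> x = np.linspace(0, 2, 20)
>>> tck = splrep(x, x**3)
>>> spalde(1.0, tck)   # [f(1), f'(1), f''(1), f'''(1)] for the fitted cubic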
577
+
578
+
579
+ def insert(x, tck, m=1, per=0):
580
+ """
581
+ Insert knots into a B-spline.
582
+
583
+ Given the knots and coefficients of a B-spline representation, create a
584
+ new B-spline with a knot inserted `m` times at point `x`.
585
+ This is a wrapper around the FORTRAN routine insert of FITPACK.
586
+
587
+ Parameters
588
+ ----------
589
+ x (u) : float
590
+ A knot value at which to insert a new knot. If `tck` was returned
591
+ from ``splprep``, then the parameter value ``u`` should be given.
592
+ tck : a `BSpline` instance or a tuple
593
+ If tuple, then it is expected to be a tuple (t,c,k) containing
594
+ the vector of knots, the B-spline coefficients, and the degree of
595
+ the spline.
596
+ m : int, optional
597
+ The number of times to insert the given knot (its multiplicity).
598
+ Default is 1.
599
+ per : int, optional
600
+ If non-zero, the input spline is considered periodic.
601
+
602
+ Returns
603
+ -------
604
+ BSpline instance or a tuple
605
+ A new B-spline with knots t, coefficients c, and degree k.
606
+ ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
607
+ In case of a periodic spline (``per != 0``) there must be
608
+ either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
609
+ or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
610
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
611
+ a BSpline object is constructed and returned.
612
+
613
+ Notes
614
+ -----
615
+ Based on algorithms from [1]_ and [2]_.
616
+
617
+ Manipulating the tck-tuples directly is not recommended. In new code,
618
+ prefer using the `BSpline` objects, in particular `BSpline.insert_knot`
619
+ method.
620
+
621
+ See Also
622
+ --------
623
+ BSpline.insert_knot
624
+
625
+ References
626
+ ----------
627
+ .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
628
+ Computer Aided Design, 12, p.199-201, 1980.
629
+ .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
630
+ Numerical Analysis", Oxford University Press, 1993.
631
+
632
+ Examples
633
+ --------
634
+ You can insert knots into a B-spline.
635
+
636
+ >>> from scipy.interpolate import splrep, insert
637
+ >>> import numpy as np
638
+ >>> x = np.linspace(0, 10, 5)
639
+ >>> y = np.sin(x)
640
+ >>> tck = splrep(x, y)
641
+ >>> tck[0]
642
+ array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
643
+
644
+ A knot is inserted:
645
+
646
+ >>> tck_inserted = insert(3, tck)
647
+ >>> tck_inserted[0]
648
+ array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
649
+
650
+ Some knots are inserted:
651
+
652
+ >>> tck_inserted2 = insert(8, tck, m=3)
653
+ >>> tck_inserted2[0]
654
+ array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
655
+
656
+ """
657
+ if isinstance(tck, BSpline):
658
+
659
+ t, c, k = tck.tck
660
+
661
+ # FITPACK expects the interpolation axis to be last, so roll it over
662
+ # NB: if c array is 1D, transposes are no-ops
663
+ sh = tuple(range(c.ndim))
664
+ c = c.transpose(sh[1:] + (0,))
665
+ t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
666
+
667
+ # and roll the last axis back
668
+ c_ = np.asarray(c_)
669
+ c_ = c_.transpose((sh[-1],) + sh[:-1])
670
+ return BSpline(t_, c_, k_)
671
+ else:
672
+ return _impl.insert(x, tck, m, per)
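A minimal usage sketch (editor's illustration, not part of the library source): when `insert` is given a `BSpline`, it returns a new `BSpline`; the Notes above point to the `BSpline.insert_knot` method (available in newer SciPy releases) as the preferred spelling. The data here are made up.

>>> import numpy as np
>>> from scipy.interpolate import splrep, insert, BSpline
>>> x = np.linspace(0, 10, 5)
>>> spl = BSpline(*splrep(x, np.sin(x)))
>>> insert(3, spl).t               # new BSpline with a knot inserted at 3
>>> spl.insert_knot(3).t           # equivalent, per the Notes above (newer SciPy)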
673
+
674
+
675
+ def splder(tck, n=1):
676
+ """
677
+ Compute the spline representation of the derivative of a given spline
678
+
679
+ Parameters
680
+ ----------
681
+ tck : BSpline instance or a tuple of (t, c, k)
682
+ Spline whose derivative to compute
683
+ n : int, optional
684
+ Order of derivative to evaluate. Default: 1
685
+
686
+ Returns
687
+ -------
688
+ `BSpline` instance or tuple
689
+ Spline of order k2=k-n representing the derivative
690
+ of the input spline.
691
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
692
+ a BSpline object is constructed and returned.
693
+
694
+ See Also
695
+ --------
696
+ splantider, splev, spalde
697
+ BSpline
698
+
699
+ Notes
700
+ -----
701
+
702
+ .. versionadded:: 0.13.0
703
+
704
+ Examples
705
+ --------
706
+ This can be used for finding maxima of a curve:
707
+
708
+ >>> from scipy.interpolate import splrep, splder, sproot
709
+ >>> import numpy as np
710
+ >>> x = np.linspace(0, 10, 70)
711
+ >>> y = np.sin(x)
712
+ >>> spl = splrep(x, y, k=4)
713
+
714
+ Now, differentiate the spline and find the zeros of the
715
+ derivative. (NB: `sproot` only works for order 3 splines, so we
716
+ fit an order 4 spline):
717
+
718
+ >>> dspl = splder(spl)
719
+ >>> sproot(dspl) / np.pi
720
+ array([ 0.50000001, 1.5 , 2.49999998])
721
+
722
+ This agrees well with roots :math:`\\pi/2 + n\\pi` of
723
+ :math:`\\cos(x) = \\sin'(x)`.
724
+
725
+ """
726
+ if isinstance(tck, BSpline):
727
+ return tck.derivative(n)
728
+ else:
729
+ return _impl.splder(tck, n)
730
+
731
+
732
+ def splantider(tck, n=1):
733
+ """
734
+ Compute the spline for the antiderivative (integral) of a given spline.
735
+
736
+ Parameters
737
+ ----------
738
+ tck : BSpline instance or a tuple of (t, c, k)
739
+ Spline whose antiderivative to compute
740
+ n : int, optional
741
+ Order of antiderivative to evaluate. Default: 1
742
+
743
+ Returns
744
+ -------
745
+ BSpline instance or a tuple of (t2, c2, k2)
746
+ Spline of order k2=k+n representing the antiderivative of the input
747
+ spline.
748
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
749
+ a BSpline object is constructed and returned.
750
+
751
+ See Also
752
+ --------
753
+ splder, splev, spalde
754
+ BSpline
755
+
756
+ Notes
757
+ -----
758
+ The `splder` function is the inverse operation of this function.
759
+ Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
760
+ rounding error.
761
+
762
+ .. versionadded:: 0.13.0
763
+
764
+ Examples
765
+ --------
766
+ >>> from scipy.interpolate import splrep, splder, splantider, splev
767
+ >>> import numpy as np
768
+ >>> x = np.linspace(0, np.pi/2, 70)
769
+ >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
770
+ >>> spl = splrep(x, y)
771
+
772
+ The derivative is the inverse operation of the antiderivative,
773
+ although some floating point error accumulates:
774
+
775
+ >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
776
+ (array(2.1565429877197317), array(2.1565429877201865))
777
+
778
+ Antiderivative can be used to evaluate definite integrals:
779
+
780
+ >>> ispl = splantider(spl)
781
+ >>> splev(np.pi/2, ispl) - splev(0, ispl)
782
+ 2.2572053588768486
783
+
784
+ This is indeed an approximation to the complete elliptic integral
785
+ :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
786
+
787
+ >>> from scipy.special import ellipk
788
+ >>> ellipk(0.8)
789
+ 2.2572053268208538
790
+
791
+ """
792
+ if isinstance(tck, BSpline):
793
+ return tck.antiderivative(n)
794
+ else:
795
+ return _impl.splantider(tck, n)
796
+
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py ADDED
@@ -0,0 +1,358 @@
1
+ import itertools
2
+ import functools
3
+ import operator
4
+ import numpy as np
5
+
6
+ from math import prod
7
+
8
+ from . import _bspl # type: ignore
9
+
10
+ import scipy.sparse.linalg as ssl
11
+ from scipy.sparse import csr_array
12
+
13
+ from ._bsplines import _not_a_knot
14
+
15
+ __all__ = ["NdBSpline"]
16
+
17
+
18
+ def _get_dtype(dtype):
19
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
20
+ if np.issubdtype(dtype, np.complexfloating):
21
+ return np.complex128
22
+ else:
23
+ return np.float64
24
+
25
+
26
+ class NdBSpline:
27
+ """Tensor product spline object.
28
+
29
+ The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
30
+ combination of products of one-dimensional b-splines in each of the ``N``
31
+ dimensions::
32
+
33
+ c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
34
+
35
+
36
+ Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
37
+ ``t`` evaluated at ``x``.
38
+
39
+ Parameters
40
+ ----------
41
+ t : tuple of 1D ndarrays
42
+ knot vectors in directions 1, 2, ... N,
43
+ ``len(t[i]) == n[i] + k + 1``
44
+ c : ndarray, shape (n1, n2, ..., nN, ...)
45
+ b-spline coefficients
46
+ k : int or length-d tuple of integers
47
+ spline degrees.
48
+ A single integer is interpreted as having this degree for
49
+ all dimensions.
50
+ extrapolate : bool, optional
51
+ Whether to extrapolate out-of-bounds inputs, or return `nan`.
52
+ Default is to extrapolate.
53
+
54
+ Attributes
55
+ ----------
56
+ t : tuple of ndarrays
57
+ Knot vectors.
58
+ c : ndarray
59
+ Coefficients of the tensor-product spline.
60
+ k : tuple of integers
61
+ Degrees for each dimension.
62
+ extrapolate : bool, optional
63
+ Whether to extrapolate or return nans for out-of-bounds inputs.
64
+ Defaults to true.
65
+
66
+ Methods
67
+ -------
68
+ __call__
69
+ design_matrix
70
+
71
+ See Also
72
+ --------
73
+ BSpline : a one-dimensional B-spline object
74
+ NdPPoly : an N-dimensional piecewise tensor product polynomial
75
+
76
+ """
77
+ def __init__(self, t, c, k, *, extrapolate=None):
78
+ ndim = len(t)
79
+
80
+ try:
81
+ len(k)
82
+ except TypeError:
83
+ # make k a tuple
84
+ k = (k,)*ndim
85
+
86
+ if len(k) != ndim:
87
+ raise ValueError(f"{len(t) = } != {len(k) = }.")
88
+
89
+ self.k = tuple(operator.index(ki) for ki in k)
90
+ self.t = tuple(np.ascontiguousarray(ti, dtype=float) for ti in t)
91
+ self.c = np.asarray(c)
92
+
93
+ if extrapolate is None:
94
+ extrapolate = True
95
+ self.extrapolate = bool(extrapolate)
96
+
97
+ self.c = np.asarray(c)
98
+
99
+ for d in range(ndim):
100
+ td = self.t[d]
101
+ kd = self.k[d]
102
+ n = td.shape[0] - kd - 1
103
+ if kd < 0:
104
+ raise ValueError(f"Spline degree in dimension {d} cannot be"
105
+ f" negative.")
106
+ if td.ndim != 1:
107
+ raise ValueError(f"Knot vector in dimension {d} must be"
108
+ f" one-dimensional.")
109
+ if n < kd + 1:
110
+ raise ValueError(f"Need at least {2*kd + 2} knots for degree"
111
+ f" {kd} in dimension {d}.")
112
+ if (np.diff(td) < 0).any():
113
+ raise ValueError(f"Knots in dimension {d} must be in a"
114
+ f" non-decreasing order.")
115
+ if len(np.unique(td[kd:n + 1])) < 2:
116
+ raise ValueError(f"Need at least two internal knots in"
117
+ f" dimension {d}.")
118
+ if not np.isfinite(td).all():
119
+ raise ValueError(f"Knots in dimension {d} should not have"
120
+ f" nans or infs.")
121
+ if self.c.ndim < ndim:
122
+ raise ValueError(f"Coefficients must be at least"
123
+ f" {d}-dimensional.")
124
+ if self.c.shape[d] != n:
125
+ raise ValueError(f"Knots, coefficients and degree in dimension"
126
+ f" {d} are inconsistent:"
127
+ f" got {self.c.shape[d]} coefficients for"
128
+ f" {len(td)} knots, need at least {n} for"
129
+ f" k={k}.")
130
+
131
+ dt = _get_dtype(self.c.dtype)
132
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
133
+
134
+ def __call__(self, xi, *, nu=None, extrapolate=None):
135
+ """Evaluate the tensor product b-spline at ``xi``.
136
+
137
+ Parameters
138
+ ----------
139
+ xi : array_like, shape(..., ndim)
140
+ The coordinates to evaluate the interpolator at.
141
+ This can be a list or tuple of ndim-dimensional points
142
+ or an array with the shape (num_points, ndim).
143
+ nu : array_like, optional, shape (ndim,)
144
+ Orders of derivatives to evaluate. Each must be non-negative.
145
+ Defaults to the zeroth derivative.
146
+ extrapolate : bool, optional
147
+ Whether to extrapolate based on first and last intervals in each
148
+ dimension, or return `nan`. Default is ``self.extrapolate``.
149
+
150
+ Returns
151
+ -------
152
+ values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
153
+ Interpolated values at ``xi``
154
+ """
155
+ ndim = len(self.t)
156
+
157
+ if extrapolate is None:
158
+ extrapolate = self.extrapolate
159
+ extrapolate = bool(extrapolate)
160
+
161
+ if nu is None:
162
+ nu = np.zeros((ndim,), dtype=np.intc)
163
+ else:
164
+ nu = np.asarray(nu, dtype=np.intc)
165
+ if nu.ndim != 1 or nu.shape[0] != ndim:
166
+ raise ValueError(
167
+ f"invalid number of derivative orders {nu = } for "
168
+ f"ndim = {len(self.t)}.")
169
+ if any(nu < 0):
170
+ raise ValueError(f"derivatives must be positive, got {nu = }")
171
+
172
+ # prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
173
+ xi = np.asarray(xi, dtype=float)
174
+ xi_shape = xi.shape
175
+ xi = xi.reshape(-1, xi_shape[-1])
176
+ xi = np.ascontiguousarray(xi)
177
+
178
+ if xi_shape[-1] != ndim:
179
+ raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
180
+
181
+ # prepare k & t
182
+ _k = np.asarray(self.k, dtype=np.dtype("long"))
183
+
184
+ # pack the knots into a single array
185
+ len_t = [len(ti) for ti in self.t]
186
+ _t = np.empty((ndim, max(len_t)), dtype=float)
187
+ _t.fill(np.nan)
188
+ for d in range(ndim):
189
+ _t[d, :len(self.t[d])] = self.t[d]
190
+ len_t = np.asarray(len_t, dtype=np.dtype("long"))
191
+
192
+ # tabulate the flat indices for iterating over the (k+1)**ndim subarray
193
+ shape = tuple(kd + 1 for kd in self.k)
194
+ indices = np.unravel_index(np.arange(prod(shape)), shape)
195
+ _indices_k1d = np.asarray(indices, dtype=np.intp).T
196
+
197
+ # prepare the coefficients: flatten the trailing dimensions
198
+ c1 = self.c.reshape(self.c.shape[:ndim] + (-1,))
199
+ c1r = c1.ravel()
200
+
201
+ # replacement for np.ravel_multi_index for indexing of `c1`:
202
+ _strides_c1 = np.asarray([s // c1.dtype.itemsize
203
+ for s in c1.strides], dtype=np.intp)
204
+
205
+ num_c_tr = c1.shape[-1] # # of trailing coefficients
206
+ out = np.empty(xi.shape[:-1] + (num_c_tr,), dtype=c1.dtype)
207
+
208
+ _bspl.evaluate_ndbspline(xi,
209
+ _t,
210
+ len_t,
211
+ _k,
212
+ nu,
213
+ extrapolate,
214
+ c1r,
215
+ num_c_tr,
216
+ _strides_c1,
217
+ _indices_k1d,
218
+ out,)
219
+
220
+ return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
221
+
222
+ @classmethod
223
+ def design_matrix(cls, xvals, t, k, extrapolate=True):
224
+ """Construct the design matrix as a CSR format sparse array.
225
+
226
+ Parameters
227
+ ----------
228
+ xvals : ndarray, shape(npts, ndim)
229
+ Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
230
+ ``ndim``-dimensional array.
231
+ t : tuple of 1D ndarrays, length-ndim
232
+ Knot vectors in directions 1, 2, ... ndim,
233
+ k : int
234
+ B-spline degree.
235
+ extrapolate : bool, optional
236
+ Whether to extrapolate out-of-bounds values or raise a `ValueError`.
237
+
238
+ Returns
239
+ -------
240
+ design_matrix : a CSR array
241
+ Each row of the design matrix corresponds to a value in `xvals` and
242
+ contains values of b-spline basis elements which are non-zero
243
+ at this value.
244
+
245
+ """
246
+ xvals = np.asarray(xvals, dtype=float)
247
+ ndim = xvals.shape[-1]
248
+ if len(t) != ndim:
249
+ raise ValueError(
250
+ f"Data and knots are inconsistent: len(t) = {len(t)} for "
251
+ f" {ndim = }."
252
+ )
253
+ try:
254
+ len(k)
255
+ except TypeError:
256
+ # make k a tuple
257
+ k = (k,)*ndim
258
+
259
+ kk = np.asarray(k, dtype=np.int32)
260
+ data, indices, indptr = _bspl._colloc_nd(xvals, t, kk)
261
+ return csr_array((data, indices, indptr))
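A minimal usage sketch (editor's illustration, not part of the library source): a small 2-D tensor-product spline built directly from knots and coefficients, evaluated at points and, via ``nu``, differentiated; all numbers are made up.

>>> import numpy as np
>>> from scipy.interpolate import NdBSpline
>>> tx = ty = np.array([0., 0., 1., 2., 2.])       # degree-1 knots, 3 basis functions per axis
>>> c = np.arange(9.).reshape(3, 3)                # shape (len(tx)-1-1, len(ty)-1-1)
>>> spl = NdBSpline((tx, ty), c, k=1)
>>> spl([[0.5, 0.5], [1.5, 1.5]])                  # values at two points
>>> spl([[0.5, 0.5]], nu=[1, 0])                   # d/dx at one point
>>> NdBSpline.design_matrix(np.array([[0.5, 0.5]]), (tx, ty), k=1)  # sparse CSR design matrix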
262
+
263
+
264
+ def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
265
+ # work around iterative solvers not accepting multiple r.h.s.
266
+
267
+ # also work around a.dtype == float64 and b.dtype == complex128
268
+ # cf https://github.com/scipy/scipy/issues/19644
269
+ if np.issubdtype(b.dtype, np.complexfloating):
270
+ real = _iter_solve(a, b.real, solver, **solver_args)
271
+ imag = _iter_solve(a, b.imag, solver, **solver_args)
272
+ return real + 1j*imag
273
+
274
+ if b.ndim == 2 and b.shape[1] != 1:
275
+ res = np.empty_like(b)
276
+ for j in range(b.shape[1]):
277
+ res[:, j], info = solver(a, b[:, j], **solver_args)
278
+ if info != 0:
279
+ raise ValueError(f"{solver = } returns {info = } for column {j}.")
280
+ return res
281
+ else:
282
+ res, info = solver(a, b, **solver_args)
283
+ if info != 0:
284
+ raise ValueError(f"{solver = } returns {info = }.")
285
+ return res
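A minimal sketch of what this private helper does (editor's illustration, not part of the library source): it loops an iterative solver such as `gcrotmk` over the columns of a multi-column right-hand side, since those solvers accept only a single vector; the data are made up and the private import path is the module shown above.

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> from scipy.interpolate._ndbspline import _iter_solve   # private helper defined above
>>> a = csr_array(2.0 * np.eye(4))
>>> b = np.ones((4, 3))                    # three right-hand sides at once
>>> _iter_solve(a, b, atol=1e-12)          # solved column by column, result shape (4, 3)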
286
+
287
+
288
+ def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
289
+ """Construct an interpolating NdBspline.
290
+
291
+ Parameters
292
+ ----------
293
+ points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
294
+ The points defining the regular grid in N dimensions. The points in
295
+ each dimension (i.e. every element of the `points` tuple) must be
296
+ strictly ascending or descending.
297
+ values : ndarray of float, shape (m1, ..., mN, ...)
298
+ The data on the regular grid in n dimensions.
299
+ k : int, optional
300
+ The spline degree. Must be odd. Default is cubic, k=3
301
+ solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
302
+ An iterative solver from `scipy.sparse.linalg` or a direct one,
303
+ `scipy.sparse.linalg.spsolve`.
304
+ Used to solve the sparse linear system
305
+ ``design_matrix @ coefficients = rhs`` for the coefficients.
306
+ Default is `scipy.sparse.linalg.gcrotmk`
307
+ solver_args : dict, optional
308
+ Additional arguments for the solver. The call signature is
309
+ ``solver(csr_array, rhs_vector, **solver_args)``
310
+
311
+ Returns
312
+ -------
313
+ spl : NdBSpline object
314
+
315
+ Notes
316
+ -----
317
+ Boundary conditions are not-a-knot in all dimensions.
318
+ """
319
+ ndim = len(points)
320
+ xi_shape = tuple(len(x) for x in points)
321
+
322
+ try:
323
+ len(k)
324
+ except TypeError:
325
+ # make k a tuple
326
+ k = (k,)*ndim
327
+
328
+ for d, point in enumerate(points):
329
+ numpts = len(np.atleast_1d(point))
330
+ if numpts <= k[d]:
331
+ raise ValueError(f"There are {numpts} points in dimension {d},"
332
+ f" but order {k[d]} requires at least "
333
+ f" {k[d]+1} points per dimension.")
334
+
335
+ t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
336
+ for d in range(ndim))
337
+ xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
338
+
339
+ # construct the collocation matrix
340
+ matr = NdBSpline.design_matrix(xvals, t, k)
341
+
342
+ # Solve for the coefficients given `values`.
343
+ # Trailing dimensions: first ndim dimensions are data, the rest are batch
344
+ # dimensions, so stack `values` into a 2D array for `spsolve` to understand.
345
+ v_shape = values.shape
346
+ vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
347
+ vals = values.reshape(vals_shape)
348
+
349
+ if solver != ssl.spsolve:
350
+ solver = functools.partial(_iter_solve, solver=solver)
351
+ if "atol" not in solver_args:
352
+ # avoid a DeprecationWarning, grumble grumble
353
+ solver_args["atol"] = 1e-6
354
+
355
+ coef = solver(matr, vals, **solver_args)
356
+ coef = coef.reshape(xi_shape + v_shape[ndim:])
357
+ return NdBSpline(t, coef, k)
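A minimal usage sketch (editor's illustration, not part of the library source): `make_ndbspl` is a module-level helper (not listed in ``__all__`` above) that interpolates gridded data with not-a-knot boundaries; the data are made up and the private import path is the module shown above.

>>> import numpy as np
>>> from scipy.interpolate._ndbspline import make_ndbspl   # private helper defined above
>>> x = np.linspace(0, 1, 6)
>>> y = np.linspace(0, 1, 7)
>>> xx, yy = np.meshgrid(x, y, indexing="ij")
>>> spl = make_ndbspl((x, y), np.sin(xx) * np.cos(yy), k=3)
>>> spl([[0.5, 0.5]])      # close to sin(0.5)*cos(0.5)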
358
+
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py ADDED
@@ -0,0 +1,332 @@
1
+ """
2
+ Convenience interface to N-D interpolation
3
+
4
+ .. versionadded:: 0.9
5
+
6
+ """
7
+ import numpy as np
8
+ from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
9
+ CloughTocher2DInterpolator, _ndim_coords_from_arrays
10
+ from scipy.spatial import cKDTree
11
+
12
+ __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
13
+ 'CloughTocher2DInterpolator']
14
+
15
+ #------------------------------------------------------------------------------
16
+ # Nearest-neighbor interpolation
17
+ #------------------------------------------------------------------------------
18
+
19
+
20
+ class NearestNDInterpolator(NDInterpolatorBase):
21
+ """NearestNDInterpolator(x, y).
22
+
23
+ Nearest-neighbor interpolator in N > 1 dimensions.
24
+
25
+ .. versionadded:: 0.9
26
+
27
+ Methods
28
+ -------
29
+ __call__
30
+
31
+ Parameters
32
+ ----------
33
+ x : (npoints, ndims) 2-D ndarray of floats
34
+ Data point coordinates.
35
+ y : (npoints, ) 1-D ndarray of float or complex
36
+ Data values.
37
+ rescale : boolean, optional
38
+ Rescale points to unit cube before performing interpolation.
39
+ This is useful if some of the input dimensions have
40
+ incommensurable units and differ by many orders of magnitude.
41
+
42
+ .. versionadded:: 0.14.0
43
+ tree_options : dict, optional
44
+ Options passed to the underlying ``cKDTree``.
45
+
46
+ .. versionadded:: 0.17.0
47
+
48
+ See Also
49
+ --------
50
+ griddata :
51
+ Interpolate unstructured D-D data.
52
+ LinearNDInterpolator :
53
+ Piecewise linear interpolator in N dimensions.
54
+ CloughTocher2DInterpolator :
55
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
56
+ interpn : Interpolation on a regular grid or rectilinear grid.
57
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
58
+ in arbitrary dimensions (`interpn` wraps this
59
+ class).
60
+
61
+ Notes
62
+ -----
63
+ Uses ``scipy.spatial.cKDTree``
64
+
65
+ .. note:: For data on a regular grid use `interpn` instead.
66
+
67
+ Examples
68
+ --------
69
+ We can interpolate values on a 2D plane:
70
+
71
+ >>> from scipy.interpolate import NearestNDInterpolator
72
+ >>> import numpy as np
73
+ >>> import matplotlib.pyplot as plt
74
+ >>> rng = np.random.default_rng()
75
+ >>> x = rng.random(10) - 0.5
76
+ >>> y = rng.random(10) - 0.5
77
+ >>> z = np.hypot(x, y)
78
+ >>> X = np.linspace(min(x), max(x))
79
+ >>> Y = np.linspace(min(y), max(y))
80
+ >>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
81
+ >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
82
+ >>> Z = interp(X, Y)
83
+ >>> plt.pcolormesh(X, Y, Z, shading='auto')
84
+ >>> plt.plot(x, y, "ok", label="input point")
85
+ >>> plt.legend()
86
+ >>> plt.colorbar()
87
+ >>> plt.axis("equal")
88
+ >>> plt.show()
89
+
90
+ """
91
+
92
+ def __init__(self, x, y, rescale=False, tree_options=None):
93
+ NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
94
+ need_contiguous=False,
95
+ need_values=False)
96
+ if tree_options is None:
97
+ tree_options = dict()
98
+ self.tree = cKDTree(self.points, **tree_options)
99
+ self.values = np.asarray(y)
100
+
101
+ def __call__(self, *args, **query_options):
102
+ """
103
+ Evaluate interpolator at given points.
104
+
105
+ Parameters
106
+ ----------
107
+ x1, x2, ... xn : array-like of float
108
+ Points at which to interpolate the data.
109
+ x1, x2, ... xn can be array-like of float with broadcastable shape,
110
+ or x1 can be array-like of float with shape ``(..., ndim)``.
111
+ **query_options
112
+ This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
113
+ being passed to the cKDTree's query function to be explicitly set.
114
+ See `scipy.spatial.cKDTree.query` for an overview of the different options.
115
+
116
+ .. versionadded:: 1.12.0
117
+
118
+ """
119
+ # For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
120
+ # some operations which are not required by NearestNDInterpolator.__call__,
121
+ # hence here we operate on xi directly, without calling a parent class function.
122
+ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
123
+ xi = self._check_call_shape(xi)
124
+ xi = self._scale_x(xi)
125
+
126
+ # We need to handle two important cases:
127
+ # (1) the case where xi has trailing dimensions (..., ndim), and
128
+ # (2) the case where y has trailing dimensions
129
+ # We will first flatten xi to deal with case (1),
130
+ # do the computation in flattened array while retaining y's dimensionality,
131
+ # and then reshape the interpolated values back to match xi's shape.
132
+
133
+ # Flatten xi for the query
134
+ xi_flat = xi.reshape(-1, xi.shape[-1])
135
+ original_shape = xi.shape
136
+ flattened_shape = xi_flat.shape
137
+
138
+ # if distance_upper_bound is set to not be infinite,
139
+ # then we need to consider the case where cKDTree
140
+ # does not find any points within distance_upper_bound to return.
141
+ # It marks those points as having infinite distance, which is what will be used
142
+ # below to mask the array and return only the points that were deemed
143
+ # to have a close enough neighbor to return something useful.
144
+ dist, i = self.tree.query(xi_flat, **query_options)
145
+ valid_mask = np.isfinite(dist)
146
+
147
+ # create a holder interp_values array and fill with nans.
148
+ if self.values.ndim > 1:
149
+ interp_shape = flattened_shape[:-1] + self.values.shape[1:]
150
+ else:
151
+ interp_shape = flattened_shape[:-1]
152
+
153
+ if np.issubdtype(self.values.dtype, np.complexfloating):
154
+ interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
155
+ else:
156
+ interp_values = np.full(interp_shape, np.nan)
157
+
158
+ interp_values[valid_mask] = self.values[i[valid_mask], ...]
159
+
160
+ if self.values.ndim > 1:
161
+ new_shape = original_shape[:-1] + self.values.shape[1:]
162
+ else:
163
+ new_shape = original_shape[:-1]
164
+ interp_values = interp_values.reshape(new_shape)
165
+
166
+ return interp_values
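A minimal usage sketch (editor's illustration, not part of the library source): as the masking above implies, passing ``distance_upper_bound`` through ``query_options`` yields NaN for query points with no neighbour inside that radius; the data are made up.

>>> import numpy as np
>>> from scipy.interpolate import NearestNDInterpolator
>>> pts = np.array([[0., 0.], [1., 0.], [0., 1.]])
>>> vals = np.array([1., 2., 3.])
>>> interp = NearestNDInterpolator(pts, vals)
>>> interp([[0.1, 0.1], [5., 5.]], distance_upper_bound=1.0)
>>> # first point has a neighbour within radius 1 -> 1.0; the second does not -> nan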
167
+
168
+
169
+ #------------------------------------------------------------------------------
170
+ # Convenience interface function
171
+ #------------------------------------------------------------------------------
172
+
173
+
174
+ def griddata(points, values, xi, method='linear', fill_value=np.nan,
175
+ rescale=False):
176
+ """
177
+ Interpolate unstructured D-D data.
178
+
179
+ Parameters
180
+ ----------
181
+ points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
182
+ Data point coordinates.
183
+ values : ndarray of float or complex, shape (n,)
184
+ Data values.
185
+ xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
186
+ Points at which to interpolate data.
187
+ method : {'linear', 'nearest', 'cubic'}, optional
188
+ Method of interpolation. One of
189
+
190
+ ``nearest``
191
+ return the value at the data point closest to
192
+ the point of interpolation. See `NearestNDInterpolator` for
193
+ more details.
194
+
195
+ ``linear``
196
+ tessellate the input point set to N-D
197
+ simplices, and interpolate linearly on each simplex. See
198
+ `LinearNDInterpolator` for more details.
199
+
200
+ ``cubic`` (1-D)
201
+ return the value determined from a cubic
202
+ spline.
203
+
204
+ ``cubic`` (2-D)
205
+ return the value determined from a
206
+ piecewise cubic, continuously differentiable (C1), and
207
+ approximately curvature-minimizing polynomial surface. See
208
+ `CloughTocher2DInterpolator` for more details.
209
+ fill_value : float, optional
210
+ Value used to fill in for requested points outside of the
211
+ convex hull of the input points. If not provided, then the
212
+ default is ``nan``. This option has no effect for the
213
+ 'nearest' method.
214
+ rescale : bool, optional
215
+ Rescale points to unit cube before performing interpolation.
216
+ This is useful if some of the input dimensions have
217
+ incommensurable units and differ by many orders of magnitude.
218
+
219
+ .. versionadded:: 0.14.0
220
+
221
+ Returns
222
+ -------
223
+ ndarray
224
+ Array of interpolated values.
225
+
226
+ See Also
227
+ --------
228
+ LinearNDInterpolator :
229
+ Piecewise linear interpolator in N dimensions.
230
+ NearestNDInterpolator :
231
+ Nearest-neighbor interpolator in N dimensions.
232
+ CloughTocher2DInterpolator :
233
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
234
+ interpn : Interpolation on a regular grid or rectilinear grid.
235
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
236
+ in arbitrary dimensions (`interpn` wraps this
237
+ class).
238
+
239
+ Notes
240
+ -----
241
+
242
+ .. versionadded:: 0.9
243
+
244
+ .. note:: For data on a regular grid use `interpn` instead.
245
+
246
+ Examples
247
+ --------
248
+
249
+ Suppose we want to interpolate the 2-D function
250
+
251
+ >>> import numpy as np
252
+ >>> def func(x, y):
253
+ ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
254
+
255
+ on a grid in [0, 1]x[0, 1]
256
+
257
+ >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
258
+
259
+ but we only know its values at 1000 data points:
260
+
261
+ >>> rng = np.random.default_rng()
262
+ >>> points = rng.random((1000, 2))
263
+ >>> values = func(points[:,0], points[:,1])
264
+
265
+ This can be done with `griddata` -- below we try out all of the
266
+ interpolation methods:
267
+
268
+ >>> from scipy.interpolate import griddata
269
+ >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
270
+ >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
271
+ >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
272
+
273
+ One can see that the exact result is reproduced by all of the
274
+ methods to some degree, but for this smooth function the piecewise
275
+ cubic interpolant gives the best results:
276
+
277
+ >>> import matplotlib.pyplot as plt
278
+ >>> plt.subplot(221)
279
+ >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
280
+ >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
281
+ >>> plt.title('Original')
282
+ >>> plt.subplot(222)
283
+ >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
284
+ >>> plt.title('Nearest')
285
+ >>> plt.subplot(223)
286
+ >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
287
+ >>> plt.title('Linear')
288
+ >>> plt.subplot(224)
289
+ >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
290
+ >>> plt.title('Cubic')
291
+ >>> plt.gcf().set_size_inches(6, 6)
292
+ >>> plt.show()
293
+
294
+ """ # numpy/numpydoc#87 # noqa: E501
295
+
296
+ points = _ndim_coords_from_arrays(points)
297
+
298
+ if points.ndim < 2:
299
+ ndim = points.ndim
300
+ else:
301
+ ndim = points.shape[-1]
302
+
303
+ if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
304
+ from ._interpolate import interp1d
305
+ points = points.ravel()
306
+ if isinstance(xi, tuple):
307
+ if len(xi) != 1:
308
+ raise ValueError("invalid number of dimensions in xi")
309
+ xi, = xi
310
+ # Sort points/values together, necessary as input for interp1d
311
+ idx = np.argsort(points)
312
+ points = points[idx]
313
+ values = values[idx]
314
+ if method == 'nearest':
315
+ fill_value = 'extrapolate'
316
+ ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
317
+ fill_value=fill_value)
318
+ return ip(xi)
319
+ elif method == 'nearest':
320
+ ip = NearestNDInterpolator(points, values, rescale=rescale)
321
+ return ip(xi)
322
+ elif method == 'linear':
323
+ ip = LinearNDInterpolator(points, values, fill_value=fill_value,
324
+ rescale=rescale)
325
+ return ip(xi)
326
+ elif method == 'cubic' and ndim == 2:
327
+ ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
328
+ rescale=rescale)
329
+ return ip(xi)
330
+ else:
331
+ raise ValueError("Unknown interpolation method %r for "
332
+ "%d dimensional data" % (method, ndim))
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_polyint.py ADDED
@@ -0,0 +1,938 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ from scipy.special import factorial
5
+ from scipy._lib._util import _asarray_validated, float_factorial, check_random_state
6
+
7
+
8
+ __all__ = ["KroghInterpolator", "krogh_interpolate",
9
+ "BarycentricInterpolator", "barycentric_interpolate",
10
+ "approximate_taylor_polynomial"]
11
+
12
+
13
+ def _isscalar(x):
14
+ """Check whether x is if a scalar type, or 0-dim"""
15
+ return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
16
+
17
+
18
+ class _Interpolator1D:
19
+ """
20
+ Common features in univariate interpolation
21
+
22
+ Deal with input data type and interpolation axis rolling. The
23
+ actual interpolator can assume the y-data is of shape (n, r) where
24
+ `n` is the number of x-points, and `r` the number of variables,
25
+ and use self.dtype as the y-data type.
26
+
27
+ Attributes
28
+ ----------
29
+ _y_axis
30
+ Axis along which the interpolation goes in the original array
31
+ _y_extra_shape
32
+ Additional trailing shape of the input arrays, excluding
33
+ the interpolation axis.
34
+ dtype
35
+ Dtype of the y-data arrays. Can be set via _set_dtype, which
36
+ forces it to be float or complex.
37
+
38
+ Methods
39
+ -------
40
+ __call__
41
+ _prepare_x
42
+ _finish_y
43
+ _reshape_yi
44
+ _set_yi
45
+ _set_dtype
46
+ _evaluate
47
+
48
+ """
49
+
50
+ __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
51
+
52
+ def __init__(self, xi=None, yi=None, axis=None):
53
+ self._y_axis = axis
54
+ self._y_extra_shape = None
55
+ self.dtype = None
56
+ if yi is not None:
57
+ self._set_yi(yi, xi=xi, axis=axis)
58
+
59
+ def __call__(self, x):
60
+ """
61
+ Evaluate the interpolant
62
+
63
+ Parameters
64
+ ----------
65
+ x : array_like
66
+ Point or points at which to evaluate the interpolant.
67
+
68
+ Returns
69
+ -------
70
+ y : array_like
71
+ Interpolated values. Shape is determined by replacing
72
+ the interpolation axis in the original array with the shape of `x`.
73
+
74
+ Notes
75
+ -----
76
+ Input values `x` must be convertible to `float` values like `int`
77
+ or `float`.
78
+
79
+ """
80
+ x, x_shape = self._prepare_x(x)
81
+ y = self._evaluate(x)
82
+ return self._finish_y(y, x_shape)
83
+
84
+ def _evaluate(self, x):
85
+ """
86
+ Actually evaluate the value of the interpolator.
87
+ """
88
+ raise NotImplementedError()
89
+
90
+ def _prepare_x(self, x):
91
+ """Reshape input x array to 1-D"""
92
+ x = _asarray_validated(x, check_finite=False, as_inexact=True)
93
+ x_shape = x.shape
94
+ return x.ravel(), x_shape
95
+
96
+ def _finish_y(self, y, x_shape):
97
+ """Reshape interpolated y back to an N-D array similar to initial y"""
98
+ y = y.reshape(x_shape + self._y_extra_shape)
99
+ if self._y_axis != 0 and x_shape != ():
100
+ nx = len(x_shape)
101
+ ny = len(self._y_extra_shape)
102
+ s = (list(range(nx, nx + self._y_axis))
103
+ + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
104
+ y = y.transpose(s)
105
+ return y
106
+
107
+ def _reshape_yi(self, yi, check=False):
108
+ yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
109
+ if check and yi.shape[1:] != self._y_extra_shape:
110
+ ok_shape = "{!r} + (N,) + {!r}".format(self._y_extra_shape[-self._y_axis:],
111
+ self._y_extra_shape[:-self._y_axis])
112
+ raise ValueError("Data must be of shape %s" % ok_shape)
113
+ return yi.reshape((yi.shape[0], -1))
114
+
115
+ def _set_yi(self, yi, xi=None, axis=None):
116
+ if axis is None:
117
+ axis = self._y_axis
118
+ if axis is None:
119
+ raise ValueError("no interpolation axis specified")
120
+
121
+ yi = np.asarray(yi)
122
+
123
+ shape = yi.shape
124
+ if shape == ():
125
+ shape = (1,)
126
+ if xi is not None and shape[axis] != len(xi):
127
+ raise ValueError("x and y arrays must be equal in length along "
128
+ "interpolation axis.")
129
+
130
+ self._y_axis = (axis % yi.ndim)
131
+ self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
132
+ self.dtype = None
133
+ self._set_dtype(yi.dtype)
134
+
135
+ def _set_dtype(self, dtype, union=False):
136
+ if np.issubdtype(dtype, np.complexfloating) \
137
+ or np.issubdtype(self.dtype, np.complexfloating):
138
+ self.dtype = np.complex128
139
+ else:
140
+ if not union or self.dtype != np.complex128:
141
+ self.dtype = np.float64
142
+
143
+
144
+ class _Interpolator1DWithDerivatives(_Interpolator1D):
145
+ def derivatives(self, x, der=None):
146
+ """
147
+ Evaluate several derivatives of the polynomial at the point `x`
148
+
149
+ Produce an array of derivatives evaluated at the point `x`.
150
+
151
+ Parameters
152
+ ----------
153
+ x : array_like
154
+ Point or points at which to evaluate the derivatives
155
+ der : int or list or None, optional
156
+ How many derivatives to evaluate, or None for all potentially
157
+ nonzero derivatives (that is, a number equal to the number
158
+ of points), or a list of derivatives to evaluate. This number
159
+ includes the function value as the '0th' derivative.
160
+
161
+ Returns
162
+ -------
163
+ d : ndarray
164
+ Array with derivatives; ``d[j]`` contains the jth derivative.
165
+ Shape of ``d[j]`` is determined by replacing the interpolation
166
+ axis in the original array with the shape of `x`.
167
+
168
+ Examples
169
+ --------
170
+ >>> from scipy.interpolate import KroghInterpolator
171
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
172
+ array([1.0,2.0,3.0])
173
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
174
+ array([[1.0,1.0],
175
+ [2.0,2.0],
176
+ [3.0,3.0]])
177
+
178
+ """
179
+ x, x_shape = self._prepare_x(x)
180
+ y = self._evaluate_derivatives(x, der)
181
+
182
+ y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
183
+ if self._y_axis != 0 and x_shape != ():
184
+ nx = len(x_shape)
185
+ ny = len(self._y_extra_shape)
186
+ s = ([0] + list(range(nx+1, nx + self._y_axis+1))
187
+ + list(range(1, nx+1)) +
188
+ list(range(nx+1+self._y_axis, nx+ny+1)))
189
+ y = y.transpose(s)
190
+ return y
191
+
192
+ def derivative(self, x, der=1):
193
+ """
194
+ Evaluate a single derivative of the polynomial at the point `x`.
195
+
196
+ Parameters
197
+ ----------
198
+ x : array_like
199
+ Point or points at which to evaluate the derivatives
200
+
201
+ der : integer, optional
202
+ Which derivative to evaluate (default: first derivative).
203
+ This number includes the function value as 0th derivative.
204
+
205
+ Returns
206
+ -------
207
+ d : ndarray
208
+ Derivative interpolated at the x-points. Shape of `d` is
209
+ determined by replacing the interpolation axis in the
210
+ original array with the shape of `x`.
211
+
212
+ Notes
213
+ -----
214
+ This may be computed by evaluating all derivatives up to the desired
215
+ one (using self.derivatives()) and then discarding the rest.
216
+
217
+ """
218
+ x, x_shape = self._prepare_x(x)
219
+ y = self._evaluate_derivatives(x, der+1)
220
+ return self._finish_y(y[der], x_shape)
221
+
222
+ def _evaluate_derivatives(self, x, der=None):
223
+ """
224
+ Actually evaluate the derivatives.
225
+
226
+ Parameters
227
+ ----------
228
+ x : array_like
229
+ 1D array of points at which to evaluate the derivatives
230
+ der : integer, optional
231
+ The number of derivatives to evaluate, from 'order 0' (der=1)
232
+ to order der-1. If omitted, return all possibly-non-zero
233
+ derivatives, ie 0 to order n-1.
234
+
235
+ Returns
236
+ -------
237
+ d : ndarray
238
+ Array of shape ``(der, x.size, self.yi.shape[1])`` containing
239
+ the derivatives from 0 to der-1
240
+ """
241
+ raise NotImplementedError()
242
+
243
+
244
+ class KroghInterpolator(_Interpolator1DWithDerivatives):
245
+ """
246
+ Interpolating polynomial for a set of points.
247
+
248
+ The polynomial passes through all the pairs ``(xi, yi)``. One may
249
+ additionally specify a number of derivatives at each point `xi`;
250
+ this is done by repeating the value `xi` and specifying the
251
+ derivatives as successive `yi` values.
252
+
253
+ Allows evaluation of the polynomial and all its derivatives.
254
+ For reasons of numerical stability, this function does not compute
255
+ the coefficients of the polynomial, although they can be obtained
256
+ by evaluating all the derivatives.
257
+
258
+ Parameters
259
+ ----------
260
+ xi : array_like, shape (npoints, )
261
+ Known x-coordinates. Must be sorted in increasing order.
262
+ yi : array_like, shape (..., npoints, ...)
263
+ Known y-coordinates. When an xi occurs two or more times in
264
+ a row, the corresponding yi's represent derivative values. The length of `yi`
265
+ along the interpolation axis must be equal to the length of `xi`. Use the
266
+ `axis` parameter to select the correct axis.
267
+ axis : int, optional
268
+ Axis in the `yi` array corresponding to the x-coordinate values. Defaults to
269
+ ``axis=0``.
270
+
271
+ Notes
272
+ -----
273
+ Be aware that the algorithms implemented here are not necessarily
274
+ the most numerically stable known. Moreover, even in a world of
275
+ exact computation, unless the x coordinates are chosen very
276
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
277
+ polynomial interpolation itself is a very ill-conditioned process
278
+ due to the Runge phenomenon. In general, even with well-chosen
279
+ x values, degrees higher than about thirty cause problems with
280
+ numerical instability in this code.
281
+
282
+ Based on [1]_.
283
+
284
+ References
285
+ ----------
286
+ .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
287
+ and Numerical Differentiation", 1970.
288
+
289
+ Examples
290
+ --------
291
+ To produce a polynomial that is zero at 0 and 1 and has
292
+ derivative 2 at 0, call
293
+
294
+ >>> from scipy.interpolate import KroghInterpolator
295
+ >>> KroghInterpolator([0,0,1],[0,2,0])
296
+
297
+ This constructs the quadratic :math:`2x^2-2x`. The derivative condition
298
+ is indicated by the repeated zero in the `xi` array; the corresponding
299
+ yi values are 0, the function value, and 2, the derivative value.
300
+
301
+ For another example, given `xi`, `yi`, and a derivative `ypi` for each
302
+ point, appropriate arrays can be constructed as:
303
+
304
+ >>> import numpy as np
305
+ >>> rng = np.random.default_rng()
306
+ >>> xi = np.linspace(0, 1, 5)
307
+ >>> yi, ypi = rng.random((2, 5))
308
+ >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
309
+ >>> KroghInterpolator(xi_k, yi_k)
310
+
311
+ To produce a vector-valued polynomial, supply a higher-dimensional
312
+ array for `yi`:
313
+
314
+ >>> KroghInterpolator([0,1],[[2,3],[4,5]])
315
+
316
+ This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
317
+
318
+ """
319
+
320
+ def __init__(self, xi, yi, axis=0):
321
+ super().__init__(xi, yi, axis)
322
+
323
+ self.xi = np.asarray(xi)
324
+ self.yi = self._reshape_yi(yi)
325
+ self.n, self.r = self.yi.shape
326
+
327
+ if (deg := self.xi.size) > 30:
328
+ warnings.warn(f"{deg} degrees provided, degrees higher than about"
329
+ " thirty cause problems with numerical instability "
330
+ "with 'KroghInterpolator'", stacklevel=2)
331
+
332
+ c = np.zeros((self.n+1, self.r), dtype=self.dtype)
333
+ c[0] = self.yi[0]
334
+ Vk = np.zeros((self.n, self.r), dtype=self.dtype)
335
+ for k in range(1, self.n):
336
+ s = 0
337
+ while s <= k and xi[k-s] == xi[k]:
338
+ s += 1
339
+ s -= 1
340
+ Vk[0] = self.yi[k]/float_factorial(s)
341
+ for i in range(k-s):
342
+ if xi[i] == xi[k]:
343
+ raise ValueError("Elements of `xi` can't be equal.")
344
+ if s == 0:
345
+ Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
346
+ else:
347
+ Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
348
+ c[k] = Vk[k-s]
349
+ self.c = c
350
+
351
+ def _evaluate(self, x):
352
+ pi = 1
353
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
354
+ p += self.c[0,np.newaxis,:]
355
+ for k in range(1, self.n):
356
+ w = x - self.xi[k-1]
357
+ pi = w*pi
358
+ p += pi[:,np.newaxis] * self.c[k]
359
+ return p
360
+
361
+ def _evaluate_derivatives(self, x, der=None):
362
+ n = self.n
363
+ r = self.r
364
+
365
+ if der is None:
366
+ der = self.n
367
+
368
+ pi = np.zeros((n, len(x)))
369
+ w = np.zeros((n, len(x)))
370
+ pi[0] = 1
371
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
372
+ p += self.c[0, np.newaxis, :]
373
+
374
+ for k in range(1, n):
375
+ w[k-1] = x - self.xi[k-1]
376
+ pi[k] = w[k-1] * pi[k-1]
377
+ p += pi[k, :, np.newaxis] * self.c[k]
378
+
379
+ cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
380
+ cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
381
+ cn[0] = p
382
+ for k in range(1, n):
383
+ for i in range(1, n-k+1):
384
+ pi[i] = w[k+i-1]*pi[i-1] + pi[i]
385
+ cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
386
+ cn[k] *= float_factorial(k)
387
+
388
+ cn[n, :, :] = 0
389
+ return cn[:der]
390
+
391
+
392
+ def krogh_interpolate(xi, yi, x, der=0, axis=0):
393
+ """
394
+ Convenience function for polynomial interpolation.
395
+
396
+ See `KroghInterpolator` for more details.
397
+
398
+ Parameters
399
+ ----------
400
+ xi : array_like
401
+ Interpolation points (known x-coordinates).
402
+ yi : array_like
403
+ Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
404
+ vectors of length R, or scalars if R=1.
405
+ x : array_like
406
+ Point or points at which to evaluate the derivatives.
407
+ der : int or list or None, optional
408
+ How many derivatives to evaluate, or None for all potentially
409
+ nonzero derivatives (that is, a number equal to the number
410
+ of points), or a list of derivatives to evaluate. This number
411
+ includes the function value as the '0th' derivative.
412
+ axis : int, optional
413
+ Axis in the `yi` array corresponding to the x-coordinate values.
414
+
415
+ Returns
416
+ -------
417
+ d : ndarray
418
+ If the interpolator's values are R-D then the
419
+ returned array will be the number of derivatives by N by R.
420
+ If `x` is a scalar, the middle dimension will be dropped; if
421
+ the `yi` are scalars then the last dimension will be dropped.
422
+
423
+ See Also
424
+ --------
425
+ KroghInterpolator : Krogh interpolator
426
+
427
+ Notes
428
+ -----
429
+ Construction of the interpolating polynomial is a relatively expensive
430
+ process. If you want to evaluate it repeatedly consider using the class
431
+ KroghInterpolator (which is what this function uses).
432
+
433
+ Examples
434
+ --------
435
+ We can interpolate 2D observed data using Krogh interpolation:
436
+
437
+ >>> import numpy as np
438
+ >>> import matplotlib.pyplot as plt
439
+ >>> from scipy.interpolate import krogh_interpolate
440
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
441
+ >>> y_observed = np.sin(x_observed)
442
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
443
+ >>> y = krogh_interpolate(x_observed, y_observed, x)
444
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
445
+ >>> plt.plot(x, y, label="krogh interpolation")
446
+ >>> plt.legend()
447
+ >>> plt.show()
448
+ """
449
+
450
+ P = KroghInterpolator(xi, yi, axis=axis)
451
+ if der == 0:
452
+ return P(x)
453
+ elif _isscalar(der):
454
+ return P.derivative(x, der=der)
455
+ else:
456
+ return P.derivatives(x, der=np.amax(der)+1)[der]
457
+
458
+
459
+ def approximate_taylor_polynomial(f,x,degree,scale,order=None):
460
+ """
461
+ Estimate the Taylor polynomial of f at x by polynomial fitting.
462
+
463
+ Parameters
464
+ ----------
465
+ f : callable
466
+ The function whose Taylor polynomial is sought. Should accept
467
+ a vector of `x` values.
468
+ x : scalar
469
+ The point at which the polynomial is to be evaluated.
470
+ degree : int
471
+ The degree of the Taylor polynomial
472
+ scale : scalar
473
+ The width of the interval to use to evaluate the Taylor polynomial.
474
+ Function values spread over a range this wide are used to fit the
475
+ polynomial. Must be chosen carefully.
476
+ order : int or None, optional
477
+ The order of the polynomial to be used in the fitting; `f` will be
478
+ evaluated ``order+1`` times. If None, use `degree`.
479
+
480
+ Returns
481
+ -------
482
+ p : poly1d instance
483
+ The Taylor polynomial (translated to the origin, so that
484
+ for example p(0)=f(x)).
485
+
486
+ Notes
487
+ -----
488
+ The appropriate choice of "scale" is a trade-off; too large and the
489
+ function differs from its Taylor polynomial too much to get a good
490
+ answer, too small and round-off errors overwhelm the higher-order terms.
491
+ The algorithm used becomes numerically unstable around order 30 even
492
+ under ideal circumstances.
493
+
494
+ Choosing order somewhat larger than degree may improve the higher-order
495
+ terms.
496
+
497
+ Examples
498
+ --------
499
+ We can calculate Taylor approximation polynomials of sin function with
500
+ various degrees:
501
+
502
+ >>> import numpy as np
503
+ >>> import matplotlib.pyplot as plt
504
+ >>> from scipy.interpolate import approximate_taylor_polynomial
505
+ >>> x = np.linspace(-10.0, 10.0, num=100)
506
+ >>> plt.plot(x, np.sin(x), label="sin curve")
507
+ >>> for degree in np.arange(1, 15, step=2):
508
+ ... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
509
+ ... order=degree + 2)
510
+ ... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
511
+ >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
512
+ ... borderaxespad=0.0, shadow=True)
513
+ >>> plt.tight_layout()
514
+ >>> plt.axis([-10, 10, -10, 10])
515
+ >>> plt.show()
516
+
517
+ """
518
+ if order is None:
519
+ order = degree
520
+
521
+ n = order+1
522
+ # Choose n points that cluster near the endpoints of the interval in
523
+ # a way that avoids the Runge phenomenon. Ensure, by including the
524
+ # endpoint or not as appropriate, that one point always falls at x
525
+ # exactly.
526
+ xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
527
+
528
+ P = KroghInterpolator(xs, f(xs))
529
+ d = P.derivatives(x,der=degree+1)
530
+
531
+ return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
532
+
533
+
534
+ class BarycentricInterpolator(_Interpolator1DWithDerivatives):
535
+ r"""Interpolating polynomial for a set of points.
536
+
537
+ Constructs a polynomial that passes through a given set of points.
538
+ Allows evaluation of the polynomial and all its derivatives,
539
+ efficient changing of the y-values to be interpolated,
540
+ and updating by adding more x- and y-values.
541
+
542
+ For reasons of numerical stability, this function does not compute
543
+ the coefficients of the polynomial.
544
+
545
+ The values `yi` need to be provided before the function is
546
+ evaluated, but none of the preprocessing depends on them, so rapid
547
+ updates are possible.
548
+
549
+ Parameters
550
+ ----------
551
+ xi : array_like, shape (npoints, )
552
+ 1-D array of x coordinates of the points the polynomial
553
+ should pass through
554
+ yi : array_like, shape (..., npoints, ...), optional
555
+ N-D array of y coordinates of the points the polynomial should pass through.
556
+ If None, the y values will be supplied later via the `set_y` method.
557
+ The length of `yi` along the interpolation axis must be equal to the length
558
+ of `xi`. Use the ``axis`` parameter to select correct axis.
559
+ axis : int, optional
560
+ Axis in the yi array corresponding to the x-coordinate values. Defaults
561
+ to ``axis=0``.
562
+ wi : array_like, optional
563
+ The barycentric weights for the chosen interpolation points `xi`.
564
+ If absent or None, the weights will be computed from `xi` (default).
565
+ This allows for the reuse of the weights `wi` if several interpolants
566
+ are being calculated using the same nodes `xi`, without re-computation.
567
+ random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
568
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
569
+ singleton is used.
570
+ If `seed` is an int, a new ``RandomState`` instance is used,
571
+ seeded with `seed`.
572
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
573
+ that instance is used.
574
+
575
+ Notes
576
+ -----
577
+ This class uses a "barycentric interpolation" method that treats
578
+ the problem as a special case of rational function interpolation.
579
+ This algorithm is quite stable, numerically, but even in a world of
580
+ exact computation, unless the x coordinates are chosen very
581
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
582
+ polynomial interpolation itself is a very ill-conditioned process
583
+ due to the Runge phenomenon.
584
+
585
+ Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
586
+
587
+ Examples
588
+ --------
589
+ To produce a quintic barycentric interpolant approximating the function
590
+ :math:`\sin x`, and its first four derivatives, using six randomly-spaced
591
+ nodes in :math:`(0, \frac{\pi}{2})`:
592
+
593
+ >>> import numpy as np
594
+ >>> import matplotlib.pyplot as plt
595
+ >>> from scipy.interpolate import BarycentricInterpolator
596
+ >>> rng = np.random.default_rng()
597
+ >>> xi = rng.random(6) * np.pi/2
598
+ >>> f, f_d1, f_d2, f_d3, f_d4 = np.sin, np.cos, lambda x: -np.sin(x), lambda x: -np.cos(x), np.sin
599
+ >>> P = BarycentricInterpolator(xi, f(xi), random_state=rng)
600
+ >>> fig, axs = plt.subplots(5, 1, sharex=True, layout='constrained', figsize=(7,10))
601
+ >>> x = np.linspace(0, np.pi, 100)
602
+ >>> axs[0].plot(x, P(x), 'r:', x, f(x), 'k--', xi, f(xi), 'xk')
603
+ >>> axs[1].plot(x, P.derivative(x), 'r:', x, f_d1(x), 'k--', xi, f_d1(xi), 'xk')
604
+ >>> axs[2].plot(x, P.derivative(x, 2), 'r:', x, f_d2(x), 'k--', xi, f_d2(xi), 'xk')
605
+ >>> axs[3].plot(x, P.derivative(x, 3), 'r:', x, f_d3(x), 'k--', xi, f_d3(xi), 'xk')
606
+ >>> axs[4].plot(x, P.derivative(x, 4), 'r:', x, f_d4(x), 'k--', xi, f_d4(xi), 'xk')
607
+ >>> axs[0].set_xlim(0, np.pi)
608
+ >>> axs[4].set_xlabel(r"$x$")
609
+ >>> axs[4].set_xticks([i * np.pi / 4 for i in range(5)],
610
+ ... ["0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"])
611
+ >>> axs[0].set_ylabel("$f(x)$")
612
+ >>> axs[1].set_ylabel("$f'(x)$")
613
+ >>> axs[2].set_ylabel("$f''(x)$")
614
+ >>> axs[3].set_ylabel("$f^{(3)}(x)$")
615
+ >>> axs[4].set_ylabel("$f^{(4)}(x)$")
616
+ >>> labels = ['Interpolation nodes', 'True function $f$', 'Barycentric interpolation']
617
+ >>> axs[0].legend(axs[0].get_lines()[::-1], labels, bbox_to_anchor=(0., 1.02, 1., .102),
618
+ ... loc='lower left', ncols=3, mode="expand", borderaxespad=0., frameon=False)
619
+ >>> plt.show()
620
+ """ # numpy/numpydoc#87 # noqa: E501
621
+
622
+ def __init__(self, xi, yi=None, axis=0, *, wi=None, random_state=None):
623
+ super().__init__(xi, yi, axis)
624
+
625
+ random_state = check_random_state(random_state)
626
+
627
+ self.xi = np.asarray(xi, dtype=np.float64)
628
+ self.set_yi(yi)
629
+ self.n = len(self.xi)
630
+
631
+ # cache derivative object to avoid re-computing the weights with every call.
632
+ self._diff_cij = None
633
+
634
+ if wi is not None:
635
+ self.wi = wi
636
+ else:
637
+ # See page 510 of Berrut and Trefethen 2004 for an explanation of the
638
+ # capacity scaling and the suggestion of using a random permutation of
639
+ # the input factors.
640
+ # At the moment, the permutation is not performed for xi that are
641
+ # appended later through the add_xi interface. It's not clear to me how
642
+ # to implement that and it seems that most situations that require
643
+ # these numerical stability improvements will be able to provide all
644
+ # the points to the constructor.
645
+ self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
646
+ permute = random_state.permutation(self.n)
647
+ inv_permute = np.zeros(self.n, dtype=np.int32)
648
+ inv_permute[permute] = np.arange(self.n)
649
+ self.wi = np.zeros(self.n)
650
+
651
+ for i in range(self.n):
652
+ dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
653
+ dist[inv_permute[i]] = 1.0
654
+ prod = np.prod(dist)
655
+ if prod == 0.0:
656
+ raise ValueError("Interpolation points xi must be"
657
+ " distinct.")
658
+ self.wi[i] = 1.0 / prod
659
+
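+ # Illustrative sketch (not part of the SciPy implementation): up to the
+ # capacity scaling and the random ordering of the product, the loop above
+ # computes the classical barycentric weights w_i = 1 / prod_{j != i} (x_i - x_j).
+ # For three equispaced nodes, for example:
+ # >>> import math
+ # >>> xi = [0.0, 1.0, 2.0]
+ # >>> [1.0 / math.prod(x - z for z in xi if z != x) for x in xi]
+ # [0.5, -1.0, 0.5]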
660
+ def set_yi(self, yi, axis=None):
661
+ """
662
+ Update the y values to be interpolated
663
+
664
+ The barycentric interpolation algorithm requires the calculation
665
+ of weights, but these depend only on the `xi`. The `yi` can be changed
666
+ at any time.
667
+
668
+ Parameters
669
+ ----------
670
+ yi : array_like
671
+ The y-coordinates of the points the polynomial will pass through.
672
+ If None, the y values must be supplied later.
673
+ axis : int, optional
674
+ Axis in the `yi` array corresponding to the x-coordinate values.
675
+
676
+ """
677
+ if yi is None:
678
+ self.yi = None
679
+ return
680
+ self._set_yi(yi, xi=self.xi, axis=axis)
681
+ self.yi = self._reshape_yi(yi)
682
+ self.n, self.r = self.yi.shape
683
+ self._diff_baryint = None
684
+
685
+ def add_xi(self, xi, yi=None):
686
+ """
687
+ Add more x values to the set to be interpolated
688
+
689
+ The barycentric interpolation algorithm allows easy updating by
690
+ adding more points for the polynomial to pass through.
691
+
692
+ Parameters
693
+ ----------
694
+ xi : array_like
695
+ The x coordinates of the points that the polynomial should pass
696
+ through.
697
+ yi : array_like, optional
698
+ The y coordinates of the points the polynomial should pass through.
699
+ Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
700
+ vector-valued.
701
+ If `yi` is not given, the y values will be supplied later. `yi`
702
+ should be given if and only if the interpolator has y values
703
+ specified.
704
+
705
+ Notes
706
+ -----
707
+ The new points added by `add_xi` are not randomly permuted
708
+ so there is potential for numerical instability,
709
+ especially for a large number of points. If this
710
+ happens, please reconstruct the interpolation from scratch instead.
711
+ """
712
+ if yi is not None:
713
+ if self.yi is None:
714
+ raise ValueError("No previous yi value to update!")
715
+ yi = self._reshape_yi(yi, check=True)
716
+ self.yi = np.vstack((self.yi,yi))
717
+ else:
718
+ if self.yi is not None:
719
+ raise ValueError("No update to yi provided!")
720
+ old_n = self.n
721
+ self.xi = np.concatenate((self.xi,xi))
722
+ self.n = len(self.xi)
723
+ self.wi **= -1
724
+ old_wi = self.wi
725
+ self.wi = np.zeros(self.n)
726
+ self.wi[:old_n] = old_wi
727
+ for j in range(old_n, self.n):
728
+ self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
729
+ self.wi[j] = np.multiply.reduce(
730
+ self._inv_capacity * (self.xi[:j]-self.xi[j])
731
+ )
732
+ self.wi **= -1
733
+ self._diff_cij = None
734
+ self._diff_baryint = None
735
+
736
+ def __call__(self, x):
737
+ """Evaluate the interpolating polynomial at the points x
738
+
739
+ Parameters
740
+ ----------
741
+ x : array_like
742
+ Point or points at which to evaluate the interpolant.
743
+
744
+ Returns
745
+ -------
746
+ y : array_like
747
+ Interpolated values. Shape is determined by replacing
748
+ the interpolation axis in the original array with the shape of `x`.
749
+
750
+ Notes
751
+ -----
752
+ Currently the code computes an outer product between `x` and the
753
+ weights, that is, it constructs an intermediate array of size
754
+ ``(len(x), N)``, where N is the number of interpolation points.
755
+ """
756
+ return _Interpolator1D.__call__(self, x)
757
+
758
+ def _evaluate(self, x):
759
+ if x.size == 0:
760
+ p = np.zeros((0, self.r), dtype=self.dtype)
761
+ else:
762
+ c = x[..., np.newaxis] - self.xi
763
+ z = c == 0
764
+ c[z] = 1
765
+ c = self.wi / c
766
+ with np.errstate(divide='ignore'):
767
+ p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
768
+ # Now fix where x==some xi
769
+ r = np.nonzero(z)
770
+ if len(r) == 1: # evaluation at a scalar
771
+ if len(r[0]) > 0: # equals one of the points
772
+ p = self.yi[r[0][0]]
773
+ else:
774
+ p[r[:-1]] = self.yi[r[-1]]
775
+ return p
776
+
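+ # Illustrative sketch (not part of the SciPy implementation): `_evaluate`
+ # above applies the second (true) barycentric formula
+ #     p(x) = sum_i [w_i / (x - x_i)] y_i / sum_i [w_i / (x - x_i)],
+ # patching exact hits x == x_i afterwards.  A standalone check with the
+ # nodes 0, 1, 2, weights [0.5, -1, 0.5] and data y = x**2, evaluated at 1.5:
+ # >>> import numpy as np
+ # >>> xi, w = np.array([0.0, 1.0, 2.0]), np.array([0.5, -1.0, 0.5])
+ # >>> c = w / (1.5 - xi)
+ # >>> bool(np.isclose(np.dot(c, xi**2) / c.sum(), 2.25))
+ # True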
777
+ def derivative(self, x, der=1):
778
+ """
779
+ Evaluate a single derivative of the polynomial at the point x.
780
+
781
+ Parameters
782
+ ----------
783
+ x : array_like
784
+ Point or points at which to evaluate the derivatives
785
+ der : integer, optional
786
+ Which derivative to evaluate (default: first derivative).
787
+ This number includes the function value as 0th derivative.
788
+
789
+ Returns
790
+ -------
791
+ d : ndarray
792
+ Derivative interpolated at the x-points. Shape of `d` is
793
+ determined by replacing the interpolation axis in the
794
+ original array with the shape of `x`.
795
+ """
796
+ x, x_shape = self._prepare_x(x)
797
+ y = self._evaluate_derivatives(x, der+1, all_lower=False)
798
+ return self._finish_y(y, x_shape)
799
+
800
+ def _evaluate_derivatives(self, x, der=None, all_lower=True):
801
+ # NB: der here is not the order of the highest derivative;
802
+ # instead, it is the size of the derivatives matrix that
803
+ # would be returned with all_lower=True, including the
804
+ # '0th' derivative (the undifferentiated function).
805
+ # E.g. to evaluate the 5th derivative alone, call
806
+ # _evaluate_derivatives(x, der=6, all_lower=False).
807
+
808
+ if (not all_lower) and (x.size == 0 or self.r == 0):
809
+ return np.zeros((0, self.r), dtype=self.dtype)
810
+
811
+ if (not all_lower) and der == 1:
812
+ return self._evaluate(x)
813
+
814
+ if (not all_lower) and (der > self.n):
815
+ return np.zeros((len(x), self.r), dtype=self.dtype)
816
+
817
+ if der is None:
818
+ der = self.n
819
+
820
+ if all_lower and (x.size == 0 or self.r == 0):
821
+ return np.zeros((der, len(x), self.r), dtype=self.dtype)
822
+
823
+ if self._diff_cij is None:
824
+ # c[i,j] = xi[i] - xi[j]
825
+ c = self.xi[:, np.newaxis] - self.xi
826
+
827
+ # avoid division by 0 (diagonal entries are so far zero by construction)
828
+ np.fill_diagonal(c, 1)
829
+
830
+ # c[i,j] = (w[j] / w[i]) / (xi[i] - xi[j]) (equation 9.4)
831
+ c = self.wi / (c * self.wi[..., np.newaxis])
832
+
833
+ # fill in correct diagonal entries: each row sums to 0
834
+ np.fill_diagonal(c, 0)
835
+
836
+ # calculate diagonal
837
+ # c[i,i] = -sum_{j != i} c[i,j] (equation 9.5)
838
+ d = -c.sum(axis=1)
839
+ # c[i,j] = l_j'(x_i)
840
+ np.fill_diagonal(c, d)
841
+
842
+ self._diff_cij = c
843
+
844
+ if self._diff_baryint is None:
845
+ # initialise and cache derivative interpolator and cijs;
846
+ # reuse weights wi (which depend only on interpolation points xi),
847
+ # to avoid unnecessary re-computation
848
+ self._diff_baryint = BarycentricInterpolator(xi=self.xi,
849
+ yi=self._diff_cij @ self.yi,
850
+ wi=self.wi)
851
+ self._diff_baryint._diff_cij = self._diff_cij
852
+
853
+ if all_lower:
854
+ # assemble matrix of derivatives from order 0 to order der-1,
855
+ # in the format required by _Interpolator1DWithDerivatives.
856
+ cn = np.zeros((der, len(x), self.r), dtype=self.dtype)
857
+ for d in range(der):
858
+ cn[d, :, :] = self._evaluate_derivatives(x, d+1, all_lower=False)
859
+ return cn
860
+
861
+ # recursively evaluate only the derivative requested
862
+ return self._diff_baryint._evaluate_derivatives(x, der-1, all_lower=False)
863
+
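+ # Illustrative sketch (not part of the SciPy implementation): the cached
+ # matrix ``_diff_cij`` is the barycentric differentiation matrix
+ # D[i, j] = (w_j / w_i) / (x_i - x_j) for i != j, with diagonal entries
+ # chosen so that each row sums to zero; applying it to the data gives the
+ # derivative of the interpolant at the nodes.  For y = x**2 on 0, 1, 2:
+ # >>> import numpy as np
+ # >>> xi, w = np.array([0.0, 1.0, 2.0]), np.array([0.5, -1.0, 0.5])
+ # >>> D = w / ((xi[:, None] - xi + np.eye(3)) * w[:, None])
+ # >>> np.fill_diagonal(D, 0); np.fill_diagonal(D, -D.sum(axis=1))
+ # >>> (D @ xi**2).tolist()    # derivative of x**2 at the nodes
+ # [0.0, 2.0, 4.0]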
864
+
865
+ def barycentric_interpolate(xi, yi, x, axis=0, *, der=0):
866
+ """
867
+ Convenience function for polynomial interpolation.
868
+
869
+ Constructs a polynomial that passes through a given set of points,
870
+ then evaluates the polynomial. For reasons of numerical stability,
871
+ this function does not compute the coefficients of the polynomial.
872
+
873
+ This function uses a "barycentric interpolation" method that treats
874
+ the problem as a special case of rational function interpolation.
875
+ This algorithm is quite stable, numerically, but even in a world of
876
+ exact computation, unless the `x` coordinates are chosen very
877
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
878
+ polynomial interpolation itself is a very ill-conditioned process
879
+ due to the Runge phenomenon.
880
+
881
+ Parameters
882
+ ----------
883
+ xi : array_like
884
+ 1-D array of x coordinates of the points the polynomial should
885
+ pass through
886
+ yi : array_like
887
+ The y coordinates of the points the polynomial should pass through.
888
+ x : scalar or array_like
889
+ Point or points at which to evaluate the interpolant.
890
+ der : int or list or None, optional
891
+ How many derivatives to evaluate, or None for all potentially
892
+ nonzero derivatives (that is, a number equal to the number
893
+ of points), or a list of derivatives to evaluate. This number
894
+ includes the function value as the '0th' derivative.
895
+ axis : int, optional
896
+ Axis in the `yi` array corresponding to the x-coordinate values.
897
+
898
+ Returns
899
+ -------
900
+ y : scalar or array_like
901
+ Interpolated values. Shape is determined by replacing
902
+ the interpolation axis in the original array with the shape of `x`.
903
+
904
+ See Also
905
+ --------
906
+ BarycentricInterpolator : Barycentric interpolator
907
+
908
+ Notes
909
+ -----
910
+ Construction of the interpolation weights is a relatively slow process.
911
+ If you want to call this many times with the same xi (but possibly
912
+ varying yi or x) you should use the class `BarycentricInterpolator`.
913
+ This is what this function uses internally.
914
+
915
+ Examples
916
+ --------
917
+ We can interpolate 2D observed data using barycentric interpolation:
918
+
919
+ >>> import numpy as np
920
+ >>> import matplotlib.pyplot as plt
921
+ >>> from scipy.interpolate import barycentric_interpolate
922
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
923
+ >>> y_observed = np.sin(x_observed)
924
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
925
+ >>> y = barycentric_interpolate(x_observed, y_observed, x)
926
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
927
+ >>> plt.plot(x, y, label="barycentric interpolation")
928
+ >>> plt.legend()
929
+ >>> plt.show()
930
+
931
+ """
932
+ P = BarycentricInterpolator(xi, yi, axis=axis)
933
+ if der == 0:
934
+ return P(x)
935
+ elif _isscalar(der):
936
+ return P.derivative(x, der=der)
937
+ else:
938
+ return P.derivatives(x, der=np.amax(der)+1)[der]
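+ # A minimal usage sketch of the convenience function (illustrative only;
+ # the nodes and data are arbitrary):
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import barycentric_interpolate
+ # >>> xi = np.array([0.0, 1.0, 2.0])
+ # >>> bool(np.allclose(barycentric_interpolate(xi, xi**2, [0.5, 1.5]), [0.25, 2.25]))
+ # True
+ # >>> bool(np.allclose(barycentric_interpolate(xi, xi**2, 1.5, der=1), 3.0))
+ # True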
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbf.py ADDED
@@ -0,0 +1,290 @@
1
+ """rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
2
+
3
+ Written by John Travers <[email protected]>, February 2007
4
+ Based closely on Matlab code by Alex Chirokov
5
+ Additional, large, improvements by Robert Hetland
6
+ Some additional alterations by Travis Oliphant
7
+ Interpolation with multi-dimensional target domain by Josua Sassen
8
+
9
+ Permission to use, modify, and distribute this software is given under the
10
+ terms of the SciPy (BSD style) license. See LICENSE.txt that came with
11
+ this distribution for specifics.
12
+
13
+ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
14
+
15
+ Copyright (c) 2006-2007, Robert Hetland <[email protected]>
16
+ Copyright (c) 2007, John Travers <[email protected]>
17
+
18
+ Redistribution and use in source and binary forms, with or without
19
+ modification, are permitted provided that the following conditions are
20
+ met:
21
+
22
+ * Redistributions of source code must retain the above copyright
23
+ notice, this list of conditions and the following disclaimer.
24
+
25
+ * Redistributions in binary form must reproduce the above
26
+ copyright notice, this list of conditions and the following
27
+ disclaimer in the documentation and/or other materials provided
28
+ with the distribution.
29
+
30
+ * Neither the name of Robert Hetland nor the names of any
31
+ contributors may be used to endorse or promote products derived
32
+ from this software without specific prior written permission.
33
+
34
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45
+ """
46
+ import numpy as np
47
+
48
+ from scipy import linalg
49
+ from scipy.special import xlogy
50
+ from scipy.spatial.distance import cdist, pdist, squareform
51
+
52
+ __all__ = ['Rbf']
53
+
54
+
55
+ class Rbf:
56
+ """
57
+ Rbf(*args, **kwargs)
58
+
59
+ A class for radial basis function interpolation of functions from
60
+ N-D scattered data to an M-D domain.
61
+
62
+ .. legacy:: class
63
+
64
+ `Rbf` is legacy code, for new usage please use `RBFInterpolator`
65
+ instead.
66
+
67
+ Parameters
68
+ ----------
69
+ *args : arrays
70
+ x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
71
+ and d is the array of values at the nodes
72
+ function : str or callable, optional
73
+ The radial basis function, based on the radius, r, given by the norm
74
+ (default is Euclidean distance); the default is 'multiquadric'::
75
+
76
+ 'multiquadric': sqrt((r/self.epsilon)**2 + 1)
77
+ 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
78
+ 'gaussian': exp(-(r/self.epsilon)**2)
79
+ 'linear': r
80
+ 'cubic': r**3
81
+ 'quintic': r**5
82
+ 'thin_plate': r**2 * log(r)
83
+
84
+ If callable, then it must take 2 arguments (self, r). The epsilon
85
+ parameter will be available as self.epsilon. Other keyword
86
+ arguments passed in will be available as well.
87
+
88
+ epsilon : float, optional
89
+ Adjustable constant for gaussian or multiquadrics functions
90
+ - defaults to approximate average distance between nodes (which is
91
+ a good start).
92
+ smooth : float, optional
93
+ Values greater than zero increase the smoothness of the
94
+ approximation. 0 is for interpolation (default), the function will
95
+ always go through the nodal points in this case.
96
+ norm : str, callable, optional
97
+ A function that returns the 'distance' between two points, with
98
+ inputs as arrays of positions (x, y, z, ...), and an output as an
99
+ array of distance. E.g., the default: 'euclidean', such that the result
100
+ is a matrix of the distances from each point in ``x1`` to each point in
101
+ ``x2``. For more options, see documentation of
102
+ `scipy.spatial.distances.cdist`.
103
+ mode : str, optional
104
+ Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
105
+ '1-D' the data `d` will be considered as 1-D and flattened
106
+ internally. When it is 'N-D' the data `d` is assumed to be an array of
107
+ shape (n_samples, m), where m is the dimension of the target domain.
108
+
109
+
110
+ Attributes
111
+ ----------
112
+ N : int
113
+ The number of data points (as determined by the input arrays).
114
+ di : ndarray
115
+ The 1-D array of data values at each of the data coordinates `xi`.
116
+ xi : ndarray
117
+ The 2-D array of data coordinates.
118
+ function : str or callable
119
+ The radial basis function. See description under Parameters.
120
+ epsilon : float
121
+ Parameter used by gaussian or multiquadrics functions. See Parameters.
122
+ smooth : float
123
+ Smoothing parameter. See description under Parameters.
124
+ norm : str or callable
125
+ The distance function. See description under Parameters.
126
+ mode : str
127
+ Mode of the interpolation. See description under Parameters.
128
+ nodes : ndarray
129
+ A 1-D array of node values for the interpolation.
130
+ A : internal property, do not use
131
+
132
+ See Also
133
+ --------
134
+ RBFInterpolator
135
+
136
+ Examples
137
+ --------
138
+ >>> import numpy as np
139
+ >>> from scipy.interpolate import Rbf
140
+ >>> rng = np.random.default_rng()
141
+ >>> x, y, z, d = rng.random((4, 50))
142
+ >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
143
+ >>> xi = yi = zi = np.linspace(0, 1, 20)
144
+ >>> di = rbfi(xi, yi, zi) # interpolated values
145
+ >>> di.shape
146
+ (20,)
147
+
148
+ """
149
+ # Available radial basis functions that can be selected as strings;
150
+ # they all start with _h_ (self._init_function relies on that)
151
+ def _h_multiquadric(self, r):
152
+ return np.sqrt((1.0/self.epsilon*r)**2 + 1)
153
+
154
+ def _h_inverse_multiquadric(self, r):
155
+ return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
156
+
157
+ def _h_gaussian(self, r):
158
+ return np.exp(-(1.0/self.epsilon*r)**2)
159
+
160
+ def _h_linear(self, r):
161
+ return r
162
+
163
+ def _h_cubic(self, r):
164
+ return r**3
165
+
166
+ def _h_quintic(self, r):
167
+ return r**5
168
+
169
+ def _h_thin_plate(self, r):
170
+ return xlogy(r**2, r)
171
+
172
+ # Setup self._function and do smoke test on initial r
173
+ def _init_function(self, r):
174
+ if isinstance(self.function, str):
175
+ self.function = self.function.lower()
176
+ _mapped = {'inverse': 'inverse_multiquadric',
177
+ 'inverse multiquadric': 'inverse_multiquadric',
178
+ 'thin-plate': 'thin_plate'}
179
+ if self.function in _mapped:
180
+ self.function = _mapped[self.function]
181
+
182
+ func_name = "_h_" + self.function
183
+ if hasattr(self, func_name):
184
+ self._function = getattr(self, func_name)
185
+ else:
186
+ functionlist = [x[3:] for x in dir(self)
187
+ if x.startswith('_h_')]
188
+ raise ValueError("function must be a callable or one of " +
189
+ ", ".join(functionlist))
190
+ self._function = getattr(self, "_h_"+self.function)
191
+ elif callable(self.function):
192
+ allow_one = False
193
+ if hasattr(self.function, 'func_code') or \
194
+ hasattr(self.function, '__code__'):
195
+ val = self.function
196
+ allow_one = True
197
+ elif hasattr(self.function, "__call__"):
198
+ val = self.function.__call__.__func__
199
+ else:
200
+ raise ValueError("Cannot determine number of arguments to "
201
+ "function")
202
+
203
+ argcount = val.__code__.co_argcount
204
+ if allow_one and argcount == 1:
205
+ self._function = self.function
206
+ elif argcount == 2:
207
+ self._function = self.function.__get__(self, Rbf)
208
+ else:
209
+ raise ValueError("Function argument must take 1 or 2 "
210
+ "arguments.")
211
+
212
+ a0 = self._function(r)
213
+ if a0.shape != r.shape:
214
+ raise ValueError("Callable must take array and return array of "
215
+ "the same shape")
216
+ return a0
217
+
218
+ def __init__(self, *args, **kwargs):
219
+ # `args` can be a variable number of arrays; we flatten them and store
220
+ # them as a single 2-D array `xi` of shape (n_args-1, array_size),
221
+ # plus a 1-D array `di` for the values.
222
+ # All arrays must have the same number of elements
223
+ self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
224
+ for a in args[:-1]])
225
+ self.N = self.xi.shape[-1]
226
+
227
+ self.mode = kwargs.pop('mode', '1-D')
228
+
229
+ if self.mode == '1-D':
230
+ self.di = np.asarray(args[-1]).flatten()
231
+ self._target_dim = 1
232
+ elif self.mode == 'N-D':
233
+ self.di = np.asarray(args[-1])
234
+ self._target_dim = self.di.shape[-1]
235
+ else:
236
+ raise ValueError("Mode has to be 1-D or N-D.")
237
+
238
+ if not all([x.size == self.di.shape[0] for x in self.xi]):
239
+ raise ValueError("All arrays must be equal length.")
240
+
241
+ self.norm = kwargs.pop('norm', 'euclidean')
242
+ self.epsilon = kwargs.pop('epsilon', None)
243
+ if self.epsilon is None:
244
+ # default epsilon is the "the average distance between nodes" based
245
+ # on a bounding hypercube
246
+ ximax = np.amax(self.xi, axis=1)
247
+ ximin = np.amin(self.xi, axis=1)
248
+ edges = ximax - ximin
249
+ edges = edges[np.nonzero(edges)]
250
+ self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
251
+
252
+ self.smooth = kwargs.pop('smooth', 0.0)
253
+ self.function = kwargs.pop('function', 'multiquadric')
254
+
255
+ # attach anything left in kwargs to self for use by any user-callable
256
+ # function or to save on the object returned.
257
+ for item, value in kwargs.items():
258
+ setattr(self, item, value)
259
+
260
+ # Compute weights
261
+ if self._target_dim > 1: # If we have more than one target dimension,
262
+ # we first factorize the matrix
263
+ self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
264
+ lu, piv = linalg.lu_factor(self.A)
265
+ for i in range(self._target_dim):
266
+ self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
267
+ else:
268
+ self.nodes = linalg.solve(self.A, self.di)
269
+
270
+ @property
271
+ def A(self):
272
+ # this only exists for backwards compatibility: self.A was available
273
+ # and, at least technically, public.
274
+ r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
275
+ return self._init_function(r) - np.eye(self.N)*self.smooth
276
+
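+ # Illustrative sketch (not part of the class): with ``smooth=0`` the
+ # weights solve ``A @ nodes = di`` exactly, so the interpolant reproduces
+ # the data at the nodes:
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import Rbf
+ # >>> x = np.array([0.0, 1.0, 2.0]); d = np.array([0.0, 1.0, 4.0])
+ # >>> rbf = Rbf(x, d, function='linear')
+ # >>> bool(np.allclose(rbf(x), d))
+ # True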
277
+ def _call_norm(self, x1, x2):
278
+ return cdist(x1.T, x2.T, self.norm)
279
+
280
+ def __call__(self, *args):
281
+ args = [np.asarray(x) for x in args]
282
+ if not all([x.shape == y.shape for x in args for y in args]):
283
+ raise ValueError("Array lengths must be equal")
284
+ if self._target_dim > 1:
285
+ shp = args[0].shape + (self._target_dim,)
286
+ else:
287
+ shp = args[0].shape
288
+ xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
289
+ r = self._call_norm(xa, self.xi)
290
+ return np.dot(self._function(r), self.nodes).reshape(shp)
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py ADDED
@@ -0,0 +1,550 @@
1
+ """Module for RBF interpolation."""
2
+ import warnings
3
+ from itertools import combinations_with_replacement
4
+
5
+ import numpy as np
6
+ from numpy.linalg import LinAlgError
7
+ from scipy.spatial import KDTree
8
+ from scipy.special import comb
9
+ from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
10
+
11
+ from ._rbfinterp_pythran import (_build_system,
12
+ _build_evaluation_coefficients,
13
+ _polynomial_matrix)
14
+
15
+
16
+ __all__ = ["RBFInterpolator"]
17
+
18
+
19
+ # These RBFs are implemented.
20
+ _AVAILABLE = {
21
+ "linear",
22
+ "thin_plate_spline",
23
+ "cubic",
24
+ "quintic",
25
+ "multiquadric",
26
+ "inverse_multiquadric",
27
+ "inverse_quadratic",
28
+ "gaussian"
29
+ }
30
+
31
+
32
+ # The shape parameter does not need to be specified when using these RBFs.
33
+ _SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
34
+
35
+
36
+ # For RBFs that are conditionally positive definite of order m, the interpolant
37
+ # should include polynomial terms with degree >= m - 1. Define the minimum
38
+ # degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
39
+ # Approximation Methods with MATLAB". The RBFs that are not in this dictionary
40
+ # are positive definite and do not need polynomial terms.
41
+ _NAME_TO_MIN_DEGREE = {
42
+ "multiquadric": 0,
43
+ "linear": 0,
44
+ "thin_plate_spline": 1,
45
+ "cubic": 1,
46
+ "quintic": 2
47
+ }
48
+
49
+
50
+ def _monomial_powers(ndim, degree):
51
+ """Return the powers for each monomial in a polynomial.
52
+
53
+ Parameters
54
+ ----------
55
+ ndim : int
56
+ Number of variables in the polynomial.
57
+ degree : int
58
+ Degree of the polynomial.
59
+
60
+ Returns
61
+ -------
62
+ (nmonos, ndim) int ndarray
63
+ Array where each row contains the powers for each variable in a
64
+ monomial.
65
+
66
+ """
67
+ nmonos = comb(degree + ndim, ndim, exact=True)
68
+ out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
69
+ count = 0
70
+ for deg in range(degree + 1):
71
+ for mono in combinations_with_replacement(range(ndim), deg):
72
+ # `mono` is a tuple of variables in the current monomial with
73
+ # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
74
+ for var in mono:
75
+ out[count, var] += 1
76
+
77
+ count += 1
78
+
79
+ return out
80
+
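+ # Quick check (illustrative, not part of the module's tests): in two
+ # variables, degree 1 yields the exponents of the monomials 1, x and y.
+ # >>> _monomial_powers(2, 1).tolist()
+ # [[0, 0], [1, 0], [0, 1]]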
81
+
82
+ def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
83
+ """Build and solve the RBF interpolation system of equations.
84
+
85
+ Parameters
86
+ ----------
87
+ y : (P, N) float ndarray
88
+ Data point coordinates.
89
+ d : (P, S) float ndarray
90
+ Data values at `y`.
91
+ smoothing : (P,) float ndarray
92
+ Smoothing parameter for each data point.
93
+ kernel : str
94
+ Name of the RBF.
95
+ epsilon : float
96
+ Shape parameter.
97
+ powers : (R, N) int ndarray
98
+ The exponents for each monomial in the polynomial.
99
+
100
+ Returns
101
+ -------
102
+ coeffs : (P + R, S) float ndarray
103
+ Coefficients for each RBF and monomial.
104
+ shift : (N,) float ndarray
105
+ Domain shift used to create the polynomial matrix.
106
+ scale : (N,) float ndarray
107
+ Domain scaling used to create the polynomial matrix.
108
+
109
+ """
110
+ lhs, rhs, shift, scale = _build_system(
111
+ y, d, smoothing, kernel, epsilon, powers
112
+ )
113
+ _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
114
+ if info < 0:
115
+ raise ValueError(f"The {-info}-th argument had an illegal value.")
116
+ elif info > 0:
117
+ msg = "Singular matrix."
118
+ nmonos = powers.shape[0]
119
+ if nmonos > 0:
120
+ pmat = _polynomial_matrix((y - shift)/scale, powers)
121
+ rank = np.linalg.matrix_rank(pmat)
122
+ if rank < nmonos:
123
+ msg = (
124
+ "Singular matrix. The matrix of monomials evaluated at "
125
+ "the data point coordinates does not have full column "
126
+ f"rank ({rank}/{nmonos})."
127
+ )
128
+
129
+ raise LinAlgError(msg)
130
+
131
+ return shift, scale, coeffs
132
+
133
+
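+ # Illustrative sketch (not the Pythran implementation, which also shifts and
+ # scales the domain and applies per-point smoothing): for the 'linear'
+ # kernel, ``phi(r) = -r``, the system solved above has the block form
+ #
+ #     | K + diag(smoothing)  P | | a |   | d |
+ #     |        P.T           0 | | b | = | 0 |
+ #
+ # with K[i, j] = phi(||y_i - y_j||) and P the matrix of monomials:
+ # >>> import numpy as np
+ # >>> from scipy.spatial.distance import cdist
+ # >>> y = np.array([[0.0], [1.0], [2.0]]); d = np.array([0.0, 1.0, 4.0])
+ # >>> K = -cdist(y, y)                          # 'linear' kernel
+ # >>> P = np.hstack([np.ones((3, 1)), y])       # monomials 1, x
+ # >>> lhs = np.block([[K, P], [P.T, np.zeros((2, 2))]])
+ # >>> rhs = np.concatenate([d, np.zeros(2)])
+ # >>> a, b = np.split(np.linalg.solve(lhs, rhs), [3])
+ # >>> bool(np.allclose(K @ a + P @ b, d))       # interpolates the data
+ # True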
134
+ class RBFInterpolator:
135
+ """Radial basis function (RBF) interpolation in N dimensions.
136
+
137
+ Parameters
138
+ ----------
139
+ y : (npoints, ndims) array_like
140
+ 2-D array of data point coordinates.
141
+ d : (npoints, ...) array_like
142
+ N-D array of data values at `y`. The length of `d` along the first
143
+ axis must be equal to the length of `y`. Unlike some interpolators, the
144
+ interpolation axis cannot be changed.
145
+ neighbors : int, optional
146
+ If specified, the value of the interpolant at each evaluation point
147
+ will be computed using only this many nearest data points. All the data
148
+ points are used by default.
149
+ smoothing : float or (npoints, ) array_like, optional
150
+ Smoothing parameter. The interpolant perfectly fits the data when this
151
+ is set to 0. For large values, the interpolant approaches a least
152
+ squares fit of a polynomial with the specified degree. Default is 0.
153
+ kernel : str, optional
154
+ Type of RBF. This should be one of
155
+
156
+ - 'linear' : ``-r``
157
+ - 'thin_plate_spline' : ``r**2 * log(r)``
158
+ - 'cubic' : ``r**3``
159
+ - 'quintic' : ``-r**5``
160
+ - 'multiquadric' : ``-sqrt(1 + r**2)``
161
+ - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
162
+ - 'inverse_quadratic' : ``1/(1 + r**2)``
163
+ - 'gaussian' : ``exp(-r**2)``
164
+
165
+ Default is 'thin_plate_spline'.
166
+ epsilon : float, optional
167
+ Shape parameter that scales the input to the RBF. If `kernel` is
168
+ 'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
169
+ 1 and can be ignored because it has the same effect as scaling the
170
+ smoothing parameter. Otherwise, this must be specified.
171
+ degree : int, optional
172
+ Degree of the added polynomial. For some RBFs the interpolant may not
173
+ be well-posed if the polynomial degree is too small. Those RBFs and
174
+ their corresponding minimum degrees are
175
+
176
+ - 'multiquadric' : 0
177
+ - 'linear' : 0
178
+ - 'thin_plate_spline' : 1
179
+ - 'cubic' : 1
180
+ - 'quintic' : 2
181
+
182
+ The default value is the minimum degree for `kernel` or 0 if there is
183
+ no minimum degree. Set this to -1 for no added polynomial.
184
+
185
+ Notes
186
+ -----
187
+ An RBF is a scalar valued function in N-dimensional space whose value at
188
+ :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
189
+ is the center of the RBF.
190
+
191
+ An RBF interpolant for the vector of data values :math:`d`, which are from
192
+ locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
193
+ plus a polynomial with a specified degree. The RBF interpolant is written
194
+ as
195
+
196
+ .. math::
197
+ f(x) = K(x, y) a + P(x) b,
198
+
199
+ where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
200
+ evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
201
+ monomials, which span polynomials with the specified degree, evaluated at
202
+ :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
203
+ linear equations
204
+
205
+ .. math::
206
+ (K(y, y) + \\lambda I) a + P(y) b = d
207
+
208
+ and
209
+
210
+ .. math::
211
+ P(y)^T a = 0,
212
+
213
+ where :math:`\\lambda` is a non-negative smoothing parameter that controls
214
+ how well we want to fit the data. The data are fit exactly when the
215
+ smoothing parameter is 0.
216
+
217
+ The above system is uniquely solvable if the following requirements are
218
+ met:
219
+
220
+ - :math:`P(y)` must have full column rank. :math:`P(y)` always has full
221
+ column rank when `degree` is -1 or 0. When `degree` is 1,
222
+ :math:`P(y)` has full column rank if the data point locations are not
223
+ all collinear (N=2), coplanar (N=3), etc.
224
+ - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
225
+ 'cubic', or 'quintic', then `degree` must not be lower than the
226
+ minimum value listed above.
227
+ - If `smoothing` is 0, then each data point location must be distinct.
228
+
229
+ When using an RBF that is not scale invariant ('multiquadric',
230
+ 'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
231
+ shape parameter must be chosen (e.g., through cross validation). Smaller
232
+ values for the shape parameter correspond to wider RBFs. The problem can
233
+ become ill-conditioned or singular when the shape parameter is too small.
234
+
235
+ The memory required to solve for the RBF interpolation coefficients
236
+ increases quadratically with the number of data points, which can become
237
+ impractical when interpolating more than about a thousand data points.
238
+ To overcome memory limitations for large interpolation problems, the
239
+ `neighbors` argument can be specified to compute an RBF interpolant for
240
+ each evaluation point using only the nearest data points.
241
+
242
+ .. versionadded:: 1.7.0
243
+
244
+ See Also
245
+ --------
246
+ NearestNDInterpolator
247
+ LinearNDInterpolator
248
+ CloughTocher2DInterpolator
249
+
250
+ References
251
+ ----------
252
+ .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
253
+ World Scientific Publishing Co.
254
+
255
+ .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
256
+
257
+ .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
258
+
259
+ .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
260
+
261
+ Examples
262
+ --------
263
+ Demonstrate interpolating scattered data to a grid in 2-D.
264
+
265
+ >>> import numpy as np
266
+ >>> import matplotlib.pyplot as plt
267
+ >>> from scipy.interpolate import RBFInterpolator
268
+ >>> from scipy.stats.qmc import Halton
269
+
270
+ >>> rng = np.random.default_rng()
271
+ >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
272
+ >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
273
+
274
+ >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
275
+ >>> xflat = xgrid.reshape(2, -1).T
276
+ >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
277
+ >>> ygrid = yflat.reshape(50, 50)
278
+
279
+ >>> fig, ax = plt.subplots()
280
+ >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
281
+ >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
282
+ >>> fig.colorbar(p)
283
+ >>> plt.show()
284
+
285
+ """
286
+
287
+ def __init__(self, y, d,
288
+ neighbors=None,
289
+ smoothing=0.0,
290
+ kernel="thin_plate_spline",
291
+ epsilon=None,
292
+ degree=None):
293
+ y = np.asarray(y, dtype=float, order="C")
294
+ if y.ndim != 2:
295
+ raise ValueError("`y` must be a 2-dimensional array.")
296
+
297
+ ny, ndim = y.shape
298
+
299
+ d_dtype = complex if np.iscomplexobj(d) else float
300
+ d = np.asarray(d, dtype=d_dtype, order="C")
301
+ if d.shape[0] != ny:
302
+ raise ValueError(
303
+ f"Expected the first axis of `d` to have length {ny}."
304
+ )
305
+
306
+ d_shape = d.shape[1:]
307
+ d = d.reshape((ny, -1))
308
+ # If `d` is complex, convert it to a float array with twice as many
309
+ # columns. Otherwise, the LHS matrix would need to be converted to
310
+ # complex and take up 2x more memory than necessary.
311
+ d = d.view(float)
312
+
313
+ if np.isscalar(smoothing):
314
+ smoothing = np.full(ny, smoothing, dtype=float)
315
+ else:
316
+ smoothing = np.asarray(smoothing, dtype=float, order="C")
317
+ if smoothing.shape != (ny,):
318
+ raise ValueError(
319
+ "Expected `smoothing` to be a scalar or have shape "
320
+ f"({ny},)."
321
+ )
322
+
323
+ kernel = kernel.lower()
324
+ if kernel not in _AVAILABLE:
325
+ raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
326
+
327
+ if epsilon is None:
328
+ if kernel in _SCALE_INVARIANT:
329
+ epsilon = 1.0
330
+ else:
331
+ raise ValueError(
332
+ "`epsilon` must be specified if `kernel` is not one of "
333
+ f"{_SCALE_INVARIANT}."
334
+ )
335
+ else:
336
+ epsilon = float(epsilon)
337
+
338
+ min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
339
+ if degree is None:
340
+ degree = max(min_degree, 0)
341
+ else:
342
+ degree = int(degree)
343
+ if degree < -1:
344
+ raise ValueError("`degree` must be at least -1.")
345
+ elif -1 < degree < min_degree:
346
+ warnings.warn(
347
+ f"`degree` should not be below {min_degree} except -1 "
348
+ f"when `kernel` is '{kernel}'."
349
+ f"The interpolant may not be uniquely "
350
+ f"solvable, and the smoothing parameter may have an "
351
+ f"unintuitive effect.",
352
+ UserWarning, stacklevel=2
353
+ )
354
+
355
+ if neighbors is None:
356
+ nobs = ny
357
+ else:
358
+ # Make sure the number of nearest neighbors used for interpolation
359
+ # does not exceed the number of observations.
360
+ neighbors = int(min(neighbors, ny))
361
+ nobs = neighbors
362
+
363
+ powers = _monomial_powers(ndim, degree)
364
+ # The polynomial matrix must have full column rank in order for the
365
+ # interpolant to be well-posed, which is not possible if there are
366
+ # fewer observations than monomials.
367
+ if powers.shape[0] > nobs:
368
+ raise ValueError(
369
+ f"At least {powers.shape[0]} data points are required when "
370
+ f"`degree` is {degree} and the number of dimensions is {ndim}."
371
+ )
372
+
373
+ if neighbors is None:
374
+ shift, scale, coeffs = _build_and_solve_system(
375
+ y, d, smoothing, kernel, epsilon, powers
376
+ )
377
+
378
+ # Make these attributes private since they do not always exist.
379
+ self._shift = shift
380
+ self._scale = scale
381
+ self._coeffs = coeffs
382
+
383
+ else:
384
+ self._tree = KDTree(y)
385
+
386
+ self.y = y
387
+ self.d = d
388
+ self.d_shape = d_shape
389
+ self.d_dtype = d_dtype
390
+ self.neighbors = neighbors
391
+ self.smoothing = smoothing
392
+ self.kernel = kernel
393
+ self.epsilon = epsilon
394
+ self.powers = powers
395
+
396
+ def _chunk_evaluator(
397
+ self,
398
+ x,
399
+ y,
400
+ shift,
401
+ scale,
402
+ coeffs,
403
+ memory_budget=1000000
404
+ ):
405
+ """
406
+ Evaluate the interpolation while controlling memory consumption.
407
+ We chunk the input if we need more memory than specified.
408
+
409
+ Parameters
410
+ ----------
411
+ x : (Q, N) float ndarray
412
+ array of points on which to evaluate
413
+ y: (P, N) float ndarray
414
+ array of points on which we know function values
415
+ shift: (N, ) ndarray
416
+ Domain shift used to create the polynomial matrix.
417
+ scale : (N,) float ndarray
418
+ Domain scaling used to create the polynomial matrix.
419
+ coeffs: (P+R, S) float ndarray
420
+ Coefficients in front of basis functions
421
+ memory_budget: int
422
+ Total amount of memory (in units of sizeof(float)) we wish
423
+ to devote for storing the array of coefficients for
424
+ interpolated points. If we need more memory than that, we
425
+ chunk the input.
426
+
427
+ Returns
428
+ -------
429
+ (Q, S) float ndarray
430
+ Interpolated array
431
+ """
432
+ nx, ndim = x.shape
433
+ if self.neighbors is None:
434
+ nnei = len(y)
435
+ else:
436
+ nnei = self.neighbors
437
+ # in each chunk we consume the same space we already occupy
438
+ chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
439
+ if chunksize <= nx:
440
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
441
+ for i in range(0, nx, chunksize):
442
+ vec = _build_evaluation_coefficients(
443
+ x[i:i + chunksize, :],
444
+ y,
445
+ self.kernel,
446
+ self.epsilon,
447
+ self.powers,
448
+ shift,
449
+ scale)
450
+ out[i:i + chunksize, :] = np.dot(vec, coeffs)
451
+ else:
452
+ vec = _build_evaluation_coefficients(
453
+ x,
454
+ y,
455
+ self.kernel,
456
+ self.epsilon,
457
+ self.powers,
458
+ shift,
459
+ scale)
460
+ out = np.dot(vec, coeffs)
461
+ return out
462
+
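+ # Chunking arithmetic (illustrative): with the default budget of 10**6
+ # floats, 10 monomials and 1000 data points per neighborhood, evaluation
+ # points are processed in blocks of
+ # >>> 10**6 // (10 + 1000) + 1
+ # 991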
463
+ def __call__(self, x):
464
+ """Evaluate the interpolant at `x`.
465
+
466
+ Parameters
467
+ ----------
468
+ x : (Q, N) array_like
469
+ Evaluation point coordinates.
470
+
471
+ Returns
472
+ -------
473
+ (Q, ...) ndarray
474
+ Values of the interpolant at `x`.
475
+
476
+ """
477
+ x = np.asarray(x, dtype=float, order="C")
478
+ if x.ndim != 2:
479
+ raise ValueError("`x` must be a 2-dimensional array.")
480
+
481
+ nx, ndim = x.shape
482
+ if ndim != self.y.shape[1]:
483
+ raise ValueError("Expected the second axis of `x` to have length "
484
+ f"{self.y.shape[1]}.")
485
+
486
+ # Our memory budget for storing RBF coefficients is
487
+ # based on how many floats in memory we already occupy
488
+ # If this number is below 1e6 we just use 1e6
489
+ # This memory budget is used to decide how we chunk
490
+ # the inputs
491
+ memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
492
+
493
+ if self.neighbors is None:
494
+ out = self._chunk_evaluator(
495
+ x,
496
+ self.y,
497
+ self._shift,
498
+ self._scale,
499
+ self._coeffs,
500
+ memory_budget=memory_budget)
501
+ else:
502
+ # Get the indices of the k nearest observation points to each
503
+ # evaluation point.
504
+ _, yindices = self._tree.query(x, self.neighbors)
505
+ if self.neighbors == 1:
506
+ # `KDTree` squeezes the output when neighbors=1.
507
+ yindices = yindices[:, None]
508
+
509
+ # Multiple evaluation points may have the same neighborhood of
510
+ # observation points. Make the neighborhoods unique so that we only
511
+ # compute the interpolation coefficients once for each
512
+ # neighborhood.
513
+ yindices = np.sort(yindices, axis=1)
514
+ yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
515
+ inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
516
+ # `inv` tells us which neighborhood will be used by each evaluation
517
+ # point. Now we find which evaluation points will be using each
518
+ # neighborhood.
519
+ xindices = [[] for _ in range(len(yindices))]
520
+ for i, j in enumerate(inv):
521
+ xindices[j].append(i)
522
+
523
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
524
+ for xidx, yidx in zip(xindices, yindices):
525
+ # `yidx` are the indices of the observations in this
526
+ # neighborhood. `xidx` are the indices of the evaluation points
527
+ # that are using this neighborhood.
528
+ xnbr = x[xidx]
529
+ ynbr = self.y[yidx]
530
+ dnbr = self.d[yidx]
531
+ snbr = self.smoothing[yidx]
532
+ shift, scale, coeffs = _build_and_solve_system(
533
+ ynbr,
534
+ dnbr,
535
+ snbr,
536
+ self.kernel,
537
+ self.epsilon,
538
+ self.powers,
539
+ )
540
+ out[xidx] = self._chunk_evaluator(
541
+ xnbr,
542
+ ynbr,
543
+ shift,
544
+ scale,
545
+ coeffs,
546
+ memory_budget=memory_budget)
547
+
548
+ out = out.view(self.d_dtype)
549
+ out = out.reshape((nx, ) + self.d_shape)
550
+ return out
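+ # Usage sketch for the `neighbors` option (illustrative; the sizes and data
+ # are arbitrary): each evaluation point is served by a small local
+ # interpolant built from its 50 nearest data points.
+ # >>> import numpy as np
+ # >>> rng = np.random.default_rng(0)
+ # >>> y = rng.random((2000, 2)); d = np.sin(y[:, 0]) * y[:, 1]
+ # >>> local = RBFInterpolator(y, d, neighbors=50)
+ # >>> local(np.array([[0.5, 0.5]])).shape
+ # (1,)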
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi.py ADDED
@@ -0,0 +1,766 @@
1
+ __all__ = ['RegularGridInterpolator', 'interpn']
2
+
3
+ import itertools
4
+ import warnings
5
+
6
+ import numpy as np
7
+
8
+ import scipy.sparse.linalg as ssl
9
+
10
+ from .interpnd import _ndim_coords_from_arrays
11
+ from ._cubic import PchipInterpolator
12
+ from ._rgi_cython import evaluate_linear_2d, find_indices
13
+ from ._bsplines import make_interp_spline
14
+ from ._fitpack2 import RectBivariateSpline
15
+ from ._ndbspline import make_ndbspl
16
+
17
+
18
+ def _check_points(points):
19
+ descending_dimensions = []
20
+ grid = []
21
+ for i, p in enumerate(points):
22
+ # early make points float
23
+ # see https://github.com/scipy/scipy/pull/17230
24
+ p = np.asarray(p, dtype=float)
25
+ if not np.all(p[1:] > p[:-1]):
26
+ if np.all(p[1:] < p[:-1]):
27
+ # input is descending, so make it ascending
28
+ descending_dimensions.append(i)
29
+ p = np.flip(p)
30
+ else:
31
+ raise ValueError(
32
+ "The points in dimension %d must be strictly "
33
+ "ascending or descending" % i)
34
+ # see https://github.com/scipy/scipy/issues/17716
35
+ p = np.ascontiguousarray(p)
36
+ grid.append(p)
37
+ return tuple(grid), tuple(descending_dimensions)
38
+
39
+
40
+ def _check_dimensionality(points, values):
41
+ if len(points) > values.ndim:
42
+ raise ValueError("There are %d point arrays, but values has %d "
43
+ "dimensions" % (len(points), values.ndim))
44
+ for i, p in enumerate(points):
45
+ if not np.asarray(p).ndim == 1:
46
+ raise ValueError("The points in dimension %d must be "
47
+ "1-dimensional" % i)
48
+ if not values.shape[i] == len(p):
49
+ raise ValueError("There are %d points and %d values in "
50
+ "dimension %d" % (len(p), values.shape[i], i))
51
+
52
+
53
+ class RegularGridInterpolator:
54
+ """
55
+ Interpolator on a regular or rectilinear grid in arbitrary dimensions.
56
+
57
+ The data must be defined on a rectilinear grid; that is, a rectangular
58
+ grid with even or uneven spacing. Linear, nearest-neighbor, spline
59
+ interpolations are supported. After setting up the interpolator object,
60
+ the interpolation method may be chosen at each evaluation.
61
+
62
+ Parameters
63
+ ----------
64
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
65
+ The points defining the regular grid in n dimensions. The points in
66
+ each dimension (i.e. every elements of the points tuple) must be
67
+ strictly ascending or descending.
68
+
69
+ values : array_like, shape (m1, ..., mn, ...)
70
+ The data on the regular grid in n dimensions. Complex data is
71
+ accepted.
72
+
73
+ .. deprecated:: 1.13.0
74
+ Complex data is deprecated with ``method="pchip"`` and will raise an
75
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
76
+ works with real values. If you are trying to use the real components of
77
+ the passed array, use ``np.real`` on ``values``.
78
+
79
+ method : str, optional
80
+ The method of interpolation to perform. Supported are "linear",
81
+ "nearest", "slinear", "cubic", "quintic" and "pchip". This
82
+ parameter will become the default for the object's ``__call__``
83
+ method. Default is "linear".
84
+
85
+ bounds_error : bool, optional
86
+ If True, when interpolated values are requested outside of the
87
+ domain of the input data, a ValueError is raised.
88
+ If False, then `fill_value` is used.
89
+ Default is True.
90
+
91
+ fill_value : float or None, optional
92
+ The value to use for points outside of the interpolation domain.
93
+ If None, values outside the domain are extrapolated.
94
+ Default is ``np.nan``.
95
+
96
+ solver : callable, optional
97
+ Only used for methods "slinear", "cubic" and "quintic".
98
+ Sparse linear algebra solver for construction of the NdBSpline instance.
99
+ Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
100
+
101
+ .. versionadded:: 1.13
102
+
103
+ solver_args: dict, optional
104
+ Additional arguments to pass to `solver`, if any.
105
+
106
+ .. versionadded:: 1.13
107
+
108
+ Methods
109
+ -------
110
+ __call__
111
+
112
+ Attributes
113
+ ----------
114
+ grid : tuple of ndarrays
115
+ The points defining the regular grid in n dimensions.
116
+ This tuple defines the full grid via
117
+ ``np.meshgrid(*grid, indexing='ij')``
118
+ values : ndarray
119
+ Data values at the grid.
120
+ method : str
121
+ Interpolation method.
122
+ fill_value : float or ``None``
123
+ Use this value for out-of-bounds arguments to `__call__`.
124
+ bounds_error : bool
125
+ If ``True``, out-of-bounds argument raise a ``ValueError``.
126
+
127
+ Notes
128
+ -----
129
+ Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
130
+ avoids expensive triangulation of the input data by taking advantage of the
131
+ regular grid structure.
132
+
133
+ In other words, this class assumes that the data is defined on a
134
+ *rectilinear* grid.
135
+
136
+ .. versionadded:: 0.14
137
+
138
+ The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
139
+ tensor-product spline interpolators, where `k` is the spline degree,
140
+ If any dimension has fewer points than `k` + 1, an error will be raised.
141
+
142
+ .. versionadded:: 1.9
143
+
144
+ If the input data is such that dimensions have incommensurate
145
+ units and differ by many orders of magnitude, the interpolant may have
146
+ numerical artifacts. Consider rescaling the data before interpolating.
147
+
148
+ **Choosing a solver for spline methods**
149
+
150
+ Spline methods, "slinear", "cubic" and "quintic" involve solving a
151
+ large sparse linear system at instantiation time. Depending on data,
152
+ the default solver may or may not be adequate. When it is not, you may
153
+ need to experiment with an optional `solver` argument, where you may
154
+ choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
155
+ iterative solvers from `scipy.sparse.linalg`. You may need to supply
156
+ additional parameters via the optional `solver_args` parameter (for instance,
157
+ you may supply the starting value or target tolerance). See the
158
+ `scipy.sparse.linalg` documentation for the full list of available options.
159
+
160
+ Alternatively, you may instead use the legacy methods, "slinear_legacy",
161
+ "cubic_legacy" and "quintic_legacy". These methods allow faster construction
162
+ but evaluations will be much slower.
163
+
164
+ Examples
165
+ --------
166
+ **Evaluate a function on the points of a 3-D grid**
167
+
168
+ As a first example, we evaluate a simple example function on the points of
169
+ a 3-D grid:
170
+
171
+ >>> from scipy.interpolate import RegularGridInterpolator
172
+ >>> import numpy as np
173
+ >>> def f(x, y, z):
174
+ ... return 2 * x**3 + 3 * y**2 - z
175
+ >>> x = np.linspace(1, 4, 11)
176
+ >>> y = np.linspace(4, 7, 22)
177
+ >>> z = np.linspace(7, 9, 33)
178
+ >>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
179
+ >>> data = f(xg, yg, zg)
180
+
181
+ ``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
182
+ Next, define an interpolating function from this data:
183
+
184
+ >>> interp = RegularGridInterpolator((x, y, z), data)
185
+
186
+ Evaluate the interpolating function at the two points
187
+ ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
188
+
189
+ >>> pts = np.array([[2.1, 6.2, 8.3],
190
+ ... [3.3, 5.2, 7.1]])
191
+ >>> interp(pts)
192
+ array([ 125.80469388, 146.30069388])
193
+
194
+ which is indeed a close approximation to
195
+
196
+ >>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
197
+ (125.54200000000002, 145.894)
198
+
199
+ **Interpolate and extrapolate a 2D dataset**
200
+
201
+ As a second example, we interpolate and extrapolate a 2D data set:
202
+
203
+ >>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
204
+ >>> def ff(x, y):
205
+ ... return x**2 + y**2
206
+
207
+ >>> xg, yg = np.meshgrid(x, y, indexing='ij')
208
+ >>> data = ff(xg, yg)
209
+ >>> interp = RegularGridInterpolator((x, y), data,
210
+ ... bounds_error=False, fill_value=None)
211
+
212
+ >>> import matplotlib.pyplot as plt
213
+ >>> fig = plt.figure()
214
+ >>> ax = fig.add_subplot(projection='3d')
215
+ >>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
216
+ ... s=60, c='k', label='data')
217
+
218
+ Evaluate and plot the interpolator on a finer grid
219
+
220
+ >>> xx = np.linspace(-4, 9, 31)
221
+ >>> yy = np.linspace(-4, 9, 31)
222
+ >>> X, Y = np.meshgrid(xx, yy, indexing='ij')
223
+
224
+ >>> # interpolator
225
+ >>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
226
+ ... alpha=0.4, color='m', label='linear interp')
227
+
228
+ >>> # ground truth
229
+ >>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
230
+ ... alpha=0.4, label='ground truth')
231
+ >>> plt.legend()
232
+ >>> plt.show()
233
+
234
+ Other examples are given
235
+ :ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
236
+
237
+ See Also
238
+ --------
239
+ NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
240
+ data in N dimensions
241
+
242
+ LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
243
+ in N dimensions
244
+
245
+ interpn : a convenience function which wraps `RegularGridInterpolator`
246
+
247
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
248
+ (suitable for e.g., N-D image resampling)
249
+
250
+ References
251
+ ----------
252
+ .. [1] Python package *regulargrid* by Johannes Buchner, see
253
+ https://pypi.python.org/pypi/regulargrid/
254
+ .. [2] Wikipedia, "Trilinear interpolation",
255
+ https://en.wikipedia.org/wiki/Trilinear_interpolation
256
+ .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
257
+ and multilinear table interpolation in many dimensions." MATH.
258
+ COMPUT. 50.181 (1988): 189-196.
259
+ https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
260
+ :doi:`10.1090/S0025-5718-1988-0917826-0`
261
+
262
+ """
263
+ # this class is based on code originally programmed by Johannes Buchner,
264
+ # see https://github.com/JohannesBuchner/regulargrid
265
+
266
+ _SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3,
267
+ "slinear_legacy": 1, "cubic_legacy": 3, "quintic_legacy": 5,}
268
+ _SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
269
+ "quintic_legacy", "pchip"}
270
+ _SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
271
+ _SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
272
+ _ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
273
+
274
+ def __init__(self, points, values, method="linear", bounds_error=True,
275
+ fill_value=np.nan, *, solver=None, solver_args=None):
276
+ if method not in self._ALL_METHODS:
277
+ raise ValueError("Method '%s' is not defined" % method)
278
+ elif method in self._SPLINE_METHODS:
279
+ self._validate_grid_dimensions(points, method)
280
+ self.method = method
281
+ self.bounds_error = bounds_error
282
+ self.grid, self._descending_dimensions = _check_points(points)
283
+ self.values = self._check_values(values)
284
+ self._check_dimensionality(self.grid, self.values)
285
+ self.fill_value = self._check_fill_value(self.values, fill_value)
286
+ if self._descending_dimensions:
287
+ self.values = np.flip(values, axis=self._descending_dimensions)
288
+ if self.method == "pchip" and np.iscomplexobj(self.values):
289
+ msg = ("`PchipInterpolator` only works with real values. Passing "
290
+ "complex-dtyped `values` with `method='pchip'` is deprecated "
291
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
292
+ "use the real components of the passed array, use `np.real` on "
293
+ "the array before passing to `RegularGridInterpolator`.")
294
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
295
+ if method in self._SPLINE_METHODS_ndbspl:
296
+ if solver_args is None:
297
+ solver_args = {}
298
+ self._spline = self._construct_spline(method, solver, **solver_args)
299
+ else:
300
+ if solver is not None or solver_args:
301
+ raise ValueError(
302
+ f"{method =} does not accept the 'solver' argument. Got "
303
+ f" {solver = } and with arguments {solver_args}."
304
+ )
305
+
306
+ def _construct_spline(self, method, solver=None, **solver_args):
307
+ if solver is None:
308
+ solver = ssl.gcrotmk
309
+ spl = make_ndbspl(
310
+ self.grid, self.values, self._SPLINE_DEGREE_MAP[method],
311
+ solver=solver, **solver_args
312
+ )
313
+ return spl
314
+
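Illustration only (not part of the committed file): a minimal usage sketch of the tensor-product spline path set up by `_construct_spline`. It assumes only the public `scipy.interpolate.RegularGridInterpolator` API as defined in this module; note that `_validate_grid_dimensions` below requires at least k+1 points per axis for a degree-k method, so a 6x6 grid is the smallest that supports "quintic".

import numpy as np
from scipy.interpolate import RegularGridInterpolator

# degree-5 ("quintic") splines need at least 6 points per dimension
x = np.linspace(0.0, 1.0, 6)
y = np.linspace(0.0, 1.0, 6)
data = np.add.outer(x**2, y**2)          # f(x, y) = x**2 + y**2 sampled on the grid

# builds an N-d B-spline internally; per _construct_spline the default
# sparse iterative solver is scipy.sparse.linalg.gcrotmk
interp = RegularGridInterpolator((x, y), data, method="quintic")
print(interp([[0.25, 0.75]]))            # ~[0.625] for this polynomial (up to solver tolerance)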
315
+ def _check_dimensionality(self, grid, values):
316
+ _check_dimensionality(grid, values)
317
+
318
+ def _check_points(self, points):
319
+ return _check_points(points)
320
+
321
+ def _check_values(self, values):
322
+ if not hasattr(values, 'ndim'):
323
+ # allow reasonable duck-typed values
324
+ values = np.asarray(values)
325
+
326
+ if hasattr(values, 'dtype') and hasattr(values, 'astype'):
327
+ if not np.issubdtype(values.dtype, np.inexact):
328
+ values = values.astype(float)
329
+
330
+ return values
331
+
332
+ def _check_fill_value(self, values, fill_value):
333
+ if fill_value is not None:
334
+ fill_value_dtype = np.asarray(fill_value).dtype
335
+ if (hasattr(values, 'dtype') and not
336
+ np.can_cast(fill_value_dtype, values.dtype,
337
+ casting='same_kind')):
338
+ raise ValueError("fill_value must be either 'None' or "
339
+ "of a type compatible with values")
340
+ return fill_value
341
+
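A small sketch (illustration only, not part of the file) of the 'same_kind' casting rule enforced by `_check_fill_value`: a complex fill value cannot be paired with real-valued data.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.array([0.0, 1.0, 2.0])
data = x**2                               # float64 values
try:
    # complex128 cannot be cast to float64 under 'same_kind' -> ValueError
    RegularGridInterpolator((x,), data, bounds_error=False, fill_value=1j)
except ValueError as exc:
    print(exc)                            # "fill_value must be either 'None' or of a type compatible with values"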
342
+ def __call__(self, xi, method=None, *, nu=None):
343
+ """
344
+ Interpolation at coordinates.
345
+
346
+ Parameters
347
+ ----------
348
+ xi : ndarray of shape (..., ndim)
349
+ The coordinates to evaluate the interpolator at.
350
+
351
+ method : str, optional
352
+ The method of interpolation to perform. Supported are "linear",
353
+ "nearest", "slinear", "cubic", "quintic" and "pchip". Default is
354
+ the method chosen when the interpolator was created.
355
+
356
+ nu : sequence of ints, length ndim, optional
357
+ If not None, the orders of the derivatives to evaluate.
358
+ Each entry must be non-negative.
359
+ Only allowed for methods "slinear", "cubic" and "quintic".
360
+
361
+ .. versionadded:: 1.13
362
+
363
+ Returns
364
+ -------
365
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
366
+ Interpolated values at `xi`. See notes for behaviour when
367
+ ``xi.ndim == 1``.
368
+
369
+ Notes
370
+ -----
371
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
372
+ the 0 position of the returned array, values_x, so its shape is
373
+ instead ``(1,) + values.shape[ndim:]``.
374
+
375
+ Examples
376
+ --------
377
+ Here we define a nearest-neighbor interpolator of a simple function
378
+
379
+ >>> import numpy as np
380
+ >>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
381
+ >>> def f(x, y):
382
+ ... return x**2 + y**2
383
+ >>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
384
+ >>> from scipy.interpolate import RegularGridInterpolator
385
+ >>> interp = RegularGridInterpolator((x, y), data, method='nearest')
386
+
387
+ By construction, the interpolator uses the nearest-neighbor
388
+ interpolation
389
+
390
+ >>> interp([[1.5, 1.3], [0.3, 4.5]])
391
+ array([2., 9.])
392
+
393
+ We can however evaluate the linear interpolant by overriding the
394
+ `method` parameter
395
+
396
+ >>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
397
+ array([ 4.7, 24.3])
398
+ """
399
+ method = self.method if method is None else method
400
+ is_method_changed = self.method != method
401
+ if method not in self._ALL_METHODS:
402
+ raise ValueError("Method '%s' is not defined" % method)
403
+ if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
404
+ self._spline = self._construct_spline(method)
405
+
406
+ if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
407
+ raise ValueError(
408
+ f"Can only compute derivatives for methods "
409
+ f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
410
+ )
411
+
412
+ xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
413
+
414
+ if method == "linear":
415
+ indices, norm_distances = self._find_indices(xi.T)
416
+ if (ndim == 2 and hasattr(self.values, 'dtype') and
417
+ self.values.ndim == 2 and self.values.flags.writeable and
418
+ self.values.dtype in (np.float64, np.complex128) and
419
+ self.values.dtype.byteorder == '='):
420
+ # fast path for native-endian, writeable 2-D float64/complex128 values;
421
+ # until Cython supports const fused types, this path cannot accept
422
+ # non-writeable values
423
+ out = np.empty(indices.shape[1], dtype=self.values.dtype)
424
+ result = evaluate_linear_2d(self.values,
425
+ indices,
426
+ norm_distances,
427
+ self.grid,
428
+ out)
429
+ else:
430
+ result = self._evaluate_linear(indices, norm_distances)
431
+ elif method == "nearest":
432
+ indices, norm_distances = self._find_indices(xi.T)
433
+ result = self._evaluate_nearest(indices, norm_distances)
434
+ elif method in self._SPLINE_METHODS:
435
+ if is_method_changed:
436
+ self._validate_grid_dimensions(self.grid, method)
437
+ if method in self._SPLINE_METHODS_recursive:
438
+ result = self._evaluate_spline(xi, method)
439
+ else:
440
+ result = self._spline(xi, nu=nu)
441
+
442
+ if not self.bounds_error and self.fill_value is not None:
443
+ result[out_of_bounds] = self.fill_value
444
+
445
+ # f(nan) = nan, if any
446
+ if np.any(nans):
447
+ result[nans] = np.nan
448
+ return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
449
+
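Illustration only (not committed code): evaluating derivatives through the `nu` keyword documented above. Hedged: `nu` is marked ``.. versionadded:: 1.13`` and is only accepted for the "slinear", "cubic" and "quintic" methods.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.linspace(0.0, 3.0, 7)
y = np.linspace(0.0, 3.0, 7)
data = np.multiply.outer(x, y)            # f(x, y) = x * y
interp = RegularGridInterpolator((x, y), data, method="cubic")

pts = [[1.5, 2.0], [2.5, 0.5]]
print(interp(pts))                        # f itself:  ~[3.0, 1.25]
print(interp(pts, nu=(1, 0)))             # df/dx = y: ~[2.0, 0.5], up to solver tolerance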
450
+ def _prepare_xi(self, xi):
451
+ ndim = len(self.grid)
452
+ xi = _ndim_coords_from_arrays(xi, ndim=ndim)
453
+ if xi.shape[-1] != len(self.grid):
454
+ raise ValueError("The requested sample points xi have dimension "
455
+ f"{xi.shape[-1]} but this "
456
+ f"RegularGridInterpolator has dimension {ndim}")
457
+
458
+ xi_shape = xi.shape
459
+ xi = xi.reshape(-1, xi_shape[-1])
460
+ xi = np.asarray(xi, dtype=float)
461
+
462
+ # find nans in input
463
+ nans = np.any(np.isnan(xi), axis=-1)
464
+
465
+ if self.bounds_error:
466
+ for i, p in enumerate(xi.T):
467
+ if not np.logical_and(np.all(self.grid[i][0] <= p),
468
+ np.all(p <= self.grid[i][-1])):
469
+ raise ValueError("One of the requested xi is out of bounds "
470
+ "in dimension %d" % i)
471
+ out_of_bounds = None
472
+ else:
473
+ out_of_bounds = self._find_out_of_bounds(xi.T)
474
+
475
+ return xi, xi_shape, ndim, nans, out_of_bounds
476
+
477
+ def _evaluate_linear(self, indices, norm_distances):
478
+ # slice for broadcasting over trailing dimensions in self.values
479
+ vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
480
+
481
+ # Compute shifting up front before zipping everything together
482
+ shift_norm_distances = [1 - yi for yi in norm_distances]
483
+ shift_indices = [i + 1 for i in indices]
484
+
485
+ # The formula for linear interpolation in 2d takes the form:
486
+ # values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
487
+ # self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
488
+ # self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
489
+ # self.values[(i0 + 1, i1 + 1)] * y0 * y1
490
+ # We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
491
+ zipped1 = zip(indices, shift_norm_distances)
492
+ zipped2 = zip(shift_indices, norm_distances)
493
+
494
+ # Take all products of zipped1 and zipped2 and iterate over them
495
+ # to get the terms in the above formula. This corresponds to iterating
496
+ # over the vertices of a hypercube.
497
+ hypercube = itertools.product(*zip(zipped1, zipped2))
498
+ value = np.array([0.])
499
+ for h in hypercube:
500
+ edge_indices, weights = zip(*h)
501
+ weight = np.array([1.])
502
+ for w in weights:
503
+ weight = weight * w
504
+ term = np.asarray(self.values[edge_indices]) * weight[vslice]
505
+ value = value + term # cannot use += because broadcasting
506
+ return value
507
+
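Illustration only: the 2-D case of the weighting formula in the comment above, written out by hand for a single query point. The helper `bilinear_single` is hypothetical (not part of the class); boundary clamping, which the real `_find_indices` performs, is omitted for brevity.

import numpy as np

def bilinear_single(xgrid, ygrid, values, xq, yq):
    # locate the lower-left corner of the cell containing (xq, yq)
    i0 = np.searchsorted(xgrid, xq) - 1
    i1 = np.searchsorted(ygrid, yq) - 1
    # normalized distances within the cell, in [0, 1]
    y0 = (xq - xgrid[i0]) / (xgrid[i0 + 1] - xgrid[i0])
    y1 = (yq - ygrid[i1]) / (ygrid[i1 + 1] - ygrid[i1])
    # the four-term formula from the comment in _evaluate_linear
    return (values[i0, i1] * (1 - y0) * (1 - y1)
            + values[i0, i1 + 1] * (1 - y0) * y1
            + values[i0 + 1, i1] * y0 * (1 - y1)
            + values[i0 + 1, i1 + 1] * y0 * y1)

xg = np.array([0.0, 1.0, 2.0])
yg = np.array([0.0, 1.0, 2.0])
vals = np.add.outer(xg, yg)               # f(x, y) = x + y is reproduced exactly
print(bilinear_single(xg, yg, vals, 0.3, 1.6))   # -> 1.9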
508
+ def _evaluate_nearest(self, indices, norm_distances):
509
+ idx_res = [np.where(yi <= .5, i, i + 1)
510
+ for i, yi in zip(indices, norm_distances)]
511
+ return self.values[tuple(idx_res)]
512
+
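Illustration only: the ``yi <= .5`` test above means a query exactly halfway between two grid points resolves to the lower-index neighbour.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.array([0.0, 1.0])
data = np.array([10.0, 20.0])
interp = RegularGridInterpolator((x,), data, method="nearest")
print(interp([[0.5]]))                    # -> [10.], the midpoint rounds down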
513
+ def _validate_grid_dimensions(self, points, method):
514
+ k = self._SPLINE_DEGREE_MAP[method]
515
+ for i, point in enumerate(points):
516
+ ndim = len(np.atleast_1d(point))
517
+ if ndim <= k:
518
+ raise ValueError(f"There are {ndim} points in dimension {i},"
519
+ f" but method {method} requires at least "
520
+ f" {k+1} points per dimension.")
521
+
522
+ def _evaluate_spline(self, xi, method):
523
+ # ensure xi is 2D list of points to evaluate (`m` is the number of
524
+ # points and `n` is the number of interpolation dimensions,
525
+ # ``n == len(self.grid)``.)
526
+ if xi.ndim == 1:
527
+ xi = xi.reshape((1, xi.size))
528
+ m, n = xi.shape
529
+
530
+ # Reorder the axes: n-dimensional process iterates over the
531
+ # interpolation axes from the last axis downwards: E.g. for a 4D grid
532
+ # the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
533
+ # the 0th axis of its argument array (for 1D routine it's its ``y``
534
+ # array). Thus permute the interpolation axes of `values` *and keep
535
+ # trailing dimensions trailing*.
536
+ axes = tuple(range(self.values.ndim))
537
+ axx = axes[:n][::-1] + axes[n:]
538
+ values = self.values.transpose(axx)
539
+
540
+ if method == 'pchip':
541
+ _eval_func = self._do_pchip
542
+ else:
543
+ _eval_func = self._do_spline_fit
544
+ k = self._SPLINE_DEGREE_MAP[method]
545
+
546
+ # Non-stationary procedure: difficult to vectorize this part entirely
547
+ # into numpy-level operations. Unfortunately this requires explicit
548
+ # looping over each point in xi.
549
+
550
+ # can at least vectorize the first pass across all points in the
551
+ # last variable of xi.
552
+ last_dim = n - 1
553
+ first_values = _eval_func(self.grid[last_dim],
554
+ values,
555
+ xi[:, last_dim],
556
+ k)
557
+
558
+ # the rest of the dimensions have to be on a per point-in-xi basis
559
+ shape = (m, *self.values.shape[n:])
560
+ result = np.empty(shape, dtype=self.values.dtype)
561
+ for j in range(m):
562
+ # Main process: Apply 1D interpolate in each dimension
563
+ # sequentially, starting with the last dimension.
564
+ # These are then "folded" into the next dimension in-place.
565
+ folded_values = first_values[j, ...]
566
+ for i in range(last_dim-1, -1, -1):
567
+ # Interpolate along each remaining dimension in turn.
568
+ # Each pass collapses the current 1D sequence into a scalar.
569
+ folded_values = _eval_func(self.grid[i],
570
+ folded_values,
571
+ xi[j, i],
572
+ k)
573
+ result[j, ...] = folded_values
574
+
575
+ return result
576
+
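Illustration only: a hand-rolled 2-D version of the per-axis "folding" described above, for a single query point. It uses only the public `make_interp_spline` (which `_do_spline_fit` wraps); the real method additionally vectorizes the first pass over all query points.

import numpy as np
from scipy.interpolate import make_interp_spline

x = np.linspace(0.0, 1.0, 5)
y = np.linspace(0.0, 1.0, 5)
data = np.sin(np.add.outer(x, y))         # values on the (x, y) grid
xq, yq = 0.37, 0.81

# pass 1: interpolate along the last axis (y) -> one value per x grid line
cols = make_interp_spline(y, data.T, k=3, axis=0)(yq)   # shape (5,)
# pass 2: fold the result along x -> a scalar
value = make_interp_spline(x, cols, k=3, axis=0)(xq)
print(value, np.sin(xq + yq))             # spline estimate vs. ground truth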
577
+ @staticmethod
578
+ def _do_spline_fit(x, y, pt, k):
579
+ local_interp = make_interp_spline(x, y, k=k, axis=0)
580
+ values = local_interp(pt)
581
+ return values
582
+
583
+ @staticmethod
584
+ def _do_pchip(x, y, pt, k):
585
+ local_interp = PchipInterpolator(x, y, axis=0)
586
+ values = local_interp(pt)
587
+ return values
588
+
589
+ def _find_indices(self, xi):
590
+ return find_indices(self.grid, xi)
591
+
592
+ def _find_out_of_bounds(self, xi):
593
+ # check for out of bounds xi
594
+ out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
595
+ # iterate through dimensions
596
+ for x, grid in zip(xi, self.grid):
597
+ out_of_bounds += x < grid[0]
598
+ out_of_bounds += x > grid[-1]
599
+ return out_of_bounds
600
+
601
+
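Illustration only (not part of the file): how `bounds_error=False` interacts with `fill_value`, as implemented by `_find_out_of_bounds` and the fill step in `__call__`.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 2.0])
data = np.add.outer(x, y)                 # f(x, y) = x + y

# bounds_error=False + a fill value: out-of-range points get the fill value
interp = RegularGridInterpolator((x, y), data, bounds_error=False, fill_value=-1.0)
print(interp([[0.5, 0.5], [5.0, 5.0]]))   # -> [ 1., -1.]

# fill_value=None extrapolates instead of filling
interp2 = RegularGridInterpolator((x, y), data, bounds_error=False, fill_value=None)
print(interp2([[5.0, 5.0]]))              # -> [10.] for the linear method (exact for a linear f)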
602
+ def interpn(points, values, xi, method="linear", bounds_error=True,
603
+ fill_value=np.nan):
604
+ """
605
+ Multidimensional interpolation on regular or rectilinear grids.
606
+
607
+ Strictly speaking, not all regular grids are supported - this function
608
+ works on *rectilinear* grids, that is, rectangular grids with even or
609
+ uneven spacing.
610
+
611
+ Parameters
612
+ ----------
613
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
614
+ The points defining the regular grid in n dimensions. The points in
615
+ each dimension (i.e. every element of the points tuple) must be
616
+ strictly ascending or descending.
617
+
618
+ values : array_like, shape (m1, ..., mn, ...)
619
+ The data on the regular grid in n dimensions. Complex data is
620
+ accepted.
621
+
622
+ .. deprecated:: 1.13.0
623
+ Complex data is deprecated with ``method="pchip"`` and will raise an
624
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
625
+ works with real values. If you are trying to use the real components of
626
+ the passed array, use ``np.real`` on ``values``.
627
+
628
+ xi : ndarray of shape (..., ndim)
629
+ The coordinates to sample the gridded data at.
630
+
631
+ method : str, optional
632
+ The method of interpolation to perform. Supported are "linear",
633
+ "nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
634
+ "splinef2d" is only supported for 2-dimensional data.
635
+
636
+ bounds_error : bool, optional
637
+ If True, when interpolated values are requested outside of the
638
+ domain of the input data, a ValueError is raised.
639
+ If False, then `fill_value` is used.
640
+
641
+ fill_value : number, optional
642
+ If provided, the value to use for points outside of the
643
+ interpolation domain. If None, values outside
644
+ the domain are extrapolated. Extrapolation is not supported by method
645
+ "splinef2d".
646
+
647
+ Returns
648
+ -------
649
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
650
+ Interpolated values at `xi`. See notes for behaviour when
651
+ ``xi.ndim == 1``.
652
+
653
+ See Also
654
+ --------
655
+ NearestNDInterpolator : Nearest neighbor interpolation on unstructured
656
+ data in N dimensions
657
+ LinearNDInterpolator : Piecewise linear interpolant on unstructured data
658
+ in N dimensions
659
+ RegularGridInterpolator : interpolation on a regular or rectilinear grid
660
+ in arbitrary dimensions (`interpn` wraps this
661
+ class).
662
+ RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
663
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
664
+ (suitable for e.g., N-D image resampling)
665
+
666
+ Notes
667
+ -----
668
+
669
+ .. versionadded:: 0.14
670
+
671
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
672
+ the 0 position of the returned array, values_x, so its shape is
673
+ instead ``(1,) + values.shape[ndim:]``.
674
+
675
+ If the input data is such that input dimensions have incommensurate
676
+ units and differ by many orders of magnitude, the interpolant may have
677
+ numerical artifacts. Consider rescaling the data before interpolation.
678
+
679
+ Examples
680
+ --------
681
+ Evaluate a simple example function on the points of a regular 3-D grid:
682
+
683
+ >>> import numpy as np
684
+ >>> from scipy.interpolate import interpn
685
+ >>> def value_func_3d(x, y, z):
686
+ ... return 2 * x + 3 * y - z
687
+ >>> x = np.linspace(0, 4, 5)
688
+ >>> y = np.linspace(0, 5, 6)
689
+ >>> z = np.linspace(0, 6, 7)
690
+ >>> points = (x, y, z)
691
+ >>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
692
+
693
+ Evaluate the interpolating function at a point
694
+
695
+ >>> point = np.array([2.21, 3.12, 1.15])
696
+ >>> print(interpn(points, values, point))
697
+ [12.63]
698
+
699
+ """
700
+ # sanity check 'method' kwarg
701
+ if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
702
+ "splinef2d", "slinear",
703
+ "slinear_legacy", "cubic_legacy", "quintic_legacy"]:
704
+ raise ValueError("interpn only understands the methods 'linear', "
705
+ "'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
706
+ f"and 'splinef2d'. You provided {method}.")
707
+
708
+ if not hasattr(values, 'ndim'):
709
+ values = np.asarray(values)
710
+
711
+ ndim = values.ndim
712
+ if ndim > 2 and method == "splinef2d":
713
+ raise ValueError("The method splinef2d can only be used for "
714
+ "2-dimensional input data")
715
+ if not bounds_error and fill_value is None and method == "splinef2d":
716
+ raise ValueError("The method splinef2d does not support extrapolation.")
717
+
718
+ # sanity check consistency of input dimensions
719
+ if len(points) > ndim:
720
+ raise ValueError("There are %d point arrays, but values has %d "
721
+ "dimensions" % (len(points), ndim))
722
+ if len(points) != ndim and method == 'splinef2d':
723
+ raise ValueError("The method splinef2d can only be used for "
724
+ "scalar data with one point per coordinate")
725
+
726
+ grid, descending_dimensions = _check_points(points)
727
+ _check_dimensionality(grid, values)
728
+
729
+ # sanity check requested xi
730
+ xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
731
+ if xi.shape[-1] != len(grid):
732
+ raise ValueError("The requested sample points xi have dimension "
733
+ "%d, but this RegularGridInterpolator has "
734
+ "dimension %d" % (xi.shape[-1], len(grid)))
735
+
736
+ if bounds_error:
737
+ for i, p in enumerate(xi.T):
738
+ if not np.logical_and(np.all(grid[i][0] <= p),
739
+ np.all(p <= grid[i][-1])):
740
+ raise ValueError("One of the requested xi is out of bounds "
741
+ "in dimension %d" % i)
742
+
743
+ # perform interpolation
744
+ if method in RegularGridInterpolator._ALL_METHODS:
745
+ interp = RegularGridInterpolator(points, values, method=method,
746
+ bounds_error=bounds_error,
747
+ fill_value=fill_value)
748
+ return interp(xi)
749
+ elif method == "splinef2d":
750
+ xi_shape = xi.shape
751
+ xi = xi.reshape(-1, xi.shape[-1])
752
+
753
+ # RectBivariateSpline doesn't support fill_value; we need to wrap here
754
+ idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
755
+ grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
756
+ axis=0)
757
+ result = np.empty_like(xi[:, 0])
758
+
759
+ # build the 2-D spline from values (values[:] is a view, not a copy)
760
+ interp = RectBivariateSpline(points[0], points[1], values[:])
761
+ result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
762
+ result[np.logical_not(idx_valid)] = fill_value
763
+
764
+ return result.reshape(xi_shape[:-1])
765
+ else:
766
+ raise ValueError(f"unknown {method = }")
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (296 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (338 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/fitpack.py ADDED
@@ -0,0 +1,32 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.interpolate` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'BSpline',
10
+ 'bisplev',
11
+ 'bisplrep',
12
+ 'dblint',
13
+ 'insert',
14
+ 'spalde',
15
+ 'splantider',
16
+ 'splder',
17
+ 'splev',
18
+ 'splint',
19
+ 'splprep',
20
+ 'splrep',
21
+ 'sproot',
22
+ ]
23
+
24
+
25
+ def __dir__():
26
+ return __all__
27
+
28
+
29
+ def __getattr__(name):
30
+ return _sub_module_deprecation(sub_package="interpolate", module="fitpack",
31
+ private_modules=["_fitpack_py"], all=__all__,
32
+ attribute=name)
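For context (not part of the commit): the shim above only re-exports names and routes attribute access through `_sub_module_deprecation`; the supported import path is the top-level namespace, e.g.

# preferred, public import path
from scipy.interpolate import splrep, splev

# deprecated: importing via scipy.interpolate.fitpack emits a DeprecationWarning
# and is scheduled for removal in SciPy v2.0.0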
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py ADDED
@@ -0,0 +1,25 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.interpolate` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'CloughTocher2DInterpolator',
10
+ 'LinearNDInterpolator',
11
+ 'NDInterpolatorBase',
12
+ 'NearestNDInterpolator',
13
+ 'cKDTree',
14
+ 'griddata',
15
+ ]
16
+
17
+
18
+ def __dir__():
19
+ return __all__
20
+
21
+
22
+ def __getattr__(name):
23
+ return _sub_module_deprecation(sub_package="interpolate", module="ndgriddata",
24
+ private_modules=["_ndgriddata"], all=__all__,
25
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/rbf.py ADDED
@@ -0,0 +1,25 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.interpolate` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'Rbf',
10
+ 'cdist',
11
+ 'linalg',
12
+ 'pdist',
13
+ 'squareform',
14
+ 'xlogy',
15
+ ]
16
+
17
+
18
+ def __dir__():
19
+ return __all__
20
+
21
+
22
+ def __getattr__(name):
23
+ return _sub_module_deprecation(sub_package="interpolate", module="rbf",
24
+ private_modules=["_rbf"], all=__all__,
25
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc ADDED
Binary file (85.9 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc ADDED
Binary file (16.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc ADDED
Binary file (46.8 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc ADDED
Binary file (12.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc ADDED
Binary file (75 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc ADDED
Binary file (8.94 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc ADDED
Binary file (2.47 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc ADDED
Binary file (32.9 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc ADDED
Binary file (6.22 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc ADDED
Binary file (16 kB).