applied-ai-018 committed on
Commit ecb8b1c · verified · 1 Parent(s): 0919000

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py +0 -0
  8. venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py +202 -0
  14. venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py +421 -0
  15. venv/lib/python3.10/site-packages/scipy/interpolate/__init__.py +201 -0
  16. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so +0 -0
  37. venv/lib/python3.10/site-packages/scipy/interpolate/_bsplines.py +2215 -0
  38. venv/lib/python3.10/site-packages/scipy/interpolate/_cubic.py +970 -0
  39. venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so +0 -0
  40. venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack2.py +2362 -0
  41. venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py +805 -0
  42. venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py +796 -0
  43. venv/lib/python3.10/site-packages/scipy/interpolate/_interpolate.py +2473 -0
  44. venv/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py +358 -0
  45. venv/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py +332 -0
  46. venv/lib/python3.10/site-packages/scipy/interpolate/_pade.py +67 -0
  47. venv/lib/python3.10/site-packages/scipy/interpolate/_polyint.py +938 -0
  48. venv/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so +0 -0
  49. venv/lib/python3.10/site-packages/scipy/interpolate/_rbf.py +290 -0
  50. venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py +550 -0
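The commit message refers to the Hub's upload-large-folder tool, which pushes a directory tree to a repo and routes large binaries (such as the .pt checkpoints listed above) through Git LFS. A rough sketch only, assuming a recent huggingface_hub release that provides HfApi.upload_large_folder; the repo id and folder path below are placeholders, not values taken from this commit:

    # Hedged sketch of pushing a large local folder to a Hub repo.
    # repo_id and folder_path are placeholders, not taken from this commit.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="applied-ai-018/example-repo",  # hypothetical target repository
        repo_type="model",
        folder_path="./local_folder",           # directory containing ckpts/, venv/, ...
    )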
ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a322f1416dfe5118ab829ae03b1ae52eff67c1b1047fdadd230dfcde4661b26
+ size 9372
ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbc015ef43b7e35900effc49169eba511c5ed150c7a219254ba6ccde443569d5
+ size 9387
ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6f6b67a50f3e2aa755f42e43f2799c14c2150c5a818dc1600d9c109135e5787
+ size 33555533
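Each of the three checkpoint entries above is stored through Git LFS, so the diff shows only a pointer file: the LFS spec version, the SHA-256 of the actual blob (oid), and its size in bytes. A minimal sketch of checking a downloaded blob against the fp32.pt pointer shown above; the local file path is a placeholder:

    # Hedged sketch: verify a downloaded LFS object against its pointer metadata.
    # The oid and size are copied from the fp32.pt pointer above; the path is hypothetical.
    import hashlib

    expected_oid = "c6f6b67a50f3e2aa755f42e43f2799c14c2150c5a818dc1600d9c109135e5787"
    expected_size = 33555533

    with open("fp32.pt", "rb") as f:
        blob = f.read()

    assert len(blob) == expected_size, "size mismatch"
    assert hashlib.sha256(blob).hexdigest() == expected_oid, "checksum mismatch"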
venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.09 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc ADDED
Binary file (131 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc ADDED
Binary file (28.2 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc ADDED
Binary file (4.69 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc ADDED
Binary file (6.21 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py ADDED
@@ -0,0 +1,202 @@
+ import pytest
+ from pytest import raises as assert_raises
+ import numpy as np
+ from scipy.cluster.hierarchy import DisjointSet
+ import string
+
+
+ def generate_random_token():
+     k = len(string.ascii_letters)
+     tokens = list(np.arange(k, dtype=int))
+     tokens += list(np.arange(k, dtype=float))
+     tokens += list(string.ascii_letters)
+     tokens += [None for i in range(k)]
+     tokens = np.array(tokens, dtype=object)
+     rng = np.random.RandomState(seed=0)
+
+     while 1:
+         size = rng.randint(1, 3)
+         element = rng.choice(tokens, size)
+         if size == 1:
+             yield element[0]
+         else:
+             yield tuple(element)
+
+
+ def get_elements(n):
+     # dict is deterministic without difficulty of comparing numpy ints
+     elements = {}
+     for element in generate_random_token():
+         if element not in elements:
+             elements[element] = len(elements)
+         if len(elements) >= n:
+             break
+     return list(elements.keys())
+
+
+ def test_init():
+     n = 10
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+     assert dis.n_subsets == n
+     assert list(dis) == elements
+
+
+ def test_len():
+     n = 10
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+     assert len(dis) == n
+
+     dis.add("dummy")
+     assert len(dis) == n + 1
+
+
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_contains(n):
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+     for x in elements:
+         assert x in dis
+
+     assert "dummy" not in dis
+
+
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_add(n):
+     elements = get_elements(n)
+     dis1 = DisjointSet(elements)
+
+     dis2 = DisjointSet()
+     for i, x in enumerate(elements):
+         dis2.add(x)
+         assert len(dis2) == i + 1
+
+         # test idempotency by adding element again
+         dis2.add(x)
+         assert len(dis2) == i + 1
+
+     assert list(dis1) == list(dis2)
+
+
+ def test_element_not_present():
+     elements = get_elements(n=10)
+     dis = DisjointSet(elements)
+
+     with assert_raises(KeyError):
+         dis["dummy"]
+
+     with assert_raises(KeyError):
+         dis.merge(elements[0], "dummy")
+
+     with assert_raises(KeyError):
+         dis.connected(elements[0], "dummy")
+
+
+ @pytest.mark.parametrize("direction", ["forwards", "backwards"])
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_linear_union_sequence(n, direction):
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+     assert elements == list(dis)
+
+     indices = list(range(n - 1))
+     if direction == "backwards":
+         indices = indices[::-1]
+
+     for it, i in enumerate(indices):
+         assert not dis.connected(elements[i], elements[i + 1])
+         assert dis.merge(elements[i], elements[i + 1])
+         assert dis.connected(elements[i], elements[i + 1])
+         assert dis.n_subsets == n - 1 - it
+
+     roots = [dis[i] for i in elements]
+     if direction == "forwards":
+         assert all(elements[0] == r for r in roots)
+     else:
+         assert all(elements[-2] == r for r in roots)
+     assert not dis.merge(elements[0], elements[-1])
+
+
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_self_unions(n):
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+
+     for x in elements:
+         assert dis.connected(x, x)
+         assert not dis.merge(x, x)
+         assert dis.connected(x, x)
+         assert dis.n_subsets == len(elements)
+
+     assert elements == list(dis)
+     roots = [dis[x] for x in elements]
+     assert elements == roots
+
+
+ @pytest.mark.parametrize("order", ["ab", "ba"])
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_equal_size_ordering(n, order):
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+
+     rng = np.random.RandomState(seed=0)
+     indices = np.arange(n)
+     rng.shuffle(indices)
+
+     for i in range(0, len(indices), 2):
+         a, b = elements[indices[i]], elements[indices[i + 1]]
+         if order == "ab":
+             assert dis.merge(a, b)
+         else:
+             assert dis.merge(b, a)
+
+         expected = elements[min(indices[i], indices[i + 1])]
+         assert dis[a] == expected
+         assert dis[b] == expected
+
+
+ @pytest.mark.parametrize("kmax", [5, 10])
+ def test_binary_tree(kmax):
+     n = 2**kmax
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+     rng = np.random.RandomState(seed=0)
+
+     for k in 2**np.arange(kmax):
+         for i in range(0, n, 2 * k):
+             r1, r2 = rng.randint(0, k, size=2)
+             a, b = elements[i + r1], elements[i + k + r2]
+             assert not dis.connected(a, b)
+             assert dis.merge(a, b)
+             assert dis.connected(a, b)
+
+         assert elements == list(dis)
+         roots = [dis[i] for i in elements]
+         expected_indices = np.arange(n) - np.arange(n) % (2 * k)
+         expected = [elements[i] for i in expected_indices]
+         assert roots == expected
+
+
+ @pytest.mark.parametrize("n", [10, 100])
+ def test_subsets(n):
+     elements = get_elements(n)
+     dis = DisjointSet(elements)
+
+     rng = np.random.RandomState(seed=0)
+     for i, j in rng.randint(0, n, (n, 2)):
+         x = elements[i]
+         y = elements[j]
+
+         expected = {element for element in dis if {dis[element]} == {dis[x]}}
+         assert dis.subset_size(x) == len(dis.subset(x))
+         assert expected == dis.subset(x)
+
+         expected = {dis[element]: set() for element in dis}
+         for element in dis:
+             expected[dis[element]].add(element)
+         expected = list(expected.values())
+         assert expected == dis.subsets()
+
+         dis.merge(x, y)
+         assert dis.subset(x) == dis.subset(y)
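The test file above exercises the public DisjointSet API: add, merge, connected, subset, subset_size, subsets, n_subsets, and indexing to obtain a subset's root element. A minimal usage sketch of that same API, separate from the test suite:

    # Minimal sketch of the DisjointSet API covered by test_disjoint_set.py above.
    from scipy.cluster.hierarchy import DisjointSet

    ds = DisjointSet(["a", "b", "c", "d"])
    ds.merge("a", "b")               # True: two distinct subsets were joined
    ds.merge("c", "d")
    print(ds.connected("a", "b"))    # True
    print(ds.connected("a", "c"))    # False
    print(ds.n_subsets)              # 2
    print(ds["b"])                   # root of b's subset ("a" here)
    print(ds.subsets())              # [{'a', 'b'}, {'c', 'd'}]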
venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py ADDED
@@ -0,0 +1,421 @@
1
+ import warnings
2
+ import sys
3
+ from copy import deepcopy
4
+
5
+ import numpy as np
6
+ from numpy.testing import (
7
+ assert_array_equal, assert_equal, assert_, suppress_warnings
8
+ )
9
+ import pytest
10
+ from pytest import raises as assert_raises
11
+
12
+ from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
13
+ ClusterError, _krandinit)
14
+ from scipy.cluster import _vq
15
+ from scipy.conftest import array_api_compatible
16
+ from scipy.sparse._sputils import matrix
17
+
18
+ from scipy._lib._array_api import (
19
+ SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal
20
+ )
21
+
22
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
23
+ skip_if_array_api = pytest.mark.skip_if_array_api
24
+
25
+ TESTDATA_2D = np.array([
26
+ -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
27
+ -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
28
+ 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
29
+ -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
30
+ -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
31
+ -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
32
+ 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
33
+ -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
34
+ -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
35
+ -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
36
+ 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
37
+ -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
38
+ 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
39
+ -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
40
+ 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
41
+ -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
42
+ 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
43
+ 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
44
+ -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
45
+ 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
46
+ -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
47
+ -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
48
+ -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
49
+ 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
50
+ -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
51
+ 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
52
+ 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
53
+ -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
54
+ 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
55
+ 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
56
+ -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
57
+ -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
58
+ 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
59
+ -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
60
+ -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
61
+ -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
62
+ 2.11]).reshape((200, 2))
63
+
64
+
65
+ # Global data
66
+ X = np.array([[3.0, 3], [4, 3], [4, 2],
67
+ [9, 2], [5, 1], [6, 2], [9, 4],
68
+ [5, 2], [5, 4], [7, 4], [6, 5]])
69
+
70
+ CODET1 = np.array([[3.0000, 3.0000],
71
+ [6.2000, 4.0000],
72
+ [5.8000, 1.8000]])
73
+
74
+ CODET2 = np.array([[11.0/3, 8.0/3],
75
+ [6.7500, 4.2500],
76
+ [6.2500, 1.7500]])
77
+
78
+ LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
79
+
80
+
81
+ class TestWhiten:
82
+
83
+ def test_whiten(self, xp):
84
+ desired = xp.asarray([[5.08738849, 2.97091878],
85
+ [3.19909255, 0.69660580],
86
+ [4.51041982, 0.02640918],
87
+ [4.38567074, 0.95120889],
88
+ [2.32191480, 1.63195503]])
89
+
90
+ obs = xp.asarray([[0.98744510, 0.82766775],
91
+ [0.62093317, 0.19406729],
92
+ [0.87545741, 0.00735733],
93
+ [0.85124403, 0.26499712],
94
+ [0.45067590, 0.45464607]])
95
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
96
+
97
+ def test_whiten_zero_std(self, xp):
98
+ desired = xp.asarray([[0., 1.0, 2.86666544],
99
+ [0., 1.0, 1.32460034],
100
+ [0., 1.0, 3.74382172]])
101
+
102
+ obs = xp.asarray([[0., 1., 0.74109533],
103
+ [0., 1., 0.34243798],
104
+ [0., 1., 0.96785929]])
105
+ with warnings.catch_warnings(record=True) as w:
106
+ warnings.simplefilter('always')
107
+
108
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
109
+
110
+ assert_equal(len(w), 1)
111
+ assert_(issubclass(w[-1].category, RuntimeWarning))
112
+
113
+ def test_whiten_not_finite(self, xp):
114
+ for bad_value in xp.nan, xp.inf, -xp.inf:
115
+ obs = xp.asarray([[0.98744510, bad_value],
116
+ [0.62093317, 0.19406729],
117
+ [0.87545741, 0.00735733],
118
+ [0.85124403, 0.26499712],
119
+ [0.45067590, 0.45464607]])
120
+ assert_raises(ValueError, whiten, obs)
121
+
122
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
123
+ reason='`np.matrix` unsupported in array API mode')
124
+ def test_whiten_not_finite_matrix(self, xp):
125
+ for bad_value in np.nan, np.inf, -np.inf:
126
+ obs = matrix([[0.98744510, bad_value],
127
+ [0.62093317, 0.19406729],
128
+ [0.87545741, 0.00735733],
129
+ [0.85124403, 0.26499712],
130
+ [0.45067590, 0.45464607]])
131
+ assert_raises(ValueError, whiten, obs)
132
+
133
+
134
+ class TestVq:
135
+
136
+ @skip_if_array_api(cpu_only=True)
137
+ def test_py_vq(self, xp):
138
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
139
+ # label1.dtype varies between int32 and int64 over platforms
140
+ label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0]
141
+ xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64),
142
+ check_dtype=False)
143
+
144
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
145
+ reason='`np.matrix` unsupported in array API mode')
146
+ def test_py_vq_matrix(self, xp):
147
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
148
+ # label1.dtype varies between int32 and int64 over platforms
149
+ label1 = py_vq(matrix(X), matrix(initc))[0]
150
+ assert_array_equal(label1, LABEL1)
151
+
152
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
153
+ def test_vq(self, xp):
154
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
155
+ label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc))
156
+ assert_array_equal(label1, LABEL1)
157
+ _, _ = vq(xp.asarray(X), xp.asarray(initc))
158
+
159
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
160
+ reason='`np.matrix` unsupported in array API mode')
161
+ def test_vq_matrix(self, xp):
162
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
163
+ label1, _ = _vq.vq(matrix(X), matrix(initc))
164
+ assert_array_equal(label1, LABEL1)
165
+ _, _ = vq(matrix(X), matrix(initc))
166
+
167
+ @skip_if_array_api(cpu_only=True)
168
+ def test_vq_1d(self, xp):
169
+ # Test special rank 1 vq algo, python implementation.
170
+ data = X[:, 0]
171
+ initc = data[:3]
172
+ a, b = _vq.vq(data, initc)
173
+ data = xp.asarray(data)
174
+ initc = xp.asarray(initc)
175
+ ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
176
+ # ta.dtype varies between int32 and int64 over platforms
177
+ xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False)
178
+ xp_assert_equal(tb, xp.asarray(b))
179
+
180
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
181
+ def test__vq_sametype(self, xp):
182
+ a = xp.asarray([1.0, 2.0], dtype=xp.float64)
183
+ b = a.astype(xp.float32)
184
+ assert_raises(TypeError, _vq.vq, a, b)
185
+
186
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
187
+ def test__vq_invalid_type(self, xp):
188
+ a = xp.asarray([1, 2], dtype=int)
189
+ assert_raises(TypeError, _vq.vq, a, a)
190
+
191
+ @skip_if_array_api(cpu_only=True)
192
+ def test_vq_large_nfeat(self, xp):
193
+ X = np.random.rand(20, 20)
194
+ code_book = np.random.rand(3, 20)
195
+
196
+ codes0, dis0 = _vq.vq(X, code_book)
197
+ codes1, dis1 = py_vq(
198
+ xp.asarray(X), xp.asarray(code_book)
199
+ )
200
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
201
+ # codes1.dtype varies between int32 and int64 over platforms
202
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
203
+
204
+ X = X.astype(np.float32)
205
+ code_book = code_book.astype(np.float32)
206
+
207
+ codes0, dis0 = _vq.vq(X, code_book)
208
+ codes1, dis1 = py_vq(
209
+ xp.asarray(X), xp.asarray(code_book)
210
+ )
211
+ xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5)
212
+ # codes1.dtype varies between int32 and int64 over platforms
213
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
214
+
215
+ @skip_if_array_api(cpu_only=True)
216
+ def test_vq_large_features(self, xp):
217
+ X = np.random.rand(10, 5) * 1000000
218
+ code_book = np.random.rand(2, 5) * 1000000
219
+
220
+ codes0, dis0 = _vq.vq(X, code_book)
221
+ codes1, dis1 = py_vq(
222
+ xp.asarray(X), xp.asarray(code_book)
223
+ )
224
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
225
+ # codes1.dtype varies between int32 and int64 over platforms
226
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
227
+
228
+
229
+ # Whole class skipped on GPU for now;
230
+ # once pdist/cdist are hooked up for CuPy, more tests will work
231
+ @skip_if_array_api(cpu_only=True)
232
+ class TestKMean:
233
+
234
+ def test_large_features(self, xp):
235
+ # Generate a data set with large values, and run kmeans on it to
236
+ # (regression for 1077).
237
+ d = 300
238
+ n = 100
239
+
240
+ m1 = np.random.randn(d)
241
+ m2 = np.random.randn(d)
242
+ x = 10000 * np.random.randn(n, d) - 20000 * m1
243
+ y = 10000 * np.random.randn(n, d) + 20000 * m2
244
+
245
+ data = np.empty((x.shape[0] + y.shape[0], d), np.float64)
246
+ data[:x.shape[0]] = x
247
+ data[x.shape[0]:] = y
248
+
249
+ kmeans(xp.asarray(data), 2)
250
+
251
+ def test_kmeans_simple(self, xp):
252
+ np.random.seed(54321)
253
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
254
+ code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0]
255
+ xp_assert_close(code1, xp.asarray(CODET2))
256
+
257
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
258
+ reason='`np.matrix` unsupported in array API mode')
259
+ def test_kmeans_simple_matrix(self, xp):
260
+ np.random.seed(54321)
261
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
262
+ code1 = kmeans(matrix(X), matrix(initc), iter=1)[0]
263
+ xp_assert_close(code1, CODET2)
264
+
265
+ def test_kmeans_lost_cluster(self, xp):
266
+ # This will cause kmeans to have a cluster with no points.
267
+ data = xp.asarray(TESTDATA_2D)
268
+ initk = xp.asarray([[-1.8127404, -0.67128041],
269
+ [2.04621601, 0.07401111],
270
+ [-2.31149087, -0.05160469]])
271
+
272
+ kmeans(data, initk)
273
+ with suppress_warnings() as sup:
274
+ sup.filter(UserWarning,
275
+ "One of the clusters is empty. Re-run kmeans with a "
276
+ "different initialization")
277
+ kmeans2(data, initk, missing='warn')
278
+
279
+ assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
280
+
281
+ def test_kmeans2_simple(self, xp):
282
+ np.random.seed(12345678)
283
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
284
+ arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
285
+ for tp in arrays:
286
+ code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
287
+ code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
288
+
289
+ xp_assert_close(code1, xp.asarray(CODET1))
290
+ xp_assert_close(code2, xp.asarray(CODET2))
291
+
292
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
293
+ reason='`np.matrix` unsupported in array API mode')
294
+ def test_kmeans2_simple_matrix(self, xp):
295
+ np.random.seed(12345678)
296
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
297
+ code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0]
298
+ code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0]
299
+
300
+ xp_assert_close(code1, CODET1)
301
+ xp_assert_close(code2, CODET2)
302
+
303
+ def test_kmeans2_rank1(self, xp):
304
+ data = xp.asarray(TESTDATA_2D)
305
+ data1 = data[:, 0]
306
+
307
+ initc = data1[:3]
308
+ code = copy(initc, xp=xp)
309
+ kmeans2(data1, code, iter=1)[0]
310
+ kmeans2(data1, code, iter=2)[0]
311
+
312
+ def test_kmeans2_rank1_2(self, xp):
313
+ data = xp.asarray(TESTDATA_2D)
314
+ data1 = data[:, 0]
315
+ kmeans2(data1, 2, iter=1)
316
+
317
+ def test_kmeans2_high_dim(self, xp):
318
+ # test kmeans2 when the number of dimensions exceeds the number
319
+ # of input points
320
+ data = xp.asarray(TESTDATA_2D)
321
+ data = xp.reshape(data, (20, 20))[:10, :]
322
+ kmeans2(data, 2)
323
+
324
+ def test_kmeans2_init(self, xp):
325
+ np.random.seed(12345)
326
+ data = xp.asarray(TESTDATA_2D)
327
+ k = 3
328
+
329
+ kmeans2(data, k, minit='points')
330
+ kmeans2(data[:, 1], k, minit='points') # special case (1-D)
331
+
332
+ kmeans2(data, k, minit='++')
333
+ kmeans2(data[:, 1], k, minit='++') # special case (1-D)
334
+
335
+ # minit='random' can give warnings, filter those
336
+ with suppress_warnings() as sup:
337
+ sup.filter(message="One of the clusters is empty. Re-run.")
338
+ kmeans2(data, k, minit='random')
339
+ kmeans2(data[:, 1], k, minit='random') # special case (1-D)
340
+
341
+ @pytest.mark.skipif(sys.platform == 'win32',
342
+ reason='Fails with MemoryError in Wine.')
343
+ def test_krandinit(self, xp):
344
+ data = xp.asarray(TESTDATA_2D)
345
+ datas = [xp.reshape(data, (200, 2)),
346
+ xp.reshape(data, (20, 20))[:10, :]]
347
+ k = int(1e6)
348
+ for data in datas:
349
+ rng = np.random.default_rng(1234)
350
+ init = _krandinit(data, k, rng, xp)
351
+ orig_cov = cov(data.T)
352
+ init_cov = cov(init.T)
353
+ xp_assert_close(orig_cov, init_cov, atol=1e-2)
354
+
355
+ def test_kmeans2_empty(self, xp):
356
+ # Regression test for gh-1032.
357
+ assert_raises(ValueError, kmeans2, xp.asarray([]), 2)
358
+
359
+ def test_kmeans_0k(self, xp):
360
+ # Regression test for gh-1073: fail when k arg is 0.
361
+ assert_raises(ValueError, kmeans, xp.asarray(X), 0)
362
+ assert_raises(ValueError, kmeans2, xp.asarray(X), 0)
363
+ assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([]))
364
+
365
+ def test_kmeans_large_thres(self, xp):
366
+ # Regression test for gh-1774
367
+ x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64)
368
+ res = kmeans(x, 1, thresh=1e16)
369
+ xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64))
370
+ xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()])
371
+
372
+ def test_kmeans2_kpp_low_dim(self, xp):
373
+ # Regression test for gh-11462
374
+ prev_res = xp.asarray([[-1.95266667, 0.898],
375
+ [-3.153375, 3.3945]], dtype=xp.float64)
376
+ np.random.seed(42)
377
+ res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++')
378
+ xp_assert_close(res, prev_res)
379
+
380
+ def test_kmeans2_kpp_high_dim(self, xp):
381
+ # Regression test for gh-11462
382
+ n_dim = 100
383
+ size = 10
384
+ centers = np.vstack([5 * np.ones(n_dim),
385
+ -5 * np.ones(n_dim)])
386
+ np.random.seed(42)
387
+ data = np.vstack([
388
+ np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
389
+ np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
390
+ ])
391
+
392
+ data = xp.asarray(data)
393
+ res, _ = kmeans2(data, 2, minit='++')
394
+ xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers)))
395
+
396
+ def test_kmeans_diff_convergence(self, xp):
397
+ # Regression test for gh-8727
398
+ obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64)
399
+ res = kmeans(obs, xp.asarray([-3., 0.99]))
400
+ xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64))
401
+ xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()])
402
+
403
+ def test_kmeans_and_kmeans2_random_seed(self, xp):
404
+
405
+ seed_list = [
406
+ 1234, np.random.RandomState(1234), np.random.default_rng(1234)
407
+ ]
408
+
409
+ for seed in seed_list:
410
+ seed1 = deepcopy(seed)
411
+ seed2 = deepcopy(seed)
412
+ data = xp.asarray(TESTDATA_2D)
413
+ # test for kmeans
414
+ res1, _ = kmeans(data, 2, seed=seed1)
415
+ res2, _ = kmeans(data, 2, seed=seed2)
416
+ xp_assert_close(res1, res2, xp=xp) # should be same results
417
+ # test for kmeans2
418
+ for minit in ["random", "points", "++"]:
419
+ res1, _ = kmeans2(data, 2, minit=minit, seed=seed1)
420
+ res2, _ = kmeans2(data, 2, minit=minit, seed=seed2)
421
+ xp_assert_close(res1, res2, xp=xp) # should be same results
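test_vq.py above covers the whiten, kmeans, vq, and kmeans2 routines from scipy.cluster.vq. A minimal sketch of the basic pipeline those tests exercise, using synthetic data rather than the fixtures above:

    # Minimal sketch of the scipy.cluster.vq pipeline exercised by test_vq.py.
    import numpy as np
    from scipy.cluster.vq import whiten, kmeans, vq

    rng = np.random.default_rng(0)
    obs = rng.normal(size=(100, 2))       # 100 observations, 2 features
    w = whiten(obs)                       # rescale each feature to unit variance
    codebook, distortion = kmeans(w, 3)   # fit 3 centroids
    labels, dists = vq(w, codebook)       # assign each observation to its nearest centroid
    print(codebook.shape, labels.shape)   # e.g. (3, 2) (100,)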
venv/lib/python3.10/site-packages/scipy/interpolate/__init__.py ADDED
@@ -0,0 +1,201 @@
+ """
+ ========================================
+ Interpolation (:mod:`scipy.interpolate`)
+ ========================================
+
+ .. currentmodule:: scipy.interpolate
+
+ Sub-package for objects used in interpolation.
+
+ As listed below, this sub-package contains spline functions and classes,
+ 1-D and multidimensional (univariate and multivariate)
+ interpolation classes, Lagrange and Taylor polynomial interpolators, and
+ wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
+ and DFITPACK functions.
+
+ Univariate interpolation
+ ========================
+
+ .. autosummary::
+    :toctree: generated/
+
+    interp1d
+    BarycentricInterpolator
+    KroghInterpolator
+    barycentric_interpolate
+    krogh_interpolate
+    pchip_interpolate
+    CubicHermiteSpline
+    PchipInterpolator
+    Akima1DInterpolator
+    CubicSpline
+    PPoly
+    BPoly
+
+
+ Multivariate interpolation
+ ==========================
+
+ Unstructured data:
+
+ .. autosummary::
+    :toctree: generated/
+
+    griddata
+    LinearNDInterpolator
+    NearestNDInterpolator
+    CloughTocher2DInterpolator
+    RBFInterpolator
+    Rbf
+    interp2d
+
+ For data on a grid:
+
+ .. autosummary::
+    :toctree: generated/
+
+    interpn
+    RegularGridInterpolator
+    RectBivariateSpline
+
+ .. seealso::
+
+    `scipy.ndimage.map_coordinates`
+
+ Tensor product polynomials:
+
+ .. autosummary::
+    :toctree: generated/
+
+    NdPPoly
+    NdBSpline
+
+ 1-D Splines
+ ===========
+
+ .. autosummary::
+    :toctree: generated/
+
+    BSpline
+    make_interp_spline
+    make_lsq_spline
+    make_smoothing_spline
+
+ Functional interface to FITPACK routines:
+
+ .. autosummary::
+    :toctree: generated/
+
+    splrep
+    splprep
+    splev
+    splint
+    sproot
+    spalde
+    splder
+    splantider
+    insert
+
+ Object-oriented FITPACK interface:
+
+ .. autosummary::
+    :toctree: generated/
+
+    UnivariateSpline
+    InterpolatedUnivariateSpline
+    LSQUnivariateSpline
+
+
+
+ 2-D Splines
+ ===========
+
+ For data on a grid:
+
+ .. autosummary::
+    :toctree: generated/
+
+    RectBivariateSpline
+    RectSphereBivariateSpline
+
+ For unstructured data:
+
+ .. autosummary::
+    :toctree: generated/
+
+    BivariateSpline
+    SmoothBivariateSpline
+    SmoothSphereBivariateSpline
+    LSQBivariateSpline
+    LSQSphereBivariateSpline
+
+ Low-level interface to FITPACK functions:
+
+ .. autosummary::
+    :toctree: generated/
+
+    bisplrep
+    bisplev
+
+ Additional tools
+ ================
+
+ .. autosummary::
+    :toctree: generated/
+
+    lagrange
+    approximate_taylor_polynomial
+    pade
+
+ .. seealso::
+
+    `scipy.ndimage.map_coordinates`,
+    `scipy.ndimage.spline_filter`,
+    `scipy.signal.resample`,
+    `scipy.signal.bspline`,
+    `scipy.signal.gauss_spline`,
+    `scipy.signal.qspline1d`,
+    `scipy.signal.cspline1d`,
+    `scipy.signal.qspline1d_eval`,
+    `scipy.signal.cspline1d_eval`,
+    `scipy.signal.qspline2d`,
+    `scipy.signal.cspline2d`.
+
+ ``pchip`` is an alias of `PchipInterpolator` for backward compatibility
+ (should not be used in new code).
+ """
+ from ._interpolate import *
+ from ._fitpack_py import *
+
+ # New interface to fitpack library:
+ from ._fitpack2 import *
+
+ from ._rbf import Rbf
+
+ from ._rbfinterp import *
+
+ from ._polyint import *
+
+ from ._cubic import *
+
+ from ._ndgriddata import *
+
+ from ._bsplines import *
+
+ from ._pade import *
+
+ from ._rgi import *
+
+ from ._ndbspline import NdBSpline
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+ # Backward compatibility
+ pchip = PchipInterpolator
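The module docstring above catalogues the interpolation API; a short sketch using make_interp_spline, one of the listed 1-D spline constructors:

    # Minimal sketch using make_interp_spline from the listing above.
    import numpy as np
    from scipy.interpolate import make_interp_spline

    x = np.linspace(0, 2 * np.pi, 10)
    y = np.sin(x)
    spl = make_interp_spline(x, y, k=3)                 # cubic interpolating B-spline
    xx = np.linspace(0, 2 * np.pi, 200)
    print(float(np.max(np.abs(spl(xx) - np.sin(xx)))))  # small interpolation error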
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.76 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc ADDED
Binary file (63.9 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc ADDED
Binary file (30.8 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc ADDED
Binary file (82.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc ADDED
Binary file (22.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc ADDED
Binary file (27.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc ADDED
Binary file (71.2 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc ADDED
Binary file (9.68 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc ADDED
Binary file (32.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc ADDED
Binary file (720 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc ADDED
Binary file (933 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc ADDED
Binary file (886 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc ADDED
Binary file (726 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc ADDED
Binary file (752 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc ADDED
Binary file (639 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (617 kB). View file
 
venv/lib/python3.10/site-packages/scipy/interpolate/_bsplines.py ADDED
@@ -0,0 +1,2215 @@
1
+ import operator
2
+ from math import prod
3
+
4
+ import numpy as np
5
+ from scipy._lib._util import normalize_axis_index
6
+ from scipy.linalg import (get_lapack_funcs, LinAlgError,
7
+ cholesky_banded, cho_solve_banded,
8
+ solve, solve_banded)
9
+ from scipy.optimize import minimize_scalar
10
+ from . import _bspl
11
+ from . import _fitpack_impl
12
+ from scipy.sparse import csr_array
13
+ from scipy.special import poch
14
+ from itertools import combinations
15
+
16
+ __all__ = ["BSpline", "make_interp_spline", "make_lsq_spline",
17
+ "make_smoothing_spline"]
18
+
19
+
20
+ def _get_dtype(dtype):
21
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
22
+ if np.issubdtype(dtype, np.complexfloating):
23
+ return np.complex128
24
+ else:
25
+ return np.float64
26
+
27
+
28
+ def _as_float_array(x, check_finite=False):
29
+ """Convert the input into a C contiguous float array.
30
+
31
+ NB: Upcasts half- and single-precision floats to double precision.
32
+ """
33
+ x = np.ascontiguousarray(x)
34
+ dtyp = _get_dtype(x.dtype)
35
+ x = x.astype(dtyp, copy=False)
36
+ if check_finite and not np.isfinite(x).all():
37
+ raise ValueError("Array must not contain infs or nans.")
38
+ return x
39
+
40
+
41
+ def _dual_poly(j, k, t, y):
42
+ """
43
+ Dual polynomial of the B-spline B_{j,k,t} -
44
+ polynomial which is associated with B_{j,k,t}:
45
+ $p_{j,k}(y) = (y - t_{j+1})(y - t_{j+2})...(y - t_{j+k})$
46
+ """
47
+ if k == 0:
48
+ return 1
49
+ return np.prod([(y - t[j + i]) for i in range(1, k + 1)])
50
+
51
+
52
+ def _diff_dual_poly(j, k, y, d, t):
53
+ """
54
+ d-th derivative of the dual polynomial $p_{j,k}(y)$
55
+ """
56
+ if d == 0:
57
+ return _dual_poly(j, k, t, y)
58
+ if d == k:
59
+ return poch(1, k)
60
+ comb = list(combinations(range(j + 1, j + k + 1), d))
61
+ res = 0
62
+ for i in range(len(comb) * len(comb[0])):
63
+ res += np.prod([(y - t[j + p]) for p in range(1, k + 1)
64
+ if (j + p) not in comb[i//d]])
65
+ return res
66
+
67
+
68
+ class BSpline:
69
+ r"""Univariate spline in the B-spline basis.
70
+
71
+ .. math::
72
+
73
+ S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
74
+
75
+ where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
76
+ and knots `t`.
77
+
78
+ Parameters
79
+ ----------
80
+ t : ndarray, shape (n+k+1,)
81
+ knots
82
+ c : ndarray, shape (>=n, ...)
83
+ spline coefficients
84
+ k : int
85
+ B-spline degree
86
+ extrapolate : bool or 'periodic', optional
87
+ whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
88
+ or to return nans.
89
+ If True, extrapolates the first and last polynomial pieces of b-spline
90
+ functions active on the base interval.
91
+ If 'periodic', periodic extrapolation is used.
92
+ Default is True.
93
+ axis : int, optional
94
+ Interpolation axis. Default is zero.
95
+
96
+ Attributes
97
+ ----------
98
+ t : ndarray
99
+ knot vector
100
+ c : ndarray
101
+ spline coefficients
102
+ k : int
103
+ spline degree
104
+ extrapolate : bool
105
+ If True, extrapolates the first and last polynomial pieces of b-spline
106
+ functions active on the base interval.
107
+ axis : int
108
+ Interpolation axis.
109
+ tck : tuple
110
+ A read-only equivalent of ``(self.t, self.c, self.k)``
111
+
112
+ Methods
113
+ -------
114
+ __call__
115
+ basis_element
116
+ derivative
117
+ antiderivative
118
+ integrate
119
+ insert_knot
120
+ construct_fast
121
+ design_matrix
122
+ from_power_basis
123
+
124
+ Notes
125
+ -----
126
+ B-spline basis elements are defined via
127
+
128
+ .. math::
129
+
130
+ B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
131
+
132
+ B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
133
+ + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
134
+
135
+ **Implementation details**
136
+
137
+ - At least ``k+1`` coefficients are required for a spline of degree `k`,
138
+ so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
139
+ ``j > n``, are ignored.
140
+
141
+ - B-spline basis elements of degree `k` form a partition of unity on the
142
+ *base interval*, ``t[k] <= x <= t[n]``.
143
+
144
+
145
+ Examples
146
+ --------
147
+
148
+ Translating the recursive definition of B-splines into Python code, we have:
149
+
150
+ >>> def B(x, k, i, t):
151
+ ... if k == 0:
152
+ ... return 1.0 if t[i] <= x < t[i+1] else 0.0
153
+ ... if t[i+k] == t[i]:
154
+ ... c1 = 0.0
155
+ ... else:
156
+ ... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
157
+ ... if t[i+k+1] == t[i+1]:
158
+ ... c2 = 0.0
159
+ ... else:
160
+ ... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
161
+ ... return c1 + c2
162
+
163
+ >>> def bspline(x, t, c, k):
164
+ ... n = len(t) - k - 1
165
+ ... assert (n >= k+1) and (len(c) >= n)
166
+ ... return sum(c[i] * B(x, k, i, t) for i in range(n))
167
+
168
+ Note that this is an inefficient (if straightforward) way to
169
+ evaluate B-splines --- this spline class does it in an equivalent,
170
+ but much more efficient way.
171
+
172
+ Here we construct a quadratic spline function on the base interval
173
+ ``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
174
+
175
+ >>> from scipy.interpolate import BSpline
176
+ >>> k = 2
177
+ >>> t = [0, 1, 2, 3, 4, 5, 6]
178
+ >>> c = [-1, 2, 0, -1]
179
+ >>> spl = BSpline(t, c, k)
180
+ >>> spl(2.5)
181
+ array(1.375)
182
+ >>> bspline(2.5, t, c, k)
183
+ 1.375
184
+
185
+ Note that outside of the base interval results differ. This is because
186
+ `BSpline` extrapolates the first and last polynomial pieces of B-spline
187
+ functions active on the base interval.
188
+
189
+ >>> import matplotlib.pyplot as plt
190
+ >>> import numpy as np
191
+ >>> fig, ax = plt.subplots()
192
+ >>> xx = np.linspace(1.5, 4.5, 50)
193
+ >>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
194
+ >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
195
+ >>> ax.grid(True)
196
+ >>> ax.legend(loc='best')
197
+ >>> plt.show()
198
+
199
+
200
+ References
201
+ ----------
202
+ .. [1] Tom Lyche and Knut Morken, Spline methods,
203
+ http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
204
+ .. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
205
+
206
+ """
207
+
208
+ def __init__(self, t, c, k, extrapolate=True, axis=0):
209
+ super().__init__()
210
+
211
+ self.k = operator.index(k)
212
+ self.c = np.asarray(c)
213
+ self.t = np.ascontiguousarray(t, dtype=np.float64)
214
+
215
+ if extrapolate == 'periodic':
216
+ self.extrapolate = extrapolate
217
+ else:
218
+ self.extrapolate = bool(extrapolate)
219
+
220
+ n = self.t.shape[0] - self.k - 1
221
+
222
+ axis = normalize_axis_index(axis, self.c.ndim)
223
+
224
+ # Note that the normalized axis is stored in the object.
225
+ self.axis = axis
226
+ if axis != 0:
227
+ # roll the interpolation axis to be the first one in self.c
228
+ # More specifically, the target shape for self.c is (n, ...),
229
+ # and axis !=0 means that we have c.shape (..., n, ...)
230
+ # ^
231
+ # axis
232
+ self.c = np.moveaxis(self.c, axis, 0)
233
+
234
+ if k < 0:
235
+ raise ValueError("Spline order cannot be negative.")
236
+ if self.t.ndim != 1:
237
+ raise ValueError("Knot vector must be one-dimensional.")
238
+ if n < self.k + 1:
239
+ raise ValueError("Need at least %d knots for degree %d" %
240
+ (2*k + 2, k))
241
+ if (np.diff(self.t) < 0).any():
242
+ raise ValueError("Knots must be in a non-decreasing order.")
243
+ if len(np.unique(self.t[k:n+1])) < 2:
244
+ raise ValueError("Need at least two internal knots.")
245
+ if not np.isfinite(self.t).all():
246
+ raise ValueError("Knots should not have nans or infs.")
247
+ if self.c.ndim < 1:
248
+ raise ValueError("Coefficients must be at least 1-dimensional.")
249
+ if self.c.shape[0] < n:
250
+ raise ValueError("Knots, coefficients and degree are inconsistent.")
251
+
252
+ dt = _get_dtype(self.c.dtype)
253
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
254
+
255
+ @classmethod
256
+ def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
257
+ """Construct a spline without making checks.
258
+
259
+ Accepts same parameters as the regular constructor. Input arrays
260
+ `t` and `c` must of correct shape and dtype.
261
+ """
262
+ self = object.__new__(cls)
263
+ self.t, self.c, self.k = t, c, k
264
+ self.extrapolate = extrapolate
265
+ self.axis = axis
266
+ return self
267
+
268
+ @property
269
+ def tck(self):
270
+ """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
271
+ """
272
+ return self.t, self.c, self.k
273
+
274
+ @classmethod
275
+ def basis_element(cls, t, extrapolate=True):
276
+ """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
277
+
278
+ Parameters
279
+ ----------
280
+ t : ndarray, shape (k+2,)
281
+ internal knots
282
+ extrapolate : bool or 'periodic', optional
283
+ whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
284
+ or to return nans.
285
+ If 'periodic', periodic extrapolation is used.
286
+ Default is True.
287
+
288
+ Returns
289
+ -------
290
+ basis_element : callable
291
+ A callable representing a B-spline basis element for the knot
292
+ vector `t`.
293
+
294
+ Notes
295
+ -----
296
+ The degree of the B-spline, `k`, is inferred from the length of `t` as
297
+ ``len(t)-2``. The knot vector is constructed by appending and prepending
298
+ ``k+1`` elements to internal knots `t`.
299
+
300
+ Examples
301
+ --------
302
+
303
+ Construct a cubic B-spline:
304
+
305
+ >>> import numpy as np
306
+ >>> from scipy.interpolate import BSpline
307
+ >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
308
+ >>> k = b.k
309
+ >>> b.t[k:-k]
310
+ array([ 0., 1., 2., 3., 4.])
311
+ >>> k
312
+ 3
313
+
314
+ Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare
315
+ to its explicit form:
316
+
317
+ >>> t = [0, 1, 1, 2]
318
+ >>> b = BSpline.basis_element(t)
319
+ >>> def f(x):
320
+ ... return np.where(x < 1, x*x, (2. - x)**2)
321
+
322
+ >>> import matplotlib.pyplot as plt
323
+ >>> fig, ax = plt.subplots()
324
+ >>> x = np.linspace(0, 2, 51)
325
+ >>> ax.plot(x, b(x), 'g', lw=3)
326
+ >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
327
+ >>> ax.grid(True)
328
+ >>> plt.show()
329
+
330
+ """
331
+ k = len(t) - 2
332
+ t = _as_float_array(t)
333
+ t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
334
+ c = np.zeros_like(t)
335
+ c[k] = 1.
336
+ return cls.construct_fast(t, c, k, extrapolate)
337
+
338
+ @classmethod
339
+ def design_matrix(cls, x, t, k, extrapolate=False):
340
+ """
341
+ Returns a design matrix as a CSR format sparse array.
342
+
343
+ Parameters
344
+ ----------
345
+ x : array_like, shape (n,)
346
+ Points to evaluate the spline at.
347
+ t : array_like, shape (nt,)
348
+ Sorted 1D array of knots.
349
+ k : int
350
+ B-spline degree.
351
+ extrapolate : bool or 'periodic', optional
352
+ Whether to extrapolate based on the first and last intervals
353
+ or raise an error. If 'periodic', periodic extrapolation is used.
354
+ Default is False.
355
+
356
+ .. versionadded:: 1.10.0
357
+
358
+ Returns
359
+ -------
360
+ design_matrix : `csr_array` object
361
+ Sparse matrix in CSR format where each row contains all the basis
362
+ elements of the input row (first row = basis elements of x[0],
363
+ ..., last row = basis elements x[-1]).
364
+
365
+ Examples
366
+ --------
367
+ Construct a design matrix for a B-spline
368
+
369
+ >>> from scipy.interpolate import make_interp_spline, BSpline
370
+ >>> import numpy as np
371
+ >>> x = np.linspace(0, np.pi * 2, 4)
372
+ >>> y = np.sin(x)
373
+ >>> k = 3
374
+ >>> bspl = make_interp_spline(x, y, k=k)
375
+ >>> design_matrix = bspl.design_matrix(x, bspl.t, k)
376
+ >>> design_matrix.toarray()
377
+ [[1. , 0. , 0. , 0. ],
378
+ [0.2962963 , 0.44444444, 0.22222222, 0.03703704],
379
+ [0.03703704, 0.22222222, 0.44444444, 0.2962963 ],
380
+ [0. , 0. , 0. , 1. ]]
381
+
382
+ Construct a design matrix for some vector of knots
383
+
384
+ >>> k = 2
385
+ >>> t = [-1, 0, 1, 2, 3, 4, 5, 6]
386
+ >>> x = [1, 2, 3, 4]
387
+ >>> design_matrix = BSpline.design_matrix(x, t, k).toarray()
388
+ >>> design_matrix
389
+ [[0.5, 0.5, 0. , 0. , 0. ],
390
+ [0. , 0.5, 0.5, 0. , 0. ],
391
+ [0. , 0. , 0.5, 0.5, 0. ],
392
+ [0. , 0. , 0. , 0.5, 0.5]]
393
+
394
+ This result is equivalent to the one created in the sparse format
395
+
396
+ >>> c = np.eye(len(t) - k - 1)
397
+ >>> design_matrix_gh = BSpline(t, c, k)(x)
398
+ >>> np.allclose(design_matrix, design_matrix_gh, atol=1e-14)
399
+ True
400
+
401
+ Notes
402
+ -----
403
+ .. versionadded:: 1.8.0
404
+
405
+ In each row of the design matrix all the basis elements are evaluated
406
+ at the certain point (first row - x[0], ..., last row - x[-1]).
407
+
408
+ `nt` is a length of the vector of knots: as far as there are
409
+ `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2`
410
+ to have at least `k + 1` basis element.
411
+
412
+ Out of bounds `x` raises a ValueError.
413
+ """
414
+ x = _as_float_array(x, True)
415
+ t = _as_float_array(t, True)
416
+
417
+ if extrapolate != 'periodic':
418
+ extrapolate = bool(extrapolate)
419
+
420
+ if k < 0:
421
+ raise ValueError("Spline order cannot be negative.")
422
+ if t.ndim != 1 or np.any(t[1:] < t[:-1]):
423
+ raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
424
+ f"got t={t}.")
425
+ # There are `nt - k - 1` basis elements in a BSpline built on the
426
+ # vector of knots with length `nt`, so to have at least `k + 1` basis
427
+ # elements we need to have at least `2 * k + 2` elements in the vector
428
+ # of knots.
429
+ if len(t) < 2 * k + 2:
430
+ raise ValueError(f"Length t is not enough for k={k}.")
431
+
432
+ if extrapolate == 'periodic':
433
+ # With periodic extrapolation we map x to the segment
434
+ # [t[k], t[n]].
435
+ n = t.size - k - 1
436
+ x = t[k] + (x - t[k]) % (t[n] - t[k])
437
+ extrapolate = False
438
+ elif not extrapolate and (
439
+ (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
440
+ ):
441
+ # Checks from `find_interval` function
442
+ raise ValueError(f'Out of bounds w/ x = {x}.')
443
+
444
+ # Compute number of non-zeros of final CSR array in order to determine
445
+ # the dtype of indices and indptr of the CSR array.
446
+ n = x.shape[0]
447
+ nnz = n * (k + 1)
448
+ if nnz < np.iinfo(np.int32).max:
449
+ int_dtype = np.int32
450
+ else:
451
+ int_dtype = np.int64
452
+ # Preallocate indptr and indices
453
+ indices = np.empty(n * (k + 1), dtype=int_dtype)
454
+ indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
455
+
456
+ # indptr is not passed to Cython as it is already fully computed
457
+ data, indices = _bspl._make_design_matrix(
458
+ x, t, k, extrapolate, indices
459
+ )
460
+ return csr_array(
461
+ (data, indices, indptr),
462
+ shape=(x.shape[0], t.shape[0] - k - 1)
463
+ )
464
+
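+ # A minimal doctest-style sketch (editorial illustration; it assumes only the
+ # public `scipy.interpolate.BSpline` API and made-up knots/coefficients): the
+ # CSR matrix returned by `design_matrix` reproduces spline evaluation as a
+ # matrix-vector product, ``design_matrix(x, t, k) @ c == BSpline(t, c, k)(x)``.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import BSpline
+ # >>> t = np.array([0., 0., 0., 0., 1., 2., 3., 3., 3., 3.])
+ # >>> k = 3
+ # >>> c = np.arange(len(t) - k - 1, dtype=float)
+ # >>> x = np.linspace(0, 3, 7)
+ # >>> dm = BSpline.design_matrix(x, t, k)
+ # >>> np.allclose(dm @ c, BSpline(t, c, k)(x))
+ # True
+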
465
+ def __call__(self, x, nu=0, extrapolate=None):
466
+ """
467
+ Evaluate a spline function.
468
+
469
+ Parameters
470
+ ----------
471
+ x : array_like
472
+ points to evaluate the spline at.
473
+ nu : int, optional
474
+ derivative to evaluate (default is 0).
475
+ extrapolate : bool or 'periodic', optional
476
+ whether to extrapolate based on the first and last intervals
477
+ or return nans. If 'periodic', periodic extrapolation is used.
478
+ Default is `self.extrapolate`.
479
+
480
+ Returns
481
+ -------
482
+ y : array_like
483
+ Shape is determined by replacing the interpolation axis
484
+ in the coefficient array with the shape of `x`.
485
+
486
+ """
487
+ if extrapolate is None:
488
+ extrapolate = self.extrapolate
489
+ x = np.asarray(x)
490
+ x_shape, x_ndim = x.shape, x.ndim
491
+ x = np.ascontiguousarray(x.ravel(), dtype=np.float64)
492
+
493
+ # With periodic extrapolation we map x to the segment
494
+ # [self.t[k], self.t[n]].
495
+ if extrapolate == 'periodic':
496
+ n = self.t.size - self.k - 1
497
+ x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
498
+ self.t[self.k])
499
+ extrapolate = False
500
+
501
+ out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
502
+ self._ensure_c_contiguous()
503
+ self._evaluate(x, nu, extrapolate, out)
504
+ out = out.reshape(x_shape + self.c.shape[1:])
505
+ if self.axis != 0:
506
+ # transpose to move the calculated values to the interpolation axis
507
+ l = list(range(out.ndim))
508
+ l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
509
+ out = out.transpose(l)
510
+ return out
511
+
512
+ def _evaluate(self, xp, nu, extrapolate, out):
513
+ _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
514
+ self.k, xp, nu, extrapolate, out)
515
+
516
+ def _ensure_c_contiguous(self):
517
+ """
518
+ c and t may be modified by the user. The Cython code expects
519
+ that they are C contiguous.
520
+
521
+ """
522
+ if not self.t.flags.c_contiguous:
523
+ self.t = self.t.copy()
524
+ if not self.c.flags.c_contiguous:
525
+ self.c = self.c.copy()
526
+
527
+ def derivative(self, nu=1):
528
+ """Return a B-spline representing the derivative.
529
+
530
+ Parameters
531
+ ----------
532
+ nu : int, optional
533
+ Derivative order.
534
+ Default is 1.
535
+
536
+ Returns
537
+ -------
538
+ b : BSpline object
539
+ A new instance representing the derivative.
540
+
541
+ See Also
542
+ --------
543
+ splder, splantider
544
+
545
+ """
546
+ c = self.c
547
+ # pad the c array if needed
548
+ ct = len(self.t) - len(c)
549
+ if ct > 0:
550
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
551
+ tck = _fitpack_impl.splder((self.t, c, self.k), nu)
552
+ return self.construct_fast(*tck, extrapolate=self.extrapolate,
553
+ axis=self.axis)
554
+
555
+ def antiderivative(self, nu=1):
556
+ """Return a B-spline representing the antiderivative.
557
+
558
+ Parameters
559
+ ----------
560
+ nu : int, optional
561
+ Antiderivative order. Default is 1.
562
+
563
+ Returns
564
+ -------
565
+ b : BSpline object
566
+ A new instance representing the antiderivative.
567
+
568
+ Notes
569
+ -----
570
+ If antiderivative is computed and ``self.extrapolate='periodic'``,
571
+ it will be set to False for the returned instance. This is done because
572
+ the antiderivative is no longer periodic and its correct evaluation
573
+ outside of the initially given x interval is difficult.
574
+
575
+ See Also
576
+ --------
577
+ splder, splantider
578
+
579
+ """
580
+ c = self.c
581
+ # pad the c array if needed
582
+ ct = len(self.t) - len(c)
583
+ if ct > 0:
584
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
585
+ tck = _fitpack_impl.splantider((self.t, c, self.k), nu)
586
+
587
+ if self.extrapolate == 'periodic':
588
+ extrapolate = False
589
+ else:
590
+ extrapolate = self.extrapolate
591
+
592
+ return self.construct_fast(*tck, extrapolate=extrapolate,
593
+ axis=self.axis)
594
+
595
+ def integrate(self, a, b, extrapolate=None):
596
+ """Compute a definite integral of the spline.
597
+
598
+ Parameters
599
+ ----------
600
+ a : float
601
+ Lower limit of integration.
602
+ b : float
603
+ Upper limit of integration.
604
+ extrapolate : bool or 'periodic', optional
605
+ whether to extrapolate beyond the base interval,
606
+ ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
607
+ base interval. If 'periodic', periodic extrapolation is used.
608
+ If None (default), use `self.extrapolate`.
609
+
610
+ Returns
611
+ -------
612
+ I : array_like
613
+ Definite integral of the spline over the interval ``[a, b]``.
614
+
615
+ Examples
616
+ --------
617
+ Construct the linear spline ``x if x < 1 else 2 - x`` on the base
618
+ interval :math:`[0, 2]`, and integrate it
619
+
620
+ >>> from scipy.interpolate import BSpline
621
+ >>> b = BSpline.basis_element([0, 1, 2])
622
+ >>> b.integrate(0, 1)
623
+ array(0.5)
624
+
625
+ If the integration limits are outside of the base interval, the result
626
+ is controlled by the `extrapolate` parameter
627
+
628
+ >>> b.integrate(-1, 1)
629
+ array(0.0)
630
+ >>> b.integrate(-1, 1, extrapolate=False)
631
+ array(0.5)
632
+
633
+ >>> import matplotlib.pyplot as plt
634
+ >>> fig, ax = plt.subplots()
635
+ >>> ax.grid(True)
636
+ >>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval
637
+ >>> ax.axvline(2, c='r', lw=5, alpha=0.5)
638
+ >>> xx = [-1, 1, 2]
639
+ >>> ax.plot(xx, b(xx))
640
+ >>> plt.show()
641
+
642
+ """
643
+ if extrapolate is None:
644
+ extrapolate = self.extrapolate
645
+
646
+ # Prepare self.t and self.c.
647
+ self._ensure_c_contiguous()
648
+
649
+ # Swap integration bounds if needed.
650
+ sign = 1
651
+ if b < a:
652
+ a, b = b, a
653
+ sign = -1
654
+ n = self.t.size - self.k - 1
655
+
656
+ if extrapolate != "periodic" and not extrapolate:
657
+ # Shrink the integration interval, if needed.
658
+ a = max(a, self.t[self.k])
659
+ b = min(b, self.t[n])
660
+
661
+ if self.c.ndim == 1:
662
+ # Fast path: use FITPACK's routine
663
+ # (cf _fitpack_impl.splint).
664
+ integral = _fitpack_impl.splint(a, b, self.tck)
665
+ return integral * sign
666
+
667
+ out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)
668
+
669
+ # Compute the antiderivative.
670
+ c = self.c
671
+ ct = len(self.t) - len(c)
672
+ if ct > 0:
673
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
674
+ ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)
675
+
676
+ if extrapolate == 'periodic':
677
+ # Split the integral into the part over period (can be several
678
+ # of them) and the remaining part.
679
+
680
+ ts, te = self.t[self.k], self.t[n]
681
+ period = te - ts
682
+ interval = b - a
683
+ n_periods, left = divmod(interval, period)
684
+
685
+ if n_periods > 0:
686
+ # Evaluate the difference of antiderivatives.
687
+ x = np.asarray([ts, te], dtype=np.float64)
688
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
689
+ ka, x, 0, False, out)
690
+ integral = out[1] - out[0]
691
+ integral *= n_periods
692
+ else:
693
+ integral = np.zeros((1, prod(self.c.shape[1:])),
694
+ dtype=self.c.dtype)
695
+
696
+ # Map a to [ts, te], b is always a + left.
697
+ a = ts + (a - ts) % period
698
+ b = a + left
699
+
700
+ # If b <= te then we need to integrate over [a, b], otherwise
701
+ # over [a, te] and from xs to what is remained.
702
+ if b <= te:
703
+ x = np.asarray([a, b], dtype=np.float64)
704
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
705
+ ka, x, 0, False, out)
706
+ integral += out[1] - out[0]
707
+ else:
708
+ x = np.asarray([a, te], dtype=np.float64)
709
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
710
+ ka, x, 0, False, out)
711
+ integral += out[1] - out[0]
712
+
713
+ x = np.asarray([ts, ts + b - te], dtype=np.float64)
714
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
715
+ ka, x, 0, False, out)
716
+ integral += out[1] - out[0]
717
+ else:
718
+ # Evaluate the difference of antiderivatives.
719
+ x = np.asarray([a, b], dtype=np.float64)
720
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
721
+ ka, x, 0, extrapolate, out)
722
+ integral = out[1] - out[0]
723
+
724
+ integral *= sign
725
+ return integral.reshape(ca.shape[1:])
726
+
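+ # A small doctest-style sketch (editorial illustration, public API only):
+ # `integrate` is consistent with `antiderivative`, i.e. the definite integral
+ # equals the difference of the antiderivative at the limits, and
+ # differentiating the antiderivative gives back the original spline.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import BSpline
+ # >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
+ # >>> B = b.antiderivative()
+ # >>> np.allclose(b.integrate(1, 3), B(3) - B(1))
+ # True
+ # >>> np.allclose(B.derivative()(2.5), b(2.5))
+ # True
+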
727
+ @classmethod
728
+ def from_power_basis(cls, pp, bc_type='not-a-knot'):
729
+ r"""
730
+ Construct a polynomial in the B-spline basis
731
+ from a piecewise polynomial in the power basis.
732
+
733
+ For now, accepts ``CubicSpline`` instances only.
734
+
735
+ Parameters
736
+ ----------
737
+ pp : CubicSpline
738
+ A piecewise polynomial in the power basis, as created
739
+ by ``CubicSpline``
740
+ bc_type : string, optional
741
+ Boundary condition type as in ``CubicSpline``: one of the
742
+ ``not-a-knot``, ``natural``, ``clamped``, or ``periodic``.
743
+ Necessary for constructing an instance of the ``BSpline`` class.
744
+ Default is ``not-a-knot``.
745
+
746
+ Returns
747
+ -------
748
+ b : BSpline object
749
+ A new instance representing the initial polynomial
750
+ in the B-spline basis.
751
+
752
+ Notes
753
+ -----
754
+ .. versionadded:: 1.8.0
755
+
756
+ Accepts only ``CubicSpline`` instances for now.
757
+
758
+ The algorithm follows from differentiating
+ Marsden's identity [1]: each coefficient of the spline
+ interpolating function in the B-spline basis is computed as follows:
761
+
762
+ .. math::
763
+
764
+ c_j = \sum_{m=0}^{k} \frac{(k-m)!}{k!}
765
+ c_{m,i} (-1)^{k-m} D^m p_{j,k}(x_i)
766
+
767
+ :math:`c_{m, i}` - a coefficient of CubicSpline,
768
+ :math:`D^m p_{j, k}(x_i)` - the m-th derivative of a dual polynomial
+ evaluated at :math:`x_i`.
770
+
771
+ ``k`` always equals 3 for now.
772
+
773
+ The first ``n - 2`` coefficients are computed at :math:`x_i = x_j`, e.g.
774
+
775
+ .. math::
776
+
777
+ c_1 = \sum_{m=0}^{k} \frac{(k-1)!}{k!} c_{m,1} D^m p_{j,3}(x_1)
778
+
779
+ The last ``nod + 2`` coefficients are computed at ``x[-2]``, where
+ ``nod`` is the number of derivatives at the ends.
781
+
782
+ For example, consider :math:`x = [0, 1, 2, 3, 4]`,
783
+ :math:`y = [1, 1, 1, 1, 1]` and bc_type = ``natural``
784
+
785
+ The coefficients of CubicSpline in the power basis:
786
+
787
+ :math:`[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
788
+ [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]`
789
+
790
+ The knot vector: :math:`t = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]`
791
+
792
+ In this case
793
+
794
+ .. math::
795
+
796
+ c_j = \frac{0!}{k!} c_{3, i} k! = c_{3, i} = 1,~j = 0, ..., 6
797
+
798
+ References
799
+ ----------
800
+ .. [1] Tom Lyche and Knut Morken, Spline Methods, 2005, Section 3.1.2
801
+
802
+ """
803
+ from ._cubic import CubicSpline
804
+ if not isinstance(pp, CubicSpline):
805
+ raise NotImplementedError("Only CubicSpline objects are accepted"
806
+ "for now. Got %s instead." % type(pp))
807
+ x = pp.x
808
+ coef = pp.c
809
+ k = pp.c.shape[0] - 1
810
+ n = x.shape[0]
811
+
812
+ if bc_type == 'not-a-knot':
813
+ t = _not_a_knot(x, k)
814
+ elif bc_type == 'natural' or bc_type == 'clamped':
815
+ t = _augknt(x, k)
816
+ elif bc_type == 'periodic':
817
+ t = _periodic_knots(x, k)
818
+ else:
819
+ raise TypeError('Unknown boundary condition: %s' % bc_type)
820
+
821
+ nod = t.shape[0] - (n + k + 1) # number of derivatives at the ends
822
+ c = np.zeros(n + nod, dtype=pp.c.dtype)
823
+ for m in range(k + 1):
824
+ for i in range(n - 2):
825
+ c[i] += poch(k + 1, -m) * coef[m, i]\
826
+ * np.power(-1, k - m)\
827
+ * _diff_dual_poly(i, k, x[i], m, t)
828
+ for j in range(n - 2, n + nod):
829
+ c[j] += poch(k + 1, -m) * coef[m, n - 2]\
830
+ * np.power(-1, k - m)\
831
+ * _diff_dual_poly(j, k, x[n - 2], m, t)
832
+ return cls.construct_fast(t, c, k, pp.extrapolate, pp.axis)
833
+
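+ # A doctest-style sketch (editorial illustration; made-up data, public API
+ # only): converting a `CubicSpline` to the B-spline basis preserves its values.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import CubicSpline, BSpline
+ # >>> x = np.linspace(0, 2, 6)
+ # >>> y = x**3 - 2 * x
+ # >>> cs = CubicSpline(x, y, bc_type='natural')
+ # >>> bspl = BSpline.from_power_basis(cs, bc_type='natural')
+ # >>> xx = np.linspace(0, 2, 25)
+ # >>> np.allclose(cs(xx), bspl(xx))
+ # True
+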
834
+ def insert_knot(self, x, m=1):
835
+ """Insert a new knot at `x` of multiplicity `m`.
836
+
837
+ Given the knots and coefficients of a B-spline representation, create a
838
+ new B-spline with a knot inserted `m` times at point `x`.
839
+
840
+ Parameters
841
+ ----------
842
+ x : float
843
+ The position of the new knot
844
+ m : int, optional
845
+ The number of times to insert the given knot (its multiplicity).
846
+ Default is 1.
847
+
848
+ Returns
849
+ -------
850
+ spl : BSpline object
851
+ A new BSpline object with the new knot inserted.
852
+
853
+ Notes
854
+ -----
855
+ Based on algorithms from [1]_ and [2]_.
856
+
857
+ In case of a periodic spline (``self.extrapolate == "periodic"``)
858
+ there must be either at least k interior knots t(j) satisfying
859
+ ``t(k+1)<t(j)<=x`` or at least k interior knots t(j) satisfying
860
+ ``x<=t(j)<t(n-k)``.
861
+
862
+ This routine is functionally equivalent to `scipy.interpolate.insert`.
863
+
864
+ .. versionadded:: 1.13
865
+
866
+ References
867
+ ----------
868
+ .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
869
+ Computer Aided Design, 12, p.199-201, 1980.
870
+ :doi:`10.1016/0010-4485(80)90154-2`.
871
+ .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
872
+ Numerical Analysis", Oxford University Press, 1993.
873
+
874
+ See Also
875
+ --------
876
+ scipy.interpolate.insert
877
+
878
+ Examples
879
+ --------
880
+ You can insert knots into a B-spline:
881
+
882
+ >>> import numpy as np
883
+ >>> from scipy.interpolate import BSpline, make_interp_spline
884
+ >>> x = np.linspace(0, 10, 5)
885
+ >>> y = np.sin(x)
886
+ >>> spl = make_interp_spline(x, y, k=3)
887
+ >>> spl.t
888
+ array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
889
+
890
+ Insert a single knot
891
+
892
+ >>> spl_1 = spl.insert_knot(3)
893
+ >>> spl_1.t
894
+ array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
895
+
896
+ Insert a multiple knot
897
+
898
+ >>> spl_2 = spl.insert_knot(8, m=3)
899
+ >>> spl_2.t
900
+ array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
901
+
902
+ """
903
+ if x < self.t[self.k] or x > self.t[-self.k-1]:
904
+ raise ValueError(f"Cannot insert a knot at {x}.")
905
+ if m <= 0:
906
+ raise ValueError(f"`m` must be positive, got {m = }.")
907
+
908
+ extradim = self.c.shape[1:]
909
+ num_extra = prod(extradim)
910
+
911
+ tt = self.t.copy()
912
+ cc = self.c.copy()
913
+ cc = cc.reshape(-1, num_extra)
914
+
915
+ for _ in range(m):
916
+ tt, cc = _bspl.insert(x, tt, cc, self.k, self.extrapolate == "periodic")
917
+
918
+ return self.construct_fast(
919
+ tt, cc.reshape((-1,) + extradim), self.k, self.extrapolate, self.axis
920
+ )
921
+
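+ # A doctest-style sketch (editorial illustration, public API only): knot
+ # insertion changes only the representation, not the spline itself.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import make_interp_spline
+ # >>> x = np.linspace(0, 10, 5)
+ # >>> spl = make_interp_spline(x, np.sin(x), k=3)
+ # >>> spl_1 = spl.insert_knot(3, m=2)
+ # >>> xx = np.linspace(0, 10, 101)
+ # >>> np.allclose(spl(xx), spl_1(xx))
+ # True
+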
922
+
923
+ #################################
924
+ # Interpolating spline helpers #
925
+ #################################
926
+
927
+ def _not_a_knot(x, k):
928
+ """Given data x, construct the knot vector w/ not-a-knot BC.
929
+ cf de Boor, XIII(12)."""
930
+ x = np.asarray(x)
931
+ if k % 2 != 1:
932
+ raise ValueError("Odd degree for now only. Got %s." % k)
933
+
934
+ m = (k - 1) // 2
935
+ t = x[m+1:-m-1]
936
+ t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
937
+ return t
938
+
939
+
940
+ def _augknt(x, k):
941
+ """Construct a knot vector appropriate for the order-k interpolation."""
942
+ return np.r_[(x[0],)*k, x, (x[-1],)*k]
943
+
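+ # A doctest-style sketch (editorial illustration of the two private helpers
+ # above; they are internal, so the exact knot layout is an implementation
+ # detail rather than public API):
+ #
+ # >>> import numpy as np
+ # >>> x = np.arange(5.)
+ # >>> np.allclose(_not_a_knot(x, 3), [0, 0, 0, 0, 2, 4, 4, 4, 4])
+ # True
+ # >>> np.allclose(_augknt(x, 3), [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4])
+ # True
+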
944
+
945
+ def _convert_string_aliases(deriv, target_shape):
946
+ if isinstance(deriv, str):
947
+ if deriv == "clamped":
948
+ deriv = [(1, np.zeros(target_shape))]
949
+ elif deriv == "natural":
950
+ deriv = [(2, np.zeros(target_shape))]
951
+ else:
952
+ raise ValueError("Unknown boundary condition : %s" % deriv)
953
+ return deriv
954
+
955
+
956
+ def _process_deriv_spec(deriv):
957
+ if deriv is not None:
958
+ try:
959
+ ords, vals = zip(*deriv)
960
+ except TypeError as e:
961
+ msg = ("Derivatives, `bc_type`, should be specified as a pair of "
962
+ "iterables of pairs of (order, value).")
963
+ raise ValueError(msg) from e
964
+ else:
965
+ ords, vals = [], []
966
+ return np.atleast_1d(ords, vals)
967
+
968
+
969
+ def _woodbury_algorithm(A, ur, ll, b, k):
970
+ '''
971
+ Solve a cyclic banded linear system with upper right
972
+ and lower left blocks of size ``(k-1) / 2`` using
973
+ the Woodbury formula
974
+
975
+ Parameters
976
+ ----------
977
+ A : 2-D array, shape(k, n)
978
+ Matrix of diagonals of original matrix (see
979
+ ``solve_banded`` documentation).
980
+ ur : 2-D array, shape(bs, bs)
981
+ Upper right block matrix.
982
+ ll : 2-D array, shape(bs, bs)
983
+ Lower left block matrix.
984
+ b : 1-D array, shape(n,)
985
+ Vector of constant terms of the system of linear equations.
986
+ k : int
987
+ B-spline degree.
988
+
989
+ Returns
990
+ -------
991
+ c : 1-D array, shape(n,)
992
+ Solution of the original system of linear equations.
993
+
994
+ Notes
995
+ -----
996
+ This algorithm works only for systems with banded matrix A plus
997
+ a correction term U @ V.T, where the matrix U @ V.T gives the upper right
+ and lower left blocks of A.
+ The system is solved with the following steps:
1000
+ 1. New systems of linear equations are constructed:
1001
+ A @ z_i = u_i,
1002
+ u_i - column vector of U,
1003
+ i = 1, ..., k - 1
1004
+ 2. Matrix Z is formed from vectors z_i:
1005
+ Z = [ z_1 | z_2 | ... | z_{k - 1} ]
1006
+ 3. Matrix H = (1 + V.T @ Z)^{-1}
1007
+ 4. The system A' @ y = b is solved
1008
+ 5. x = y - Z @ (H @ V.T @ y)
1009
+ Also, ``n`` should be greater than ``k``, otherwise corner block
1010
+ elements will intersect with diagonals.
1011
+
1012
+ Examples
1013
+ --------
1014
+ Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).
1015
+ The matrix of a system:      U:           V:
+   x x x * * a b        a b 0 0      0 0 1 0
+   x x x x * * c        0 c 0 0      0 0 0 1
+   x x x x x * *        0 0 0 0      0 0 0 0
+   * x x x x x *        0 0 0 0      0 0 0 0
+   * * x x x x x        0 0 0 0      0 0 0 0
+   d * * x x x x        0 0 d 0      1 0 0 0
+   e f * * x x x        0 0 e f      0 1 0 0
1023
+
1024
+ References
1025
+ ----------
1026
+ .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling
1027
+ and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3
1028
+
1029
+ '''
1030
+ k_mod = k - k % 2
1031
+ bs = int((k - 1) / 2) + (k + 1) % 2
1032
+
1033
+ n = A.shape[1] + 1
1034
+ U = np.zeros((n - 1, k_mod))
1035
+ VT = np.zeros((k_mod, n - 1)) # V transpose
1036
+
1037
+ # upper right block
1038
+ U[:bs, :bs] = ur
1039
+ VT[np.arange(bs), np.arange(bs) - bs] = 1
1040
+
1041
+ # lower left block
1042
+ U[-bs:, -bs:] = ll
1043
+ VT[np.arange(bs) - bs, np.arange(bs)] = 1
1044
+
1045
+ Z = solve_banded((bs, bs), A, U)
1046
+
1047
+ H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))
1048
+
1049
+ y = solve_banded((bs, bs), A, b)
1050
+ c = y - Z @ (H @ (VT @ y))
1051
+
1052
+ return c
1053
+
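+ # A doctest-style sketch (editorial illustration; `_woodbury_algorithm` is the
+ # private helper above, importable as scipy.interpolate._bsplines, and the
+ # numbers below are made up): for a small cyclic tridiagonal system (k = 3,
+ # so the corner blocks are 1 x 1) the result agrees with a dense solve.
+ #
+ # >>> import numpy as np
+ # >>> m, k = 6, 3
+ # >>> main, off = np.full(m, 4.0), np.full(m - 1, 1.0)
+ # >>> A = np.zeros((3, m))                 # banded storage for solve_banded
+ # >>> A[0, 1:], A[1], A[2, :-1] = off, main, off
+ # >>> ur, ll = np.array([[0.5]]), np.array([[0.7]])
+ # >>> b = np.arange(1.0, m + 1)
+ # >>> M = np.diag(main) + np.diag(off, 1) + np.diag(off, -1)
+ # >>> M[0, -1], M[-1, 0] = 0.5, 0.7        # the cyclic corner entries
+ # >>> np.allclose(_woodbury_algorithm(A, ur, ll, b, k), np.linalg.solve(M, b))
+ # True
+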
1054
+
1055
+ def _periodic_knots(x, k):
1056
+ '''
1057
+ Return the vector of knots taken on a circle.
1058
+ '''
1059
+ xc = np.copy(x)
1060
+ n = len(xc)
1061
+ if k % 2 == 0:
1062
+ dx = np.diff(xc)
1063
+ xc[1: -1] -= dx[:-1] / 2
1064
+ dx = np.diff(xc)
1065
+ t = np.zeros(n + 2 * k)
1066
+ t[k: -k] = xc
1067
+ for i in range(0, k):
1068
+ # filling first `k` elements in descending order
1069
+ t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]
1070
+ # filling last `k` elements in ascending order
1071
+ t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]
1072
+ return t
1073
+
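+ # A doctest-style sketch (editorial illustration of the private helper above):
+ # the knots are obtained by wrapping the spacings of ``x`` around the circle.
+ #
+ # >>> import numpy as np
+ # >>> t = _periodic_knots(np.array([0., 1., 2., 3.]), 3)
+ # >>> np.allclose(t, [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6])
+ # True
+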
1074
+
1075
+ def _make_interp_per_full_matr(x, y, t, k):
1076
+ '''
1077
+ Returns a solution of a system for B-spline interpolation with periodic
1078
+ boundary conditions. First ``k - 1`` rows of matrix are conditions of
1079
+ periodicity (continuity of ``k - 1`` derivatives at the boundary points).
1080
+ Last ``n`` rows are interpolation conditions.
1081
+ RHS is ``k - 1`` zeros and ``n`` ordinates in this case.
1082
+
1083
+ Parameters
1084
+ ----------
1085
+ x : 1-D array, shape (n,)
1086
+ Values of the x-coordinate of a given set of points.
+ y : 1-D array, shape (n,)
+ Values of the y-coordinate of a given set of points.
1089
+ t : 1-D array, shape(n+2*k,)
1090
+ Vector of knots.
1091
+ k : int
1092
+ The maximum degree of spline
1093
+
1094
+ Returns
1095
+ -------
1096
+ c : 1-D array, shape (n+k-1,)
1097
+ B-spline coefficients
1098
+
1099
+ Notes
1100
+ -----
1101
+ ``t`` is supposed to be taken on a circle.
1102
+
1103
+ '''
1104
+
1105
+ x, y, t = map(np.asarray, (x, y, t))
1106
+
1107
+ n = x.size
1108
+ # LHS: the collocation matrix + derivatives at edges
1109
+ matr = np.zeros((n + k - 1, n + k - 1))
1110
+
1111
+ # derivatives at x[0] and x[-1]:
1112
+ for i in range(k - 1):
1113
+ bb = _bspl.evaluate_all_bspl(t, k, x[0], k, nu=i + 1)
1114
+ matr[i, : k + 1] += bb
1115
+ bb = _bspl.evaluate_all_bspl(t, k, x[-1], n + k - 1, nu=i + 1)[:-1]
1116
+ matr[i, -k:] -= bb
1117
+
1118
+ # collocation matrix
1119
+ for i in range(n):
1120
+ xval = x[i]
1121
+ # find interval
1122
+ if xval == t[k]:
1123
+ left = k
1124
+ else:
1125
+ left = np.searchsorted(t, xval) - 1
1126
+
1127
+ # fill a row
1128
+ bb = _bspl.evaluate_all_bspl(t, k, xval, left)
1129
+ matr[i + k - 1, left-k:left+1] = bb
1130
+
1131
+ # RHS
1132
+ b = np.r_[[0] * (k - 1), y]
1133
+
1134
+ c = solve(matr, b)
1135
+ return c
1136
+
1137
+
1138
+ def _make_periodic_spline(x, y, t, k, axis):
1139
+ '''
1140
+ Compute the (coefficients of) interpolating B-spline with periodic
1141
+ boundary conditions.
1142
+
1143
+ Parameters
1144
+ ----------
1145
+ x : array_like, shape (n,)
1146
+ Abscissas.
1147
+ y : array_like, shape (n,)
1148
+ Ordinates.
1149
+ k : int
1150
+ B-spline degree.
1151
+ t : array_like, shape (n + 2 * k,).
1152
+ Knots taken on a circle, ``k`` on the left and ``k`` on the right
1153
+ of the vector ``x``.
1154
+
1155
+ Returns
1156
+ -------
1157
+ b : a BSpline object of the degree ``k`` and with knots ``t``.
1158
+
1159
+ Notes
1160
+ -----
1161
+ The original system consists of ``n + k - 1`` equations, where the first
+ ``k - 1`` of them enforce continuity of the ``k - 1`` derivatives at the
+ edges, while the remaining equations are the interpolation conditions
+ (matching all the input points). Due to the special form of the knot
+ vector, it can be proved that in the original system the first and the
+ last ``k`` coefficients of the spline are equal, respectively. This
+ follows from the fact that all ``k - 1`` derivatives match term by term
+ at the ends and that the matrix of the original system of linear
+ equations is non-degenerate. Hence the number of equations can be
+ reduced to ``n - 1`` (the first ``k - 1`` equations are eliminated).
+ Another trick of this implementation is a cyclic shift of the B-spline
+ values, made possible by the equality of the ``k`` unknown coefficients.
+ With this we obtain a system matrix with upper right and lower left
+ blocks and ``k`` diagonals, which allows using the Woodbury formula to
+ speed up the computations.
1175
+
1176
+ '''
1177
+ n = y.shape[0]
1178
+
1179
+ extradim = prod(y.shape[1:])
1180
+ y_new = y.reshape(n, extradim)
1181
+ c = np.zeros((n + k - 1, extradim))
1182
+
1183
+ # n <= k case is solved with full matrix
1184
+ if n <= k:
1185
+ for i in range(extradim):
1186
+ c[:, i] = _make_interp_per_full_matr(x, y_new[:, i], t, k)
1187
+ c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
1188
+ return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
1189
+
1190
+ nt = len(t) - k - 1
1191
+
1192
+ # size of block elements
1193
+ kul = int(k / 2)
1194
+
1195
+ # kl = ku = k
1196
+ ab = np.zeros((3 * k + 1, nt), dtype=np.float64, order='F')
1197
+
1198
+ # upper right and lower left blocks
1199
+ ur = np.zeros((kul, kul))
1200
+ ll = np.zeros_like(ur)
1201
+
1202
+ # `offset` is made to shift all the non-zero elements to the end of the
1203
+ # matrix
1204
+ _bspl._colloc(x, t, k, ab, offset=k)
1205
+
1206
+ # remove zeros before the matrix
1207
+ ab = ab[-k - (k + 1) % 2:, :]
1208
+
1209
+ # The least elements in rows (except repetitions) are diagonals
1210
+ # of block matrices. Upper right matrix is an upper triangular
1211
+ # matrix while lower left is a lower triangular one.
1212
+ for i in range(kul):
1213
+ ur += np.diag(ab[-i - 1, i: kul], k=i)
1214
+ ll += np.diag(ab[i, -kul - (k % 2): n - 1 + 2 * kul - i], k=-i)
1215
+
1216
+ # remove elements that occur in the last point
1217
+ # (first and last points are equivalent)
1218
+ A = ab[:, kul: -k + kul]
1219
+
1220
+ for i in range(extradim):
1221
+ cc = _woodbury_algorithm(A, ur, ll, y_new[:, i][:-1], k)
1222
+ c[:, i] = np.concatenate((cc[-kul:], cc, cc[:kul + k % 2]))
1223
+ c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
1224
+ return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
1225
+
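+ # A doctest-style sketch (editorial illustration, public API only): the
+ # periodic solver above produces a spline that interpolates the data and
+ # repeats itself with the period of the data.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import make_interp_spline
+ # >>> x = np.linspace(0, 2 * np.pi, 9)
+ # >>> y = np.sin(x)                       # y[0] and y[-1] agree to ~1e-16
+ # >>> spl = make_interp_spline(x, y, k=3, bc_type='periodic')
+ # >>> xx = np.linspace(0.5, 5.5, 11)
+ # >>> np.allclose(spl(x), y) and np.allclose(spl(xx), spl(xx + 2 * np.pi))
+ # True
+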
1226
+
1227
+ def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
1228
+ check_finite=True):
1229
+ """Compute the (coefficients of) interpolating B-spline.
1230
+
1231
+ Parameters
1232
+ ----------
1233
+ x : array_like, shape (n,)
1234
+ Abscissas.
1235
+ y : array_like, shape (n, ...)
1236
+ Ordinates.
1237
+ k : int, optional
1238
+ B-spline degree. Default is cubic, ``k = 3``.
1239
+ t : array_like, shape (nt + k + 1,), optional.
1240
+ Knots.
1241
+ The number of knots needs to agree with the number of data points and
1242
+ the number of derivatives at the edges. Specifically, ``nt - n`` must
1243
+ equal ``len(deriv_l) + len(deriv_r)``.
1244
+ bc_type : 2-tuple or None
1245
+ Boundary conditions.
1246
+ Default is None, which means choosing the boundary conditions
1247
+ automatically. Otherwise, it must be a length-two tuple where the first
1248
+ element (``deriv_l``) sets the boundary conditions at ``x[0]`` and
1249
+ the second element (``deriv_r``) sets the boundary conditions at
1250
+ ``x[-1]``. Each of these must be an iterable of pairs
1251
+ ``(order, value)`` which gives the values of derivatives of specified
1252
+ orders at the given edge of the interpolation interval.
1253
+ Alternatively, the following string aliases are recognized:
1254
+
1255
+ * ``"clamped"``: The first derivatives at the ends are zero. This is
1256
+ equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.
1257
+ * ``"natural"``: The second derivatives at ends are zero. This is
1258
+ equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.
1259
+ * ``"not-a-knot"`` (default): The first and second segments are the
1260
+ same polynomial. This is equivalent to having ``bc_type=None``.
1261
+ * ``"periodic"``: The values and the first ``k-1`` derivatives at the
1262
+ ends are equivalent.
1263
+
1264
+ axis : int, optional
1265
+ Interpolation axis. Default is 0.
1266
+ check_finite : bool, optional
1267
+ Whether to check that the input arrays contain only finite numbers.
1268
+ Disabling may give a performance gain, but may result in problems
1269
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
1270
+ Default is True.
1271
+
1272
+ Returns
1273
+ -------
1274
+ b : a BSpline object of the degree ``k`` and with knots ``t``.
1275
+
1276
+ See Also
1277
+ --------
1278
+ BSpline : base class representing the B-spline objects
1279
+ CubicSpline : a cubic spline in the polynomial basis
1280
+ make_lsq_spline : a similar factory function for spline fitting
1281
+ UnivariateSpline : a wrapper over FITPACK spline fitting routines
1282
+ splrep : a wrapper over FITPACK spline fitting routines
1283
+
1284
+ Examples
1285
+ --------
1286
+
1287
+ Use cubic interpolation on Chebyshev nodes:
1288
+
1289
+ >>> import numpy as np
1290
+ >>> import matplotlib.pyplot as plt
1291
+ >>> def cheb_nodes(N):
1292
+ ... jj = 2.*np.arange(N) + 1
1293
+ ... x = np.cos(np.pi * jj / 2 / N)[::-1]
1294
+ ... return x
1295
+
1296
+ >>> x = cheb_nodes(20)
1297
+ >>> y = np.sqrt(1 - x**2)
1298
+
1299
+ >>> from scipy.interpolate import BSpline, make_interp_spline
1300
+ >>> b = make_interp_spline(x, y)
1301
+ >>> np.allclose(b(x), y)
1302
+ True
1303
+
1304
+ Note that the default is a cubic spline with a not-a-knot boundary condition
1305
+
1306
+ >>> b.k
1307
+ 3
1308
+
1309
+ Here we use a 'natural' spline, with zero 2nd derivatives at edges:
1310
+
1311
+ >>> l, r = [(2, 0.0)], [(2, 0.0)]
1312
+ >>> b_n = make_interp_spline(x, y, bc_type=(l, r)) # or, bc_type="natural"
1313
+ >>> np.allclose(b_n(x), y)
1314
+ True
1315
+ >>> x0, x1 = x[0], x[-1]
1316
+ >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
1317
+ True
1318
+
1319
+ Interpolation of parametric curves is also supported. As an example, we
1320
+ compute a discretization of a snail curve in polar coordinates
1321
+
1322
+ >>> phi = np.linspace(0, 2.*np.pi, 40)
1323
+ >>> r = 0.3 + np.cos(phi)
1324
+ >>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates
1325
+
1326
+ Build an interpolating curve, parameterizing it by the angle
1327
+
1328
+ >>> spl = make_interp_spline(phi, np.c_[x, y])
1329
+
1330
+ Evaluate the interpolant on a finer grid (note that we transpose the result
1331
+ to unpack it into a pair of x- and y-arrays)
1332
+
1333
+ >>> phi_new = np.linspace(0, 2.*np.pi, 100)
1334
+ >>> x_new, y_new = spl(phi_new).T
1335
+
1336
+ Plot the result
1337
+
1338
+ >>> plt.plot(x, y, 'o')
1339
+ >>> plt.plot(x_new, y_new, '-')
1340
+ >>> plt.show()
1341
+
1342
+ Build a B-spline curve with 2 dimensional y
1343
+
1344
+ >>> x = np.linspace(0, 2*np.pi, 10)
1345
+ >>> y = np.array([np.sin(x), np.cos(x)])
1346
+
1347
+ The periodic condition is satisfied because the y coordinates at the two
+ ends are equal
1349
+
1350
+ >>> ax = plt.axes(projection='3d')
1351
+ >>> xx = np.linspace(0, 2*np.pi, 100)
1352
+ >>> bspl = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
1353
+ >>> ax.plot3D(xx, *bspl(xx))
1354
+ >>> ax.scatter3D(x, *y, color='red')
1355
+ >>> plt.show()
1356
+
1357
+ """
1358
+ # convert string aliases for the boundary conditions
1359
+ if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic':
1360
+ deriv_l, deriv_r = None, None
1361
+ elif isinstance(bc_type, str):
1362
+ deriv_l, deriv_r = bc_type, bc_type
1363
+ else:
1364
+ try:
1365
+ deriv_l, deriv_r = bc_type
1366
+ except TypeError as e:
1367
+ raise ValueError("Unknown boundary condition: %s" % bc_type) from e
1368
+
1369
+ y = np.asarray(y)
1370
+
1371
+ axis = normalize_axis_index(axis, y.ndim)
1372
+
1373
+ x = _as_float_array(x, check_finite)
1374
+ y = _as_float_array(y, check_finite)
1375
+
1376
+ y = np.moveaxis(y, axis, 0) # now internally interp axis is zero
1377
+
1378
+ # sanity check the input
1379
+ if bc_type == 'periodic' and not np.allclose(y[0], y[-1], atol=1e-15):
1380
+ raise ValueError("First and last points does not match while "
1381
+ "periodic case expected")
1382
+ if x.size != y.shape[0]:
1383
+ raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
1384
+ if np.any(x[1:] == x[:-1]):
1385
+ raise ValueError("Expect x to not have duplicates")
1386
+ if x.ndim != 1 or np.any(x[1:] < x[:-1]):
1387
+ raise ValueError("Expect x to be a 1D strictly increasing sequence.")
1388
+
1389
+ # special-case k=0 right away
1390
+ if k == 0:
1391
+ if any(_ is not None for _ in (t, deriv_l, deriv_r)):
1392
+ raise ValueError("Too much info for k=0: t and bc_type can only "
1393
+ "be None.")
1394
+ t = np.r_[x, x[-1]]
1395
+ c = np.asarray(y)
1396
+ c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
1397
+ return BSpline.construct_fast(t, c, k, axis=axis)
1398
+
1399
+ # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
1400
+ if k == 1 and t is None:
1401
+ if not (deriv_l is None and deriv_r is None):
1402
+ raise ValueError("Too much info for k=1: bc_type can only be None.")
1403
+ t = np.r_[x[0], x, x[-1]]
1404
+ c = np.asarray(y)
1405
+ c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
1406
+ return BSpline.construct_fast(t, c, k, axis=axis)
1407
+
1408
+ k = operator.index(k)
1409
+
1410
+ if bc_type == 'periodic' and t is not None:
1411
+ raise NotImplementedError("For periodic case t is constructed "
1412
+ "automatically and can not be passed "
1413
+ "manually")
1414
+
1415
+ # come up with a sensible knot vector, if needed
1416
+ if t is None:
1417
+ if deriv_l is None and deriv_r is None:
1418
+ if bc_type == 'periodic':
1419
+ t = _periodic_knots(x, k)
1420
+ elif k == 2:
1421
+ # OK, it's a bit ad hoc: Greville sites + omit
1422
+ # 2nd and 2nd-to-last points, a la not-a-knot
1423
+ t = (x[1:] + x[:-1]) / 2.
1424
+ t = np.r_[(x[0],)*(k+1),
1425
+ t[1:-1],
1426
+ (x[-1],)*(k+1)]
1427
+ else:
1428
+ t = _not_a_knot(x, k)
1429
+ else:
1430
+ t = _augknt(x, k)
1431
+
1432
+ t = _as_float_array(t, check_finite)
1433
+
1434
+ if k < 0:
1435
+ raise ValueError("Expect non-negative k.")
1436
+ if t.ndim != 1 or np.any(t[1:] < t[:-1]):
1437
+ raise ValueError("Expect t to be a 1-D sorted array_like.")
1438
+ if t.size < x.size + k + 1:
1439
+ raise ValueError('Got %d knots, need at least %d.' %
1440
+ (t.size, x.size + k + 1))
1441
+ if (x[0] < t[k]) or (x[-1] > t[-k]):
1442
+ raise ValueError('Out of bounds w/ x = %s.' % x)
1443
+
1444
+ if bc_type == 'periodic':
1445
+ return _make_periodic_spline(x, y, t, k, axis)
1446
+
1447
+ # Here : deriv_l, r = [(nu, value), ...]
1448
+ deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
1449
+ deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)
1450
+ nleft = deriv_l_ords.shape[0]
1451
+
1452
+ deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
1453
+ deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)
1454
+ nright = deriv_r_ords.shape[0]
1455
+
1456
+ # have `n` conditions for `nt` coefficients; need nt-n derivatives
1457
+ n = x.size
1458
+ nt = t.size - k - 1
1459
+
1460
+ if nt - n != nleft + nright:
1461
+ raise ValueError("The number of derivatives at boundaries does not "
1462
+ f"match: expected {nt-n}, got {nleft}+{nright}")
1463
+
1464
+ # bail out if the `y` array is zero-sized
1465
+ if y.size == 0:
1466
+ c = np.zeros((nt,) + y.shape[1:], dtype=float)
1467
+ return BSpline.construct_fast(t, c, k, axis=axis)
1468
+
1469
+ # set up the LHS: the collocation matrix + derivatives at boundaries
1470
+ kl = ku = k
1471
+ ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float64, order='F')
1472
+ _bspl._colloc(x, t, k, ab, offset=nleft)
1473
+ if nleft > 0:
1474
+ _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku,
1475
+ deriv_l_ords.astype(np.dtype("long")))
1476
+ if nright > 0:
1477
+ _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku,
1478
+ deriv_r_ords.astype(np.dtype("long")),
1479
+ offset=nt-nright)
1480
+
1481
+ # set up the RHS: values to interpolate (+ derivative values, if any)
1482
+ extradim = prod(y.shape[1:])
1483
+ rhs = np.empty((nt, extradim), dtype=y.dtype)
1484
+ if nleft > 0:
1485
+ rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
1486
+ rhs[nleft:nt - nright] = y.reshape(-1, extradim)
1487
+ if nright > 0:
1488
+ rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)
1489
+
1490
+ # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
1491
+ if check_finite:
1492
+ ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
1493
+ gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
1494
+ lu, piv, c, info = gbsv(kl, ku, ab, rhs,
1495
+ overwrite_ab=True, overwrite_b=True)
1496
+
1497
+ if info > 0:
1498
+ raise LinAlgError("Collocation matrix is singular.")
1499
+ elif info < 0:
1500
+ raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
1501
+
1502
+ c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
1503
+ return BSpline.construct_fast(t, c, k, axis=axis)
1504
+
1505
+
1506
+ def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
1507
+ r"""Compute the (coefficients of) an LSQ (Least SQuared) based
1508
+ fitting B-spline.
1509
+
1510
+ The result is a linear combination
1511
+
1512
+ .. math::
1513
+
1514
+ S(x) = \sum_j c_j B_j(x; t)
1515
+
1516
+ of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes
1517
+
1518
+ .. math::
1519
+
1520
+ \sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2
1521
+
1522
+ Parameters
1523
+ ----------
1524
+ x : array_like, shape (m,)
1525
+ Abscissas.
1526
+ y : array_like, shape (m, ...)
1527
+ Ordinates.
1528
+ t : array_like, shape (n + k + 1,).
1529
+ Knots.
1530
+ Knots and data points must satisfy Schoenberg-Whitney conditions.
1531
+ k : int, optional
1532
+ B-spline degree. Default is cubic, ``k = 3``.
1533
+ w : array_like, shape (m,), optional
1534
+ Weights for spline fitting. Must be positive. If ``None``,
1535
+ then weights are all equal.
1536
+ Default is ``None``.
1537
+ axis : int, optional
1538
+ Interpolation axis. Default is zero.
1539
+ check_finite : bool, optional
1540
+ Whether to check that the input arrays contain only finite numbers.
1541
+ Disabling may give a performance gain, but may result in problems
1542
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
1543
+ Default is True.
1544
+
1545
+ Returns
1546
+ -------
1547
+ b : a BSpline object of the degree ``k`` with knots ``t``.
1548
+
1549
+ See Also
1550
+ --------
1551
+ BSpline : base class representing the B-spline objects
1552
+ make_interp_spline : a similar factory function for interpolating splines
1553
+ LSQUnivariateSpline : a FITPACK-based spline fitting routine
1554
+ splrep : a FITPACK-based fitting routine
1555
+
1556
+ Notes
1557
+ -----
1558
+ The number of data points must be larger than the spline degree ``k``.
1559
+
1560
+ Knots ``t`` must satisfy the Schoenberg-Whitney conditions,
1561
+ i.e., there must be a subset of data points ``x[j]`` such that
1562
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
1563
+
1564
+ Examples
1565
+ --------
1566
+ Generate some noisy data:
1567
+
1568
+ >>> import numpy as np
1569
+ >>> import matplotlib.pyplot as plt
1570
+ >>> rng = np.random.default_rng()
1571
+ >>> x = np.linspace(-3, 3, 50)
1572
+ >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
1573
+
1574
+ Now fit a smoothing cubic spline with pre-defined internal knots.
1575
+ Here we make the knot vector (k+1)-regular by adding boundary knots:
1576
+
1577
+ >>> from scipy.interpolate import make_lsq_spline, BSpline
1578
+ >>> t = [-1, 0, 1]
1579
+ >>> k = 3
1580
+ >>> t = np.r_[(x[0],)*(k+1),
1581
+ ... t,
1582
+ ... (x[-1],)*(k+1)]
1583
+ >>> spl = make_lsq_spline(x, y, t, k)
1584
+
1585
+ For comparison, we also construct an interpolating spline for the same
1586
+ set of data:
1587
+
1588
+ >>> from scipy.interpolate import make_interp_spline
1589
+ >>> spl_i = make_interp_spline(x, y)
1590
+
1591
+ Plot both:
1592
+
1593
+ >>> xs = np.linspace(-3, 3, 100)
1594
+ >>> plt.plot(x, y, 'ro', ms=5)
1595
+ >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
1596
+ >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
1597
+ >>> plt.legend(loc='best')
1598
+ >>> plt.show()
1599
+
1600
+ **NaN handling**: If the input arrays contain ``nan`` values, the result is
1601
+ not useful since the underlying spline fitting routines cannot deal with
1602
+ ``nan``. A workaround is to use zero weights for not-a-number data points:
1603
+
1604
+ >>> y[8] = np.nan
1605
+ >>> w = np.isnan(y)
1606
+ >>> y[w] = 0.
1607
+ >>> tck = make_lsq_spline(x, y, t, w=~w)
1608
+
1609
+ Notice the need to replace a ``nan`` by a numerical value (the precise
+ value does not matter as long as the corresponding weight is zero).
1611
+
1612
+ """
1613
+ x = _as_float_array(x, check_finite)
1614
+ y = _as_float_array(y, check_finite)
1615
+ t = _as_float_array(t, check_finite)
1616
+ if w is not None:
1617
+ w = _as_float_array(w, check_finite)
1618
+ else:
1619
+ w = np.ones_like(x)
1620
+ k = operator.index(k)
1621
+
1622
+ axis = normalize_axis_index(axis, y.ndim)
1623
+
1624
+ y = np.moveaxis(y, axis, 0) # now internally interp axis is zero
1625
+
1626
+ if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
1627
+ raise ValueError("Expect x to be a 1-D sorted array_like.")
1628
+ if x.shape[0] < k+1:
1629
+ raise ValueError("Need more x points.")
1630
+ if k < 0:
1631
+ raise ValueError("Expect non-negative k.")
1632
+ if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
1633
+ raise ValueError("Expect t to be a 1-D sorted array_like.")
1634
+ if x.size != y.shape[0]:
1635
+ raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
1636
+ if k > 0 and np.any((x < t[k]) | (x > t[-k])):
1637
+ raise ValueError('Out of bounds w/ x = %s.' % x)
1638
+ if x.size != w.size:
1639
+ raise ValueError(f'Shapes of x {x.shape} and w {w.shape} are incompatible')
1640
+
1641
+ # number of coefficients
1642
+ n = t.size - k - 1
1643
+
1644
+ # construct A.T @ A and rhs with A the collocation matrix, and
1645
+ # rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
1646
+ lower = True
1647
+ extradim = prod(y.shape[1:])
1648
+ ab = np.zeros((k+1, n), dtype=np.float64, order='F')
1649
+ rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
1650
+ _bspl._norm_eq_lsq(x, t, k,
1651
+ y.reshape(-1, extradim),
1652
+ w,
1653
+ ab, rhs)
1654
+ rhs = rhs.reshape((n,) + y.shape[1:])
1655
+
1656
+ # have observation matrix & rhs, can solve the LSQ problem
1657
+ cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
1658
+ check_finite=check_finite)
1659
+ c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
1660
+ check_finite=check_finite)
1661
+
1662
+ c = np.ascontiguousarray(c)
1663
+ return BSpline.construct_fast(t, c, k, axis=axis)
1664
+
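+ # A doctest-style sketch (editorial illustration, public API only): when the
+ # number of B-spline coefficients equals the number of data points (here we
+ # simply reuse the knots of an interpolating spline), the least-squares fit
+ # reproduces the data exactly, i.e. it degenerates to interpolation.
+ #
+ # >>> import numpy as np
+ # >>> from scipy.interpolate import make_interp_spline, make_lsq_spline
+ # >>> x = np.linspace(0, 4, 7)
+ # >>> y = np.exp(-x)
+ # >>> spl_i = make_interp_spline(x, y, k=3)
+ # >>> spl_l = make_lsq_spline(x, y, spl_i.t, k=3)
+ # >>> np.allclose(spl_l(x), y)
+ # True
+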
1665
+
1666
+ #############################
1667
+ # Smoothing spline helpers #
1668
+ #############################
1669
+
1670
+ def _compute_optimal_gcv_parameter(X, wE, y, w):
1671
+ """
1672
+ Returns an optimal regularization parameter from the GCV criteria [1].
1673
+
1674
+ Parameters
1675
+ ----------
1676
+ X : array, shape (5, n)
1677
+ 5 bands of the design matrix ``X`` stored in LAPACK banded storage.
1678
+ wE : array, shape (5, n)
1679
+ 5 bands of the penalty matrix :math:`W^{-1} E` stored in LAPACK banded
1680
+ storage.
1681
+ y : array, shape (n,)
1682
+ Ordinates.
1683
+ w : array, shape (n,)
1684
+ Vector of weights.
1685
+
1686
+ Returns
1687
+ -------
1688
+ lam : float
1689
+ The regularization parameter that is optimal from the point of view
+ of the GCV criteria.
1691
+
1692
+ Notes
1693
+ -----
1694
+ No checks are performed.
1695
+
1696
+ References
1697
+ ----------
1698
+ .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
1699
+ for observational data, Philadelphia, Pennsylvania: Society for
1700
+ Industrial and Applied Mathematics, 1990, pp. 45-65.
1701
+ :doi:`10.1137/1.9781611970128`
1702
+
1703
+ """
1704
+
1705
+ def compute_banded_symmetric_XT_W_Y(X, w, Y):
1706
+ """
1707
+ Assuming that the product :math:`X^T W Y` is symmetric and both ``X``
1708
+ and ``Y`` are 5-banded, compute the unique bands of the product.
1709
+
1710
+ Parameters
1711
+ ----------
1712
+ X : array, shape (5, n)
1713
+ 5 bands of the matrix ``X`` stored in LAPACK banded storage.
1714
+ w : array, shape (n,)
1715
+ Array of weights
1716
+ Y : array, shape (5, n)
1717
+ 5 bands of the matrix ``Y`` stored in LAPACK banded storage.
1718
+
1719
+ Returns
1720
+ -------
1721
+ res : array, shape (4, n)
1722
+ The result of the product :math:`X^T W Y` stored in banded storage.
1723
+
1724
+ Notes
1725
+ -----
1726
+ Since the matrices ``X`` and ``Y`` are 5-banded, their product
1727
+ :math:`X^T W Y` is 7-banded. It is also symmetric, so we can store only
1728
+ unique diagonals.
1729
+
1730
+ """
1731
+ # compute W Y
1732
+ W_Y = np.copy(Y)
1733
+
1734
+ W_Y[2] *= w
1735
+ for i in range(2):
1736
+ W_Y[i, 2 - i:] *= w[:-2 + i]
1737
+ W_Y[3 + i, :-1 - i] *= w[1 + i:]
1738
+
1739
+ n = X.shape[1]
1740
+ res = np.zeros((4, n))
1741
+ for i in range(n):
1742
+ for j in range(min(n-i, 4)):
1743
+ res[-j-1, i + j] = sum(X[j:, i] * W_Y[:5-j, i + j])
1744
+ return res
1745
+
1746
+ def compute_b_inv(A):
1747
+ """
1748
+ Invert the 3 central bands of the matrix :math:`A = U^T D^{-1} U`, assuming
+ that ``U`` is a unit upper triangular banded matrix, using an algorithm
+ proposed in [1].
1751
+
1752
+ Parameters
1753
+ ----------
1754
+ A : array, shape (4, n)
1755
+ Matrix to invert, stored in LAPACK banded storage.
1756
+
1757
+ Returns
1758
+ -------
1759
+ B : array, shape (4, n)
1760
+ 3 unique bands of the symmetric matrix that is the inverse of ``A``.
1761
+ The first row is filled with zeros.
1762
+
1763
+ Notes
1764
+ -----
1765
+ The algorithm is based on the Cholesky decomposition and, therefore,
+ if the matrix ``A`` is close to not being positive definite, the function
+ raises a LinAlgError.
1768
+
1769
+ Both matrices ``A`` and ``B`` are stored in LAPACK banded storage.
1770
+
1771
+ References
1772
+ ----------
1773
+ .. [1] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
1774
+ spline functions," Numerische Mathematik, vol. 47, no. 1,
1775
+ pp. 99-106, 1985.
1776
+ :doi:`10.1007/BF01389878`
1777
+
1778
+ """
1779
+
1780
+ def find_b_inv_elem(i, j, U, D, B):
1781
+ rng = min(3, n - i - 1)
1782
+ rng_sum = 0.
1783
+ if j == 0:
1784
+ # use 2-nd formula from [1]
1785
+ for k in range(1, rng + 1):
1786
+ rng_sum -= U[-k - 1, i + k] * B[-k - 1, i + k]
1787
+ rng_sum += D[i]
1788
+ B[-1, i] = rng_sum
1789
+ else:
1790
+ # use 1-st formula from [1]
1791
+ for k in range(1, rng + 1):
1792
+ diag = abs(k - j)
1793
+ ind = i + min(k, j)
1794
+ rng_sum -= U[-k - 1, i + k] * B[-diag - 1, ind + diag]
1795
+ B[-j - 1, i + j] = rng_sum
1796
+
1797
+ U = cholesky_banded(A)
1798
+ for i in range(2, 5):
1799
+ U[-i, i-1:] /= U[-1, :-i+1]
1800
+ D = 1. / (U[-1])**2
1801
+ U[-1] /= U[-1]
1802
+
1803
+ n = U.shape[1]
1804
+
1805
+ B = np.zeros(shape=(4, n))
1806
+ for i in range(n - 1, -1, -1):
1807
+ for j in range(min(3, n - i - 1), -1, -1):
1808
+ find_b_inv_elem(i, j, U, D, B)
1809
+ # the first row contains garbage and should be removed
1810
+ B[0] = [0.] * n
1811
+ return B
1812
+
1813
+ def _gcv(lam, X, XtWX, wE, XtE):
1814
+ r"""
1815
+ Computes the generalized cross-validation criteria [1].
1816
+
1817
+ Parameters
1818
+ ----------
1819
+ lam : float, (:math:`\lambda \geq 0`)
1820
+ Regularization parameter.
1821
+ X : array, shape (5, n)
1822
+ Matrix is stored in LAPACK banded storage.
1823
+ XtWX : array, shape (4, n)
1824
+ Product :math:`X^T W X` stored in LAPACK banded storage.
1825
+ wE : array, shape (5, n)
1826
+ Matrix :math:`W^{-1} E` stored in LAPACK banded storage.
1827
+ XtE : array, shape (4, n)
1828
+ Product :math:`X^T E` stored in LAPACK banded storage.
1829
+
1830
+ Returns
1831
+ -------
1832
+ res : float
1833
+ Value of the GCV criteria with the regularization parameter
1834
+ :math:`\lambda`.
1835
+
1836
+ Notes
1837
+ -----
1838
+ The criterion is computed from formula (1.3.2) of [3]:
+
+ .. math::
+
+ GCV(\lambda) = \dfrac{1}{n} \sum\limits_{k = 1}^{n} \dfrac{\left(
+ y_k - f_{\lambda}(x_k) \right)^2}{\left( 1 - \mathrm{tr}(A)/n \right)^2}.
+
+ The criterion is discussed in section 1.3 of [3].
1845
+
1846
+ The numerator is computed using (2.2.4) [3] and the denominator is
1847
+ computed using an algorithm from [2] (see in the ``compute_b_inv``
1848
+ function).
1849
+
1850
+ References
1851
+ ----------
1852
+ .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
1853
+ for observational data, Philadelphia, Pennsylvania: Society for
1854
+ Industrial and Applied Mathematics, 1990, pp. 45-65.
1855
+ :doi:`10.1137/1.9781611970128`
1856
+ .. [2] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
1857
+ spline functions," Numerische Mathematik, vol. 47, no. 1,
1858
+ pp. 99-106, 1985.
1859
+ :doi:`10.1007/BF01389878`
1860
+ .. [3] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
1861
+ BSc thesis, 2022. Might be available (in Russian)
1862
+ `here <https://www.hse.ru/ba/am/students/diplomas/620910604>`_
1863
+
1864
+ """
1865
+ # Compute the numerator from (2.2.4) [3]
1866
+ n = X.shape[1]
1867
+ c = solve_banded((2, 2), X + lam * wE, y)
1868
+ res = np.zeros(n)
1869
+ # compute ``W^{-1} E c`` with respect to banded-storage of ``E``
1870
+ tmp = wE * c
1871
+ for i in range(n):
1872
+ for j in range(max(0, i - n + 3), min(5, i + 3)):
1873
+ res[i] += tmp[j, i + 2 - j]
1874
+ numer = np.linalg.norm(lam * res)**2 / n
1875
+
1876
+ # compute the denominator
1877
+ lhs = XtWX + lam * XtE
1878
+ try:
1879
+ b_banded = compute_b_inv(lhs)
1880
+ # compute the trace of the product b_banded @ XtX
1881
+ tr = b_banded * XtWX
1882
+ tr[:-1] *= 2
1883
+ # find the denominator
1884
+ denom = (1 - sum(sum(tr)) / n)**2
1885
+ except LinAlgError:
1886
+ # cholesky decomposition cannot be performed
1887
+ raise ValueError('Seems like the problem is ill-posed')
1888
+
1889
+ res = numer / denom
1890
+
1891
+ return res
1892
+
1893
+ n = X.shape[1]
1894
+
1895
+ XtWX = compute_banded_symmetric_XT_W_Y(X, w, X)
1896
+ XtE = compute_banded_symmetric_XT_W_Y(X, w, wE)
1897
+
1898
+ def fun(lam):
1899
+ return _gcv(lam, X, XtWX, wE, XtE)
1900
+
1901
+ gcv_est = minimize_scalar(fun, bounds=(0, n), method='Bounded')
1902
+ if gcv_est.success:
1903
+ return gcv_est.x
1904
+ raise ValueError(f"Unable to find minimum of the GCV "
1905
+ f"function: {gcv_est.message}")
1906
+
1907
+
1908
+ def _coeff_of_divided_diff(x):
1909
+ """
1910
+ Returns the coefficients of the divided difference.
1911
+
1912
+ Parameters
1913
+ ----------
1914
+ x : array, shape (n,)
1915
+ Array which is used for the computation of divided difference.
1916
+
1917
+ Returns
1918
+ -------
1919
+ res : array_like, shape (n,)
1920
+ Coefficients of the divided difference.
1921
+
1922
+ Notes
1923
+ -----
1924
+ Vector ``x`` should have unique elements, otherwise an error division by
1925
+ zero might be raised.
1926
+
1927
+ No checks are performed.
1928
+
1929
+ """
1930
+ n = x.shape[0]
1931
+ res = np.zeros(n)
1932
+ for i in range(n):
1933
+ pp = 1.
1934
+ for k in range(n):
1935
+ if k != i:
1936
+ pp *= (x[i] - x[k])
1937
+ res[i] = 1. / pp
1938
+ return res
1939
+
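+ # A doctest-style sketch (editorial illustration of the private helper above):
+ # the coefficients reproduce the classical divided difference, e.g. the
+ # second-order divided difference of ``x**2`` over any three points is 1.
+ #
+ # >>> import numpy as np
+ # >>> c = _coeff_of_divided_diff(np.array([0., 1., 2.]))
+ # >>> np.allclose(c, [0.5, -1.0, 0.5])
+ # True
+ # >>> float(c @ np.array([0., 1., 2.])**2)
+ # 1.0
+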
1940
+
1941
+ def make_smoothing_spline(x, y, w=None, lam=None):
1942
+ r"""
1943
+ Compute the (coefficients of) a smoothing cubic spline function using
+ ``lam`` to control the tradeoff between the smoothness of the curve and
+ its proximity to the data. If ``lam`` is None, it is determined from the
+ GCV criteria [1].
1947
+
1948
+ A smoothing spline is found as a solution to the regularized weighted
1949
+ linear regression problem:
1950
+
1951
+ .. math::
1952
+
1953
+ \sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2 +
1954
+ \lambda\int\limits_{x_1}^{x_n} (f^{(2)}(u))^2 d u
1955
+
1956
+ where :math:`f` is a spline function, :math:`w` is a vector of weights and
1957
+ :math:`\lambda` is a regularization parameter.
1958
+
1959
+ If ``lam`` is None, we use the GCV criteria to find an optimal
1960
+ regularization parameter, otherwise we solve the regularized weighted
1961
+ linear regression problem with given parameter. The parameter controls
1962
+ the tradeoff in the following way: the larger the parameter becomes, the
1963
+ smoother the function gets.
1964
+
1965
+ Parameters
1966
+ ----------
1967
+ x : array_like, shape (n,)
1968
+ Abscissas. `n` must be at least 5.
1969
+ y : array_like, shape (n,)
1970
+ Ordinates. `n` must be at least 5.
1971
+ w : array_like, shape (n,), optional
1972
+ Vector of weights. Default is ``np.ones_like(x)``.
1973
+ lam : float, (:math:`\lambda \geq 0`), optional
1974
+ Regularization parameter. If ``lam`` is None, then it is found from
1975
+ the GCV criteria. Default is None.
1976
+
1977
+ Returns
1978
+ -------
1979
+ func : a BSpline object.
1980
+ A callable representing a spline in the B-spline basis
1981
+ as a solution of the problem of smoothing splines using
1982
+ the GCV criteria [1] in case ``lam`` is None, otherwise using the
1983
+ given parameter ``lam``.
1984
+
1985
+ Notes
1986
+ -----
1987
+ This algorithm is a clean room reimplementation of the algorithm
1988
+ introduced by Woltring in FORTRAN [2]. The original version cannot be used
1989
+ in SciPy source code because of license issues. The details of the
+ reimplementation are discussed in [4] (available only in Russian).
1991
+
1992
+ If the vector of weights ``w`` is None, we assume that all the points are
1993
+ equal in terms of weights, and the vector of weights is a vector of ones.
1994
+
1995
+ Note that in weighted residual sum of squares, weights are not squared:
1996
+ :math:`\sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2` while in
1997
+ ``splrep`` the sum is built from the squared weights.
1998
+
1999
+ In cases when the initial problem is ill-posed (for example, the product
+ :math:`X^T W X`, where :math:`X` is a design matrix, is not a positive
+ definite matrix), a ValueError is raised.
2002
+
2003
+ References
2004
+ ----------
2005
+ .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models for
2006
+ observational data, Philadelphia, Pennsylvania: Society for Industrial
2007
+ and Applied Mathematics, 1990, pp. 45-65.
2008
+ :doi:`10.1137/1.9781611970128`
2009
+ .. [2] H. J. Woltring, A Fortran package for generalized, cross-validatory
2010
+ spline smoothing and differentiation, Advances in Engineering
2011
+ Software, vol. 8, no. 2, pp. 104-113, 1986.
2012
+ :doi:`10.1016/0141-1195(86)90098-7`
2013
+ .. [3] T. Hastie, J. Friedman, and R. Tisbshirani, "Smoothing Splines" in
2014
+ The elements of Statistical Learning: Data Mining, Inference, and
2015
+ prediction, New York: Springer, 2017, pp. 241-249.
2016
+ :doi:`10.1007/978-0-387-84858-7`
2017
+ .. [4] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
2018
+ BSc thesis, 2022.
2019
+ `<https://www.hse.ru/ba/am/students/diplomas/620910604>`_ (in
2020
+ Russian)
2021
+
2022
+ Examples
2023
+ --------
2024
+ Generate some noisy data
2025
+
2026
+ >>> import numpy as np
2027
+ >>> np.random.seed(1234)
2028
+ >>> n = 200
2029
+ >>> def func(x):
2030
+ ... return x**3 + x**2 * np.sin(4 * x)
2031
+ >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
2032
+ >>> y = func(x) + np.random.normal(scale=1.5, size=n)
2033
+
2034
+ Make a smoothing spline function
2035
+
2036
+ >>> from scipy.interpolate import make_smoothing_spline
2037
+ >>> spl = make_smoothing_spline(x, y)
2038
+
2039
+ Plot both
2040
+
2041
+ >>> import matplotlib.pyplot as plt
2042
+ >>> grid = np.linspace(x[0], x[-1], 400)
2043
+ >>> plt.plot(grid, spl(grid), label='Spline')
2044
+ >>> plt.plot(grid, func(grid), label='Original function')
2045
+ >>> plt.scatter(x, y, marker='.')
2046
+ >>> plt.legend(loc='best')
2047
+ >>> plt.show()
2048
+
2049
+ """
2050
+
2051
+ x = np.ascontiguousarray(x, dtype=float)
2052
+ y = np.ascontiguousarray(y, dtype=float)
2053
+
2054
+ if any(x[1:] - x[:-1] <= 0):
2055
+ raise ValueError('``x`` should be an ascending array')
2056
+
2057
+ if x.ndim != 1 or y.ndim != 1 or x.shape[0] != y.shape[0]:
2058
+ raise ValueError('``x`` and ``y`` should be one dimensional and the'
2059
+ ' same size')
2060
+
2061
+ if w is None:
2062
+ w = np.ones(len(x))
2063
+ else:
2064
+ w = np.ascontiguousarray(w)
2065
+ if any(w <= 0):
2066
+ raise ValueError('Invalid vector of weights')
2067
+
2068
+ t = np.r_[[x[0]] * 3, x, [x[-1]] * 3]
2069
+ n = x.shape[0]
2070
+
2071
+ if n <= 4:
2072
+ raise ValueError('``x`` and ``y`` length must be at least 5')
2073
+
2074
+ # It is known that the solution to the stated minimization problem exists
2075
+ # and is a natural cubic spline with vector of knots equal to the unique
2076
+ # elements of ``x`` [3], so we will solve the problem in the basis of
2077
+ # natural splines.
2078
+
2079
+ # create design matrix in the B-spline basis
2080
+ X_bspl = BSpline.design_matrix(x, t, 3)
2081
+ # move from B-spline basis to the basis of natural splines using equations
2082
+ # (2.1.7) [4]
2083
+ # central elements
2084
+ X = np.zeros((5, n))
2085
+ for i in range(1, 4):
2086
+ X[i, 2: -2] = X_bspl[i: i - 4, 3: -3][np.diag_indices(n - 4)]
2087
+
2088
+ # first elements
2089
+ X[1, 1] = X_bspl[0, 0]
2090
+ X[2, :2] = ((x[2] + x[1] - 2 * x[0]) * X_bspl[0, 0],
2091
+ X_bspl[1, 1] + X_bspl[1, 2])
2092
+ X[3, :2] = ((x[2] - x[0]) * X_bspl[1, 1], X_bspl[2, 2])
2093
+
2094
+ # last elements
2095
+ X[1, -2:] = (X_bspl[-3, -3], (x[-1] - x[-3]) * X_bspl[-2, -2])
2096
+ X[2, -2:] = (X_bspl[-2, -3] + X_bspl[-2, -2],
2097
+ (2 * x[-1] - x[-2] - x[-3]) * X_bspl[-1, -1])
2098
+ X[3, -2] = X_bspl[-1, -1]
2099
+
2100
+ # create penalty matrix and divide it by vector of weights: W^{-1} E
2101
+ wE = np.zeros((5, n))
2102
+ wE[2:, 0] = _coeff_of_divided_diff(x[:3]) / w[:3]
2103
+ wE[1:, 1] = _coeff_of_divided_diff(x[:4]) / w[:4]
2104
+ for j in range(2, n - 2):
2105
+ wE[:, j] = (x[j+2] - x[j-2]) * _coeff_of_divided_diff(x[j-2:j+3])\
2106
+ / w[j-2: j+3]
2107
+
2108
+ wE[:-1, -2] = -_coeff_of_divided_diff(x[-4:]) / w[-4:]
2109
+ wE[:-2, -1] = _coeff_of_divided_diff(x[-3:]) / w[-3:]
2110
+ wE *= 6
2111
+
2112
+ if lam is None:
2113
+ lam = _compute_optimal_gcv_parameter(X, wE, y, w)
2114
+ elif lam < 0.:
2115
+ raise ValueError('Regularization parameter should be non-negative')
2116
+
2117
+ # solve the initial problem in the basis of natural splines
2118
+ c = solve_banded((2, 2), X + lam * wE, y)
2119
+ # move back to B-spline basis using equations (2.2.10) [4]
2120
+ c_ = np.r_[c[0] * (t[5] + t[4] - 2 * t[3]) + c[1],
2121
+ c[0] * (t[5] - t[3]) + c[1],
2122
+ c[1: -1],
2123
+ c[-1] * (t[-4] - t[-6]) + c[-2],
2124
+ c[-1] * (2 * t[-4] - t[-5] - t[-6]) + c[-2]]
2125
+
2126
+ return BSpline.construct_fast(t, c_, 3)
2127
+
2128
+
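# A minimal usage sketch for the ``lam`` parameter of ``make_smoothing_spline``
# (the data values below are illustrative only; by default ``lam`` is chosen
# by the GCV criterion as implemented above):
#
# >>> import numpy as np
# >>> from scipy.interpolate import make_smoothing_spline
# >>> rng = np.random.default_rng(0)
# >>> x = np.sort(rng.uniform(-2, 2, 50))
# >>> y = x**2 + rng.normal(scale=0.3, size=50)
# >>> spl_gcv = make_smoothing_spline(x, y)               # lam from GCV
# >>> spl_stiff = make_smoothing_spline(x, y, lam=10.0)   # heavier curvature penalty
# >>> # As lam grows the fit approaches the least-squares straight line,
# >>> # while lam -> 0 approaches an interpolating natural cubic spline.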
2129
+ ########################
2130
+ # FITPACK look-alikes #
2131
+ ########################
2132
+
2133
+ def fpcheck(x, t, k):
2134
+ """ Check consistency of the data vector `x` and the knot vector `t`.
2135
+
2136
+ Return None if inputs are consistent, raises a ValueError otherwise.
2137
+ """
2138
+ # This routine is a clone of the `fpchec` Fortran routine,
2139
+ # https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
2140
+ # which carries the following comment:
2141
+ #
2142
+ # subroutine fpchec verifies the number and the position of the knots
2143
+ # t(j),j=1,2,...,n of a spline of degree k, in relation to the number
2144
+ # and the position of the data points x(i),i=1,2,...,m. if all of the
2145
+ # following conditions are fulfilled, the error parameter ier is set
2146
+ # to zero. if one of the conditions is violated ier is set to ten.
2147
+ # 1) k+1 <= n-k-1 <= m
2148
+ # 2) t(1) <= t(2) <= ... <= t(k+1)
2149
+ # t(n-k) <= t(n-k+1) <= ... <= t(n)
2150
+ # 3) t(k+1) < t(k+2) < ... < t(n-k)
2151
+ # 4) t(k+1) <= x(i) <= t(n-k)
2152
+ # 5) the conditions specified by schoenberg and whitney must hold
2153
+ # for at least one subset of data points, i.e. there must be a
2154
+ # subset of data points y(j) such that
2155
+ # t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
2156
+ x = np.asarray(x)
2157
+ t = np.asarray(t)
2158
+
2159
+ if x.ndim != 1 or t.ndim != 1:
2160
+ raise ValueError(f"Expect `x` and `t` be 1D sequences. Got {x = } and {t = }")
2161
+
2162
+ m = x.shape[0]
2163
+ n = t.shape[0]
2164
+ nk1 = n - k - 1
2165
+
2166
+ # check condition no 1
2167
+ # c 1) k+1 <= n-k-1 <= m
2168
+ if not (k + 1 <= nk1 <= m):
2169
+ raise ValueError(f"Need k+1 <= n-k-1 <= m. Got {m = }, {n = } and {k = }.")
2170
+
2171
+ # check condition no 2
2172
+ # c 2) t(1) <= t(2) <= ... <= t(k+1)
2173
+ # c t(n-k) <= t(n-k+1) <= ... <= t(n)
2174
+ if (t[:k+1] > t[1:k+2]).any():
2175
+ raise ValueError(f"First k knots must be ordered; got {t = }.")
2176
+
2177
+ if (t[nk1:] < t[nk1-1:-1]).any():
2178
+ raise ValueError(f"Last k knots must be ordered; got {t = }.")
2179
+
2180
+ # c check condition no 3
2181
+ # c 3) t(k+1) < t(k+2) < ... < t(n-k)
2182
+ if (t[k+1:n-k] <= t[k:n-k-1]).any():
2183
+ raise ValueError(f"Internal knots must be distinct. Got {t = }.")
2184
+
2185
+ # c check condition no 4
2186
+ # c 4) t(k+1) <= x(i) <= t(n-k)
2187
+ # NB: FITPACK's fpchec only checks x[0] & x[-1], so we follow.
2188
+ if (x[0] < t[k]) or (x[-1] > t[n-k-1]):
2189
+ raise ValueError(f"Out of bounds: {x = } and {t = }.")
2190
+
2191
+ # c check condition no 5
2192
+ # c 5) the conditions specified by schoenberg and whitney must hold
2193
+ # c for at least one subset of data points, i.e. there must be a
2194
+ # c subset of data points y(j) such that
2195
+ # c t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
2196
+ mesg = f"Schoenberg-Whitney condition is violated with {t = } and {x = }."
2197
+
2198
+ if (x[0] >= t[k+1]) or (x[-1] <= t[n-k-2]):
2199
+ raise ValueError(mesg)
2200
+
2201
+ m = x.shape[0]
2202
+ l = k+1
2203
+ nk3 = n - k - 3
2204
+ if nk3 < 2:
2205
+ return
2206
+ for j in range(1, nk3+1):
2207
+ tj = t[j]
2208
+ l += 1
2209
+ tl = t[l]
2210
+ i = np.argmax(x > tj)
2211
+ if i >= m-1:
2212
+ raise ValueError(mesg)
2213
+ if x[i] >= tl:
2214
+ raise ValueError(mesg)
2215
+ return
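The checks above mirror FITPACK's ``fpchec`` routine. A short sketch of how they fire in practice, assuming ``fpcheck`` is imported from the private module shown in this file (the import path may change between releases):

import numpy as np
from scipy.interpolate._bsplines import fpcheck

x = np.linspace(0, 1, 10)
k = 3
# A clamped cubic knot vector over [0, 1] with one interior knot: consistent,
# so fpcheck returns None.
t = np.r_[[0.0] * (k + 1), [0.5], [1.0] * (k + 1)]
fpcheck(x, t, k)

# Interior knots placed outside the data range violate the Schoenberg-Whitney
# conditions, so a ValueError is raised.
t_bad = np.r_[[0.0] * (k + 1), [2.0], [3.0] * (k + 1)]
try:
    fpcheck(x, t_bad, k)
except ValueError as exc:
    print(exc)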
venv/lib/python3.10/site-packages/scipy/interpolate/_cubic.py ADDED
@@ -0,0 +1,970 @@
1
+ """Interpolation algorithms using piecewise cubic polynomials."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from scipy.linalg import solve, solve_banded
12
+
13
+ from . import PPoly
14
+ from ._polyint import _isscalar
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Literal
18
+
19
+ __all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
20
+ "Akima1DInterpolator", "CubicSpline"]
21
+
22
+
23
+ def prepare_input(x, y, axis, dydx=None):
24
+ """Prepare input for cubic spline interpolators.
25
+
26
+ All data are converted to numpy arrays and checked for correctness.
27
+ Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
28
+ axis. The value of `axis` is converted to lie in
29
+ [0, number of dimensions of `y`).
30
+ """
31
+
32
+ x, y = map(np.asarray, (x, y))
33
+ if np.issubdtype(x.dtype, np.complexfloating):
34
+ raise ValueError("`x` must contain real values.")
35
+ x = x.astype(float)
36
+
37
+ if np.issubdtype(y.dtype, np.complexfloating):
38
+ dtype = complex
39
+ else:
40
+ dtype = float
41
+
42
+ if dydx is not None:
43
+ dydx = np.asarray(dydx)
44
+ if y.shape != dydx.shape:
45
+ raise ValueError("The shapes of `y` and `dydx` must be identical.")
46
+ if np.issubdtype(dydx.dtype, np.complexfloating):
47
+ dtype = complex
48
+ dydx = dydx.astype(dtype, copy=False)
49
+
50
+ y = y.astype(dtype, copy=False)
51
+ axis = axis % y.ndim
52
+ if x.ndim != 1:
53
+ raise ValueError("`x` must be 1-dimensional.")
54
+ if x.shape[0] < 2:
55
+ raise ValueError("`x` must contain at least 2 elements.")
56
+ if x.shape[0] != y.shape[axis]:
57
+ raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
58
+ "match the length of `x`")
59
+
60
+ if not np.all(np.isfinite(x)):
61
+ raise ValueError("`x` must contain only finite values.")
62
+ if not np.all(np.isfinite(y)):
63
+ raise ValueError("`y` must contain only finite values.")
64
+
65
+ if dydx is not None and not np.all(np.isfinite(dydx)):
66
+ raise ValueError("`dydx` must contain only finite values.")
67
+
68
+ dx = np.diff(x)
69
+ if np.any(dx <= 0):
70
+ raise ValueError("`x` must be a strictly increasing sequence.")
71
+
72
+ y = np.moveaxis(y, axis, 0)
73
+ if dydx is not None:
74
+ dydx = np.moveaxis(dydx, axis, 0)
75
+
76
+ return x, dx, y, axis, dydx
77
+
78
+
79
+ class CubicHermiteSpline(PPoly):
80
+ """Piecewise-cubic interpolator matching values and first derivatives.
81
+
82
+ The result is represented as a `PPoly` instance.
83
+
84
+ Parameters
85
+ ----------
86
+ x : array_like, shape (n,)
87
+ 1-D array containing values of the independent variable.
88
+ Values must be real, finite and in strictly increasing order.
89
+ y : array_like
90
+ Array containing values of the dependent variable. It can have
91
+ arbitrary number of dimensions, but the length along ``axis``
92
+ (see below) must match the length of ``x``. Values must be finite.
93
+ dydx : array_like
94
+ Array containing derivatives of the dependent variable. It can have
95
+ arbitrary number of dimensions, but the length along ``axis``
96
+ (see below) must match the length of ``x``. Values must be finite.
97
+ axis : int, optional
98
+ Axis along which `y` is assumed to be varying. Meaning that for
99
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
100
+ Default is 0.
101
+ extrapolate : {bool, 'periodic', None}, optional
102
+ If bool, determines whether to extrapolate to out-of-bounds points
103
+ based on first and last intervals, or to return NaNs. If 'periodic',
104
+ periodic extrapolation is used. If None (default), it is set to True.
105
+
106
+ Attributes
107
+ ----------
108
+ x : ndarray, shape (n,)
109
+ Breakpoints. The same ``x`` which was passed to the constructor.
110
+ c : ndarray, shape (4, n-1, ...)
111
+ Coefficients of the polynomials on each segment. The trailing
112
+ dimensions match the dimensions of `y`, excluding ``axis``.
113
+ For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
114
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
115
+ axis : int
116
+ Interpolation axis. The same axis which was passed to the
117
+ constructor.
118
+
119
+ Methods
120
+ -------
121
+ __call__
122
+ derivative
123
+ antiderivative
124
+ integrate
125
+ roots
126
+
127
+ See Also
128
+ --------
129
+ Akima1DInterpolator : Akima 1D interpolator.
130
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
131
+ CubicSpline : Cubic spline data interpolator.
132
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
133
+
134
+ Notes
135
+ -----
136
+ If you want to create a higher-order spline matching higher-order
137
+ derivatives, use `BPoly.from_derivatives`.
138
+
139
+ References
140
+ ----------
141
+ .. [1] `Cubic Hermite spline
142
+ <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
143
+ on Wikipedia.
144
+ """
145
+
146
+ def __init__(self, x, y, dydx, axis=0, extrapolate=None):
147
+ if extrapolate is None:
148
+ extrapolate = True
149
+
150
+ x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
151
+
152
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
153
+ slope = np.diff(y, axis=0) / dxr
154
+ t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
155
+
156
+ c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
157
+ c[0] = t / dxr
158
+ c[1] = (slope - dydx[:-1]) / dxr - t
159
+ c[2] = dydx[:-1]
160
+ c[3] = y[:-1]
161
+
162
+ super().__init__(c, x, extrapolate=extrapolate)
163
+ self.axis = axis
164
+
165
+
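# A minimal usage sketch for ``CubicHermiteSpline`` (the data below samples
# f(x) = x**2, for which a Hermite cubic with exact values and derivatives
# reproduces the function exactly on each segment):
#
# >>> import numpy as np
# >>> from scipy.interpolate import CubicHermiteSpline
# >>> x = np.array([0.0, 1.0, 2.0])
# >>> y = x**2                      # values of f
# >>> dydx = 2.0 * x                # exact first derivatives of f
# >>> spl = CubicHermiteSpline(x, y, dydx)
# >>> float(spl(1.5)), float(spl.derivative()(1.5))
# (2.25, 3.0)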
166
+ class PchipInterpolator(CubicHermiteSpline):
167
+ r"""PCHIP 1-D monotonic cubic interpolation.
168
+
169
+ ``x`` and ``y`` are arrays of values used to approximate some function f,
170
+ with ``y = f(x)``. The interpolant uses monotonic cubic splines
171
+ to find the value of new points. (PCHIP stands for Piecewise Cubic
172
+ Hermite Interpolating Polynomial).
173
+
174
+ Parameters
175
+ ----------
176
+ x : ndarray, shape (npoints, )
177
+ A 1-D array of monotonically increasing real values. ``x`` cannot
178
+ include duplicate values (otherwise f is overspecified)
179
+ y : ndarray, shape (..., npoints, ...)
180
+ A N-D array of real values. ``y``'s length along the interpolation
181
+ axis must be equal to the length of ``x``. Use the ``axis``
182
+ parameter to select the interpolation axis.
183
+
184
+ .. deprecated:: 1.13.0
185
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
186
+ If you are trying to use the real components of the passed array,
187
+ use ``np.real`` on ``y``.
188
+
189
+ axis : int, optional
190
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
191
+ to ``axis=0``.
192
+ extrapolate : bool, optional
193
+ Whether to extrapolate to out-of-bounds points based on first
194
+ and last intervals, or to return NaNs.
195
+
196
+ Methods
197
+ -------
198
+ __call__
199
+ derivative
200
+ antiderivative
201
+ roots
202
+
203
+ See Also
204
+ --------
205
+ CubicHermiteSpline : Piecewise-cubic interpolator.
206
+ Akima1DInterpolator : Akima 1D interpolator.
207
+ CubicSpline : Cubic spline data interpolator.
208
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
209
+
210
+ Notes
211
+ -----
212
+ The interpolator preserves monotonicity in the interpolation data and does
213
+ not overshoot if the data is not smooth.
214
+
215
+ The first derivatives are guaranteed to be continuous, but the second
216
+ derivatives may jump at :math:`x_k`.
217
+
218
+ Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
219
+ by using PCHIP algorithm [1]_.
220
+
221
+ Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
222
+ be the slopes at internal points :math:`x_k`.
223
+ If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
224
+ them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
225
+ weighted harmonic mean
226
+
227
+ .. math::
228
+
229
+ \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
230
+
231
+ where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
232
+
233
+ The end slopes are set using a one-sided scheme [2]_.
234
+
235
+
236
+ References
237
+ ----------
238
+ .. [1] F. N. Fritsch and J. Butland,
239
+ A method for constructing local
240
+ monotone piecewise cubic interpolants,
241
+ SIAM J. Sci. Comput., 5(2), 300-304 (1984).
242
+ :doi:`10.1137/0905021`.
243
+ .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
244
+ :doi:`10.1137/1.9780898717952`
245
+
246
+ """
247
+
248
+ def __init__(self, x, y, axis=0, extrapolate=None):
249
+ x, _, y, axis, _ = prepare_input(x, y, axis)
250
+ if np.iscomplexobj(y):
251
+ msg = ("`PchipInterpolator` only works with real values for `y`. "
252
+ "Passing an array with a complex dtype for `y` is deprecated "
253
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
254
+ "use the real components of the passed array, use `np.real` on "
255
+ "the array before passing to `PchipInterpolator`.")
256
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
257
+ xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
258
+ dk = self._find_derivatives(xp, y)
259
+ super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
260
+ self.axis = axis
261
+
262
+ @staticmethod
263
+ def _edge_case(h0, h1, m0, m1):
264
+ # one-sided three-point estimate for the derivative
265
+ d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
266
+
267
+ # try to preserve shape
268
+ mask = np.sign(d) != np.sign(m0)
269
+ mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
270
+ mmm = (~mask) & mask2
271
+
272
+ d[mask] = 0.
273
+ d[mmm] = 3.*m0[mmm]
274
+
275
+ return d
276
+
277
+ @staticmethod
278
+ def _find_derivatives(x, y):
279
+ # Determine the derivatives at the points y_k, d_k, by using
280
+ # the PCHIP algorithm:
281
+ # We choose the derivatives at the point x_k by
282
+ # Let m_k be the slope of the kth segment (between k and k+1)
283
+ # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
284
+ # else use weighted harmonic mean:
285
+ # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
286
+ # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
287
+ # where h_k is the spacing between x_k and x_{k+1}
288
+ y_shape = y.shape
289
+ if y.ndim == 1:
290
+ # So that _edge_case doesn't end up assigning to scalars
291
+ x = x[:, None]
292
+ y = y[:, None]
293
+
294
+ hk = x[1:] - x[:-1]
295
+ mk = (y[1:] - y[:-1]) / hk
296
+
297
+ if y.shape[0] == 2:
298
+ # edge case: only have two points, use linear interpolation
299
+ dk = np.zeros_like(y)
300
+ dk[0] = mk
301
+ dk[1] = mk
302
+ return dk.reshape(y_shape)
303
+
304
+ smk = np.sign(mk)
305
+ condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
306
+
307
+ w1 = 2*hk[1:] + hk[:-1]
308
+ w2 = hk[1:] + 2*hk[:-1]
309
+
310
+ # values where division by zero occurs will be excluded
311
+ # by 'condition' afterwards
312
+ with np.errstate(divide='ignore', invalid='ignore'):
313
+ whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
314
+
315
+ dk = np.zeros_like(y)
316
+ dk[1:-1][condition] = 0.0
317
+ dk[1:-1][~condition] = 1.0 / whmean[~condition]
318
+
319
+ # special case endpoints, as suggested in
320
+ # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
321
+ dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
322
+ dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
323
+
324
+ return dk.reshape(y_shape)
325
+
326
+
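# A short sketch of the shape-preserving behaviour described in the Notes
# above (monotone input data, so the interpolant never overshoots; the data
# values are illustrative):
#
# >>> import numpy as np
# >>> from scipy.interpolate import PchipInterpolator
# >>> x = np.arange(6.0)
# >>> y = np.array([0.0, 0.0, 1.0, 1.0, 5.0, 5.0])
# >>> pchip = PchipInterpolator(x, y)
# >>> xs = np.linspace(0.0, 5.0, 501)
# >>> bool(np.all(np.diff(pchip(xs)) >= 0))
# True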
327
+ def pchip_interpolate(xi, yi, x, der=0, axis=0):
328
+ """
329
+ Convenience function for pchip interpolation.
330
+
331
+ xi and yi are arrays of values used to approximate some function f,
332
+ with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
333
+ to find the value of new points x and the derivatives there.
334
+
335
+ See `scipy.interpolate.PchipInterpolator` for details.
336
+
337
+ Parameters
338
+ ----------
339
+ xi : array_like
340
+ A sorted list of x-coordinates, of length N.
341
+ yi : array_like
342
+ A 1-D array of real values. `yi`'s length along the interpolation
343
+ axis must be equal to the length of `xi`. If N-D array, use axis
344
+ parameter to select correct axis.
345
+
346
+ .. deprecated:: 1.13.0
347
+ Complex data is deprecated and will raise an error in
348
+ SciPy 1.15.0. If you are trying to use the real components of
349
+ the passed array, use ``np.real`` on `yi`.
350
+
351
+ x : scalar or array_like
352
+ Of length M.
353
+ der : int or list, optional
354
+ Derivatives to extract. The 0th derivative can be included to
355
+ return the function value.
356
+ axis : int, optional
357
+ Axis in the yi array corresponding to the x-coordinate values.
358
+
359
+ Returns
360
+ -------
361
+ y : scalar or array_like
362
+ The result, of length R or length M or M by R.
363
+
364
+ See Also
365
+ --------
366
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
367
+
368
+ Examples
369
+ --------
370
+ We can interpolate 2D observed data using pchip interpolation:
371
+
372
+ >>> import numpy as np
373
+ >>> import matplotlib.pyplot as plt
374
+ >>> from scipy.interpolate import pchip_interpolate
375
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
376
+ >>> y_observed = np.sin(x_observed)
377
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
378
+ >>> y = pchip_interpolate(x_observed, y_observed, x)
379
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
380
+ >>> plt.plot(x, y, label="pchip interpolation")
381
+ >>> plt.legend()
382
+ >>> plt.show()
383
+
384
+ """
385
+ P = PchipInterpolator(xi, yi, axis=axis)
386
+
387
+ if der == 0:
388
+ return P(x)
389
+ elif _isscalar(der):
390
+ return P.derivative(der)(x)
391
+ else:
392
+ return [P.derivative(nu)(x) for nu in der]
393
+
394
+
395
+ class Akima1DInterpolator(CubicHermiteSpline):
396
+ r"""
397
+ Akima interpolator
398
+
399
+ Fit piecewise cubic polynomials, given vectors x and y. The interpolation
400
+ method by Akima uses a continuously differentiable sub-spline built from
401
+ piecewise cubic polynomials. The resultant curve passes through the given
402
+ data points and will appear smooth and natural.
403
+
404
+ Parameters
405
+ ----------
406
+ x : ndarray, shape (npoints, )
407
+ 1-D array of monotonically increasing real values.
408
+ y : ndarray, shape (..., npoints, ...)
409
+ N-D array of real values. The length of ``y`` along the interpolation axis
410
+ must be equal to the length of ``x``. Use the ``axis`` parameter to
411
+ select the interpolation axis.
412
+
413
+ .. deprecated:: 1.13.0
414
+ Complex data is deprecated and will raise an error in SciPy 1.15.0.
415
+ If you are trying to use the real components of the passed array,
416
+ use ``np.real`` on ``y``.
417
+
418
+ axis : int, optional
419
+ Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
420
+ to ``axis=0``.
421
+ method : {'akima', 'makima'}, optional
422
+ If ``"makima"``, use the modified Akima interpolation [2]_.
423
+ Defaults to ``"akima"``, use the Akima interpolation [1]_.
424
+
425
+ .. versionadded:: 1.13.0
426
+
427
+ Methods
428
+ -------
429
+ __call__
430
+ derivative
431
+ antiderivative
432
+ roots
433
+
434
+ See Also
435
+ --------
436
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
437
+ CubicSpline : Cubic spline data interpolator.
438
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints
439
+
440
+ Notes
441
+ -----
442
+ .. versionadded:: 0.14
443
+
444
+ Use only for precise data, as the fitted curve passes through the given
445
+ points exactly. This routine is useful for plotting a pleasingly smooth
446
+ curve through a few given points.
447
+
448
+ Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
449
+ the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
450
+ :math:`x_i` is defined as:
451
+
452
+ .. math::
453
+
454
+ d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
455
+
456
+ In the Akima interpolation [1]_ (``method="akima"``), the weights are:
457
+
458
+ .. math::
459
+
460
+ \begin{aligned}
461
+ w_1 &= |\delta_{i+1} - \delta_i| \\
462
+ w_2 &= |\delta_{i-1} - \delta_{i-2}|
463
+ \end{aligned}
464
+
465
+ In the modified Akima interpolation [2]_ (``method="makima"``),
466
+ to eliminate overshoot and avoid edge cases of both numerator and
467
+ denominator being equal to 0, the weights are modified as follows:
468
+
469
+ .. math::
470
+
471
+ \begin{align*}
472
+ w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
473
+ w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
474
+ \end{align*}
475
+
476
+ Examples
477
+ --------
478
+ Comparison of ``method="akima"`` and ``method="makima"``:
479
+
480
+ >>> import numpy as np
481
+ >>> from scipy.interpolate import Akima1DInterpolator
482
+ >>> import matplotlib.pyplot as plt
483
+ >>> x = np.linspace(1, 7, 7)
484
+ >>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
485
+ >>> xs = np.linspace(min(x), max(x), num=100)
486
+ >>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
487
+ >>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
488
+
489
+ >>> fig, ax = plt.subplots()
490
+ >>> ax.plot(x, y, "o", label="data")
491
+ >>> ax.plot(xs, y_akima, label="akima")
492
+ >>> ax.plot(xs, y_makima, label="makima")
493
+ >>> ax.legend()
494
+ >>> fig.show()
495
+
496
+ The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
497
+
498
+ References
499
+ ----------
500
+ .. [1] A new method of interpolation and smooth curve fitting based
501
+ on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
502
+ 589-602. :doi:`10.1145/321607.321609`
503
+ .. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
504
+ https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
505
+
506
+ """
507
+
508
+ def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima"):
509
+ if method not in {"akima", "makima"}:
510
+ raise NotImplementedError(f"`method`={method} is unsupported.")
511
+ # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
512
+ # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
513
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
514
+
515
+ if np.iscomplexobj(y):
516
+ msg = ("`Akima1DInterpolator` only works with real values for `y`. "
517
+ "Passing an array with a complex dtype for `y` is deprecated "
518
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
519
+ "use the real components of the passed array, use `np.real` on "
520
+ "the array before passing to `Akima1DInterpolator`.")
521
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
522
+
523
+ # determine slopes between breakpoints
524
+ m = np.empty((x.size + 3, ) + y.shape[1:])
525
+ dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
526
+ m[2:-2] = np.diff(y, axis=0) / dx
527
+
528
+ # add two additional points on the left ...
529
+ m[1] = 2. * m[2] - m[3]
530
+ m[0] = 2. * m[1] - m[2]
531
+ # ... and on the right
532
+ m[-2] = 2. * m[-3] - m[-4]
533
+ m[-1] = 2. * m[-2] - m[-3]
534
+
535
+ # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
536
+ # defined. This is the fill value:
537
+ t = .5 * (m[3:] + m[:-3])
538
+ # get the denominator of the slope t
539
+ dm = np.abs(np.diff(m, axis=0))
540
+ if method == "makima":
541
+ pm = np.abs(m[1:] + m[:-1])
542
+ f1 = dm[2:] + 0.5 * pm[2:]
543
+ f2 = dm[:-2] + 0.5 * pm[:-2]
544
+ else:
545
+ f1 = dm[2:]
546
+ f2 = dm[:-2]
547
+ f12 = f1 + f2
548
+ # These are the mask of where the slope at breakpoint is defined:
549
+ ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
550
+ x_ind, y_ind = ind[0], ind[1:]
551
+ # Set the slope at breakpoint
552
+ t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
553
+ f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
554
+
555
+ super().__init__(x, y, t, axis=0, extrapolate=False)
556
+ self.axis = axis
557
+
558
+ def extend(self, c, x, right=True):
559
+ raise NotImplementedError("Extending a 1-D Akima interpolator is not "
560
+ "yet implemented")
561
+
562
+ # These are inherited from PPoly, but they do not produce an Akima
563
+ # interpolator. Hence stub them out.
564
+ @classmethod
565
+ def from_spline(cls, tck, extrapolate=None):
566
+ raise NotImplementedError("This method does not make sense for "
567
+ "an Akima interpolator.")
568
+
569
+ @classmethod
570
+ def from_bernstein_basis(cls, bp, extrapolate=None):
571
+ raise NotImplementedError("This method does not make sense for "
572
+ "an Akima interpolator.")
573
+
574
+
575
+ class CubicSpline(CubicHermiteSpline):
576
+ """Cubic spline data interpolator.
577
+
578
+ Interpolate data with a piecewise cubic polynomial which is twice
579
+ continuously differentiable [1]_. The result is represented as a `PPoly`
580
+ instance with breakpoints matching the given data.
581
+
582
+ Parameters
583
+ ----------
584
+ x : array_like, shape (n,)
585
+ 1-D array containing values of the independent variable.
586
+ Values must be real, finite and in strictly increasing order.
587
+ y : array_like
588
+ Array containing values of the dependent variable. It can have
589
+ arbitrary number of dimensions, but the length along ``axis``
590
+ (see below) must match the length of ``x``. Values must be finite.
591
+ axis : int, optional
592
+ Axis along which `y` is assumed to be varying. Meaning that for
593
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
594
+ Default is 0.
595
+ bc_type : string or 2-tuple, optional
596
+ Boundary condition type. Two additional equations, given by the
597
+ boundary conditions, are required to determine all coefficients of
598
+ polynomials on each segment [2]_.
599
+
600
+ If `bc_type` is a string, then the specified condition will be applied
601
+ at both ends of a spline. Available conditions are:
602
+
603
+ * 'not-a-knot' (default): The first and second segment at a curve end
604
+ are the same polynomial. It is a good default when there is no
605
+ information on boundary conditions.
606
+ * 'periodic': The interpolated function is assumed to be periodic
607
+ of period ``x[-1] - x[0]``. The first and last value of `y` must be
608
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
609
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
610
+ * 'clamped': The first derivatives at curve ends are zero. Assuming
611
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
612
+ * 'natural': The second derivatives at curve ends are zero. Assuming
613
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
614
+
615
+ If `bc_type` is a 2-tuple, the first and the second value will be
616
+ applied at the curve start and end respectively. The tuple values can
617
+ be one of the previously mentioned strings (except 'periodic') or a
618
+ tuple `(order, deriv_values)` that allows specifying arbitrary
619
+ derivatives at curve ends:
620
+
621
+ * `order`: the derivative order, 1 or 2.
622
+ * `deriv_value`: array_like containing derivative values, shape must
623
+ be the same as `y`, excluding ``axis`` dimension. For example, if
624
+ `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
625
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
626
+ and have the shape (n0, n1).
627
+ extrapolate : {bool, 'periodic', None}, optional
628
+ If bool, determines whether to extrapolate to out-of-bounds points
629
+ based on first and last intervals, or to return NaNs. If 'periodic',
630
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
631
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
632
+
633
+ Attributes
634
+ ----------
635
+ x : ndarray, shape (n,)
636
+ Breakpoints. The same ``x`` which was passed to the constructor.
637
+ c : ndarray, shape (4, n-1, ...)
638
+ Coefficients of the polynomials on each segment. The trailing
639
+ dimensions match the dimensions of `y`, excluding ``axis``.
640
+ For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
641
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
642
+ axis : int
643
+ Interpolation axis. The same axis which was passed to the
644
+ constructor.
645
+
646
+ Methods
647
+ -------
648
+ __call__
649
+ derivative
650
+ antiderivative
651
+ integrate
652
+ roots
653
+
654
+ See Also
655
+ --------
656
+ Akima1DInterpolator : Akima 1D interpolator.
657
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
658
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
659
+
660
+ Notes
661
+ -----
662
+ Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
663
+ former controls only construction of a spline, and the latter only
664
+ evaluation.
665
+
666
+ When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
667
+ a condition that the first derivative is equal to the linear interpolant
668
+ slope. When both boundary conditions are 'not-a-knot' and n = 3, the
669
+ solution is sought as a parabola passing through given points.
670
+
671
+ When 'not-a-knot' boundary conditions are applied to both ends, the
672
+ resulting spline will be the same as returned by `splrep` (with ``s=0``)
673
+ and `InterpolatedUnivariateSpline`, but these two methods use a
674
+ representation in B-spline basis.
675
+
676
+ .. versionadded:: 0.18.0
677
+
678
+ Examples
679
+ --------
680
+ In this example the cubic spline is used to interpolate a sampled sinusoid.
681
+ You can see that the spline continuity property holds for the first and
682
+ second derivatives and is violated only for the third derivative.
683
+
684
+ >>> import numpy as np
685
+ >>> from scipy.interpolate import CubicSpline
686
+ >>> import matplotlib.pyplot as plt
687
+ >>> x = np.arange(10)
688
+ >>> y = np.sin(x)
689
+ >>> cs = CubicSpline(x, y)
690
+ >>> xs = np.arange(-0.5, 9.6, 0.1)
691
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
692
+ >>> ax.plot(x, y, 'o', label='data')
693
+ >>> ax.plot(xs, np.sin(xs), label='true')
694
+ >>> ax.plot(xs, cs(xs), label="S")
695
+ >>> ax.plot(xs, cs(xs, 1), label="S'")
696
+ >>> ax.plot(xs, cs(xs, 2), label="S''")
697
+ >>> ax.plot(xs, cs(xs, 3), label="S'''")
698
+ >>> ax.set_xlim(-0.5, 9.5)
699
+ >>> ax.legend(loc='lower left', ncol=2)
700
+ >>> plt.show()
701
+
702
+ In the second example, the unit circle is interpolated with a spline. A
703
+ periodic boundary condition is used. You can see that the first derivative
704
+ values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
705
+ computed. Note that a circle cannot be exactly represented by a cubic
706
+ spline. To increase precision, more breakpoints would be required.
707
+
708
+ >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
709
+ >>> y = np.c_[np.cos(theta), np.sin(theta)]
710
+ >>> cs = CubicSpline(theta, y, bc_type='periodic')
711
+ >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
712
+ ds/dx=0.0 ds/dy=1.0
713
+ >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
714
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
715
+ >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
716
+ >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
717
+ >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
718
+ >>> ax.axes.set_aspect('equal')
719
+ >>> ax.legend(loc='center')
720
+ >>> plt.show()
721
+
722
+ The third example is the interpolation of a polynomial y = x**3 on the
723
+ interval 0 <= x <= 1. A cubic spline can represent this function exactly.
724
+ To achieve that we need to specify values and first derivatives at
725
+ endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
726
+ y'(1) = 3.
727
+
728
+ >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
729
+ >>> x = np.linspace(0, 1)
730
+ >>> np.allclose(x**3, cs(x))
731
+ True
732
+
733
+ References
734
+ ----------
735
+ .. [1] `Cubic Spline Interpolation
736
+ <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
737
+ on Wikiversity.
738
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
739
+ """
740
+
741
+ def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
742
+ x, dx, y, axis, _ = prepare_input(x, y, axis)
743
+ n = len(x)
744
+
745
+ bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
746
+
747
+ if extrapolate is None:
748
+ if bc[0] == 'periodic':
749
+ extrapolate = 'periodic'
750
+ else:
751
+ extrapolate = True
752
+
753
+ if y.size == 0:
754
+ # bail out early for zero-sized arrays
755
+ s = np.zeros_like(y)
756
+ else:
757
+ dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
758
+ slope = np.diff(y, axis=0) / dxr
759
+
760
+ # If bc is 'not-a-knot' this change is just a convention.
761
+ # If bc is 'periodic' then we already checked that y[0] == y[-1],
762
+ # and the spline is just a constant, we handle this case in the
763
+ # same way by setting the first derivatives to slope, which is 0.
764
+ if n == 2:
765
+ if bc[0] in ['not-a-knot', 'periodic']:
766
+ bc[0] = (1, slope[0])
767
+ if bc[1] in ['not-a-knot', 'periodic']:
768
+ bc[1] = (1, slope[0])
769
+
770
+ # This is a special case, when both conditions are 'not-a-knot'
771
+ # and n == 3. In this case 'not-a-knot' can't be handled regularly
772
+ # as the both conditions are identical. We handle this case by
773
+ # constructing a parabola passing through given points.
774
+ if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
775
+ A = np.zeros((3, 3)) # This is a standard matrix.
776
+ b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
777
+
778
+ A[0, 0] = 1
779
+ A[0, 1] = 1
780
+ A[1, 0] = dx[1]
781
+ A[1, 1] = 2 * (dx[0] + dx[1])
782
+ A[1, 2] = dx[0]
783
+ A[2, 1] = 1
784
+ A[2, 2] = 1
785
+
786
+ b[0] = 2 * slope[0]
787
+ b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
788
+ b[2] = 2 * slope[1]
789
+
790
+ s = solve(A, b, overwrite_a=True, overwrite_b=True,
791
+ check_finite=False)
792
+ elif n == 3 and bc[0] == 'periodic':
793
+ # In case when number of points is 3 we compute the derivatives
794
+ # manually
795
+ t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
796
+ s = np.broadcast_to(t, (n,) + y.shape[1:])
797
+ else:
798
+ # Find derivative values at each x[i] by solving a tridiagonal
799
+ # system.
800
+ A = np.zeros((3, n)) # This is a banded matrix representation.
801
+ b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
802
+
803
+ # Filling the system for i=1..n-2
804
+ # (x[i-1] - x[i]) * s[i-1] +\
805
+ # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
806
+ # (x[i] - x[i-1]) * s[i+1] =\
807
+ # 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
808
+ # (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
809
+
810
+ A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
811
+ A[0, 2:] = dx[:-1] # The upper diagonal
812
+ A[-1, :-2] = dx[1:] # The lower diagonal
813
+
814
+ b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
815
+
816
+ bc_start, bc_end = bc
817
+
818
+ if bc_start == 'periodic':
819
+ # Due to the periodicity, and because y[-1] = y[0], the
820
+ # linear system has (n-1) unknowns/equations instead of n:
821
+ A = A[:, 0:-1]
822
+ A[1, 0] = 2 * (dx[-1] + dx[0])
823
+ A[0, 1] = dx[-1]
824
+
825
+ b = b[:-1]
826
+
827
+ # Also, due to the periodicity, the system is not tri-diagonal.
828
+ # We need to compute a "condensed" matrix of shape (n-2, n-2).
829
+ # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
830
+ # for more explanations.
831
+ # The condensed matrix is obtained by removing the last column
832
+ # and last row of the (n-1, n-1) system matrix. The removed
833
+ # values are saved in scalar variables with the (n-1, n-1)
834
+ # system matrix indices forming their names:
835
+ a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
836
+ a_m1_m2 = dx[-1]
837
+ a_m1_m1 = 2 * (dx[-1] + dx[-2])
838
+ a_m2_m1 = dx[-3]
839
+ a_0_m1 = dx[0]
840
+
841
+ b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
842
+ b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
843
+
844
+ Ac = A[:, :-1]
845
+ b1 = b[:-1]
846
+ b2 = np.zeros_like(b1)
847
+ b2[0] = -a_0_m1
848
+ b2[-1] = -a_m2_m1
849
+
850
+ # s1 and s2 are the solutions of (n-2, n-2) system
851
+ s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
852
+ overwrite_b=False, check_finite=False)
853
+
854
+ s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
855
+ overwrite_b=False, check_finite=False)
856
+
857
+ # computing the s[n-2] solution:
858
+ s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
859
+ (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
860
+
861
+ # s is the solution of the (n, n) system:
862
+ s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
863
+ s[:-2] = s1 + s_m1 * s2
864
+ s[-2] = s_m1
865
+ s[-1] = s[0]
866
+ else:
867
+ if bc_start == 'not-a-knot':
868
+ A[1, 0] = dx[1]
869
+ A[0, 1] = x[2] - x[0]
870
+ d = x[2] - x[0]
871
+ b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
872
+ dxr[0]**2 * slope[1]) / d
873
+ elif bc_start[0] == 1:
874
+ A[1, 0] = 1
875
+ A[0, 1] = 0
876
+ b[0] = bc_start[1]
877
+ elif bc_start[0] == 2:
878
+ A[1, 0] = 2 * dx[0]
879
+ A[0, 1] = dx[0]
880
+ b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
881
+
882
+ if bc_end == 'not-a-knot':
883
+ A[1, -1] = dx[-2]
884
+ A[-1, -2] = x[-1] - x[-3]
885
+ d = x[-1] - x[-3]
886
+ b[-1] = ((dxr[-1]**2*slope[-2] +
887
+ (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
888
+ elif bc_end[0] == 1:
889
+ A[1, -1] = 1
890
+ A[-1, -2] = 0
891
+ b[-1] = bc_end[1]
892
+ elif bc_end[0] == 2:
893
+ A[1, -1] = 2 * dx[-1]
894
+ A[-1, -2] = dx[-1]
895
+ b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
896
+
897
+ s = solve_banded((1, 1), A, b, overwrite_ab=True,
898
+ overwrite_b=True, check_finite=False)
899
+
900
+ super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
901
+ self.axis = axis
902
+
903
+ @staticmethod
904
+ def _validate_bc(bc_type, y, expected_deriv_shape, axis):
905
+ """Validate and prepare boundary conditions.
906
+
907
+ Returns
908
+ -------
909
+ validated_bc : 2-tuple
910
+ Boundary conditions for a curve start and end.
911
+ y : ndarray
912
+ y cast to complex dtype if one of the boundary conditions has
913
+ complex dtype.
914
+ """
915
+ if isinstance(bc_type, str):
916
+ if bc_type == 'periodic':
917
+ if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
918
+ raise ValueError(
919
+ f"The first and last `y` point along axis {axis} must "
920
+ "be identical (within machine precision) when "
921
+ "bc_type='periodic'.")
922
+
923
+ bc_type = (bc_type, bc_type)
924
+
925
+ else:
926
+ if len(bc_type) != 2:
927
+ raise ValueError("`bc_type` must contain 2 elements to "
928
+ "specify start and end conditions.")
929
+
930
+ if 'periodic' in bc_type:
931
+ raise ValueError("'periodic' `bc_type` is defined for both "
932
+ "curve ends and cannot be used with other "
933
+ "boundary conditions.")
934
+
935
+ validated_bc = []
936
+ for bc in bc_type:
937
+ if isinstance(bc, str):
938
+ if bc == 'clamped':
939
+ validated_bc.append((1, np.zeros(expected_deriv_shape)))
940
+ elif bc == 'natural':
941
+ validated_bc.append((2, np.zeros(expected_deriv_shape)))
942
+ elif bc in ['not-a-knot', 'periodic']:
943
+ validated_bc.append(bc)
944
+ else:
945
+ raise ValueError(f"bc_type={bc} is not allowed.")
946
+ else:
947
+ try:
948
+ deriv_order, deriv_value = bc
949
+ except Exception as e:
950
+ raise ValueError(
951
+ "A specified derivative value must be "
952
+ "given in the form (order, value)."
953
+ ) from e
954
+
955
+ if deriv_order not in [1, 2]:
956
+ raise ValueError("The specified derivative order must "
957
+ "be 1 or 2.")
958
+
959
+ deriv_value = np.asarray(deriv_value)
960
+ if deriv_value.shape != expected_deriv_shape:
961
+ raise ValueError(
962
+ "`deriv_value` shape {} is not the expected one {}."
963
+ .format(deriv_value.shape, expected_deriv_shape))
964
+
965
+ if np.issubdtype(deriv_value.dtype, np.complexfloating):
966
+ y = y.astype(complex, copy=False)
967
+
968
+ validated_bc.append((deriv_order, deriv_value))
969
+
970
+ return validated_bc, y
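The interpolator classes defined in this file make different trade-offs between smoothness and overshoot. A small sketch (data chosen only for illustration) that makes the differences discussed in the docstrings visible on step-like data:

import numpy as np
from scipy.interpolate import Akima1DInterpolator, CubicSpline, PchipInterpolator

x = np.arange(8.0)
y = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])   # step-like data
xs = np.linspace(0.0, 7.0, 701)

interpolators = {
    "CubicSpline": CubicSpline(x, y),                    # global, twice differentiable
    "PchipInterpolator": PchipInterpolator(x, y),        # monotone, shape-preserving
    "Akima1DInterpolator": Akima1DInterpolator(x, y),    # local sub-spline
}
for name, f in interpolators.items():
    vals = f(xs)
    # The min/max show whether the interpolant leaves the data range [0, 1].
    print(f"{name}: min={vals.min():.3f}, max={vals.max():.3f}")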
venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (91.4 kB).
 
venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack2.py ADDED
@@ -0,0 +1,2362 @@
1
+ """
2
+ fitpack --- curve and surface fitting with splines
3
+
4
+ fitpack is based on a collection of Fortran routines DIERCKX
5
+ by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
6
+ to double routines by Pearu Peterson.
7
+ """
8
+ # Created by Pearu Peterson, June,August 2003
9
+ __all__ = [
10
+ 'UnivariateSpline',
11
+ 'InterpolatedUnivariateSpline',
12
+ 'LSQUnivariateSpline',
13
+ 'BivariateSpline',
14
+ 'LSQBivariateSpline',
15
+ 'SmoothBivariateSpline',
16
+ 'LSQSphereBivariateSpline',
17
+ 'SmoothSphereBivariateSpline',
18
+ 'RectBivariateSpline',
19
+ 'RectSphereBivariateSpline']
20
+
21
+
22
+ import warnings
23
+
24
+ from numpy import zeros, concatenate, ravel, diff, array, ones # noqa:F401
25
+ import numpy as np
26
+
27
+ from . import _fitpack_impl
28
+ from . import dfitpack
29
+
30
+
31
+ dfitpack_int = dfitpack.types.intvar.dtype
32
+
33
+
34
+ # ############### Univariate spline ####################
35
+
36
+ _curfit_messages = {1: """
37
+ The required storage space exceeds the available storage space, as
38
+ specified by the parameter nest: nest too small. If nest is already
39
+ large (say nest > m/2), it may also indicate that s is too small.
40
+ The approximation returned is the weighted least-squares spline
41
+ according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
42
+ gives the corresponding weighted sum of squared residuals (fp>s).
43
+ """,
44
+ 2: """
45
+ A theoretically impossible result was found during the iteration
46
+ process for finding a smoothing spline with fp = s: s too small.
47
+ There is an approximation returned but the corresponding weighted sum
48
+ of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
49
+ 3: """
50
+ The maximal number of iterations maxit (set to 20 by the program)
51
+ allowed for finding a smoothing spline with fp=s has been reached: s
52
+ too small.
53
+ There is an approximation returned but the corresponding weighted sum
54
+ of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
55
+ 10: """
56
+ Error on entry, no approximation returned. The following conditions
57
+ must hold:
58
+ xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
59
+ if iopt=-1:
60
+ xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
61
+ }
62
+
63
+
64
+ # UnivariateSpline, ext parameter can be an int or a string
65
+ _extrap_modes = {0: 0, 'extrapolate': 0,
66
+ 1: 1, 'zeros': 1,
67
+ 2: 2, 'raise': 2,
68
+ 3: 3, 'const': 3}
69
+
70
+
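# A small sketch of the extrapolation modes mapped above (the data values
# are illustrative; `ext` is documented on `UnivariateSpline` below):
#
# >>> import numpy as np
# >>> from scipy.interpolate import UnivariateSpline
# >>> x = np.linspace(0.0, 3.0, 10)
# >>> y = x**2
# >>> spl_const = UnivariateSpline(x, y, s=0, ext='const')
# >>> bool(np.isclose(spl_const(10.0), y[-1]))   # clamped to the boundary value
# True
# >>> spl_zeros = UnivariateSpline(x, y, s=0, ext='zeros')
# >>> float(spl_zeros(10.0))
# 0.0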
71
+ class UnivariateSpline:
72
+ """
73
+ 1-D smoothing spline fit to a given set of data points.
74
+
75
+ Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
76
+ specifies the number of knots by specifying a smoothing condition.
77
+
78
+ Parameters
79
+ ----------
80
+ x : (N,) array_like
81
+ 1-D array of independent input data. Must be increasing;
82
+ must be strictly increasing if `s` is 0.
83
+ y : (N,) array_like
84
+ 1-D array of dependent input data, of the same length as `x`.
85
+ w : (N,) array_like, optional
86
+ Weights for spline fitting. Must be positive. If `w` is None,
87
+ weights are all 1. Default is None.
88
+ bbox : (2,) array_like, optional
89
+ 2-sequence specifying the boundary of the approximation interval. If
90
+ `bbox` is None, ``bbox=[x[0], x[-1]]``. Default is None.
91
+ k : int, optional
92
+ Degree of the smoothing spline. Must be 1 <= `k` <= 5.
93
+ ``k = 3`` is a cubic spline. Default is 3.
94
+ s : float or None, optional
95
+ Positive smoothing factor used to choose the number of knots. Number
96
+ of knots will be increased until the smoothing condition is satisfied::
97
+
98
+ sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
99
+
100
+ However, because of numerical issues, the actual condition is::
101
+
102
+ abs(sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) - s) < 0.001 * s
103
+
104
+ If `s` is None, `s` will be set as `len(w)` for a smoothing spline
105
+ that uses all data points.
106
+ If 0, spline will interpolate through all data points. This is
107
+ equivalent to `InterpolatedUnivariateSpline`.
108
+ Default is None.
109
+ The user can use `s` to control the tradeoff between closeness
110
+ and smoothness of fit. Larger `s` means more smoothing while smaller
111
+ values of `s` indicate less smoothing.
112
+ Recommended values of `s` depend on the weights, `w`. If the weights
113
+ represent the inverse of the standard-deviation of `y`, then a good
114
+ `s` value should be found in the range (m-sqrt(2*m),m+sqrt(2*m))
115
+ where m is the number of datapoints in `x`, `y`, and `w`. This means
116
+ ``s = len(w)`` should be a good value if ``1/w[i]`` is an
117
+ estimate of the standard deviation of ``y[i]``.
118
+ ext : int or str, optional
119
+ Controls the extrapolation mode for elements
120
+ not in the interval defined by the knot sequence.
121
+
122
+ * if ext=0 or 'extrapolate', return the extrapolated value.
123
+ * if ext=1 or 'zeros', return 0
124
+ * if ext=2 or 'raise', raise a ValueError
125
+ * if ext=3 or 'const', return the boundary value.
126
+
127
+ Default is 0.
128
+
129
+ check_finite : bool, optional
130
+ Whether to check that the input arrays contain only finite numbers.
131
+ Disabling may give a performance gain, but may result in problems
132
+ (crashes, non-termination or non-sensical results) if the inputs
133
+ do contain infinities or NaNs.
134
+ Default is False.
135
+
136
+ See Also
137
+ --------
138
+ BivariateSpline :
139
+ a base class for bivariate splines.
140
+ SmoothBivariateSpline :
141
+ a smoothing bivariate spline through the given points
142
+ LSQBivariateSpline :
143
+ a bivariate spline using weighted least-squares fitting
144
+ RectSphereBivariateSpline :
145
+ a bivariate spline over a rectangular mesh on a sphere
146
+ SmoothSphereBivariateSpline :
147
+ a smoothing bivariate spline in spherical coordinates
148
+ LSQSphereBivariateSpline :
149
+ a bivariate spline in spherical coordinates using weighted
150
+ least-squares fitting
151
+ RectBivariateSpline :
152
+ a bivariate spline over a rectangular mesh
153
+ InterpolatedUnivariateSpline :
154
+ an interpolating univariate spline for a given set of data points.
155
+ bisplrep :
156
+ a function to find a bivariate B-spline representation of a surface
157
+ bisplev :
158
+ a function to evaluate a bivariate B-spline and its derivatives
159
+ splrep :
160
+ a function to find the B-spline representation of a 1-D curve
161
+ splev :
162
+ a function to evaluate a B-spline or its derivatives
163
+ sproot :
164
+ a function to find the roots of a cubic B-spline
165
+ splint :
166
+ a function to evaluate the definite integral of a B-spline between two
167
+ given points
168
+ spalde :
169
+ a function to evaluate all derivatives of a B-spline
170
+
171
+ Notes
172
+ -----
173
+ The number of data points must be larger than the spline degree `k`.
174
+
175
+ **NaN handling**: If the input arrays contain ``nan`` values, the result
176
+ is not useful, since the underlying spline fitting routines cannot deal
177
+ with ``nan``. A workaround is to use zero weights for not-a-number
178
+ data points:
179
+
180
+ >>> import numpy as np
181
+ >>> from scipy.interpolate import UnivariateSpline
182
+ >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
183
+ >>> w = np.isnan(y)
184
+ >>> y[w] = 0.
185
+ >>> spl = UnivariateSpline(x, y, w=~w)
186
+
187
+ Notice the need to replace a ``nan`` by a numerical value (precise value
188
+ does not matter as long as the corresponding weight is zero.)
189
+
190
+ References
191
+ ----------
192
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
193
+
194
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
195
+ integration of experimental data using spline functions",
196
+ J.Comp.Appl.Maths 1 (1975) 165-184.
197
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
198
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
199
+ 1286-1304.
200
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
201
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
202
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
203
+ Numerical Analysis, Oxford University Press, 1993.
204
+
205
+ Examples
206
+ --------
207
+ >>> import numpy as np
208
+ >>> import matplotlib.pyplot as plt
209
+ >>> from scipy.interpolate import UnivariateSpline
210
+ >>> rng = np.random.default_rng()
211
+ >>> x = np.linspace(-3, 3, 50)
212
+ >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
213
+ >>> plt.plot(x, y, 'ro', ms=5)
214
+
215
+ Use the default value for the smoothing parameter:
216
+
217
+ >>> spl = UnivariateSpline(x, y)
218
+ >>> xs = np.linspace(-3, 3, 1000)
219
+ >>> plt.plot(xs, spl(xs), 'g', lw=3)
220
+
221
+ Manually change the amount of smoothing:
222
+
223
+ >>> spl.set_smoothing_factor(0.5)
224
+ >>> plt.plot(xs, spl(xs), 'b', lw=3)
225
+ >>> plt.show()
226
+
227
+ """
228
+
229
+ def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
230
+ ext=0, check_finite=False):
231
+
232
+ x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, s, ext,
233
+ check_finite)
234
+
235
+ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
236
+ data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
237
+ xe=bbox[1], s=s)
238
+ if data[-1] == 1:
239
+ # nest too small, setting to maximum bound
240
+ data = self._reset_nest(data)
241
+ self._data = data
242
+ self._reset_class()
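
The NaN workaround described in the class Notes above (zero weights for
non-finite samples) can be wrapped in a small helper. A minimal sketch, using
a hypothetical name ``fit_spline_with_nans``::

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    def fit_spline_with_nans(x, y, **kwargs):
        # Zero-weight the NaN samples instead of dropping them, as the Notes
        # suggest; the placeholder value is irrelevant because its weight is 0.
        y = np.asarray(y, dtype=float).copy()
        bad = ~np.isfinite(y)
        y[bad] = 0.0
        w = np.where(bad, 0.0, 1.0)
        return UnivariateSpline(x, y, w=w, **kwargs)

    x = np.linspace(0, 10, 30)
    y = np.sin(x)
    y[[5, 17]] = np.nan                # corrupt two samples
    spl = fit_spline_with_nans(x, y)
    print(spl(2.5))                    # finite output despite the NaN inputs
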
243
+
244
+ @staticmethod
245
+ def validate_input(x, y, w, bbox, k, s, ext, check_finite):
246
+ x, y, bbox = np.asarray(x), np.asarray(y), np.asarray(bbox)
247
+ if w is not None:
248
+ w = np.asarray(w)
249
+ if check_finite:
250
+ w_finite = np.isfinite(w).all() if w is not None else True
251
+ if (not np.isfinite(x).all() or not np.isfinite(y).all() or
252
+ not w_finite):
253
+ raise ValueError("x and y array must not contain "
254
+ "NaNs or infs.")
255
+ if s is None or s > 0:
256
+ if not np.all(diff(x) >= 0.0):
257
+ raise ValueError("x must be increasing if s > 0")
258
+ else:
259
+ if not np.all(diff(x) > 0.0):
260
+ raise ValueError("x must be strictly increasing if s = 0")
261
+ if x.size != y.size:
262
+ raise ValueError("x and y should have a same length")
263
+ elif w is not None and not x.size == y.size == w.size:
264
+ raise ValueError("x, y, and w should have a same length")
265
+ elif bbox.shape != (2,):
266
+ raise ValueError("bbox shape should be (2,)")
267
+ elif not (1 <= k <= 5):
268
+ raise ValueError("k should be 1 <= k <= 5")
269
+ elif s is not None and not s >= 0.0:
270
+ raise ValueError("s should be s >= 0.0")
271
+
272
+ try:
273
+ ext = _extrap_modes[ext]
274
+ except KeyError as e:
275
+ raise ValueError("Unknown extrapolation mode %s." % ext) from e
276
+
277
+ return x, y, w, bbox, ext
278
+
279
+ @classmethod
280
+ def _from_tck(cls, tck, ext=0):
281
+ """Construct a spline object from given tck"""
282
+ self = cls.__new__(cls)
283
+ t, c, k = tck
284
+ self._eval_args = tck
285
+ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
286
+ self._data = (None, None, None, None, None, k, None, len(t), t,
287
+ c, None, None, None, None)
288
+ self.ext = ext
289
+ return self
290
+
291
+ def _reset_class(self):
292
+ data = self._data
293
+ n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
294
+ self._eval_args = t[:n], c[:n], k
295
+ if ier == 0:
296
+ # the spline returned has a residual sum of squares fp
297
+ # such that abs(fp-s)/s <= tol with tol a relative
298
+ # tolerance set to 0.001 by the program
299
+ pass
300
+ elif ier == -1:
301
+ # the spline returned is an interpolating spline
302
+ self._set_class(InterpolatedUnivariateSpline)
303
+ elif ier == -2:
304
+ # the spline returned is the weighted least-squares
305
+ # polynomial of degree k. In this extreme case fp gives
306
+ # the upper bound fp0 for the smoothing factor s.
307
+ self._set_class(LSQUnivariateSpline)
308
+ else:
309
+ # error
310
+ if ier == 1:
311
+ self._set_class(LSQUnivariateSpline)
312
+ message = _curfit_messages.get(ier, 'ier=%s' % (ier))
313
+ warnings.warn(message, stacklevel=3)
314
+
315
+ def _set_class(self, cls):
316
+ self._spline_class = cls
317
+ if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
318
+ LSQUnivariateSpline):
319
+ self.__class__ = cls
320
+ else:
321
+ # It's an unknown subclass -- don't change class. cf. #731
322
+ pass
323
+
324
+ def _reset_nest(self, data, nest=None):
325
+ n = data[10]
326
+ if nest is None:
327
+ k, m = data[5], len(data[0])
328
+ nest = m+k+1 # this is the maximum bound for nest
329
+ else:
330
+ if not n <= nest:
331
+ raise ValueError("`nest` can only be increased")
332
+ t, c, fpint, nrdata = (np.resize(data[j], nest) for j in
333
+ [8, 9, 11, 12])
334
+
335
+ args = data[:8] + (t, c, n, fpint, nrdata, data[13])
336
+ data = dfitpack.fpcurf1(*args)
337
+ return data
338
+
339
+ def set_smoothing_factor(self, s):
340
+ """ Continue spline computation with the given smoothing
341
+ factor s and with the knots found at the last call.
342
+
343
+ This routine modifies the spline in place.
344
+
345
+ """
346
+ data = self._data
347
+ if data[6] == -1:
348
+ warnings.warn('smoothing factor unchanged for '
349
+ 'LSQ spline with fixed knots',
350
+ stacklevel=2)
351
+ return
352
+ args = data[:6] + (s,) + data[7:]
353
+ data = dfitpack.fpcurf1(*args)
354
+ if data[-1] == 1:
355
+ # nest too small, setting to maximum bound
356
+ data = self._reset_nest(data)
357
+ self._data = data
358
+ self._reset_class()
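
A short illustrative sketch of ``set_smoothing_factor``: the smoothing factor
is lowered in place, re-using the knots found by the previous fit, and the
residual and knot count change accordingly::

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    rng = np.random.default_rng(0)
    x = np.linspace(-3, 3, 50)
    y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)

    spl = UnivariateSpline(x, y)        # default smoothing s = len(x)
    for s in (len(x), 5.0, 0.5):
        spl.set_smoothing_factor(s)     # refit, starting from the current knots
        print(s, spl.get_residual(), len(spl.get_knots()))
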
359
+
360
+ def __call__(self, x, nu=0, ext=None):
361
+ """
362
+ Evaluate spline (or its nu-th derivative) at positions x.
363
+
364
+ Parameters
365
+ ----------
366
+ x : array_like
367
+ A 1-D array of points at which to return the value of the smoothed
368
+ spline or its derivatives. Note: `x` can be unordered but the
369
+ evaluation is more efficient if `x` is (partially) ordered.
370
+ nu : int
371
+ The order of derivative of the spline to compute.
372
+ ext : int
373
+ Controls the value returned for elements of `x` not in the
374
+ interval defined by the knot sequence.
375
+
376
+ * if ext=0 or 'extrapolate', return the extrapolated value.
377
+ * if ext=1 or 'zeros', return 0
378
+ * if ext=2 or 'raise', raise a ValueError
379
+ * if ext=3 or 'const', return the boundary value.
380
+
381
+ The default value is 0, passed from the initialization of
382
+ UnivariateSpline.
383
+
384
+ """
385
+ x = np.asarray(x)
386
+ # empty input yields empty output
387
+ if x.size == 0:
388
+ return array([])
389
+ if ext is None:
390
+ ext = self.ext
391
+ else:
392
+ try:
393
+ ext = _extrap_modes[ext]
394
+ except KeyError as e:
395
+ raise ValueError("Unknown extrapolation mode %s." % ext) from e
396
+ return _fitpack_impl.splev(x, self._eval_args, der=nu, ext=ext)
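
To make the ``ext`` modes listed above concrete, here is a small sketch
evaluating a fitted spline outside its data range with each mode (exact
numbers depend on the fit)::

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    x = np.linspace(0, 3, 11)
    spl = UnivariateSpline(x, x**2, s=0)

    for mode in ('extrapolate', 'zeros', 'const'):
        print(mode, spl(4.0, ext=mode))     # 4.0 lies outside [0, 3]

    try:
        spl(4.0, ext='raise')
    except ValueError as exc:
        print('raise mode:', exc)
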
397
+
398
+ def get_knots(self):
399
+ """ Return positions of interior knots of the spline.
400
+
401
+ Internally, the knot vector contains ``2*k`` additional boundary knots.
402
+ """
403
+ data = self._data
404
+ k, n = data[5], data[7]
405
+ return data[8][k:n-k]
406
+
407
+ def get_coeffs(self):
408
+ """Return spline coefficients."""
409
+ data = self._data
410
+ k, n = data[5], data[7]
411
+ return data[9][:n-k-1]
412
+
413
+ def get_residual(self):
414
+ """Return weighted sum of squared residuals of the spline approximation.
415
+
416
+ This is equivalent to::
417
+
418
+ sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
419
+
420
+ """
421
+ return self._data[10]
422
+
423
+ def integral(self, a, b):
424
+ """ Return definite integral of the spline between two given points.
425
+
426
+ Parameters
427
+ ----------
428
+ a : float
429
+ Lower limit of integration.
430
+ b : float
431
+ Upper limit of integration.
432
+
433
+ Returns
434
+ -------
435
+ integral : float
436
+ The value of the definite integral of the spline between limits.
437
+
438
+ Examples
439
+ --------
440
+ >>> import numpy as np
441
+ >>> from scipy.interpolate import UnivariateSpline
442
+ >>> x = np.linspace(0, 3, 11)
443
+ >>> y = x**2
444
+ >>> spl = UnivariateSpline(x, y)
445
+ >>> spl.integral(0, 3)
446
+ 9.0
447
+
448
+ which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
449
+ of 0 and 3.
450
+
451
+ A caveat is that this routine assumes the spline to be zero outside of
452
+ the data limits:
453
+
454
+ >>> spl.integral(-1, 4)
455
+ 9.0
456
+ >>> spl.integral(-1, 0)
457
+ 0.0
458
+
459
+ """
460
+ return _fitpack_impl.splint(a, b, self._eval_args)
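
As the caveat above notes, ``integral`` treats the spline as zero outside the
data limits. When extrapolated behaviour is wanted instead, one workaround is
to integrate the antiderivative, which inherits the spline's extrapolation
mode. A minimal sketch::

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    x = np.linspace(0, 3, 11)
    spl = UnivariateSpline(x, x**2, s=0)

    print(spl.integral(-1, 4))      # 9.0: contributions outside [0, 3] are dropped

    ispl = spl.antiderivative()     # extrapolates by default
    print(ispl(4) - ispl(-1))       # about 21.67 here, i.e. (4**3 - (-1)**3) / 3
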
461
+
462
+ def derivatives(self, x):
463
+ """ Return all derivatives of the spline at the point x.
464
+
465
+ Parameters
466
+ ----------
467
+ x : float
468
+ The point to evaluate the derivatives at.
469
+
470
+ Returns
471
+ -------
472
+ der : ndarray, shape(k+1,)
473
+ Derivatives of the orders 0 to k.
474
+
475
+ Examples
476
+ --------
477
+ >>> import numpy as np
478
+ >>> from scipy.interpolate import UnivariateSpline
479
+ >>> x = np.linspace(0, 3, 11)
480
+ >>> y = x**2
481
+ >>> spl = UnivariateSpline(x, y)
482
+ >>> spl.derivatives(1.5)
483
+ array([2.25, 3.0, 2.0, 0])
484
+
485
+ """
486
+ return _fitpack_impl.spalde(x, self._eval_args)
487
+
488
+ def roots(self):
489
+ """ Return the zeros of the spline.
490
+
491
+ Notes
492
+ -----
493
+ Restriction: only cubic splines are supported by FITPACK. For non-cubic
494
+ splines, use `PPoly.roots` (see below for an example).
495
+
496
+ Examples
497
+ --------
498
+
499
+ For some data, this method may miss a root. This happens when one of
500
+ the spline knots (which FITPACK places automatically) happens to
501
+ coincide with the true root. A workaround is to convert to `PPoly`,
502
+ which uses a different root-finding algorithm.
503
+
504
+ For example,
505
+
506
+ >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
507
+ >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
508
+ ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
509
+ ... 6.520430e-03, 8.170770e-03]
510
+ >>> from scipy.interpolate import UnivariateSpline
511
+ >>> spl = UnivariateSpline(x, y, s=0)
512
+ >>> spl.roots()
513
+ array([], dtype=float64)
514
+
515
+ Converting to a PPoly object does find the root at `x=2`:
516
+
517
+ >>> from scipy.interpolate import splrep, PPoly
518
+ >>> tck = splrep(x, y, s=0)
519
+ >>> ppoly = PPoly.from_spline(tck)
520
+ >>> ppoly.roots(extrapolate=False)
521
+ array([2.])
522
+
523
+ See Also
524
+ --------
525
+ sproot
526
+ PPoly.roots
527
+
528
+ """
529
+ k = self._data[5]
530
+ if k == 3:
531
+ t = self._eval_args[0]
532
+ mest = 3 * (len(t) - 7)
533
+ return _fitpack_impl.sproot(self._eval_args, mest=mest)
534
+ raise NotImplementedError('finding roots unsupported for '
535
+ 'non-cubic splines')
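
Since FITPACK's root finder is cubic-only, a spline of another degree can be
converted to piecewise-polynomial form and ``PPoly.roots`` used instead,
extending the workaround shown in the docstring. An illustrative sketch::

    import numpy as np
    from scipy.interpolate import UnivariateSpline, splrep, PPoly

    x = np.linspace(0, 10, 70)
    y = np.sin(x)

    quartic = UnivariateSpline(x, y, k=4, s=0)
    # quartic.roots() would raise NotImplementedError, so convert the same
    # fit to a PPoly, whose root finder works for any degree:
    ppoly = PPoly.from_spline(splrep(x, y, k=4, s=0))
    print(ppoly.roots(extrapolate=False) / np.pi)   # roughly 0, 1, 2, 3
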
536
+
537
+ def derivative(self, n=1):
538
+ """
539
+ Construct a new spline representing the derivative of this spline.
540
+
541
+ Parameters
542
+ ----------
543
+ n : int, optional
544
+ Order of derivative to evaluate. Default: 1
545
+
546
+ Returns
547
+ -------
548
+ spline : UnivariateSpline
549
+ Spline of order k2=k-n representing the derivative of this
550
+ spline.
551
+
552
+ See Also
553
+ --------
554
+ splder, antiderivative
555
+
556
+ Notes
557
+ -----
558
+
559
+ .. versionadded:: 0.13.0
560
+
561
+ Examples
562
+ --------
563
+ This can be used for finding maxima of a curve:
564
+
565
+ >>> import numpy as np
566
+ >>> from scipy.interpolate import UnivariateSpline
567
+ >>> x = np.linspace(0, 10, 70)
568
+ >>> y = np.sin(x)
569
+ >>> spl = UnivariateSpline(x, y, k=4, s=0)
570
+
571
+ Now, differentiate the spline and find the zeros of the
572
+ derivative. (NB: `sproot` only works for order 3 splines, so we
573
+ fit an order 4 spline):
574
+
575
+ >>> spl.derivative().roots() / np.pi
576
+ array([ 0.50000001, 1.5 , 2.49999998])
577
+
578
+ This agrees well with roots :math:`\\pi/2 + n\\pi` of
579
+ :math:`\\cos(x) = \\sin'(x)`.
580
+
581
+ """
582
+ tck = _fitpack_impl.splder(self._eval_args, n)
583
+ # if self.ext is 'const', derivative.ext will be 'zeros'
584
+ ext = 1 if self.ext == 3 else self.ext
585
+ return UnivariateSpline._from_tck(tck, ext=ext)
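
Building on the maxima example above, derivatives chain: a quintic fit has a
cubic second derivative, so ``roots`` can locate inflection points. A short
sketch::

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    x = np.linspace(0, 10, 200)
    spl = UnivariateSpline(x, np.sin(x), k=5, s=0)

    # The second derivative of a k=5 spline has degree 3, so .roots() applies.
    infl = spl.derivative(n=2).roots()
    print(infl / np.pi)   # roughly integer multiples of pi inside the data range
                          # (a root coinciding with a knot may be missed, see above)
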
586
+
587
+ def antiderivative(self, n=1):
588
+ """
589
+ Construct a new spline representing the antiderivative of this spline.
590
+
591
+ Parameters
592
+ ----------
593
+ n : int, optional
594
+ Order of antiderivative to evaluate. Default: 1
595
+
596
+ Returns
597
+ -------
598
+ spline : UnivariateSpline
599
+ Spline of order k2=k+n representing the antiderivative of this
600
+ spline.
601
+
602
+ Notes
603
+ -----
604
+
605
+ .. versionadded:: 0.13.0
606
+
607
+ See Also
608
+ --------
609
+ splantider, derivative
610
+
611
+ Examples
612
+ --------
613
+ >>> import numpy as np
614
+ >>> from scipy.interpolate import UnivariateSpline
615
+ >>> x = np.linspace(0, np.pi/2, 70)
616
+ >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
617
+ >>> spl = UnivariateSpline(x, y, s=0)
618
+
619
+ The derivative is the inverse operation of the antiderivative,
620
+ although some floating point error accumulates:
621
+
622
+ >>> spl(1.7), spl.antiderivative().derivative()(1.7)
623
+ (array(2.1565429877197317), array(2.1565429877201865))
624
+
625
+ Antiderivative can be used to evaluate definite integrals:
626
+
627
+ >>> ispl = spl.antiderivative()
628
+ >>> ispl(np.pi/2) - ispl(0)
629
+ 2.2572053588768486
630
+
631
+ This is indeed an approximation to the complete elliptic integral
632
+ :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
633
+
634
+ >>> from scipy.special import ellipk
635
+ >>> ellipk(0.8)
636
+ 2.2572053268208538
637
+
638
+ """
639
+ tck = _fitpack_impl.splantider(self._eval_args, n)
640
+ return UnivariateSpline._from_tck(tck, self.ext)
641
+
642
+
643
+ class InterpolatedUnivariateSpline(UnivariateSpline):
644
+ """
645
+ 1-D interpolating spline for a given set of data points.
646
+
647
+ Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
648
+ Spline function passes through all provided points. Equivalent to
649
+ `UnivariateSpline` with `s` = 0.
650
+
651
+ Parameters
652
+ ----------
653
+ x : (N,) array_like
654
+ Input dimension of data points -- must be strictly increasing
655
+ y : (N,) array_like
656
+ Input dimension of data points
657
+ w : (N,) array_like, optional
658
+ Weights for spline fitting. Must be positive. If None (default),
659
+ weights are all 1.
660
+ bbox : (2,) array_like, optional
661
+ 2-sequence specifying the boundary of the approximation interval. If
662
+ None (default), ``bbox=[x[0], x[-1]]``.
663
+ k : int, optional
664
+ Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
665
+ ``k = 3``, a cubic spline.
666
+ ext : int or str, optional
667
+ Controls the extrapolation mode for elements
668
+ not in the interval defined by the knot sequence.
669
+
670
+ * if ext=0 or 'extrapolate', return the extrapolated value.
671
+ * if ext=1 or 'zeros', return 0
672
+ * if ext=2 or 'raise', raise a ValueError
673
+ * if ext=3 or 'const', return the boundary value.
674
+
675
+ The default value is 0.
676
+
677
+ check_finite : bool, optional
678
+ Whether to check that the input arrays contain only finite numbers.
679
+ Disabling may give a performance gain, but may result in problems
680
+ (crashes, non-termination or nonsensical results) if the inputs
681
+ do contain infinities or NaNs.
682
+ Default is False.
683
+
684
+ See Also
685
+ --------
686
+ UnivariateSpline :
687
+ a smooth univariate spline to fit a given set of data points.
688
+ LSQUnivariateSpline :
689
+ a spline for which knots are user-selected
690
+ SmoothBivariateSpline :
691
+ a smoothing bivariate spline through the given points
692
+ LSQBivariateSpline :
693
+ a bivariate spline using weighted least-squares fitting
694
+ splrep :
695
+ a function to find the B-spline representation of a 1-D curve
696
+ splev :
697
+ a function to evaluate a B-spline or its derivatives
698
+ sproot :
699
+ a function to find the roots of a cubic B-spline
700
+ splint :
701
+ a function to evaluate the definite integral of a B-spline between two
702
+ given points
703
+ spalde :
704
+ a function to evaluate all derivatives of a B-spline
705
+
706
+ Notes
707
+ -----
708
+ The number of data points must be larger than the spline degree `k`.
709
+
710
+ Examples
711
+ --------
712
+ >>> import numpy as np
713
+ >>> import matplotlib.pyplot as plt
714
+ >>> from scipy.interpolate import InterpolatedUnivariateSpline
715
+ >>> rng = np.random.default_rng()
716
+ >>> x = np.linspace(-3, 3, 50)
717
+ >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
718
+ >>> spl = InterpolatedUnivariateSpline(x, y)
719
+ >>> plt.plot(x, y, 'ro', ms=5)
720
+ >>> xs = np.linspace(-3, 3, 1000)
721
+ >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
722
+ >>> plt.show()
723
+
724
+ Notice that ``spl(x)`` interpolates `y`:
725
+
726
+ >>> spl.get_residual()
727
+ 0.0
728
+
729
+ """
730
+
731
+ def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
732
+ ext=0, check_finite=False):
733
+
734
+ x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
735
+ ext, check_finite)
736
+ if not np.all(diff(x) > 0.0):
737
+ raise ValueError('x must be strictly increasing')
738
+
739
+ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
740
+ self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
741
+ xe=bbox[1], s=0)
742
+ self._reset_class()
743
+
744
+
745
+ _fpchec_error_string = """The input parameters have been rejected by fpchec. \
746
+ This means that at least one of the following conditions is violated:
747
+
748
+ 1) k+1 <= n-k-1 <= m
749
+ 2) t(1) <= t(2) <= ... <= t(k+1)
750
+ t(n-k) <= t(n-k+1) <= ... <= t(n)
751
+ 3) t(k+1) < t(k+2) < ... < t(n-k)
752
+ 4) t(k+1) <= x(i) <= t(n-k)
753
+ 5) The conditions specified by Schoenberg and Whitney must hold
754
+ for at least one subset of data points, i.e., there must be a
755
+ subset of data points y(j) such that
756
+ t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
757
+ """
758
+
759
+
760
+ class LSQUnivariateSpline(UnivariateSpline):
761
+ """
762
+ 1-D spline with explicit internal knots.
763
+
764
+ Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
765
+ specifies the internal knots of the spline
766
+
767
+ Parameters
768
+ ----------
769
+ x : (N,) array_like
770
+ Input dimension of data points -- must be increasing
771
+ y : (N,) array_like
772
+ Input dimension of data points
773
+ t : (M,) array_like
774
+ Interior knots of the spline. Must be in ascending order and::
775
+
776
+ bbox[0] < t[0] < ... < t[-1] < bbox[-1]
777
+
778
+ w : (N,) array_like, optional
779
+ Weights for spline fitting. Must be positive. If None (default),
780
+ weights are all 1.
781
+ bbox : (2,) array_like, optional
782
+ 2-sequence specifying the boundary of the approximation interval. If
783
+ None (default), ``bbox = [x[0], x[-1]]``.
784
+ k : int, optional
785
+ Degree of the smoothing spline. Must be 1 <= `k` <= 5.
786
+ Default is `k` = 3, a cubic spline.
787
+ ext : int or str, optional
788
+ Controls the extrapolation mode for elements
789
+ not in the interval defined by the knot sequence.
790
+
791
+ * if ext=0 or 'extrapolate', return the extrapolated value.
792
+ * if ext=1 or 'zeros', return 0
793
+ * if ext=2 or 'raise', raise a ValueError
794
+ * if ext=3 or 'const', return the boundary value.
795
+
796
+ The default value is 0.
797
+
798
+ check_finite : bool, optional
799
+ Whether to check that the input arrays contain only finite numbers.
800
+ Disabling may give a performance gain, but may result in problems
801
+ (crashes, non-termination or nonsensical results) if the inputs
802
+ do contain infinities or NaNs.
803
+ Default is False.
804
+
805
+ Raises
806
+ ------
807
+ ValueError
808
+ If the interior knots do not satisfy the Schoenberg-Whitney conditions
809
+
810
+ See Also
811
+ --------
812
+ UnivariateSpline :
813
+ a smooth univariate spline to fit a given set of data points.
814
+ InterpolatedUnivariateSpline :
815
+ an interpolating univariate spline for a given set of data points.
816
+ splrep :
817
+ a function to find the B-spline representation of a 1-D curve
818
+ splev :
819
+ a function to evaluate a B-spline or its derivatives
820
+ sproot :
821
+ a function to find the roots of a cubic B-spline
822
+ splint :
823
+ a function to evaluate the definite integral of a B-spline between two
824
+ given points
825
+ spalde :
826
+ a function to evaluate all derivatives of a B-spline
827
+
828
+ Notes
829
+ -----
830
+ The number of data points must be larger than the spline degree `k`.
831
+
832
+ Knots `t` must satisfy the Schoenberg-Whitney conditions,
833
+ i.e., there must be a subset of data points ``x[j]`` such that
834
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
835
+
836
+ Examples
837
+ --------
838
+ >>> import numpy as np
839
+ >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
840
+ >>> import matplotlib.pyplot as plt
841
+ >>> rng = np.random.default_rng()
842
+ >>> x = np.linspace(-3, 3, 50)
843
+ >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
844
+
845
+ Fit a smoothing spline with pre-defined internal knots:
846
+
847
+ >>> t = [-1, 0, 1]
848
+ >>> spl = LSQUnivariateSpline(x, y, t)
849
+
850
+ >>> xs = np.linspace(-3, 3, 1000)
851
+ >>> plt.plot(x, y, 'ro', ms=5)
852
+ >>> plt.plot(xs, spl(xs), 'g-', lw=3)
853
+ >>> plt.show()
854
+
855
+ Check the knot vector:
856
+
857
+ >>> spl.get_knots()
858
+ array([-3., -1., 0., 1., 3.])
859
+
860
+ Constructing an LSQ spline using the knots from another spline:
861
+
862
+ >>> x = np.arange(10)
863
+ >>> s = UnivariateSpline(x, x, s=0)
864
+ >>> s.get_knots()
865
+ array([ 0., 2., 3., 4., 5., 6., 7., 9.])
866
+ >>> knt = s.get_knots()
867
+ >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
868
+ >>> s1.get_knots()
869
+ array([ 0., 2., 3., 4., 5., 6., 7., 9.])
870
+
871
+ """
872
+
873
+ def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
874
+ ext=0, check_finite=False):
875
+
876
+ x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
877
+ ext, check_finite)
878
+ if not np.all(diff(x) >= 0.0):
879
+ raise ValueError('x must be increasing')
880
+
881
+ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
882
+ xb = bbox[0]
883
+ xe = bbox[1]
884
+ if xb is None:
885
+ xb = x[0]
886
+ if xe is None:
887
+ xe = x[-1]
888
+ t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
889
+ n = len(t)
890
+ if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
891
+ raise ValueError('Interior knots t must satisfy '
892
+ 'Schoenberg-Whitney conditions')
893
+ if not dfitpack.fpchec(x, t, k) == 0:
894
+ raise ValueError(_fpchec_error_string)
895
+ data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
896
+ self._data = data[:-3] + (None, None, data[-1])
897
+ self._reset_class()
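
A small sanity check of the least-squares fit built above: with unit weights,
``get_residual`` should equal the plain sum of squared residuals
(illustrative)::

    import numpy as np
    from scipy.interpolate import LSQUnivariateSpline

    rng = np.random.default_rng(1)
    x = np.linspace(-3, 3, 50)
    y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)

    t = [-1.0, 0.0, 1.0]               # interior knots; boundary knots are added internally
    spl = LSQUnivariateSpline(x, y, t)
    print(np.isclose(spl.get_residual(), np.sum((y - spl(x)) ** 2)))   # True
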
898
+
899
+
900
+ # ############### Bivariate spline ####################
901
+
902
+ class _BivariateSplineBase:
903
+ """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
904
+ [xb,xe] x [yb, ye] calculated from a given set of data points
905
+ (x,y,z).
906
+
907
+ See Also
908
+ --------
909
+ bisplrep :
910
+ a function to find a bivariate B-spline representation of a surface
911
+ bisplev :
912
+ a function to evaluate a bivariate B-spline and its derivatives
913
+ BivariateSpline :
914
+ a base class for bivariate splines.
915
+ SphereBivariateSpline :
916
+ a bivariate spline on a spherical grid
917
+ """
918
+
919
+ @classmethod
920
+ def _from_tck(cls, tck):
921
+ """Construct a spline object from given tck and degree"""
922
+ self = cls.__new__(cls)
923
+ if len(tck) != 5:
924
+ raise ValueError("tck should be a 5 element tuple of tx,"
925
+ " ty, c, kx, ky")
926
+ self.tck = tck[:3]
927
+ self.degrees = tck[3:]
928
+ return self
929
+
930
+ def get_residual(self):
931
+ """ Return weighted sum of squared residuals of the spline
932
+ approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
933
+ """
934
+ return self.fp
935
+
936
+ def get_knots(self):
937
+ """ Return a tuple (tx,ty) where tx,ty contain knots positions
938
+ of the spline with respect to x-, y-variable, respectively.
939
+ The position of interior and additional knots are given as
940
+ t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
941
+ """
942
+ return self.tck[:2]
943
+
944
+ def get_coeffs(self):
945
+ """ Return spline coefficients."""
946
+ return self.tck[2]
947
+
948
+ def __call__(self, x, y, dx=0, dy=0, grid=True):
949
+ """
950
+ Evaluate the spline or its derivatives at given positions.
951
+
952
+ Parameters
953
+ ----------
954
+ x, y : array_like
955
+ Input coordinates.
956
+
957
+ If `grid` is False, evaluate the spline at points ``(x[i],
958
+ y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
959
+ is obeyed.
960
+
961
+ If `grid` is True: evaluate spline at the grid points
962
+ defined by the coordinate arrays x, y. The arrays must be
963
+ sorted to increasing order.
964
+
965
+ The ordering of axes is consistent with
966
+ ``np.meshgrid(..., indexing="ij")`` and inconsistent with the
967
+ default ordering ``np.meshgrid(..., indexing="xy")``.
968
+ dx : int
969
+ Order of x-derivative
970
+
971
+ .. versionadded:: 0.14.0
972
+ dy : int
973
+ Order of y-derivative
974
+
975
+ .. versionadded:: 0.14.0
976
+ grid : bool
977
+ Whether to evaluate the results on a grid spanned by the
978
+ input arrays, or at points specified by the input arrays.
979
+
980
+ .. versionadded:: 0.14.0
981
+
982
+ Examples
983
+ --------
984
+ Suppose that we want to bilinearly interpolate an exponentially decaying
985
+ function in 2 dimensions.
986
+
987
+ >>> import numpy as np
988
+ >>> from scipy.interpolate import RectBivariateSpline
989
+
990
+ We sample the function on a coarse grid. Note that the default indexing="xy"
991
+ of meshgrid would result in an unexpected (transposed) result after
992
+ interpolation.
993
+
994
+ >>> xarr = np.linspace(-3, 3, 100)
995
+ >>> yarr = np.linspace(-3, 3, 100)
996
+ >>> xgrid, ygrid = np.meshgrid(xarr, yarr, indexing="ij")
997
+
998
+ The function to interpolate decays faster along one axis than the other.
999
+
1000
+ >>> zdata = np.exp(-np.sqrt((xgrid / 2) ** 2 + ygrid**2))
1001
+
1002
+ Next we sample on a finer grid using interpolation (kx=ky=1 for bilinear).
1003
+
1004
+ >>> rbs = RectBivariateSpline(xarr, yarr, zdata, kx=1, ky=1)
1005
+ >>> xarr_fine = np.linspace(-3, 3, 200)
1006
+ >>> yarr_fine = np.linspace(-3, 3, 200)
1007
+ >>> xgrid_fine, ygrid_fine = np.meshgrid(xarr_fine, yarr_fine, indexing="ij")
1008
+ >>> zdata_interp = rbs(xgrid_fine, ygrid_fine, grid=False)
1009
+
1010
+ And check that the result agrees with the input by plotting both.
1011
+
1012
+ >>> import matplotlib.pyplot as plt
1013
+ >>> fig = plt.figure()
1014
+ >>> ax1 = fig.add_subplot(1, 2, 1, aspect="equal")
1015
+ >>> ax2 = fig.add_subplot(1, 2, 2, aspect="equal")
1016
+ >>> ax1.imshow(zdata)
1017
+ >>> ax2.imshow(zdata_interp)
1018
+ >>> plt.show()
1019
+ """
1020
+ x = np.asarray(x)
1021
+ y = np.asarray(y)
1022
+
1023
+ tx, ty, c = self.tck[:3]
1024
+ kx, ky = self.degrees
1025
+ if grid:
1026
+ if x.size == 0 or y.size == 0:
1027
+ return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
1028
+
1029
+ if (x.size >= 2) and (not np.all(np.diff(x) >= 0.0)):
1030
+ raise ValueError("x must be strictly increasing when `grid` is True")
1031
+ if (y.size >= 2) and (not np.all(np.diff(y) >= 0.0)):
1032
+ raise ValueError("y must be strictly increasing when `grid` is True")
1033
+
1034
+ if dx or dy:
1035
+ z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
1036
+ if not ier == 0:
1037
+ raise ValueError("Error code returned by parder: %s" % ier)
1038
+ else:
1039
+ z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
1040
+ if not ier == 0:
1041
+ raise ValueError("Error code returned by bispev: %s" % ier)
1042
+ else:
1043
+ # standard Numpy broadcasting
1044
+ if x.shape != y.shape:
1045
+ x, y = np.broadcast_arrays(x, y)
1046
+
1047
+ shape = x.shape
1048
+ x = x.ravel()
1049
+ y = y.ravel()
1050
+
1051
+ if x.size == 0 or y.size == 0:
1052
+ return np.zeros(shape, dtype=self.tck[2].dtype)
1053
+
1054
+ if dx or dy:
1055
+ z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
1056
+ if not ier == 0:
1057
+ raise ValueError("Error code returned by pardeu: %s" % ier)
1058
+ else:
1059
+ z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
1060
+ if not ier == 0:
1061
+ raise ValueError("Error code returned by bispeu: %s" % ier)
1062
+
1063
+ z = z.reshape(shape)
1064
+ return z
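
The two evaluation modes above differ in output shape: ``grid=True`` returns
values on the outer-product grid of the inputs, while ``grid=False`` evaluates
pointwise with broadcasting. A small sketch::

    import numpy as np
    from scipy.interpolate import RectBivariateSpline

    xarr = np.linspace(0, 4, 9)
    yarr = np.linspace(0, 3, 7)
    z = np.add.outer(xarr**2, yarr)            # z[i, j] = x[i]**2 + y[j]
    spl = RectBivariateSpline(xarr, yarr, z)

    xi = np.array([0.5, 1.5, 2.5])
    yi = np.array([0.25, 1.0, 2.75])
    print(spl(xi, yi, grid=True).shape)        # (3, 3): grid of xi by yi
    print(spl(xi, yi, grid=False).shape)       # (3,):  pairs (xi[i], yi[i])
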
1065
+
1066
+ def partial_derivative(self, dx, dy):
1067
+ """Construct a new spline representing a partial derivative of this
1068
+ spline.
1069
+
1070
+ Parameters
1071
+ ----------
1072
+ dx, dy : int
1073
+ Orders of the derivative in x and y respectively. They must be
1074
+ non-negative integers and less than the respective degree of the
1075
+ original spline (self) in that direction (``kx``, ``ky``).
1076
+
1077
+ Returns
1078
+ -------
1079
+ spline :
1080
+ A new spline of degrees (``kx - dx``, ``ky - dy``) representing the
1081
+ derivative of this spline.
1082
+
1083
+ Notes
1084
+ -----
1085
+
1086
+ .. versionadded:: 1.9.0
1087
+
1088
+ """
1089
+ if dx == 0 and dy == 0:
1090
+ return self
1091
+ else:
1092
+ kx, ky = self.degrees
1093
+ if not (dx >= 0 and dy >= 0):
1094
+ raise ValueError("order of derivative must be positive or"
1095
+ " zero")
1096
+ if not (dx < kx and dy < ky):
1097
+ raise ValueError("order of derivative must be less than"
1098
+ " degree of spline")
1099
+ tx, ty, c = self.tck[:3]
1100
+ newc, ier = dfitpack.pardtc(tx, ty, c, kx, ky, dx, dy)
1101
+ if ier != 0:
1102
+ # This should not happen under normal conditions.
1103
+ raise ValueError("Unexpected error code returned by"
1104
+ " pardtc: %d" % ier)
1105
+ nx = len(tx)
1106
+ ny = len(ty)
1107
+ newtx = tx[dx:nx - dx]
1108
+ newty = ty[dy:ny - dy]
1109
+ newkx, newky = kx - dx, ky - dy
1110
+ newclen = (nx - dx - kx - 1) * (ny - dy - ky - 1)
1111
+ return _DerivedBivariateSpline._from_tck((newtx, newty,
1112
+ newc[:newclen],
1113
+ newkx, newky))
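
An illustrative sketch of ``partial_derivative`` (SciPy 1.9.0 and later): the
returned spline has degrees ``(kx - dx, ky - dy)`` and evaluates like any
other bivariate spline::

    import numpy as np
    from scipy.interpolate import RectBivariateSpline

    xarr = np.linspace(0, 2, 25)
    yarr = np.linspace(0, 3, 30)
    z = np.add.outer(xarr**3, yarr**2)             # z = x**3 + y**2 on the grid
    spl = RectBivariateSpline(xarr, yarr, z)       # kx = ky = 3 interpolant

    dspl = spl.partial_derivative(1, 0)            # d/dx; degrees become (2, 3)
    print(dspl.degrees)
    print(dspl(1.0, 1.5, grid=False))              # close to 3.0, since d/dx = 3*x**2
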
1114
+
1115
+
1116
+ _surfit_messages = {1: """
1117
+ The required storage space exceeds the available storage space: nxest
1118
+ or nyest too small, or s too small.
1119
+ The weighted least-squares spline corresponds to the current set of
1120
+ knots.""",
1121
+ 2: """
1122
+ A theoretically impossible result was found during the iteration
1123
+ process for finding a smoothing spline with fp = s: s too small or
1124
+ badly chosen eps.
1125
+ Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
1126
+ 3: """
1127
+ the maximal number of iterations maxit (set to 20 by the program)
1128
+ allowed for finding a smoothing spline with fp=s has been reached:
1129
+ s too small.
1130
+ Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
1131
+ 4: """
1132
+ No more knots can be added because the number of b-spline coefficients
1133
+ (nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
1134
+ either s or m too small.
1135
+ The weighted least-squares spline corresponds to the current set of
1136
+ knots.""",
1137
+ 5: """
1138
+ No more knots can be added because the additional knot would (quasi)
1139
+ coincide with an old one: s too small or too large a weight to an
1140
+ inaccurate data point.
1141
+ The weighted least-squares spline corresponds to the current set of
1142
+ knots.""",
1143
+ 10: """
1144
+ Error on entry, no approximation returned. The following conditions
1145
+ must hold:
1146
+ xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
1147
+ If iopt==-1, then
1148
+ xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
1149
+ yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
1150
+ -3: """
1151
+ The coefficients of the spline returned have been computed as the
1152
+ minimal norm least-squares solution of a (numerically) rank deficient
1153
+ system (deficiency=%i). If deficiency is large, the results may be
1154
+ inaccurate. Deficiency may strongly depend on the value of eps."""
1155
+ }
1156
+
1157
+
1158
+ class BivariateSpline(_BivariateSplineBase):
1159
+ """
1160
+ Base class for bivariate splines.
1161
+
1162
+ This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
1163
+ the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
1164
+ of data points ``(x, y, z)``.
1165
+
1166
+ This class is meant to be subclassed, not instantiated directly.
1167
+ To construct these splines, call either `SmoothBivariateSpline` or
1168
+ `LSQBivariateSpline` or `RectBivariateSpline`.
1169
+
1170
+ See Also
1171
+ --------
1172
+ UnivariateSpline :
1173
+ a smooth univariate spline to fit a given set of data points.
1174
+ SmoothBivariateSpline :
1175
+ a smoothing bivariate spline through the given points
1176
+ LSQBivariateSpline :
1177
+ a bivariate spline using weighted least-squares fitting
1178
+ RectSphereBivariateSpline :
1179
+ a bivariate spline over a rectangular mesh on a sphere
1180
+ SmoothSphereBivariateSpline :
1181
+ a smoothing bivariate spline in spherical coordinates
1182
+ LSQSphereBivariateSpline :
1183
+ a bivariate spline in spherical coordinates using weighted
1184
+ least-squares fitting
1185
+ RectBivariateSpline :
1186
+ a bivariate spline over a rectangular mesh.
1187
+ bisplrep :
1188
+ a function to find a bivariate B-spline representation of a surface
1189
+ bisplev :
1190
+ a function to evaluate a bivariate B-spline and its derivatives
1191
+ """
1192
+
1193
+ def ev(self, xi, yi, dx=0, dy=0):
1194
+ """
1195
+ Evaluate the spline at points
1196
+
1197
+ Returns the interpolated value at ``(xi[i], yi[i]),
1198
+ i=0,...,len(xi)-1``.
1199
+
1200
+ Parameters
1201
+ ----------
1202
+ xi, yi : array_like
1203
+ Input coordinates. Standard Numpy broadcasting is obeyed.
1204
+ The ordering of axes is consistent with
1205
+ ``np.meshgrid(..., indexing="ij")`` and inconsistent with the
1206
+ default ordering ``np.meshgrid(..., indexing="xy")``.
1207
+ dx : int, optional
1208
+ Order of x-derivative
1209
+
1210
+ .. versionadded:: 0.14.0
1211
+ dy : int, optional
1212
+ Order of y-derivative
1213
+
1214
+ .. versionadded:: 0.14.0
1215
+
1216
+ Examples
1217
+ --------
1218
+ Suppose that we want to bilinearly interpolate an exponentially decaying
1219
+ function in 2 dimensions.
1220
+
1221
+ >>> import numpy as np
1222
+ >>> from scipy.interpolate import RectBivariateSpline
1223
+ >>> def f(x, y):
1224
+ ... return np.exp(-np.sqrt((x / 2) ** 2 + y**2))
1225
+
1226
+ We sample the function on a coarse grid and set up the interpolator. Note that
1227
+ the default ``indexing="xy"`` of meshgrid would result in an unexpected
1228
+ (transposed) result after interpolation.
1229
+
1230
+ >>> xarr = np.linspace(-3, 3, 21)
1231
+ >>> yarr = np.linspace(-3, 3, 21)
1232
+ >>> xgrid, ygrid = np.meshgrid(xarr, yarr, indexing="ij")
1233
+ >>> zdata = f(xgrid, ygrid)
1234
+ >>> rbs = RectBivariateSpline(xarr, yarr, zdata, kx=1, ky=1)
1235
+
1236
+ Next we sample the function along a diagonal slice through the coordinate space
1237
+ on a finer grid using interpolation.
1238
+
1239
+ >>> xinterp = np.linspace(-3, 3, 201)
1240
+ >>> yinterp = np.linspace(3, -3, 201)
1241
+ >>> zinterp = rbs.ev(xinterp, yinterp)
1242
+
1243
+ And check that the interpolation passes through the function evaluations as a
1244
+ function of the distance from the origin along the slice.
1245
+
1246
+ >>> import matplotlib.pyplot as plt
1247
+ >>> fig = plt.figure()
1248
+ >>> ax1 = fig.add_subplot(1, 1, 1)
1249
+ >>> ax1.plot(np.sqrt(xarr**2 + yarr**2), np.diag(zdata), "or")
1250
+ >>> ax1.plot(np.sqrt(xinterp**2 + yinterp**2), zinterp, "-b")
1251
+ >>> plt.show()
1252
+ """
1253
+ return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
1254
+
1255
+ def integral(self, xa, xb, ya, yb):
1256
+ """
1257
+ Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
1258
+
1259
+ Parameters
1260
+ ----------
1261
+ xa, xb : float
1262
+ The end-points of the x integration interval.
1263
+ ya, yb : float
1264
+ The end-points of the y integration interval.
1265
+
1266
+ Returns
1267
+ -------
1268
+ integ : float
1269
+ The value of the resulting integral.
1270
+
1271
+ """
1272
+ tx, ty, c = self.tck[:3]
1273
+ kx, ky = self.degrees
1274
+ return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
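
A short sketch of ``integral``, checked against the analytic double integral
of the sampled function::

    import numpy as np
    from scipy.interpolate import RectBivariateSpline

    xarr = np.linspace(0, 2, 25)
    yarr = np.linspace(0, 1, 20)
    z = np.add.outer(xarr**2, yarr)                # z = x**2 + y
    spl = RectBivariateSpline(xarr, yarr, z)

    # Analytically, the integral over [0, 2] x [0, 1] is 8/3 + 1 = 11/3.
    print(spl.integral(0, 2, 0, 1), 11 / 3)
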
1275
+
1276
+ @staticmethod
1277
+ def _validate_input(x, y, z, w, kx, ky, eps):
1278
+ x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
1279
+ if not x.size == y.size == z.size:
1280
+ raise ValueError('x, y, and z should have the same length')
1281
+
1282
+ if w is not None:
1283
+ w = np.asarray(w)
1284
+ if x.size != w.size:
1285
+ raise ValueError('x, y, z, and w should have the same length')
1286
+ elif not np.all(w >= 0.0):
1287
+ raise ValueError('w should be positive')
1288
+ if (eps is not None) and (not 0.0 < eps < 1.0):
1289
+ raise ValueError('eps should be within the open interval (0, 1)')
1290
+ if not x.size >= (kx + 1) * (ky + 1):
1291
+ raise ValueError('The length of x, y and z should be at least'
1292
+ ' (kx+1) * (ky+1)')
1293
+ return x, y, z, w
1294
+
1295
+
1296
+ class _DerivedBivariateSpline(_BivariateSplineBase):
1297
+ """Bivariate spline constructed from the coefficients and knots of another
1298
+ spline.
1299
+
1300
+ Notes
1301
+ -----
1302
+ The class is not meant to be instantiated directly from the data to be
1303
+ interpolated or smoothed. As a result, its ``fp`` attribute and
1304
+ ``get_residual`` method are inherited but overridden; ``AttributeError`` is
1305
+ raised when they are accessed.
1306
+
1307
+ The other inherited attributes can be used as usual.
1308
+ """
1309
+ _invalid_why = ("is unavailable, because _DerivedBivariateSpline"
1310
+ " instance is not constructed from data that are to be"
1311
+ " interpolated or smoothed, but derived from the"
1312
+ " underlying knots and coefficients of another spline"
1313
+ " object")
1314
+
1315
+ @property
1316
+ def fp(self):
1317
+ raise AttributeError("attribute \"fp\" %s" % self._invalid_why)
1318
+
1319
+ def get_residual(self):
1320
+ raise AttributeError("method \"get_residual\" %s" % self._invalid_why)
1321
+
1322
+
1323
+ class SmoothBivariateSpline(BivariateSpline):
1324
+ """
1325
+ Smooth bivariate spline approximation.
1326
+
1327
+ Parameters
1328
+ ----------
1329
+ x, y, z : array_like
1330
+ 1-D sequences of data points (order is not important).
1331
+ w : array_like, optional
1332
+ Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
1333
+ bbox : array_like, optional
1334
+ Sequence of length 4 specifying the boundary of the rectangular
1335
+ approximation domain. By default,
1336
+ ``bbox=[min(x), max(x), min(y), max(y)]``.
1337
+ kx, ky : ints, optional
1338
+ Degrees of the bivariate spline. Default is 3.
1339
+ s : float, optional
1340
+ Positive smoothing factor defined for estimation condition:
1341
+ ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
1342
+ Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
1343
+ estimate of the standard deviation of ``z[i]``.
1344
+ eps : float, optional
1345
+ A threshold for determining the effective rank of an over-determined
1346
+ linear system of equations. `eps` should have a value within the open
1347
+ interval ``(0, 1)``, the default is 1e-16.
1348
+
1349
+ See Also
1350
+ --------
1351
+ BivariateSpline :
1352
+ a base class for bivariate splines.
1353
+ UnivariateSpline :
1354
+ a smooth univariate spline to fit a given set of data points.
1355
+ LSQBivariateSpline :
1356
+ a bivariate spline using weighted least-squares fitting
1357
+ RectSphereBivariateSpline :
1358
+ a bivariate spline over a rectangular mesh on a sphere
1359
+ SmoothSphereBivariateSpline :
1360
+ a smoothing bivariate spline in spherical coordinates
1361
+ LSQSphereBivariateSpline :
1362
+ a bivariate spline in spherical coordinates using weighted
1363
+ least-squares fitting
1364
+ RectBivariateSpline :
1365
+ a bivariate spline over a rectangular mesh
1366
+ bisplrep :
1367
+ a function to find a bivariate B-spline representation of a surface
1368
+ bisplev :
1369
+ a function to evaluate a bivariate B-spline and its derivatives
1370
+
1371
+ Notes
1372
+ -----
1373
+ The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
1374
+
1375
+ If the input data is such that input dimensions have incommensurate
1376
+ units and differ by many orders of magnitude, the interpolant may have
1377
+ numerical artifacts. Consider rescaling the data before interpolating.
1378
+
1379
+ This routine constructs spline knot vectors automatically via the FITPACK
1380
+ algorithm. The spline knots may be placed away from the data points. For
1381
+ some data sets, this routine may fail to construct an interpolating spline,
1382
+ even if one is requested via the ``s=0`` parameter. In such situations, it is
1383
+ recommended to use `bisplrep` / `bisplev` directly instead of this routine
1384
+ and, if needed, increase the values of ``nxest`` and ``nyest`` parameters
1385
+ of `bisplrep`.
1386
+
1387
+ For linear interpolation, prefer `LinearNDInterpolator`.
1388
+ See ``https://gist.github.com/ev-br/8544371b40f414b7eaf3fe6217209bff``
1389
+ for discussion.
1390
+
1391
+ """
1392
+
1393
+ def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
1394
+ eps=1e-16):
1395
+
1396
+ x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
1397
+ bbox = ravel(bbox)
1398
+ if not bbox.shape == (4,):
1399
+ raise ValueError('bbox shape should be (4,)')
1400
+ if s is not None and not s >= 0.0:
1401
+ raise ValueError("s should be s >= 0.0")
1402
+
1403
+ xb, xe, yb, ye = bbox
1404
+ nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
1405
+ xb, xe, yb,
1406
+ ye, kx, ky,
1407
+ s=s, eps=eps,
1408
+ lwrk2=1)
1409
+ if ier > 10:  # lwrk2 was too small, re-run
1410
+ nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
1411
+ xb, xe, yb,
1412
+ ye, kx, ky,
1413
+ s=s,
1414
+ eps=eps,
1415
+ lwrk2=ier)
1416
+ if ier in [0, -1, -2]: # normal return
1417
+ pass
1418
+ else:
1419
+ message = _surfit_messages.get(ier, 'ier=%s' % (ier))
1420
+ warnings.warn(message, stacklevel=2)
1421
+
1422
+ self.fp = fp
1423
+ self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
1424
+ self.degrees = kx, ky
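
A minimal usage sketch for scattered (non-gridded) data, following the
weighting recipe in the parameter description above; as the Notes warn,
consider `bisplrep` or `LinearNDInterpolator` when this routine struggles::

    import numpy as np
    from scipy.interpolate import SmoothBivariateSpline

    rng = np.random.default_rng(2)
    sigma = 0.01
    x = rng.uniform(-2, 2, 200)                  # scattered sample locations
    y = rng.uniform(-2, 2, 200)
    z = np.exp(-(x**2 + y**2)) + sigma * rng.standard_normal(200)

    # With w = 1/std of the noise, the default smoothing s = len(w) is a
    # reasonable starting point (see the `s` parameter description).
    w = np.full_like(x, 1.0 / sigma)
    spl = SmoothBivariateSpline(x, y, z, w=w)
    print(spl(0.0, 0.0, grid=False))             # roughly exp(0) = 1
    print(spl.get_residual())                    # about len(w) if the fit converged
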
1425
+
1426
+
1427
+ class LSQBivariateSpline(BivariateSpline):
1428
+ """
1429
+ Weighted least-squares bivariate spline approximation.
1430
+
1431
+ Parameters
1432
+ ----------
1433
+ x, y, z : array_like
1434
+ 1-D sequences of data points (order is not important).
1435
+ tx, ty : array_like
1436
+ Strictly ordered 1-D sequences of knots coordinates.
1437
+ w : array_like, optional
1438
+ Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
1439
+ bbox : (4,) array_like, optional
1440
+ Sequence of length 4 specifying the boundary of the rectangular
1441
+ approximation domain. By default,
1442
+ ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
1443
+ kx, ky : ints, optional
1444
+ Degrees of the bivariate spline. Default is 3.
1445
+ eps : float, optional
1446
+ A threshold for determining the effective rank of an over-determined
1447
+ linear system of equations. `eps` should have a value within the open
1448
+ interval ``(0, 1)``, the default is 1e-16.
1449
+
1450
+ See Also
1451
+ --------
1452
+ BivariateSpline :
1453
+ a base class for bivariate splines.
1454
+ UnivariateSpline :
1455
+ a smooth univariate spline to fit a given set of data points.
1456
+ SmoothBivariateSpline :
1457
+ a smoothing bivariate spline through the given points
1458
+ RectSphereBivariateSpline :
1459
+ a bivariate spline over a rectangular mesh on a sphere
1460
+ SmoothSphereBivariateSpline :
1461
+ a smoothing bivariate spline in spherical coordinates
1462
+ LSQSphereBivariateSpline :
1463
+ a bivariate spline in spherical coordinates using weighted
1464
+ least-squares fitting
1465
+ RectBivariateSpline :
1466
+ a bivariate spline over a rectangular mesh.
1467
+ bisplrep :
1468
+ a function to find a bivariate B-spline representation of a surface
1469
+ bisplev :
1470
+ a function to evaluate a bivariate B-spline and its derivatives
1471
+
1472
+ Notes
1473
+ -----
1474
+ The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
1475
+
1476
+ If the input data is such that input dimensions have incommensurate
1477
+ units and differ by many orders of magnitude, the interpolant may have
1478
+ numerical artifacts. Consider rescaling the data before interpolating.
1479
+
1480
+ """
1481
+
1482
+ def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
1483
+ eps=None):
1484
+
1485
+ x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
1486
+ bbox = ravel(bbox)
1487
+ if not bbox.shape == (4,):
1488
+ raise ValueError('bbox shape should be (4,)')
1489
+
1490
+ nx = 2*kx+2+len(tx)
1491
+ ny = 2*ky+2+len(ty)
1492
+ # The Fortran subroutine "surfit" (called as dfitpack.surfit_lsq)
1493
+ # requires that the knot arrays passed as input should be "real
1494
+ # array(s) of dimension nmax" where "nmax" refers to the greater of nx
1495
+ # and ny. We pad the tx1/ty1 arrays here so that this is satisfied, and
1496
+ # slice them to the desired sizes upon return.
1497
+ nmax = max(nx, ny)
1498
+ tx1 = zeros((nmax,), float)
1499
+ ty1 = zeros((nmax,), float)
1500
+ tx1[kx+1:nx-kx-1] = tx
1501
+ ty1[ky+1:ny-ky-1] = ty
1502
+
1503
+ xb, xe, yb, ye = bbox
1504
+ tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, nx, tx1, ny, ty1,
1505
+ w, xb, xe, yb, ye,
1506
+ kx, ky, eps, lwrk2=1)
1507
+ if ier > 10:
1508
+ tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z,
1509
+ nx, tx1, ny, ty1, w,
1510
+ xb, xe, yb, ye,
1511
+ kx, ky, eps, lwrk2=ier)
1512
+ if ier in [0, -1, -2]: # normal return
1513
+ pass
1514
+ else:
1515
+ if ier < -2:
1516
+ deficiency = (nx-kx-1)*(ny-ky-1)+ier
1517
+ message = _surfit_messages.get(-3) % (deficiency)
1518
+ else:
1519
+ message = _surfit_messages.get(ier, 'ier=%s' % (ier))
1520
+ warnings.warn(message, stacklevel=2)
1521
+ self.fp = fp
1522
+ self.tck = tx1[:nx], ty1[:ny], c
1523
+ self.degrees = kx, ky
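
An illustrative sketch of ``LSQBivariateSpline`` with explicitly chosen
interior knots; the boundary knots are padded on internally, as in the
constructor above::

    import numpy as np
    from scipy.interpolate import LSQBivariateSpline

    rng = np.random.default_rng(3)
    x = rng.uniform(-2, 2, 400)
    y = rng.uniform(-2, 2, 400)
    z = np.exp(-(x**2 + y**2)) + 0.01 * rng.standard_normal(400)

    tx = ty = np.linspace(-1.5, 1.5, 5)          # interior knots for each direction
    spl = LSQBivariateSpline(x, y, z, tx, ty)
    print(spl(0.0, 0.0, grid=False))             # roughly 1
    print(spl.get_residual())                    # weighted least-squares residual
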
1524
+
1525
+
1526
+ class RectBivariateSpline(BivariateSpline):
1527
+ """
1528
+ Bivariate spline approximation over a rectangular mesh.
1529
+
1530
+ Can be used for both smoothing and interpolating data.
1531
+
1532
+ Parameters
1533
+ ----------
1534
+ x,y : array_like
1535
+ 1-D arrays of coordinates in strictly ascending order.
1536
+ Evaluated points outside the data range will be extrapolated.
1537
+ z : array_like
1538
+ 2-D array of data with shape (x.size,y.size).
1539
+ bbox : array_like, optional
1540
+ Sequence of length 4 specifying the boundary of the rectangular
1541
+ approximation domain, which means the start and end spline knots of
1542
+ each dimension are set by these values. By default,
1543
+ ``bbox=[min(x), max(x), min(y), max(y)]``.
1544
+ kx, ky : ints, optional
1545
+ Degrees of the bivariate spline. Default is 3.
1546
+ s : float, optional
1547
+ Positive smoothing factor defined for estimation condition:
1548
+ ``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
1549
+ function. Default is ``s=0``, which is for interpolation.
1550
+
1551
+ See Also
1552
+ --------
1553
+ BivariateSpline :
1554
+ a base class for bivariate splines.
1555
+ UnivariateSpline :
1556
+ a smooth univariate spline to fit a given set of data points.
1557
+ SmoothBivariateSpline :
1558
+ a smoothing bivariate spline through the given points
1559
+ LSQBivariateSpline :
1560
+ a bivariate spline using weighted least-squares fitting
1561
+ RectSphereBivariateSpline :
1562
+ a bivariate spline over a rectangular mesh on a sphere
1563
+ SmoothSphereBivariateSpline :
1564
+ a smoothing bivariate spline in spherical coordinates
1565
+ LSQSphereBivariateSpline :
1566
+ a bivariate spline in spherical coordinates using weighted
1567
+ least-squares fitting
1568
+ bisplrep :
1569
+ a function to find a bivariate B-spline representation of a surface
1570
+ bisplev :
1571
+ a function to evaluate a bivariate B-spline and its derivatives
1572
+
1573
+ Notes
1574
+ -----
1575
+
1576
+ If the input data is such that input dimensions have incommensurate
1577
+ units and differ by many orders of magnitude, the interpolant may have
1578
+ numerical artifacts. Consider rescaling the data before interpolating.
1579
+
1580
+ """
1581
+
1582
+ def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
1583
+ x, y, bbox = ravel(x), ravel(y), ravel(bbox)
1584
+ z = np.asarray(z)
1585
+ if not np.all(diff(x) > 0.0):
1586
+ raise ValueError('x must be strictly increasing')
1587
+ if not np.all(diff(y) > 0.0):
1588
+ raise ValueError('y must be strictly increasing')
1589
+ if not x.size == z.shape[0]:
1590
+ raise ValueError('x dimension of z must have same number of '
1591
+ 'elements as x')
1592
+ if not y.size == z.shape[1]:
1593
+ raise ValueError('y dimension of z must have same number of '
1594
+ 'elements as y')
1595
+ if not bbox.shape == (4,):
1596
+ raise ValueError('bbox shape should be (4,)')
1597
+ if s is not None and not s >= 0.0:
1598
+ raise ValueError("s should be s >= 0.0")
1599
+
1600
+ z = ravel(z)
1601
+ xb, xe, yb, ye = bbox
1602
+ nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
1603
+ ye, kx, ky, s)
1604
+
1605
+ if ier not in [0, -1, -2]:
1606
+ msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
1607
+ raise ValueError(msg)
1608
+
1609
+ self.fp = fp
1610
+ self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
1611
+ self.degrees = kx, ky
1612
+
1613
+
1614
+ _spherefit_messages = _surfit_messages.copy()
1615
+ _spherefit_messages[10] = """
1616
+ ERROR. On entry, the input data are controlled on validity. The following
1617
+ restrictions must be satisfied:
1618
+ -1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
1619
+ 0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
1620
+ lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
1621
+ kwrk >= m+(ntest-7)*(npest-7)
1622
+ if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
1623
+ 0<tt(5)<tt(6)<...<tt(nt-4)<pi
1624
+ 0<tp(5)<tp(6)<...<tp(np-4)<2*pi
1625
+ if iopt>=0: s>=0
1626
+ if one of these conditions is found to be violated, control
1627
+ is immediately repassed to the calling program. In that
1628
+ case there is no approximation returned."""
1629
+ _spherefit_messages[-3] = """
1630
+ WARNING. The coefficients of the spline returned have been computed as the
1631
+ minimal norm least-squares solution of a (numerically) rank
1632
+ deficient system (deficiency=%i, rank=%i). Especially if the rank
1633
+ deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
1634
+ the results may be inaccurate. They could also seriously depend on
1635
+ the value of eps."""
1636
+
1637
+
1638
+ class SphereBivariateSpline(_BivariateSplineBase):
1639
+ """
1640
+ Bivariate spline s(x,y) of degree 3 on a sphere, calculated from a
1641
+ given set of data points (theta,phi,r).
1642
+
1643
+ .. versionadded:: 0.11.0
1644
+
1645
+ See Also
1646
+ --------
1647
+ bisplrep :
1648
+ a function to find a bivariate B-spline representation of a surface
1649
+ bisplev :
1650
+ a function to evaluate a bivariate B-spline and its derivatives
1651
+ UnivariateSpline :
1652
+ a smooth univariate spline to fit a given set of data points.
1653
+ SmoothBivariateSpline :
1654
+ a smoothing bivariate spline through the given points
1655
+ LSQUnivariateSpline :
1656
+ a univariate spline using weighted least-squares fitting
1657
+ """
1658
+
1659
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
1660
+ """
1661
+ Evaluate the spline or its derivatives at given positions.
1662
+
1663
+ Parameters
1664
+ ----------
1665
+ theta, phi : array_like
1666
+ Input coordinates.
1667
+
1668
+ If `grid` is False, evaluate the spline at points
1669
+ ``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
1670
+ Numpy broadcasting is obeyed.
1671
+
1672
+ If `grid` is True: evaluate spline at the grid points
1673
+ defined by the coordinate arrays theta, phi. The arrays
1674
+ must be sorted to increasing order.
1675
+ The ordering of axes is consistent with
1676
+ ``np.meshgrid(..., indexing="ij")`` and inconsistent with the
1677
+ default ordering ``np.meshgrid(..., indexing="xy")``.
1678
+ dtheta : int, optional
1679
+ Order of theta-derivative
1680
+
1681
+ .. versionadded:: 0.14.0
1682
+ dphi : int, optional
1683
+ Order of phi-derivative
1684
+
1685
+ .. versionadded:: 0.14.0
1686
+ grid : bool
1687
+ Whether to evaluate the results on a grid spanned by the
1688
+ input arrays, or at points specified by the input arrays.
1689
+
1690
+ .. versionadded:: 0.14.0
1691
+
1692
+ Examples
1693
+ --------
1694
+
1695
+ Suppose that we want to use splines to interpolate a bivariate function on a
1696
+ sphere. The value of the function is known on a grid of longitudes and
1697
+ colatitudes.
1698
+
1699
+ >>> import numpy as np
1700
+ >>> from scipy.interpolate import RectSphereBivariateSpline
1701
+ >>> def f(theta, phi):
1702
+ ... return np.sin(theta) * np.cos(phi)
1703
+
1704
+ We evaluate the function on the grid. Note that the default indexing="xy"
1705
+ of meshgrid would result in an unexpected (transposed) result after
1706
+ interpolation.
1707
+
1708
+ >>> thetaarr = np.linspace(0, np.pi, 22)[1:-1]
1709
+ >>> phiarr = np.linspace(0, 2 * np.pi, 21)[:-1]
1710
+ >>> thetagrid, phigrid = np.meshgrid(thetaarr, phiarr, indexing="ij")
1711
+ >>> zdata = f(thetagrid, phigrid)
1712
+
1713
+ We next set up the interpolator and use it to evaluate the function
1714
+ on a finer grid.
1715
+
1716
+ >>> rsbs = RectSphereBivariateSpline(thetaarr, phiarr, zdata)
1717
+ >>> thetaarr_fine = np.linspace(0, np.pi, 200)
1718
+ >>> phiarr_fine = np.linspace(0, 2 * np.pi, 200)
1719
+ >>> zdata_fine = rsbs(thetaarr_fine, phiarr_fine)
1720
+
1721
+ Finally we plot the coarsely-sampled input data alongside the
1722
+ finely-sampled interpolated data to check that they agree.
1723
+
1724
+ >>> import matplotlib.pyplot as plt
1725
+ >>> fig = plt.figure()
1726
+ >>> ax1 = fig.add_subplot(1, 2, 1)
1727
+ >>> ax2 = fig.add_subplot(1, 2, 2)
1728
+ >>> ax1.imshow(zdata)
1729
+ >>> ax2.imshow(zdata_fine)
1730
+ >>> plt.show()
1731
+ """
1732
+ theta = np.asarray(theta)
1733
+ phi = np.asarray(phi)
1734
+
1735
+ if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
1736
+ raise ValueError("requested theta out of bounds.")
1737
+
1738
+ return _BivariateSplineBase.__call__(self, theta, phi,
1739
+ dx=dtheta, dy=dphi, grid=grid)
1740
+
1741
+ def ev(self, theta, phi, dtheta=0, dphi=0):
1742
+ """
1743
+ Evaluate the spline at points
1744
+
1745
+ Returns the interpolated value at ``(theta[i], phi[i]),
1746
+ i=0,...,len(theta)-1``.
1747
+
1748
+ Parameters
1749
+ ----------
1750
+ theta, phi : array_like
1751
+ Input coordinates. Standard Numpy broadcasting is obeyed.
1752
+ The ordering of axes is consistent with
1753
+ np.meshgrid(..., indexing="ij") and inconsistent with the
1754
+ default ordering np.meshgrid(..., indexing="xy").
1755
+ dtheta : int, optional
1756
+ Order of theta-derivative
1757
+
1758
+ .. versionadded:: 0.14.0
1759
+ dphi : int, optional
1760
+ Order of phi-derivative
1761
+
1762
+ .. versionadded:: 0.14.0
1763
+
1764
+ Examples
1765
+ --------
1766
+ Suppose that we want to use splines to interpolate a bivariate function on a
1767
+ sphere. The value of the function is known on a grid of longitudes and
1768
+ colatitudes.
1769
+
1770
+ >>> import numpy as np
1771
+ >>> from scipy.interpolate import RectSphereBivariateSpline
1772
+ >>> def f(theta, phi):
1773
+ ... return np.sin(theta) * np.cos(phi)
1774
+
1775
+ We evaluate the function on the grid. Note that the default indexing="xy"
1776
+ of meshgrid would result in an unexpected (transposed) result after
1777
+ interpolation.
1778
+
1779
+ >>> thetaarr = np.linspace(0, np.pi, 22)[1:-1]
1780
+ >>> phiarr = np.linspace(0, 2 * np.pi, 21)[:-1]
1781
+ >>> thetagrid, phigrid = np.meshgrid(thetaarr, phiarr, indexing="ij")
1782
+ >>> zdata = f(thetagrid, phigrid)
1783
+
1784
+ We next set up the interpolator and use it to evaluate the function
1785
+ at points not on the original grid.
1786
+
1787
+ >>> rsbs = RectSphereBivariateSpline(thetaarr, phiarr, zdata)
1788
+ >>> thetainterp = np.linspace(thetaarr[0], thetaarr[-1], 200)
1789
+ >>> phiinterp = np.linspace(phiarr[0], phiarr[-1], 200)
1790
+ >>> zinterp = rsbs.ev(thetainterp, phiinterp)
1791
+
1792
+ Finally we plot the original data for a diagonal slice through the
1793
+ initial grid, and the spline approximation along the same slice.
1794
+
1795
+ >>> import matplotlib.pyplot as plt
1796
+ >>> fig = plt.figure()
1797
+ >>> ax1 = fig.add_subplot(1, 1, 1)
1798
+ >>> ax1.plot(np.sin(thetaarr) * np.sin(phiarr), np.diag(zdata), "or")
1799
+ >>> ax1.plot(np.sin(thetainterp) * np.sin(phiinterp), zinterp, "-b")
1800
+ >>> plt.show()
1801
+ """
1802
+ return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
1803
+
1804
+
1805
+ class SmoothSphereBivariateSpline(SphereBivariateSpline):
1806
+ """
1807
+ Smooth bivariate spline approximation in spherical coordinates.
1808
+
1809
+ .. versionadded:: 0.11.0
1810
+
1811
+ Parameters
1812
+ ----------
1813
+ theta, phi, r : array_like
1814
+ 1-D sequences of data points (order is not important). Coordinates
1815
+ must be given in radians. Theta must lie within the interval
1816
+ ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
1817
+ w : array_like, optional
1818
+ Positive 1-D sequence of weights.
1819
+ s : float, optional
1820
+ Positive smoothing factor defined for estimation condition:
1821
+ ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
1822
+ Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
1823
+ estimate of the standard deviation of ``r[i]``.
1824
+ eps : float, optional
1825
+ A threshold for determining the effective rank of an over-determined
1826
+ linear system of equations. `eps` should have a value within the open
1827
+ interval ``(0, 1)``; the default is 1e-16.
1828
+
1829
+ See Also
1830
+ --------
1831
+ BivariateSpline :
1832
+ a base class for bivariate splines.
1833
+ UnivariateSpline :
1834
+ a smooth univariate spline to fit a given set of data points.
1835
+ SmoothBivariateSpline :
1836
+ a smoothing bivariate spline through the given points
1837
+ LSQBivariateSpline :
1838
+ a bivariate spline using weighted least-squares fitting
1839
+ RectSphereBivariateSpline :
1840
+ a bivariate spline over a rectangular mesh on a sphere
1841
+ LSQSphereBivariateSpline :
1842
+ a bivariate spline in spherical coordinates using weighted
1843
+ least-squares fitting
1844
+ RectBivariateSpline :
1845
+ a bivariate spline over a rectangular mesh.
1846
+ bisplrep :
1847
+ a function to find a bivariate B-spline representation of a surface
1848
+ bisplev :
1849
+ a function to evaluate a bivariate B-spline and its derivatives
1850
+
1851
+ Notes
1852
+ -----
1853
+ For more information, see the FITPACK_ site about this function.
1854
+
1855
+ .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
1856
+
1857
+ Examples
1858
+ --------
1859
+ Suppose we have global data on a coarse grid (the input data does not
1860
+ have to be on a grid):
1861
+
1862
+ >>> import numpy as np
1863
+ >>> theta = np.linspace(0., np.pi, 7)
1864
+ >>> phi = np.linspace(0., 2*np.pi, 9)
1865
+ >>> data = np.empty((theta.shape[0], phi.shape[0]))
1866
+ >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
1867
+ >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
1868
+ >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
1869
+ >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
1870
+ >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
1871
+ >>> data[3,3:-2] = 3.
1872
+ >>> data = np.roll(data, 4, 1)
1873
+
1874
+ We need to set up the interpolator object
1875
+
1876
+ >>> lats, lons = np.meshgrid(theta, phi)
1877
+ >>> from scipy.interpolate import SmoothSphereBivariateSpline
1878
+ >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
1879
+ ... data.T.ravel(), s=3.5)
1880
+
1881
+ As a first test, we'll see what the algorithm returns when run on the
1882
+ input coordinates
1883
+
1884
+ >>> data_orig = lut(theta, phi)
1885
+
1886
+ Finally we interpolate the data to a finer grid
1887
+
1888
+ >>> fine_lats = np.linspace(0., np.pi, 70)
1889
+ >>> fine_lons = np.linspace(0., 2 * np.pi, 90)
1890
+
1891
+ >>> data_smth = lut(fine_lats, fine_lons)
1892
+
1893
+ >>> import matplotlib.pyplot as plt
1894
+ >>> fig = plt.figure()
1895
+ >>> ax1 = fig.add_subplot(131)
1896
+ >>> ax1.imshow(data, interpolation='nearest')
1897
+ >>> ax2 = fig.add_subplot(132)
1898
+ >>> ax2.imshow(data_orig, interpolation='nearest')
1899
+ >>> ax3 = fig.add_subplot(133)
1900
+ >>> ax3.imshow(data_smth, interpolation='nearest')
1901
+ >>> plt.show()
1902
+
1903
+ """
1904
+
1905
+ def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
1906
+
1907
+ theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
1908
+
1909
+ # input validation
1910
+ if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
1911
+ raise ValueError('theta should be between [0, pi]')
1912
+ if not ((0.0 <= phi).all() and (phi <= 2.0 * np.pi).all()):
1913
+ raise ValueError('phi should be between [0, 2pi]')
1914
+ if w is not None:
1915
+ w = np.asarray(w)
1916
+ if not (w >= 0.0).all():
1917
+ raise ValueError('w should be positive')
1918
+ if not s >= 0.0:
1919
+ raise ValueError('s should be positive')
1920
+ if not 0.0 < eps < 1.0:
1921
+ raise ValueError('eps should be between (0, 1)')
1922
+
1923
+ nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
1924
+ r, w=w, s=s,
1925
+ eps=eps)
1926
+ if ier not in [0, -1, -2]:
1927
+ message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
1928
+ raise ValueError(message)
1929
+
1930
+ self.fp = fp
1931
+ self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
1932
+ self.degrees = (3, 3)
1933
+
1934
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
1935
+
1936
+ theta = np.asarray(theta)
1937
+ phi = np.asarray(phi)
1938
+
1939
+ if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
1940
+ raise ValueError("requested phi out of bounds.")
1941
+
1942
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
1943
+ dphi=dphi, grid=grid)
1944
+
1945
+
1946
+ class LSQSphereBivariateSpline(SphereBivariateSpline):
1947
+ """
1948
+ Weighted least-squares bivariate spline approximation in spherical
1949
+ coordinates.
1950
+
1951
+ Determines a smoothing bicubic spline according to a given
1952
+ set of knots in the `theta` and `phi` directions.
1953
+
1954
+ .. versionadded:: 0.11.0
1955
+
1956
+ Parameters
1957
+ ----------
1958
+ theta, phi, r : array_like
1959
+ 1-D sequences of data points (order is not important). Coordinates
1960
+ must be given in radians. Theta must lie within the interval
1961
+ ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
1962
+ tt, tp : array_like
1963
+ Strictly ordered 1-D sequences of knots coordinates.
1964
+ Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
1965
+ w : array_like, optional
1966
+ Positive 1-D sequence of weights, of the same length as `theta`, `phi`
1967
+ and `r`.
1968
+ eps : float, optional
1969
+ A threshold for determining the effective rank of an over-determined
1970
+ linear system of equations. `eps` should have a value within the
1971
+ open interval ``(0, 1)``; the default is 1e-16.
1972
+
1973
+ See Also
1974
+ --------
1975
+ BivariateSpline :
1976
+ a base class for bivariate splines.
1977
+ UnivariateSpline :
1978
+ a smooth univariate spline to fit a given set of data points.
1979
+ SmoothBivariateSpline :
1980
+ a smoothing bivariate spline through the given points
1981
+ LSQBivariateSpline :
1982
+ a bivariate spline using weighted least-squares fitting
1983
+ RectSphereBivariateSpline :
1984
+ a bivariate spline over a rectangular mesh on a sphere
1985
+ SmoothSphereBivariateSpline :
1986
+ a smoothing bivariate spline in spherical coordinates
1987
+ RectBivariateSpline :
1988
+ a bivariate spline over a rectangular mesh.
1989
+ bisplrep :
1990
+ a function to find a bivariate B-spline representation of a surface
1991
+ bisplev :
1992
+ a function to evaluate a bivariate B-spline and its derivatives
1993
+
1994
+ Notes
1995
+ -----
1996
+ For more information, see the FITPACK_ site about this function.
1997
+
1998
+ .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
1999
+
2000
+ Examples
2001
+ --------
2002
+ Suppose we have global data on a coarse grid (the input data does not
2003
+ have to be on a grid):
2004
+
2005
+ >>> from scipy.interpolate import LSQSphereBivariateSpline
2006
+ >>> import numpy as np
2007
+ >>> import matplotlib.pyplot as plt
2008
+
2009
+ >>> theta = np.linspace(0, np.pi, num=7)
2010
+ >>> phi = np.linspace(0, 2*np.pi, num=9)
2011
+ >>> data = np.empty((theta.shape[0], phi.shape[0]))
2012
+ >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
2013
+ >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
2014
+ >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
2015
+ >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
2016
+ >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
2017
+ >>> data[3,3:-2] = 3.
2018
+ >>> data = np.roll(data, 4, 1)
2019
+
2020
+ We need to set up the interpolator object. Here, we must also specify the
2021
+ coordinates of the knots to use.
2022
+
2023
+ >>> lats, lons = np.meshgrid(theta, phi)
2024
+ >>> knotst, knotsp = theta.copy(), phi.copy()
2025
+ >>> knotst[0] += .0001
2026
+ >>> knotst[-1] -= .0001
2027
+ >>> knotsp[0] += .0001
2028
+ >>> knotsp[-1] -= .0001
2029
+ >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
2030
+ ... data.T.ravel(), knotst, knotsp)
2031
+
2032
+ As a first test, we'll see what the algorithm returns when run on the
2033
+ input coordinates
2034
+
2035
+ >>> data_orig = lut(theta, phi)
2036
+
2037
+ Finally we interpolate the data to a finer grid
2038
+
2039
+ >>> fine_lats = np.linspace(0., np.pi, 70)
2040
+ >>> fine_lons = np.linspace(0., 2*np.pi, 90)
2041
+ >>> data_lsq = lut(fine_lats, fine_lons)
2042
+
2043
+ >>> fig = plt.figure()
2044
+ >>> ax1 = fig.add_subplot(131)
2045
+ >>> ax1.imshow(data, interpolation='nearest')
2046
+ >>> ax2 = fig.add_subplot(132)
2047
+ >>> ax2.imshow(data_orig, interpolation='nearest')
2048
+ >>> ax3 = fig.add_subplot(133)
2049
+ >>> ax3.imshow(data_lsq, interpolation='nearest')
2050
+ >>> plt.show()
2051
+
2052
+ """
2053
+
2054
+ def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
2055
+
2056
+ theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
2057
+ tt, tp = np.asarray(tt), np.asarray(tp)
2058
+
2059
+ if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
2060
+ raise ValueError('theta should be between [0, pi]')
2061
+ if not ((0.0 <= phi).all() and (phi <= 2*np.pi).all()):
2062
+ raise ValueError('phi should be between [0, 2pi]')
2063
+ if not ((0.0 < tt).all() and (tt < np.pi).all()):
2064
+ raise ValueError('tt should be between (0, pi)')
2065
+ if not ((0.0 < tp).all() and (tp < 2*np.pi).all()):
2066
+ raise ValueError('tp should be between (0, 2pi)')
2067
+ if w is not None:
2068
+ w = np.asarray(w)
2069
+ if not (w >= 0.0).all():
2070
+ raise ValueError('w should be positive')
2071
+ if not 0.0 < eps < 1.0:
2072
+ raise ValueError('eps should be between (0, 1)')
2073
+
2074
+ nt_, np_ = 8 + len(tt), 8 + len(tp)
2075
+ tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
2076
+ tt_[4:-4], tp_[4:-4] = tt, tp
2077
+ tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
2078
+ tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
2079
+ w=w, eps=eps)
2080
+ if ier > 0:
2081
+ message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
2082
+ raise ValueError(message)
2083
+
2084
+ self.fp = fp
2085
+ self.tck = tt_, tp_, c
2086
+ self.degrees = (3, 3)
2087
+
2088
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
2089
+
2090
+ theta = np.asarray(theta)
2091
+ phi = np.asarray(phi)
2092
+
2093
+ if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
2094
+ raise ValueError("requested phi out of bounds.")
2095
+
2096
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
2097
+ dphi=dphi, grid=grid)
2098
+
2099
+
2100
+ _spfit_messages = _surfit_messages.copy()
2101
+ _spfit_messages[10] = """
2102
+ ERROR: on entry, the input data are controlled on validity
2103
+ the following restrictions must be satisfied.
2104
+ -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
2105
+ -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
2106
+ -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
2107
+ mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
2108
+ kwrk>=5+mu+mv+nuest+nvest,
2109
+ lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
2110
+ 0< u(i-1)<u(i)< pi,i=2,..,mu,
2111
+ -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
2112
+ if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
2113
+ 0<tu(5)<tu(6)<...<tu(nu-4)< pi
2114
+ 8<=nv<=min(nvest,mv+7)
2115
+ v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
2116
+ the schoenberg-whitney conditions, i.e. there must be
2117
+ subset of grid coordinates uu(p) and vv(q) such that
2118
+ tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
2119
+ (iopt(2)=1 and iopt(3)=1 also count for a uu-value
2120
+ tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
2121
+ (vv(q) is either a value v(j) or v(j)+2*pi)
2122
+ if iopt(1)>=0: s>=0
2123
+ if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
2124
+ if one of these conditions is found to be violated,control is
2125
+ immediately repassed to the calling program. in that case there is no
2126
+ approximation returned."""
2127
+
2128
+
2129
+ class RectSphereBivariateSpline(SphereBivariateSpline):
2130
+ """
2131
+ Bivariate spline approximation over a rectangular mesh on a sphere.
2132
+
2133
+ Can be used for smoothing data.
2134
+
2135
+ .. versionadded:: 0.11.0
2136
+
2137
+ Parameters
2138
+ ----------
2139
+ u : array_like
2140
+ 1-D array of colatitude coordinates in strictly ascending order.
2141
+ Coordinates must be given in radians and lie within the open interval
2142
+ ``(0, pi)``.
2143
+ v : array_like
2144
+ 1-D array of longitude coordinates in strictly ascending order.
2145
+ Coordinates must be given in radians. First element (``v[0]``) must lie
2146
+ within the interval ``[-pi, pi)``. Last element (``v[-1]``) must satisfy
2147
+ ``v[-1] <= v[0] + 2*pi``.
2148
+ r : array_like
2149
+ 2-D array of data with shape ``(u.size, v.size)``.
2150
+ s : float, optional
2151
+ Positive smoothing factor defined for estimation condition
2152
+ (``s=0`` is for interpolation).
2153
+ pole_continuity : bool or (bool, bool), optional
2154
+ Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
2155
+ ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
2156
+ will be 1 or 0 when this is True or False, respectively.
2157
+ Defaults to False.
2158
+ pole_values : float or (float, float), optional
2159
+ Data values at the poles ``u=0`` and ``u=pi``. Either the whole
2160
+ parameter or each individual element can be None. Defaults to None.
2161
+ pole_exact : bool or (bool, bool), optional
2162
+ Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
2163
+ value is considered to be the right function value, and it will be
2164
+ fitted exactly. If False, the value will be considered to be a data
2165
+ value just like the other data values. Defaults to False.
2166
+ pole_flat : bool or (bool, bool), optional
2167
+ For the poles at ``u=0`` and ``u=pi``, specify whether or not the
2168
+ approximation has vanishing derivatives. Defaults to False.
2169
+
2170
+ See Also
2171
+ --------
2172
+ BivariateSpline :
2173
+ a base class for bivariate splines.
2174
+ UnivariateSpline :
2175
+ a smooth univariate spline to fit a given set of data points.
2176
+ SmoothBivariateSpline :
2177
+ a smoothing bivariate spline through the given points
2178
+ LSQBivariateSpline :
2179
+ a bivariate spline using weighted least-squares fitting
2180
+ SmoothSphereBivariateSpline :
2181
+ a smoothing bivariate spline in spherical coordinates
2182
+ LSQSphereBivariateSpline :
2183
+ a bivariate spline in spherical coordinates using weighted
2184
+ least-squares fitting
2185
+ RectBivariateSpline :
2186
+ a bivariate spline over a rectangular mesh.
2187
+ bisplrep :
2188
+ a function to find a bivariate B-spline representation of a surface
2189
+ bisplev :
2190
+ a function to evaluate a bivariate B-spline and its derivatives
2191
+
2192
+ Notes
2193
+ -----
2194
+ Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
2195
+ ``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
2196
+ least-squares spline approximation is not implemented yet.
2197
+
2198
+ When actually performing the interpolation, the requested `v` values must
2199
+ lie within the same length 2pi interval that the original `v` values were
2200
+ chosen from.
2201
+
2202
+ For more information, see the FITPACK_ site about this function.
2203
+
2204
+ .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
2205
+
2206
+ Examples
2207
+ --------
2208
+ Suppose we have global data on a coarse grid
2209
+
2210
+ >>> import numpy as np
2211
+ >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
2212
+ >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
2213
+ >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
2214
+ ... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
2215
+
2216
+ We want to interpolate it to a global one-degree grid
2217
+
2218
+ >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
2219
+ >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
2220
+ >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
2221
+
2222
+ We need to set up the interpolator object
2223
+
2224
+ >>> from scipy.interpolate import RectSphereBivariateSpline
2225
+ >>> lut = RectSphereBivariateSpline(lats, lons, data)
2226
+
2227
+ Finally we interpolate the data. The `RectSphereBivariateSpline` object
2228
+ only takes 1-D arrays as input, therefore we need to do some reshaping.
2229
+
2230
+ >>> data_interp = lut.ev(new_lats.ravel(),
2231
+ ... new_lons.ravel()).reshape((360, 180)).T
2232
+
2233
+ Looking at the original and the interpolated data, one can see that the
2234
+ interpolant reproduces the original data very well:
2235
+
2236
+ >>> import matplotlib.pyplot as plt
2237
+ >>> fig = plt.figure()
2238
+ >>> ax1 = fig.add_subplot(211)
2239
+ >>> ax1.imshow(data, interpolation='nearest')
2240
+ >>> ax2 = fig.add_subplot(212)
2241
+ >>> ax2.imshow(data_interp, interpolation='nearest')
2242
+ >>> plt.show()
2243
+
2244
+ Choosing the optimal value of ``s`` can be a delicate task. Recommended
2245
+ values for ``s`` depend on the accuracy of the data values. If the user
2246
+ has an idea of the statistical errors on the data, she can also find a
2247
+ proper estimate for ``s``. By assuming that, if she specifies the
2248
+ right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
2249
+ reproduces the function underlying the data, she can evaluate
2250
+ ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
2251
+ For example, if she knows that the statistical errors on her
2252
+ ``r(i,j)``-values are not greater than 0.1, she may expect that a good
2253
+ ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
2254
+
2255
+ If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
2256
+ be determined by trial and error. The best is then to start with a very
2257
+ large value of ``s`` (to determine the least-squares polynomial and the
2258
+ corresponding upper bound ``fp0`` for ``s``) and then to progressively
2259
+ decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
2260
+ ``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
2261
+ shows more detail) to obtain closer fits.
2262
+
2263
+ The interpolation results for different values of ``s`` give some insight
2264
+ into this process:
2265
+
2266
+ >>> fig2 = plt.figure()
2267
+ >>> s = [3e9, 2e9, 1e9, 1e8]
2268
+ >>> for idx, sval in enumerate(s, 1):
2269
+ ... lut = RectSphereBivariateSpline(lats, lons, data, s=sval)
2270
+ ... data_interp = lut.ev(new_lats.ravel(),
2271
+ ... new_lons.ravel()).reshape((360, 180)).T
2272
+ ... ax = fig2.add_subplot(2, 2, idx)
2273
+ ... ax.imshow(data_interp, interpolation='nearest')
2274
+ ... ax.set_title(f"s = {sval:g}")
2275
+ >>> plt.show()
2276
+
2277
+ """
2278
+
2279
+ def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
2280
+ pole_exact=False, pole_flat=False):
2281
+ iopt = np.array([0, 0, 0], dtype=dfitpack_int)
2282
+ ider = np.array([-1, 0, -1, 0], dtype=dfitpack_int)
2283
+ if pole_values is None:
2284
+ pole_values = (None, None)
2285
+ elif isinstance(pole_values, (float, np.float32, np.float64)):
2286
+ pole_values = (pole_values, pole_values)
2287
+ if isinstance(pole_continuity, bool):
2288
+ pole_continuity = (pole_continuity, pole_continuity)
2289
+ if isinstance(pole_exact, bool):
2290
+ pole_exact = (pole_exact, pole_exact)
2291
+ if isinstance(pole_flat, bool):
2292
+ pole_flat = (pole_flat, pole_flat)
2293
+
2294
+ r0, r1 = pole_values
2295
+ iopt[1:] = pole_continuity
2296
+ if r0 is None:
2297
+ ider[0] = -1
2298
+ else:
2299
+ ider[0] = pole_exact[0]
2300
+
2301
+ if r1 is None:
2302
+ ider[2] = -1
2303
+ else:
2304
+ ider[2] = pole_exact[1]
2305
+
2306
+ ider[1], ider[3] = pole_flat
2307
+
2308
+ u, v = np.ravel(u), np.ravel(v)
2309
+ r = np.asarray(r)
2310
+
2311
+ if not (0.0 < u[0] and u[-1] < np.pi):
2312
+ raise ValueError('u should be between (0, pi)')
2313
+ if not -np.pi <= v[0] < np.pi:
2314
+ raise ValueError('v[0] should be between [-pi, pi)')
2315
+ if not v[-1] <= v[0] + 2*np.pi:
2316
+ raise ValueError('v[-1] should be v[0] + 2pi or less ')
2317
+
2318
+ if not np.all(np.diff(u) > 0.0):
2319
+ raise ValueError('u must be strictly increasing')
2320
+ if not np.all(np.diff(v) > 0.0):
2321
+ raise ValueError('v must be strictly increasing')
2322
+
2323
+ if not u.size == r.shape[0]:
2324
+ raise ValueError('u dimension of r must have same number of '
2325
+ 'elements as u')
2326
+ if not v.size == r.shape[1]:
2327
+ raise ValueError('v dimension of r must have same number of '
2328
+ 'elements as v')
2329
+
2330
+ if pole_continuity[1] is False and pole_flat[1] is True:
2331
+ raise ValueError('if pole_continuity is False, so must be '
2332
+ 'pole_flat')
2333
+ if pole_continuity[0] is False and pole_flat[0] is True:
2334
+ raise ValueError('if pole_continuity is False, so must be '
2335
+ 'pole_flat')
2336
+
2337
+ if not s >= 0.0:
2338
+ raise ValueError('s should be positive')
2339
+
2340
+ r = np.ravel(r)
2341
+ nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
2342
+ u.copy(),
2343
+ v.copy(),
2344
+ r.copy(),
2345
+ r0, r1, s)
2346
+
2347
+ if ier not in [0, -1, -2]:
2348
+ msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
2349
+ raise ValueError(msg)
2350
+
2351
+ self.fp = fp
2352
+ self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
2353
+ self.degrees = (3, 3)
2354
+ self.v0 = v[0]
2355
+
2356
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
2357
+
2358
+ theta = np.asarray(theta)
2359
+ phi = np.asarray(phi)
2360
+
2361
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
2362
+ dphi=dphi, grid=grid)
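The class above also supports evaluation on the outer-product grid (grid=True, the default of __call__) and analytic derivatives through dtheta/dphi, which the docstring example does not exercise. A minimal sketch, assuming only the public scipy.interpolate.RectSphereBivariateSpline API documented above (synthetic data, illustrative value of s):

import numpy as np
from scipy.interpolate import RectSphereBivariateSpline

# Colatitude strictly inside (0, pi); longitude starting at -pi
u = np.linspace(0.05, np.pi - 0.05, 19)                 # colatitude
v = np.linspace(-np.pi, np.pi, 36, endpoint=False)      # longitude
r = np.sin(u)[:, None] * np.cos(v)[None, :]             # gridded data

spl = RectSphereBivariateSpline(u, v, r, s=5)

# grid=True (default): evaluate on the outer product of two sorted 1-D arrays
theta_new = np.linspace(0.1, np.pi - 0.1, 50)
phi_new = np.linspace(-np.pi, np.pi - 0.1, 80)
vals = spl(theta_new, phi_new)                          # shape (50, 80)

# ev / grid=False: pointwise evaluation, here of the first theta-derivative
d_dtheta = spl.ev(theta_new[:10], phi_new[:10], dtheta=1)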
venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py ADDED
@@ -0,0 +1,805 @@
1
+ """
2
+ fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
3
+ FITPACK is a collection of FORTRAN programs for curve and surface
4
+ fitting with splines and tensor product splines.
5
+
6
+ See
7
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
8
+ or
9
+ http://www.netlib.org/dierckx/
10
+
11
+ Copyright 2002 Pearu Peterson all rights reserved,
12
+ Pearu Peterson <[email protected]>
13
+ Permission to use, modify, and distribute this software is given under the
14
+ terms of the SciPy (BSD style) license. See LICENSE.txt that came with
15
+ this distribution for specifics.
16
+
17
+ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
18
+
19
+ TODO: Make interfaces to the following fitpack functions:
20
+ For univariate splines: cocosp, concon, fourco, insert
21
+ For bivariate splines: profil, regrid, parsur, surev
22
+ """
23
+
24
+ __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
25
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
26
+
27
+ import warnings
28
+ import numpy as np
29
+ from . import _fitpack
30
+ from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
31
+ empty, iinfo, asarray)
32
+
33
+ # Try to replace _fitpack interface with
34
+ # f2py-generated version
35
+ from . import dfitpack
36
+
37
+
38
+ dfitpack_int = dfitpack.types.intvar.dtype
39
+
40
+
41
+ def _int_overflow(x, exception, msg=None):
42
+ """Cast the value to a dfitpack_int and raise an OverflowError if the value
43
+ cannot fit.
44
+ """
45
+ if x > iinfo(dfitpack_int).max:
46
+ if msg is None:
47
+ msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
48
+ raise exception(msg)
49
+ return dfitpack_int.type(x)
50
+
51
+
52
+ _iermess = {
53
+ 0: ["The spline has a residual sum of squares fp such that "
54
+ "abs(fp-s)/s<=0.001", None],
55
+ -1: ["The spline is an interpolating spline (fp=0)", None],
56
+ -2: ["The spline is weighted least-squares polynomial of degree k.\n"
57
+ "fp gives the upper bound fp0 for the smoothing factor s", None],
58
+ 1: ["The required storage space exceeds the available storage space.\n"
59
+ "Probable causes: data (x,y) size is too small or smoothing parameter"
60
+ "\ns is too small (fp>s).", ValueError],
61
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
62
+ "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
63
+ ValueError],
64
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
65
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
66
+ "(abs(fp-s)/s>0.001)", ValueError],
67
+ 10: ["Error on input data", ValueError],
68
+ 'unknown': ["An error occurred", TypeError]
69
+ }
70
+
71
+ _iermess2 = {
72
+ 0: ["The spline has a residual sum of squares fp such that "
73
+ "abs(fp-s)/s<=0.001", None],
74
+ -1: ["The spline is an interpolating spline (fp=0)", None],
75
+ -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
76
+ "\nfp gives the upper bound fp0 for the smoothing factor s", None],
77
+ -3: ["Warning. The coefficients of the spline have been computed as the\n"
78
+ "minimal norm least-squares solution of a rank deficient system.",
79
+ None],
80
+ 1: ["The required storage space exceeds the available storage space.\n"
81
+ "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
82
+ ValueError],
83
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
84
+ "with fp = s. Probable causes: s too small or badly chosen eps.\n"
85
+ "(abs(fp-s)/s>0.001)", ValueError],
86
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
87
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
88
+ "(abs(fp-s)/s>0.001)", ValueError],
89
+ 4: ["No more knots can be added because the number of B-spline\n"
90
+ "coefficients already exceeds the number of data points m.\n"
91
+ "Probable causes: either s or m too small. (fp>s)", ValueError],
92
+ 5: ["No more knots can be added because the additional knot would\n"
93
+ "coincide with an old one. Probable cause: s too small or too large\n"
94
+ "a weight to an inaccurate data point. (fp>s)", ValueError],
95
+ 10: ["Error on input data", ValueError],
96
+ 11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
97
+ "the minimal least-squares solution of a rank deficient system of\n"
98
+ "linear equations.", ValueError],
99
+ 'unknown': ["An error occurred", TypeError]
100
+ }
101
+
102
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
103
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
104
+ 'ub': 0, 'ue': 1}
105
+
106
+
107
+ def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
108
+ full_output=0, nest=None, per=0, quiet=1):
109
+ # see the docstring of `_fitpack_py/splprep`
110
+ if task <= 0:
111
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
112
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
113
+ 'ub': 0, 'ue': 1}
114
+ x = atleast_1d(x)
115
+ idim, m = x.shape
116
+ if per:
117
+ for i in range(idim):
118
+ if x[i][0] != x[i][-1]:
119
+ if not quiet:
120
+ warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
121
+ (i, m, i)),
122
+ stacklevel=2)
123
+ x[i][-1] = x[i][0]
124
+ if not 0 < idim < 11:
125
+ raise TypeError('0 < idim < 11 must hold')
126
+ if w is None:
127
+ w = ones(m, float)
128
+ else:
129
+ w = atleast_1d(w)
130
+ ipar = (u is not None)
131
+ if ipar:
132
+ _parcur_cache['u'] = u
133
+ if ub is None:
134
+ _parcur_cache['ub'] = u[0]
135
+ else:
136
+ _parcur_cache['ub'] = ub
137
+ if ue is None:
138
+ _parcur_cache['ue'] = u[-1]
139
+ else:
140
+ _parcur_cache['ue'] = ue
141
+ else:
142
+ _parcur_cache['u'] = zeros(m, float)
143
+ if not (1 <= k <= 5):
144
+ raise TypeError('1 <= k= %d <=5 must hold' % k)
145
+ if not (-1 <= task <= 1):
146
+ raise TypeError('task must be -1, 0 or 1')
147
+ if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
148
+ raise TypeError('Mismatch of input dimensions')
149
+ if s is None:
150
+ s = m - sqrt(2*m)
151
+ if t is None and task == -1:
152
+ raise TypeError('Knots must be given for task=-1')
153
+ if t is not None:
154
+ _parcur_cache['t'] = atleast_1d(t)
155
+ n = len(_parcur_cache['t'])
156
+ if task == -1 and n < 2*k + 2:
157
+ raise TypeError('There must be at least 2*k+2 knots for task=-1')
158
+ if m <= k:
159
+ raise TypeError('m > k must hold')
160
+ if nest is None:
161
+ nest = m + 2*k
162
+
163
+ if (task >= 0 and s == 0) or (nest < 0):
164
+ if per:
165
+ nest = m + 2*k
166
+ else:
167
+ nest = m + k + 1
168
+ nest = max(nest, 2*k + 3)
169
+ u = _parcur_cache['u']
170
+ ub = _parcur_cache['ub']
171
+ ue = _parcur_cache['ue']
172
+ t = _parcur_cache['t']
173
+ wrk = _parcur_cache['wrk']
174
+ iwrk = _parcur_cache['iwrk']
175
+ t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
176
+ task, ipar, s, t, nest, wrk, iwrk, per)
177
+ _parcur_cache['u'] = o['u']
178
+ _parcur_cache['ub'] = o['ub']
179
+ _parcur_cache['ue'] = o['ue']
180
+ _parcur_cache['t'] = t
181
+ _parcur_cache['wrk'] = o['wrk']
182
+ _parcur_cache['iwrk'] = o['iwrk']
183
+ ier = o['ier']
184
+ fp = o['fp']
185
+ n = len(t)
186
+ u = o['u']
187
+ c.shape = idim, n - k - 1
188
+ tcku = [t, list(c), k], u
189
+ if ier <= 0 and not quiet:
190
+ warnings.warn(RuntimeWarning(_iermess[ier][0] +
191
+ "\tk=%d n=%d m=%d fp=%f s=%f" %
192
+ (k, len(t), m, fp, s)),
193
+ stacklevel=2)
194
+ if ier > 0 and not full_output:
195
+ if ier in [1, 2, 3]:
196
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
197
+ else:
198
+ try:
199
+ raise _iermess[ier][1](_iermess[ier][0])
200
+ except KeyError as e:
201
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
202
+ if full_output:
203
+ try:
204
+ return tcku, fp, ier, _iermess[ier][0]
205
+ except KeyError:
206
+ return tcku, fp, ier, _iermess['unknown'][0]
207
+ else:
208
+ return tcku
209
+
210
+
211
+ _curfit_cache = {'t': array([], float), 'wrk': array([], float),
212
+ 'iwrk': array([], dfitpack_int)}
213
+
214
+
215
+ def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
216
+ full_output=0, per=0, quiet=1):
217
+ # see the docstring of `_fitpack_py/splrep`
218
+ if task <= 0:
219
+ _curfit_cache = {}
220
+ x, y = map(atleast_1d, [x, y])
221
+ m = len(x)
222
+ if w is None:
223
+ w = ones(m, float)
224
+ if s is None:
225
+ s = 0.0
226
+ else:
227
+ w = atleast_1d(w)
228
+ if s is None:
229
+ s = m - sqrt(2*m)
230
+ if not len(w) == m:
231
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
232
+ if (m != len(y)) or (m != len(w)):
233
+ raise TypeError('Lengths of the first three arguments (x,y,w) must '
234
+ 'be equal')
235
+ if not (1 <= k <= 5):
236
+ raise TypeError('Given degree of the spline (k=%d) is not supported. '
237
+ '(1<=k<=5)' % k)
238
+ if m <= k:
239
+ raise TypeError('m > k must hold')
240
+ if xb is None:
241
+ xb = x[0]
242
+ if xe is None:
243
+ xe = x[-1]
244
+ if not (-1 <= task <= 1):
245
+ raise TypeError('task must be -1, 0 or 1')
246
+ if t is not None:
247
+ task = -1
248
+ if task == -1:
249
+ if t is None:
250
+ raise TypeError('Knots must be given for task=-1')
251
+ numknots = len(t)
252
+ _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
253
+ _curfit_cache['t'][k+1:-k-1] = t
254
+ nest = len(_curfit_cache['t'])
255
+ elif task == 0:
256
+ if per:
257
+ nest = max(m + 2*k, 2*k + 3)
258
+ else:
259
+ nest = max(m + k + 1, 2*k + 3)
260
+ t = empty((nest,), float)
261
+ _curfit_cache['t'] = t
262
+ if task <= 0:
263
+ if per:
264
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
265
+ else:
266
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
267
+ _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
268
+ try:
269
+ t = _curfit_cache['t']
270
+ wrk = _curfit_cache['wrk']
271
+ iwrk = _curfit_cache['iwrk']
272
+ except KeyError as e:
273
+ raise TypeError("must call with task=1 only after"
274
+ " call with task=0,-1") from e
275
+ if not per:
276
+ n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
277
+ xb, xe, k, s)
278
+ else:
279
+ n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
280
+ tck = (t[:n], c[:n], k)
281
+ if ier <= 0 and not quiet:
282
+ _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
283
+ (k, len(t), m, fp, s))
284
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
285
+ if ier > 0 and not full_output:
286
+ if ier in [1, 2, 3]:
287
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
288
+ else:
289
+ try:
290
+ raise _iermess[ier][1](_iermess[ier][0])
291
+ except KeyError as e:
292
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
293
+ if full_output:
294
+ try:
295
+ return tck, fp, ier, _iermess[ier][0]
296
+ except KeyError:
297
+ return tck, fp, ier, _iermess['unknown'][0]
298
+ else:
299
+ return tck
300
+
301
+
302
+ def splev(x, tck, der=0, ext=0):
303
+ # see the docstring of `_fitpack_py/splev`
304
+ t, c, k = tck
305
+ try:
306
+ c[0][0]
307
+ parametric = True
308
+ except Exception:
309
+ parametric = False
310
+ if parametric:
311
+ return list(map(lambda c, x=x, t=t, k=k, der=der:
312
+ splev(x, [t, c, k], der, ext), c))
313
+ else:
314
+ if not (0 <= der <= k):
315
+ raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
316
+ if ext not in (0, 1, 2, 3):
317
+ raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
318
+
319
+ x = asarray(x)
320
+ shape = x.shape
321
+ x = atleast_1d(x).ravel()
322
+ if der == 0:
323
+ y, ier = dfitpack.splev(t, c, k, x, ext)
324
+ else:
325
+ y, ier = dfitpack.splder(t, c, k, x, der, ext)
326
+
327
+ if ier == 10:
328
+ raise ValueError("Invalid input data")
329
+ if ier == 1:
330
+ raise ValueError("Found x value not in the domain")
331
+ if ier:
332
+ raise TypeError("An error occurred")
333
+
334
+ return y.reshape(shape)
335
+
336
+
337
+ def splint(a, b, tck, full_output=0):
338
+ # see the docstring of `_fitpack_py/splint`
339
+ t, c, k = tck
340
+ try:
341
+ c[0][0]
342
+ parametric = True
343
+ except Exception:
344
+ parametric = False
345
+ if parametric:
346
+ return list(map(lambda c, a=a, b=b, t=t, k=k:
347
+ splint(a, b, [t, c, k]), c))
348
+ else:
349
+ aint, wrk = dfitpack.splint(t, c, k, a, b)
350
+ if full_output:
351
+ return aint, wrk
352
+ else:
353
+ return aint
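splev and splint above dispatch to the FITPACK evaluators (and map over the coefficient list for parametric splines). A short sketch of the public wrappers, assuming only the documented scipy.interpolate entry points:

import numpy as np
from scipy.interpolate import splrep, splev, splint

x = np.linspace(0, np.pi, 50)
tck = splrep(x, np.sin(x), s=0)          # cubic interpolating spline of sin

value = splev(np.pi / 2, tck)            # ~1.0
slope = splev(np.pi / 2, tck, der=1)     # ~0.0, first derivative
area = splint(0, np.pi, tck)             # ~2.0, definite integral on [0, pi]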
354
+
355
+
356
+ def sproot(tck, mest=10):
357
+ # see the docstring of `_fitpack_py/sproot`
358
+ t, c, k = tck
359
+ if k != 3:
360
+ raise ValueError("sproot works only for cubic (k=3) splines")
361
+ try:
362
+ c[0][0]
363
+ parametric = True
364
+ except Exception:
365
+ parametric = False
366
+ if parametric:
367
+ return list(map(lambda c, t=t, k=k, mest=mest:
368
+ sproot([t, c, k], mest), c))
369
+ else:
370
+ if len(t) < 8:
371
+ raise TypeError("The number of knots %d must be >= 8" % len(t))
372
+ z, m, ier = dfitpack.sproot(t, c, mest)
373
+ if ier == 10:
374
+ raise TypeError("Invalid input data. "
375
+ "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
376
+ if ier == 0:
377
+ return z[:m]
378
+ if ier == 1:
379
+ warnings.warn(RuntimeWarning("The number of zeros exceeds mest"),
380
+ stacklevel=2)
381
+ return z[:m]
382
+ raise TypeError("Unknown error")
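As the checks above show, sproot only accepts cubic splines (k=3) and needs at least 8 knots before it calls the FITPACK root finder. A minimal sketch using the public wrapper, with illustrative data:

import numpy as np
from scipy.interpolate import splrep, sproot

x = np.linspace(0.5, 10, 50)
tck = splrep(x, np.sin(x), k=3, s=0)     # cubic interpolating spline

roots = sproot(tck)                      # close to [pi, 2*pi, 3*pi]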
383
+
384
+
385
+ def spalde(x, tck):
386
+ # see the docstring of `_fitpack_py/spalde`
387
+ t, c, k = tck
388
+ try:
389
+ c[0][0]
390
+ parametric = True
391
+ except Exception:
392
+ parametric = False
393
+ if parametric:
394
+ return list(map(lambda c, x=x, t=t, k=k:
395
+ spalde(x, [t, c, k]), c))
396
+ else:
397
+ x = atleast_1d(x)
398
+ if len(x) > 1:
399
+ return list(map(lambda x, tck=tck: spalde(x, tck), x))
400
+ d, ier = dfitpack.spalde(t, c, k+1, x[0])
401
+ if ier == 0:
402
+ return d
403
+ if ier == 10:
404
+ raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
405
+ raise TypeError("Unknown error")
406
+
407
+ # def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
408
+ # full_output=0,nest=None,per=0,quiet=1):
409
+
410
+
411
+ _surfit_cache = {'tx': array([], float), 'ty': array([], float),
412
+ 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
413
+
414
+
415
+ def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
416
+ kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
417
+ full_output=0, nxest=None, nyest=None, quiet=1):
418
+ """
419
+ Find a bivariate B-spline representation of a surface.
420
+
421
+ Given a set of data points (x[i], y[i], z[i]) representing a surface
422
+ z=f(x,y), compute a B-spline representation of the surface. Based on
423
+ the routine SURFIT from FITPACK.
424
+
425
+ Parameters
426
+ ----------
427
+ x, y, z : ndarray
428
+ Rank-1 arrays of data points.
429
+ w : ndarray, optional
430
+ Rank-1 array of weights. By default ``w=np.ones(len(x))``.
431
+ xb, xe : float, optional
432
+ End points of approximation interval in `x`.
433
+ By default ``xb = x.min(), xe=x.max()``.
434
+ yb, ye : float, optional
435
+ End points of approximation interval in `y`.
436
+ By default ``yb=y.min(), ye = y.max()``.
437
+ kx, ky : int, optional
438
+ The degrees of the spline (1 <= kx, ky <= 5).
439
+ Third order (kx=ky=3) is recommended.
440
+ task : int, optional
441
+ If task=0, find knots in x and y and coefficients for a given
442
+ smoothing factor, s.
443
+ If task=1, find knots and coefficients for another value of the
444
+ smoothing factor, s. bisplrep must have been previously called
445
+ with task=0 or task=1.
446
+ If task=-1, find coefficients for a given set of knots tx, ty.
447
+ s : float, optional
448
+ A non-negative smoothing factor. If weights correspond
449
+ to the inverse of the standard-deviation of the errors in z,
450
+ then a good s-value should be found in the range
451
+ ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
452
+ eps : float, optional
453
+ A threshold for determining the effective rank of an
454
+ over-determined linear system of equations (0 < eps < 1).
455
+ `eps` is not likely to need changing.
456
+ tx, ty : ndarray, optional
457
+ Rank-1 arrays of the knots of the spline for task=-1
458
+ full_output : int, optional
459
+ Non-zero to return optional outputs.
460
+ nxest, nyest : int, optional
461
+ Over-estimates of the total number of knots. If None then
462
+ ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
463
+ ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
464
+ quiet : int, optional
465
+ Non-zero to suppress printing of messages.
466
+
467
+ Returns
468
+ -------
469
+ tck : array_like
470
+ A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
471
+ coefficients (c) of the bivariate B-spline representation of the
472
+ surface along with the degree of the spline.
473
+ fp : ndarray
474
+ The weighted sum of squared residuals of the spline approximation.
475
+ ier : int
476
+ An integer flag about splrep success. Success is indicated if
477
+ ier<=0. If ier in [1,2,3] an error occurred but was not raised.
478
+ Otherwise an error is raised.
479
+ msg : str
480
+ A message corresponding to the integer flag, ier.
481
+
482
+ See Also
483
+ --------
484
+ splprep, splrep, splint, sproot, splev
485
+ UnivariateSpline, BivariateSpline
486
+
487
+ Notes
488
+ -----
489
+ See `bisplev` to evaluate the value of the B-spline given its tck
490
+ representation.
491
+
492
+ If the input data is such that input dimensions have incommensurate
493
+ units and differ by many orders of magnitude, the interpolant may have
494
+ numerical artifacts. Consider rescaling the data before interpolation.
495
+
496
+ References
497
+ ----------
498
+ .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
499
+ Ima J. Numer. Anal. 1 (1981) 267-283.
500
+ .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
501
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
502
+ .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
503
+ Numerical Analysis, Oxford University Press, 1993.
504
+
505
+ Examples
506
+ --------
507
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
508
+
509
+ """
510
+ x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
511
+ m = len(x)
512
+ if not (m == len(y) == len(z)):
513
+ raise TypeError('len(x)==len(y)==len(z) must hold.')
514
+ if w is None:
515
+ w = ones(m, float)
516
+ else:
517
+ w = atleast_1d(w)
518
+ if not len(w) == m:
519
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
520
+ if xb is None:
521
+ xb = x.min()
522
+ if xe is None:
523
+ xe = x.max()
524
+ if yb is None:
525
+ yb = y.min()
526
+ if ye is None:
527
+ ye = y.max()
528
+ if not (-1 <= task <= 1):
529
+ raise TypeError('task must be -1, 0 or 1')
530
+ if s is None:
531
+ s = m - sqrt(2*m)
532
+ if tx is None and task == -1:
533
+ raise TypeError('Knots_x must be given for task=-1')
534
+ if tx is not None:
535
+ _surfit_cache['tx'] = atleast_1d(tx)
536
+ nx = len(_surfit_cache['tx'])
537
+ if ty is None and task == -1:
538
+ raise TypeError('Knots_y must be given for task=-1')
539
+ if ty is not None:
540
+ _surfit_cache['ty'] = atleast_1d(ty)
541
+ ny = len(_surfit_cache['ty'])
542
+ if task == -1 and nx < 2*kx+2:
543
+ raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
544
+ if task == -1 and ny < 2*ky+2:
545
+ raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
546
+ if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
547
+ raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
548
+ 'supported. (1<=k<=5)' % (kx, ky))
549
+ if m < (kx + 1)*(ky + 1):
550
+ raise TypeError('m >= (kx+1)(ky+1) must hold')
551
+ if nxest is None:
552
+ nxest = int(kx + sqrt(m/2))
553
+ if nyest is None:
554
+ nyest = int(ky + sqrt(m/2))
555
+ nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
556
+ if task >= 0 and s == 0:
557
+ nxest = int(kx + sqrt(3*m))
558
+ nyest = int(ky + sqrt(3*m))
559
+ if task == -1:
560
+ _surfit_cache['tx'] = atleast_1d(tx)
561
+ _surfit_cache['ty'] = atleast_1d(ty)
562
+ tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
563
+ wrk = _surfit_cache['wrk']
564
+ u = nxest - kx - 1
565
+ v = nyest - ky - 1
566
+ km = max(kx, ky) + 1
567
+ ne = max(nxest, nyest)
568
+ bx, by = kx*v + ky + 1, ky*u + kx + 1
569
+ b1, b2 = bx, bx + v - ky
570
+ if bx > by:
571
+ b1, b2 = by, by + u - kx
572
+ msg = "Too many data points to interpolate"
573
+ lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
574
+ 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
575
+ OverflowError,
576
+ msg=msg)
577
+ lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
578
+ tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
579
+ task, s, eps, tx, ty, nxest, nyest,
580
+ wrk, lwrk1, lwrk2)
581
+ _curfit_cache['tx'] = tx
582
+ _curfit_cache['ty'] = ty
583
+ _curfit_cache['wrk'] = o['wrk']
584
+ ier, fp = o['ier'], o['fp']
585
+ tck = [tx, ty, c, kx, ky]
586
+
587
+ ierm = min(11, max(-3, ier))
588
+ if ierm <= 0 and not quiet:
589
+ _mess = (_iermess2[ierm][0] +
590
+ "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
591
+ (kx, ky, len(tx), len(ty), m, fp, s))
592
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
593
+ if ierm > 0 and not full_output:
594
+ if ier in [1, 2, 3, 4, 5]:
595
+ _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
596
+ (kx, ky, len(tx), len(ty), m, fp, s))
597
+ warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
598
+ else:
599
+ try:
600
+ raise _iermess2[ierm][1](_iermess2[ierm][0])
601
+ except KeyError as e:
602
+ raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
603
+ if full_output:
604
+ try:
605
+ return tck, fp, ier, _iermess2[ierm][0]
606
+ except KeyError:
607
+ return tck, fp, ier, _iermess2['unknown'][0]
608
+ else:
609
+ return tck
610
+
611
+
612
+ def bisplev(x, y, tck, dx=0, dy=0):
613
+ """
614
+ Evaluate a bivariate B-spline and its derivatives.
615
+
616
+ Return a rank-2 array of spline function values (or spline derivative
617
+ values) at points given by the cross-product of the rank-1 arrays `x` and
618
+ `y`. In special cases, return an array or just a float if either `x` or
619
+ `y` or both are floats. Based on BISPEV and PARDER from FITPACK.
620
+
621
+ Parameters
622
+ ----------
623
+ x, y : ndarray
624
+ Rank-1 arrays specifying the domain over which to evaluate the
625
+ spline or its derivative.
626
+ tck : tuple
627
+ A sequence of length 5 returned by `bisplrep` containing the knot
628
+ locations, the coefficients, and the degree of the spline:
629
+ [tx, ty, c, kx, ky].
630
+ dx, dy : int, optional
631
+ The orders of the partial derivatives in `x` and `y` respectively.
632
+
633
+ Returns
634
+ -------
635
+ vals : ndarray
636
+ The B-spline or its derivative evaluated over the set formed by
637
+ the cross-product of `x` and `y`.
638
+
639
+ See Also
640
+ --------
641
+ splprep, splrep, splint, sproot, splev
642
+ UnivariateSpline, BivariateSpline
643
+
644
+ Notes
645
+ -----
646
+ See `bisplrep` to generate the `tck` representation.
647
+
648
+ References
649
+ ----------
650
+ .. [1] Dierckx P. : An algorithm for surface fitting
651
+ with spline functions
652
+ Ima J. Numer. Anal. 1 (1981) 267-283.
653
+ .. [2] Dierckx P. : An algorithm for surface fitting
654
+ with spline functions
655
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
656
+ .. [3] Dierckx P. : Curve and surface fitting with splines,
657
+ Monographs on Numerical Analysis, Oxford University Press, 1993.
658
+
659
+ Examples
660
+ --------
661
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
662
+
663
+ """
664
+ tx, ty, c, kx, ky = tck
665
+ if not (0 <= dx < kx):
666
+ raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
667
+ if not (0 <= dy < ky):
668
+ raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
669
+ x, y = map(atleast_1d, [x, y])
670
+ if (len(x.shape) != 1) or (len(y.shape) != 1):
671
+ raise ValueError("First two entries should be rank-1 arrays.")
672
+
673
+ msg = "Too many data points to interpolate."
674
+
675
+ _int_overflow(x.size * y.size, MemoryError, msg=msg)
676
+
677
+ if dx != 0 or dy != 0:
678
+ _int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
679
+ MemoryError, msg=msg)
680
+ z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
681
+ else:
682
+ z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
683
+
684
+ if ier == 10:
685
+ raise ValueError("Invalid input data")
686
+ if ier:
687
+ raise TypeError("An error occurred")
688
+ z.shape = len(x), len(y)
689
+ if len(z) > 1:
690
+ return z
691
+ if len(z[0]) > 1:
692
+ return z[0]
693
+ return z[0][0]
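bisplrep and bisplev form a fit/evaluate pair: bisplrep builds the [tx, ty, c, kx, ky] representation from scattered data and bisplev evaluates it (or a partial derivative) on the cross-product of two rank-1 arrays. A small end-to-end sketch, assuming only the public entry points documented above and synthetic data:

import numpy as np
from scipy.interpolate import bisplrep, bisplev

# Scattered samples of z = x*y over the unit square
rng = np.random.default_rng(0)
x = rng.uniform(0, 1, 200)
y = rng.uniform(0, 1, 200)
z = x * y

# Bicubic fit; s chosen in the recommended range (m - sqrt(2m), m + sqrt(2m))
tck = bisplrep(x, y, z, kx=3, ky=3, s=200 - np.sqrt(2 * 200))

xi = np.linspace(0.1, 0.9, 5)
yi = np.linspace(0.1, 0.9, 4)
zi = bisplev(xi, yi, tck)                # values on the grid, shape (5, 4)
dzdx = bisplev(0.5, 0.5, tck, dx=1)      # scalar: d z / d x at (0.5, 0.5)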
694
+
695
+
696
+ def dblint(xa, xb, ya, yb, tck):
697
+ """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
698
+
699
+ Parameters
700
+ ----------
701
+ xa, xb : float
702
+ The end-points of the x integration interval.
703
+ ya, yb : float
704
+ The end-points of the y integration interval.
705
+ tck : list [tx, ty, c, kx, ky]
706
+ A sequence of length 5 returned by bisplrep containing the knot
707
+ locations tx, ty, the coefficients c, and the degrees kx, ky
708
+ of the spline.
709
+
710
+ Returns
711
+ -------
712
+ integ : float
713
+ The value of the resulting integral.
714
+ """
715
+ tx, ty, c, kx, ky = tck
716
+ return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
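dblint integrates the same tck representation over a rectangle, so it pairs naturally with bisplrep. A self-contained sketch (illustrative; the approximation domain is pinned to the unit square via xb/xe/yb/ye so the integration rectangle lies inside it):

import numpy as np
from scipy.interpolate import bisplrep, dblint

rng = np.random.default_rng(1)
x = rng.uniform(0, 1, 200)
y = rng.uniform(0, 1, 200)
tck = bisplrep(x, y, x * y, xb=0, xe=1, yb=0, ye=1,
               s=200 - np.sqrt(2 * 200))

integral = dblint(0, 1, 0, 1, tck)       # close to the exact value 1/4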
717
+
718
+
719
+ def insert(x, tck, m=1, per=0):
720
+ # see the docstring of `_fitpack_py/insert`
721
+ t, c, k = tck
722
+ try:
723
+ c[0][0]
724
+ parametric = True
725
+ except Exception:
726
+ parametric = False
727
+ if parametric:
728
+ cc = []
729
+ for c_vals in c:
730
+ tt, cc_val, kk = insert(x, [t, c_vals, k], m)
731
+ cc.append(cc_val)
732
+ return (tt, cc, kk)
733
+ else:
734
+ tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
735
+ if ier == 10:
736
+ raise ValueError("Invalid input data")
737
+ if ier:
738
+ raise TypeError("An error occurred")
739
+ return (tt, cc, k)
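Knot insertion refines the knot vector without changing the represented curve; the wrapper above simply applies that per coefficient array for parametric splines. A quick sketch with the public wrapper and illustrative data:

import numpy as np
from scipy.interpolate import splrep, splev, insert

x = np.linspace(0, 10, 30)
tck = splrep(x, np.exp(-x), s=0)

tck_ref = insert(3.5, tck, m=2)          # insert the knot x = 3.5 twice

xx = np.linspace(0, 10, 7)
max_diff = np.max(np.abs(splev(xx, tck_ref) - splev(xx, tck)))  # essentially zero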
740
+
741
+
742
+ def splder(tck, n=1):
743
+ # see the docstring of `_fitpack_py/splder`
744
+ if n < 0:
745
+ return splantider(tck, -n)
746
+
747
+ t, c, k = tck
748
+
749
+ if n > k:
750
+ raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
751
+ f"order of spline (k = {tck[2]!r})")
752
+
753
+ # Extra axes for the trailing dims of the `c` array:
754
+ sh = (slice(None),) + ((None,)*len(c.shape[1:]))
755
+
756
+ with np.errstate(invalid='raise', divide='raise'):
757
+ try:
758
+ for j in range(n):
759
+ # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
760
+
761
+ # Compute the denominator in the differentiation formula.
762
+ # (and append trailing dims, if necessary)
763
+ dt = t[k+1:-1] - t[1:-k-1]
764
+ dt = dt[sh]
765
+ # Compute the new coefficients
766
+ c = (c[1:-1-k] - c[:-2-k]) * k / dt
767
+ # Pad coefficient array to same size as knots (FITPACK
768
+ # convention)
769
+ c = np.r_[c, np.zeros((k,) + c.shape[1:])]
770
+ # Adjust knots
771
+ t = t[1:-1]
772
+ k -= 1
773
+ except FloatingPointError as e:
774
+ raise ValueError(("The spline has internal repeated knots "
775
+ "and is not differentiable %d times") % n) from e
776
+
777
+ return t, c, k
778
+
779
+
780
+ def splantider(tck, n=1):
781
+ # see the docstring of `_fitpack_py/splantider`
782
+ if n < 0:
783
+ return splder(tck, -n)
784
+
785
+ t, c, k = tck
786
+
787
+ # Extra axes for the trailing dims of the `c` array:
788
+ sh = (slice(None),) + (None,)*len(c.shape[1:])
789
+
790
+ for j in range(n):
791
+ # This is the inverse set of operations to splder.
792
+
793
+ # Compute the multiplier in the antiderivative formula.
794
+ dt = t[k+1:] - t[:-k-1]
795
+ dt = dt[sh]
796
+ # Compute the new coefficients
797
+ c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
798
+ c = np.r_[np.zeros((1,) + c.shape[1:]),
799
+ c,
800
+ [c[-1]] * (k+2)]
801
+ # New knots
802
+ t = np.r_[t[0], t, t[-1]]
803
+ k += 1
804
+
805
+ return t, c, k
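splder and splantider above implement the standard knot/coefficient recurrences for differentiating and integrating a spline in tck form (splder drops the degree by one per pass, splantider raises it). A round-trip sketch with the public wrappers, assuming only the documented scipy.interpolate names:

import numpy as np
from scipy.interpolate import splrep, splev, splder, splantider

x = np.linspace(0, 2 * np.pi, 100)
tck = splrep(x, np.sin(x), s=0)          # cubic interpolating spline of sin

dtck = splder(tck)                       # degree 3 -> 2, approximates cos
atck = splantider(dtck)                  # degree back to 3, vanishes at x[0]

err_d = splev(1.0, dtck) - np.cos(1.0)   # small
err_a = splev(1.0, atck) - np.sin(1.0)   # small (antiderivative is 0 at x[0] = 0)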
venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py ADDED
@@ -0,0 +1,796 @@
1
+ __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
2
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
3
+
4
+
5
+ import numpy as np
6
+
7
+ # These are in the API for fitpack even if not used in fitpack.py itself.
8
+ from ._fitpack_impl import bisplrep, bisplev, dblint # noqa: F401
9
+ from . import _fitpack_impl as _impl
10
+ from ._bsplines import BSpline
11
+
12
+
13
+ def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
14
+ full_output=0, nest=None, per=0, quiet=1):
15
+ """
16
+ Find the B-spline representation of an N-D curve.
17
+
18
+ Given a list of N rank-1 arrays, `x`, which represent a curve in
19
+ N-dimensional space parametrized by `u`, find a smooth approximating
20
+ spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
21
+
22
+ Parameters
23
+ ----------
24
+ x : array_like
25
+ A list of sample vector arrays representing the curve.
26
+ w : array_like, optional
27
+ Strictly positive rank-1 array of weights the same length as `x[0]`.
28
+ The weights are used in computing the weighted least-squares spline
29
+ fit. If the errors in the `x` values have standard-deviation given by
30
+ the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
31
+ u : array_like, optional
32
+ An array of parameter values. If not given, these values are
33
+ calculated automatically as ``M = len(x[0])``, where
34
+
35
+ v[0] = 0
36
+
37
+ v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
38
+
39
+ u[i] = v[i] / v[M-1]
40
+
41
+ ub, ue : int, optional
42
+ The end-points of the parameters interval. Defaults to
43
+ u[0] and u[-1].
44
+ k : int, optional
45
+ Degree of the spline. Cubic splines are recommended.
46
+ Even values of `k` should be avoided especially with a small s-value.
47
+ ``1 <= k <= 5``, default is 3.
48
+ task : int, optional
49
+ If task==0 (default), find t and c for a given smoothing factor, s.
50
+ If task==1, find t and c for another value of the smoothing factor, s.
51
+ There must have been a previous call with task=0 or task=1
52
+ for the same set of data.
53
+ If task=-1 find the weighted least square spline for a given set of
54
+ knots, t.
55
+ s : float, optional
56
+ A smoothing condition. The amount of smoothness is determined by
57
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
58
+ where g(x) is the smoothed interpolation of (x,y). The user can
59
+ use `s` to control the trade-off between closeness and smoothness
60
+ of fit. Larger `s` means more smoothing while smaller values of `s`
61
+ indicate less smoothing. Recommended values of `s` depend on the
62
+ weights, w. If the weights represent the inverse of the
63
+ standard-deviation of y, then a good `s` value should be found in
64
+ the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
65
+ data points in x, y, and w.
66
+ t : array, optional
67
+ The knots needed for ``task=-1``.
68
+ There must be at least ``2*k+2`` knots.
69
+ full_output : int, optional
70
+ If non-zero, then return optional outputs.
71
+ nest : int, optional
72
+ An over-estimate of the total number of knots of the spline to
73
+ help in determining the storage space. By default nest=m/2.
74
+ A value of nest=m+k+1 is always large enough.
75
+ per : int, optional
76
+ If non-zero, data points are considered periodic with period
77
+ ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
78
+ returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
79
+ quiet : int, optional
80
+ Non-zero to suppress messages.
81
+
82
+ Returns
83
+ -------
84
+ tck : tuple
85
+ A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
86
+ coefficients, and the degree of the spline.
87
+ u : array
88
+ An array of the values of the parameter.
89
+ fp : float
90
+ The weighted sum of squared residuals of the spline approximation.
91
+ ier : int
92
+ An integer flag about splprep success. Success is indicated
93
+ if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
94
+ Otherwise an error is raised.
95
+ msg : str
96
+ A message corresponding to the integer flag, ier.
97
+
98
+ See Also
99
+ --------
100
+ splrep, splev, sproot, spalde, splint,
101
+ bisplrep, bisplev
102
+ UnivariateSpline, BivariateSpline
103
+ BSpline
104
+ make_interp_spline
105
+
106
+ Notes
107
+ -----
108
+ See `splev` for evaluation of the spline and its derivatives.
109
+ The number of dimensions N must be smaller than 11.
110
+
111
+ The number of coefficients in the `c` array is ``k+1`` less than the number
112
+ of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
113
+ the array of coefficients to have the same length as the array of knots.
114
+ These additional coefficients are ignored by evaluation routines, `splev`
115
+ and `BSpline`.
116
+
117
+ References
118
+ ----------
119
+ .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
120
+ parametric splines", Computer Graphics and Image Processing,
121
+ 20 (1982) 171-184.
122
+ .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
123
+ parametric splines", report tw55, Dept. Computer Science,
124
+ K.U.Leuven, 1981.
125
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
126
+ Numerical Analysis, Oxford University Press, 1993.
127
+
128
+ Examples
129
+ --------
130
+ Generate a discretization of a limacon curve in the polar coordinates:
131
+
132
+ >>> import numpy as np
133
+ >>> phi = np.linspace(0, 2.*np.pi, 40)
134
+ >>> r = 0.5 + np.cos(phi) # polar coords
135
+ >>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
136
+
137
+ And interpolate:
138
+
139
+ >>> from scipy.interpolate import splprep, splev
140
+ >>> tck, u = splprep([x, y], s=0)
141
+ >>> new_points = splev(u, tck)
142
+
143
+ Notice that (i) we force interpolation by using `s=0`,
144
+ (ii) the parameterization, ``u``, is generated automatically.
145
+ Now plot the result:
146
+
147
+ >>> import matplotlib.pyplot as plt
148
+ >>> fig, ax = plt.subplots()
149
+ >>> ax.plot(x, y, 'ro')
150
+ >>> ax.plot(new_points[0], new_points[1], 'r-')
151
+ >>> plt.show()
152
+
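+ As a brief additional check (continuing the example above, and only a
+ sketch), the returned parameter values span the unit interval, and the
+ coefficient arrays are ``k + 1`` entries shorter than the knot vector,
+ as described in the Notes:
+
+ >>> np.allclose([u.min(), u.max()], [0.0, 1.0])
+ True
+ >>> len(tck[0]) - len(tck[1][0]) == tck[2] + 1
+ True
+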
153
+ """
154
+
155
+ res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
156
+ quiet)
157
+ return res
158
+
159
+
160
+ def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
161
+ full_output=0, per=0, quiet=1):
162
+ """
163
+ Find the B-spline representation of a 1-D curve.
164
+
165
+ Given the set of data points ``(x[i], y[i])`` determine a smooth spline
166
+ approximation of degree k on the interval ``xb <= x <= xe``.
167
+
168
+ Parameters
169
+ ----------
170
+ x, y : array_like
171
+ The data points defining a curve ``y = f(x)``.
172
+ w : array_like, optional
173
+ Strictly positive rank-1 array of weights the same length as `x` and `y`.
174
+ The weights are used in computing the weighted least-squares spline
175
+ fit. If the errors in the `y` values have standard-deviation given by the
176
+ vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
177
+ xb, xe : float, optional
178
+ The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
179
+ respectively.
180
+ k : int, optional
181
+ The degree of the spline fit. It is recommended to use cubic splines.
182
+ Even values of `k` should be avoided especially with small `s` values.
183
+ ``1 <= k <= 5``.
184
+ task : {1, 0, -1}, optional
185
+ If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
186
+
187
+ If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
188
+ `s`. There must have been a previous call with ``task=0`` or ``task=1`` for
189
+ the same set of data (``t`` will be stored and used internally)
190
+
191
+ If ``task=-1`` find the weighted least square spline for a given set of
192
+ knots, ``t``. These should be interior knots as knots on the ends will be
193
+ added automatically.
194
+ s : float, optional
195
+ A smoothing condition. The amount of smoothness is determined by
196
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
197
+ is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
198
+ the tradeoff between closeness and smoothness of fit. Larger `s` means
199
+ more smoothing while smaller values of `s` indicate less smoothing.
200
+ Recommended values of `s` depend on the weights, `w`. If the weights
201
+ represent the inverse of the standard-deviation of `y`, then a good `s`
202
+ value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
203
+ the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
204
+ weights are supplied. ``s = 0.0`` (interpolating) if no weights are
205
+ supplied.
206
+ t : array_like, optional
207
+ The knots needed for ``task=-1``. If given then task is automatically set
208
+ to ``-1``.
209
+ full_output : bool, optional
210
+ If non-zero, then return optional outputs.
211
+ per : bool, optional
212
+ If non-zero, data points are considered periodic with period
213
+ ``x[m-1] - x[0]`` and a smooth periodic spline approximation is returned. Values of
214
+ ``y[m-1]`` and ``w[m-1]`` are not used.
215
+ The default is zero, corresponding to boundary condition 'not-a-knot'.
216
+ quiet : bool, optional
217
+ Non-zero to suppress messages.
218
+
219
+ Returns
220
+ -------
221
+ tck : tuple
222
+ A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
223
+ coefficients, and the degree of the spline.
224
+ fp : array, optional
225
+ The weighted sum of squared residuals of the spline approximation.
226
+ ier : int, optional
227
+ An integer flag about splrep success. Success is indicated if ``ier<=0``.
228
+ If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
229
+ error is raised.
230
+ msg : str, optional
231
+ A message corresponding to the integer flag, `ier`.
232
+
233
+ See Also
234
+ --------
235
+ UnivariateSpline, BivariateSpline
236
+ splprep, splev, sproot, spalde, splint
237
+ bisplrep, bisplev
238
+ BSpline
239
+ make_interp_spline
240
+
241
+ Notes
242
+ -----
243
+ See `splev` for evaluation of the spline and its derivatives. Uses the
244
+ FORTRAN routine ``curfit`` from FITPACK.
245
+
246
+ The user is responsible for assuring that the values of `x` are unique.
247
+ Otherwise, `splrep` will not return sensible results.
248
+
249
+ If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
250
+ i.e., there must be a subset of data points ``x[j]`` such that
251
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
252
+
253
+ This routine zero-pads the coefficients array ``c`` to have the same length
254
+ as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
255
+ by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
256
+ `splprep`, which does not zero-pad the coefficients.
257
+
258
+ The default boundary condition is 'not-a-knot', i.e. the first and second
259
+ segment at a curve end are the same polynomial. More boundary conditions are
260
+ available in `CubicSpline`.
261
+
262
+ References
263
+ ----------
264
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
265
+
266
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
267
+ integration of experimental data using spline functions",
268
+ J.Comp.Appl.Maths 1 (1975) 165-184.
269
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
270
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
271
+ 1286-1304.
272
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
273
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
274
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
275
+ Numerical Analysis, Oxford University Press, 1993.
276
+
277
+ Examples
278
+ --------
279
+ You can interpolate 1-D points with a B-spline curve.
280
+ Further examples are given
281
+ :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
282
+
283
+ >>> import numpy as np
284
+ >>> import matplotlib.pyplot as plt
285
+ >>> from scipy.interpolate import splev, splrep
286
+ >>> x = np.linspace(0, 10, 10)
287
+ >>> y = np.sin(x)
288
+ >>> spl = splrep(x, y)
289
+ >>> x2 = np.linspace(0, 10, 200)
290
+ >>> y2 = splev(x2, spl)
291
+ >>> plt.plot(x, y, 'o', x2, y2)
292
+ >>> plt.show()
293
+
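+ As a brief additional illustration of the zero-padding described in the
+ Notes above (continuing the example), the coefficient array has the same
+ length as the knot vector, with the trailing ``k + 1`` entries unused;
+ `splprep`, by contrast, does not pad:
+
+ >>> len(spl[0]) == len(spl[1])
+ True
+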
294
+ """
295
+ res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
296
+ return res
297
+
298
+
299
+ def splev(x, tck, der=0, ext=0):
300
+ """
301
+ Evaluate a B-spline or its derivatives.
302
+
303
+ Given the knots and coefficients of a B-spline representation, evaluate
304
+ the value of the smoothing polynomial and its derivatives. This is a
305
+ wrapper around the FORTRAN routines splev and splder of FITPACK.
306
+
307
+ Parameters
308
+ ----------
309
+ x : array_like
310
+ An array of points at which to return the value of the smoothed
311
+ spline or its derivatives. If `tck` was returned from `splprep`,
312
+ then the parameter values, u should be given.
313
+ tck : 3-tuple or a BSpline object
314
+ If a tuple, then it should be a sequence of length 3 returned by
315
+ `splrep` or `splprep` containing the knots, coefficients, and degree
316
+ of the spline. (Also see Notes.)
317
+ der : int, optional
318
+ The order of derivative of the spline to compute (must be less than
319
+ or equal to k, the degree of the spline).
320
+ ext : int, optional
321
+ Controls the value returned for elements of ``x`` not in the
322
+ interval defined by the knot sequence.
323
+
324
+ * if ext=0, return the extrapolated value.
325
+ * if ext=1, return 0
326
+ * if ext=2, raise a ValueError
327
+ * if ext=3, return the boundary value.
328
+
329
+ The default value is 0.
330
+
331
+ Returns
332
+ -------
333
+ y : ndarray or list of ndarrays
334
+ An array of values representing the spline function evaluated at
335
+ the points in `x`. If `tck` was returned from `splprep`, then this
336
+ is a list of arrays representing the curve in an N-D space.
337
+
338
+ See Also
339
+ --------
340
+ splprep, splrep, sproot, spalde, splint
341
+ bisplrep, bisplev
342
+ BSpline
343
+
344
+ Notes
345
+ -----
346
+ Manipulating the tck-tuples directly is not recommended. In new code,
347
+ prefer using `BSpline` objects.
348
+
349
+ References
350
+ ----------
351
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
352
+ Theory, 6, p.50-62, 1972.
353
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
354
+ Applics, 10, p.134-149, 1972.
355
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
356
+ on Numerical Analysis, Oxford University Press, 1993.
357
+
358
+ Examples
359
+ --------
360
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
361
+
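+ As a minimal sketch (not a substitute for the tutorial), evaluate a
+ spline fitted to ``y = x**2`` and its first derivative at new points;
+ since the data come from a quadratic, the interpolant reproduces it:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import splrep, splev
+ >>> x = np.linspace(0, 10, 10)
+ >>> tck = splrep(x, x**2)
+ >>> y_new = splev([2.5, 5.0], tck) # spline values
+ >>> dy_new = splev([2.5, 5.0], tck, der=1) # first derivative
+ >>> np.allclose(y_new, [2.5**2, 5.0**2])
+ True
+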
362
+ """
363
+ if isinstance(tck, BSpline):
364
+ if tck.c.ndim > 1:
365
+ mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
366
+ "not allowed. Use BSpline.__call__(x) instead.")
367
+ raise ValueError(mesg)
368
+
369
+ # remap the out-of-bounds behavior
370
+ try:
371
+ extrapolate = {0: True, }[ext]
372
+ except KeyError as e:
373
+ raise ValueError("Extrapolation mode %s is not supported "
374
+ "by BSpline." % ext) from e
375
+
376
+ return tck(x, der, extrapolate=extrapolate)
377
+ else:
378
+ return _impl.splev(x, tck, der, ext)
379
+
380
+
381
+ def splint(a, b, tck, full_output=0):
382
+ """
383
+ Evaluate the definite integral of a B-spline between two given points.
384
+
385
+ Parameters
386
+ ----------
387
+ a, b : float
388
+ The end-points of the integration interval.
389
+ tck : tuple or a BSpline instance
390
+ If a tuple, then it should be a sequence of length 3, containing the
391
+ vector of knots, the B-spline coefficients, and the degree of the
392
+ spline (see `splev`).
393
+ full_output : int, optional
394
+ Non-zero to return optional output.
395
+
396
+ Returns
397
+ -------
398
+ integral : float
399
+ The resulting integral.
400
+ wrk : ndarray
401
+ An array containing the integrals of the normalized B-splines
402
+ defined on the set of knots.
403
+ (Only returned if `full_output` is non-zero)
404
+
405
+ See Also
406
+ --------
407
+ splprep, splrep, sproot, spalde, splev
408
+ bisplrep, bisplev
409
+ BSpline
410
+
411
+ Notes
412
+ -----
413
+ `splint` silently assumes that the spline function is zero outside the data
414
+ interval (`a`, `b`).
415
+
416
+ Manipulating the tck-tuples directly is not recommended. In new code,
417
+ prefer using the `BSpline` objects.
418
+
419
+ References
420
+ ----------
421
+ .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
422
+ J. Inst. Maths Applics, 17, p.37-41, 1976.
423
+ .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
424
+ on Numerical Analysis, Oxford University Press, 1993.
425
+
426
+ Examples
427
+ --------
428
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
429
+
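+ As a minimal sketch: integrate a spline fitted to ``y = x**2`` over
+ ``[0, 3]``; the result is close to the exact value 9:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import splrep, splint
+ >>> x = np.linspace(0, 3, 11)
+ >>> tck = splrep(x, x**2)
+ >>> np.allclose(splint(0, 3, tck), 9.0)
+ True
+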
430
+ """
431
+ if isinstance(tck, BSpline):
432
+ if tck.c.ndim > 1:
433
+ mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
434
+ "not allowed. Use BSpline.integrate() instead.")
435
+ raise ValueError(mesg)
436
+
437
+ if full_output != 0:
438
+ mesg = ("full_output = %s is not supported. Proceeding as if "
439
+ "full_output = 0" % full_output)
440
+
441
+ return tck.integrate(a, b, extrapolate=False)
442
+ else:
443
+ return _impl.splint(a, b, tck, full_output)
444
+
445
+
446
+ def sproot(tck, mest=10):
447
+ """
448
+ Find the roots of a cubic B-spline.
449
+
450
+ Given the knots (>=8) and coefficients of a cubic B-spline return the
451
+ roots of the spline.
452
+
453
+ Parameters
454
+ ----------
455
+ tck : tuple or a BSpline object
456
+ If a tuple, then it should be a sequence of length 3, containing the
457
+ vector of knots, the B-spline coefficients, and the degree of the
458
+ spline.
459
+ The number of knots must be >= 8, and the degree must be 3.
460
+ The knots must be a monotonically increasing sequence.
461
+ mest : int, optional
462
+ An estimate of the number of zeros (Default is 10).
463
+
464
+ Returns
465
+ -------
466
+ zeros : ndarray
467
+ An array giving the roots of the spline.
468
+
469
+ See Also
470
+ --------
471
+ splprep, splrep, splint, spalde, splev
472
+ bisplrep, bisplev
473
+ BSpline
474
+
475
+ Notes
476
+ -----
477
+ Manipulating the tck-tuples directly is not recommended. In new code,
478
+ prefer using the `BSpline` objects.
479
+
480
+ References
481
+ ----------
482
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
483
+ Theory, 6, p.50-62, 1972.
484
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
485
+ Applics, 10, p.134-149, 1972.
486
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
487
+ on Numerical Analysis, Oxford University Press, 1993.
488
+
489
+ Examples
490
+ --------
491
+
492
+ For some data, this method may miss a root. This happens when one of
493
+ the spline knots (which FITPACK places automatically) happens to
494
+ coincide with the true root. A workaround is to convert to `PPoly`,
495
+ which uses a different root-finding algorithm.
496
+
497
+ For example,
498
+
499
+ >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
500
+ >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
501
+ ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
502
+ ... 6.520430e-03, 8.170770e-03]
503
+ >>> from scipy.interpolate import splrep, sproot, PPoly
504
+ >>> tck = splrep(x, y, s=0)
505
+ >>> sproot(tck)
506
+ array([], dtype=float64)
507
+
508
+ Converting to a PPoly object does find the root at `x=2`:
509
+
510
+ >>> ppoly = PPoly.from_spline(tck)
511
+ >>> ppoly.roots(extrapolate=False)
512
+ array([2.])
513
+
514
+
515
+ Further examples are given :ref:`in the tutorial
516
+ <tutorial-interpolate_splXXX>`.
517
+
518
+ """
519
+ if isinstance(tck, BSpline):
520
+ if tck.c.ndim > 1:
521
+ mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
522
+ "not allowed.")
523
+ raise ValueError(mesg)
524
+
525
+ t, c, k = tck.tck
526
+
527
+ # _impl.sproot expects the interpolation axis to be last, so roll it.
528
+ # NB: This transpose is a no-op if c is 1D.
529
+ sh = tuple(range(c.ndim))
530
+ c = c.transpose(sh[1:] + (0,))
531
+ return _impl.sproot((t, c, k), mest)
532
+ else:
533
+ return _impl.sproot(tck, mest)
534
+
535
+
536
+ def spalde(x, tck):
537
+ """
538
+ Evaluate all derivatives of a B-spline.
539
+
540
+ Given the knots and coefficients of a cubic B-spline compute all
541
+ derivatives up to order k at a point (or set of points).
542
+
543
+ Parameters
544
+ ----------
545
+ x : array_like
546
+ A point or a set of points at which to evaluate the derivatives.
547
+ Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
548
+ tck : tuple
549
+ A tuple (t,c,k) containing the vector of knots,
550
+ the B-spline coefficients, and the degree of the spline.
551
+
552
+ Returns
553
+ -------
554
+ results : {ndarray, list of ndarrays}
555
+ An array (or a list of arrays) containing all derivatives
556
+ up to order k inclusive for each point `x`.
557
+
558
+ See Also
559
+ --------
560
+ splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
561
+ UnivariateSpline, BivariateSpline
562
+
563
+ References
564
+ ----------
565
+ .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
566
+ 6 (1972) 50-62.
567
+ .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
568
+ applics 10 (1972) 134-149.
569
+ .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
570
+ Numerical Analysis, Oxford University Press, 1993.
571
+
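+ Examples
+ --------
+ As a minimal sketch: for a cubic spline fitted to ``y = x**2``, `spalde`
+ returns ``k + 1`` values per point, i.e. the value and the first ``k``
+ derivatives:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import splrep, spalde
+ >>> x = np.linspace(0, 10, 10)
+ >>> tck = splrep(x, x**2)
+ >>> derivs = spalde(5.0, tck)
+ >>> len(derivs)
+ 4
+ >>> np.allclose(derivs[:3], [25.0, 10.0, 2.0])
+ True
+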
572
+ """
573
+ if isinstance(tck, BSpline):
574
+ raise TypeError("spalde does not accept BSpline instances.")
575
+ else:
576
+ return _impl.spalde(x, tck)
577
+
578
+
579
+ def insert(x, tck, m=1, per=0):
580
+ """
581
+ Insert knots into a B-spline.
582
+
583
+ Given the knots and coefficients of a B-spline representation, create a
584
+ new B-spline with a knot inserted `m` times at point `x`.
585
+ This is a wrapper around the FORTRAN routine insert of FITPACK.
586
+
587
+ Parameters
588
+ ----------
589
+ x (u) : float
590
+ A knot value at which to insert a new knot. If `tck` was returned
591
+ from ``splprep``, then the parameter values, u should be given.
592
+ tck : a `BSpline` instance or a tuple
593
+ If tuple, then it is expected to be a tuple (t,c,k) containing
594
+ the vector of knots, the B-spline coefficients, and the degree of
595
+ the spline.
596
+ m : int, optional
597
+ The number of times to insert the given knot (its multiplicity).
598
+ Default is 1.
599
+ per : int, optional
600
+ If non-zero, the input spline is considered periodic.
601
+
602
+ Returns
603
+ -------
604
+ BSpline instance or a tuple
605
+ A new B-spline with knots t, coefficients c, and degree k.
606
+ ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
607
+ In case of a periodic spline (``per != 0``) there must be
608
+ either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
609
+ or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
610
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
611
+ a BSpline object is constructed and returned.
612
+
613
+ Notes
614
+ -----
615
+ Based on algorithms from [1]_ and [2]_.
616
+
617
+ Manipulating the tck-tuples directly is not recommended. In new code,
618
+ prefer using the `BSpline` objects, in particular `BSpline.insert_knot`
619
+ method.
620
+
621
+ See Also
622
+ --------
623
+ BSpline.insert_knot
624
+
625
+ References
626
+ ----------
627
+ .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
628
+ Computer Aided Design, 12, p.199-201, 1980.
629
+ .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on
630
+ Numerical Analysis, Oxford University Press, 1993.
631
+
632
+ Examples
633
+ --------
634
+ You can insert knots into a B-spline.
635
+
636
+ >>> from scipy.interpolate import splrep, insert
637
+ >>> import numpy as np
638
+ >>> x = np.linspace(0, 10, 5)
639
+ >>> y = np.sin(x)
640
+ >>> tck = splrep(x, y)
641
+ >>> tck[0]
642
+ array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
643
+
644
+ A knot is inserted:
645
+
646
+ >>> tck_inserted = insert(3, tck)
647
+ >>> tck_inserted[0]
648
+ array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
649
+
650
+ Some knots are inserted:
651
+
652
+ >>> tck_inserted2 = insert(8, tck, m=3)
653
+ >>> tck_inserted2[0]
654
+ array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
655
+
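+ As noted above, new code is encouraged to use `BSpline.insert_knot`
+ instead; a brief sketch of the object-based equivalent, building the
+ `BSpline` with `make_interp_spline`:
+
+ >>> from scipy.interpolate import make_interp_spline
+ >>> spl = make_interp_spline(x, y, k=3)
+ >>> spl2 = spl.insert_knot(3) # plays the role of insert(3, tck)
+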
656
+ """
657
+ if isinstance(tck, BSpline):
658
+
659
+ t, c, k = tck.tck
660
+
661
+ # FITPACK expects the interpolation axis to be last, so roll it over
662
+ # NB: if c array is 1D, transposes are no-ops
663
+ sh = tuple(range(c.ndim))
664
+ c = c.transpose(sh[1:] + (0,))
665
+ t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
666
+
667
+ # and roll the last axis back
668
+ c_ = np.asarray(c_)
669
+ c_ = c_.transpose((sh[-1],) + sh[:-1])
670
+ return BSpline(t_, c_, k_)
671
+ else:
672
+ return _impl.insert(x, tck, m, per)
673
+
674
+
675
+ def splder(tck, n=1):
676
+ """
677
+ Compute the spline representation of the derivative of a given spline
678
+
679
+ Parameters
680
+ ----------
681
+ tck : BSpline instance or a tuple of (t, c, k)
682
+ Spline whose derivative to compute
683
+ n : int, optional
684
+ Order of derivative to evaluate. Default: 1
685
+
686
+ Returns
687
+ -------
688
+ `BSpline` instance or tuple
689
+ Spline of order k2=k-n representing the derivative
690
+ of the input spline.
691
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
692
+ a BSpline object is constructed and returned.
693
+
694
+ See Also
695
+ --------
696
+ splantider, splev, spalde
697
+ BSpline
698
+
699
+ Notes
700
+ -----
701
+
702
+ .. versionadded:: 0.13.0
703
+
704
+ Examples
705
+ --------
706
+ This can be used for finding maxima of a curve:
707
+
708
+ >>> from scipy.interpolate import splrep, splder, sproot
709
+ >>> import numpy as np
710
+ >>> x = np.linspace(0, 10, 70)
711
+ >>> y = np.sin(x)
712
+ >>> spl = splrep(x, y, k=4)
713
+
714
+ Now, differentiate the spline and find the zeros of the
715
+ derivative. (NB: `sproot` only works for order 3 splines, so we
716
+ fit an order 4 spline):
717
+
718
+ >>> dspl = splder(spl)
719
+ >>> sproot(dspl) / np.pi
720
+ array([ 0.50000001, 1.5 , 2.49999998])
721
+
722
+ This agrees well with roots :math:`\\pi/2 + n\\pi` of
723
+ :math:`\\cos(x) = \\sin'(x)`.
724
+
725
+ """
726
+ if isinstance(tck, BSpline):
727
+ return tck.derivative(n)
728
+ else:
729
+ return _impl.splder(tck, n)
730
+
731
+
732
+ def splantider(tck, n=1):
733
+ """
734
+ Compute the spline for the antiderivative (integral) of a given spline.
735
+
736
+ Parameters
737
+ ----------
738
+ tck : BSpline instance or a tuple of (t, c, k)
739
+ Spline whose antiderivative to compute
740
+ n : int, optional
741
+ Order of antiderivative to evaluate. Default: 1
742
+
743
+ Returns
744
+ -------
745
+ BSpline instance or a tuple of (t2, c2, k2)
746
+ Spline of order k2=k+n representing the antiderivative of the input
747
+ spline.
748
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
749
+ a BSpline object is constructed and returned.
750
+
751
+ See Also
752
+ --------
753
+ splder, splev, spalde
754
+ BSpline
755
+
756
+ Notes
757
+ -----
758
+ The `splder` function is the inverse operation of this function.
759
+ Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
760
+ rounding error.
761
+
762
+ .. versionadded:: 0.13.0
763
+
764
+ Examples
765
+ --------
766
+ >>> from scipy.interpolate import splrep, splder, splantider, splev
767
+ >>> import numpy as np
768
+ >>> x = np.linspace(0, np.pi/2, 70)
769
+ >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
770
+ >>> spl = splrep(x, y)
771
+
772
+ The derivative is the inverse operation of the antiderivative,
773
+ although some floating point error accumulates:
774
+
775
+ >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
776
+ (array(2.1565429877197317), array(2.1565429877201865))
777
+
778
+ Antiderivative can be used to evaluate definite integrals:
779
+
780
+ >>> ispl = splantider(spl)
781
+ >>> splev(np.pi/2, ispl) - splev(0, ispl)
782
+ 2.2572053588768486
783
+
784
+ This is indeed an approximation to the complete elliptic integral
785
+ :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
786
+
787
+ >>> from scipy.special import ellipk
788
+ >>> ellipk(0.8)
789
+ 2.2572053268208538
790
+
791
+ """
792
+ if isinstance(tck, BSpline):
793
+ return tck.antiderivative(n)
794
+ else:
795
+ return _impl.splantider(tck, n)
796
+
venv/lib/python3.10/site-packages/scipy/interpolate/_interpolate.py ADDED
@@ -0,0 +1,2473 @@
1
+ __all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly']
2
+
3
+ from math import prod
4
+ import warnings
5
+
6
+ import numpy as np
7
+ from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
8
+ ravel, poly1d, asarray, intp)
9
+
10
+ import scipy.special as spec
11
+ from scipy._lib._util import copy_if_needed
12
+ from scipy.special import comb
13
+
14
+ from . import _fitpack_py
15
+ from . import dfitpack
16
+ from ._polyint import _Interpolator1D
17
+ from . import _ppoly
18
+ from .interpnd import _ndim_coords_from_arrays
19
+ from ._bsplines import make_interp_spline, BSpline
20
+
21
+
22
+ def lagrange(x, w):
23
+ r"""
24
+ Return a Lagrange interpolating polynomial.
25
+
26
+ Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
27
+ polynomial through the points ``(x, w)``.
28
+
29
+ Warning: This implementation is numerically unstable. Do not expect to
30
+ be able to use more than about 20 points even if they are chosen optimally.
31
+
32
+ Parameters
33
+ ----------
34
+ x : array_like
35
+ `x` represents the x-coordinates of a set of datapoints.
36
+ w : array_like
37
+ `w` represents the y-coordinates of a set of datapoints, i.e., f(`x`).
38
+
39
+ Returns
40
+ -------
41
+ lagrange : `numpy.poly1d` instance
42
+ The Lagrange interpolating polynomial.
43
+
44
+ Examples
45
+ --------
46
+ Interpolate :math:`f(x) = x^3` by 3 points.
47
+
48
+ >>> import numpy as np
49
+ >>> from scipy.interpolate import lagrange
50
+ >>> x = np.array([0, 1, 2])
51
+ >>> y = x**3
52
+ >>> poly = lagrange(x, y)
53
+
54
+ Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
55
+ it is given by
56
+
57
+ .. math::
58
+
59
+ \begin{aligned}
60
+ L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
61
+ &= x (-2 + 3x)
62
+ \end{aligned}
63
+
64
+ >>> from numpy.polynomial.polynomial import Polynomial
65
+ >>> Polynomial(poly.coef[::-1]).coef
66
+ array([ 0., -2., 3.])
67
+
68
+ >>> import matplotlib.pyplot as plt
69
+ >>> x_new = np.arange(0, 2.1, 0.1)
70
+ >>> plt.scatter(x, y, label='data')
71
+ >>> plt.plot(x_new, Polynomial(poly.coef[::-1])(x_new), label='Polynomial')
72
+ >>> plt.plot(x_new, 3*x_new**2 - 2*x_new + 0*x_new,
73
+ ... label=r"$3 x^2 - 2 x$", linestyle='-.')
74
+ >>> plt.legend()
75
+ >>> plt.show()
76
+
77
+ """
78
+
79
+ M = len(x)
80
+ p = poly1d(0.0)
81
+ for j in range(M):
82
+ pt = poly1d(w[j])
83
+ for k in range(M):
84
+ if k == j:
85
+ continue
86
+ fac = x[j]-x[k]
87
+ pt *= poly1d([1.0, -x[k]])/fac
88
+ p += pt
89
+ return p
90
+
91
+
92
+ # !! Need to find argument for keeping initialize. If it isn't
93
+ # !! found, get rid of it!
94
+
95
+
96
+ dep_mesg = """\
97
+ `interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy 1.14.0.
98
+
99
+ For legacy code, nearly bug-for-bug compatible replacements are
100
+ `RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
101
+ scattered 2D data.
102
+
103
+ In new code, for regular grids use `RegularGridInterpolator` instead.
104
+ For scattered data, prefer `LinearNDInterpolator` or
105
+ `CloughTocher2DInterpolator`.
106
+
107
+ For more details see
108
+ `https://scipy.github.io/devdocs/notebooks/interp_transition_guide.html`
109
+ """
110
+
111
+ class interp2d:
112
+ """
113
+ interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
114
+ fill_value=None)
115
+
116
+ .. deprecated:: 1.10.0
117
+
118
+ `interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy
119
+ 1.14.0.
120
+
121
+ For legacy code, nearly bug-for-bug compatible replacements are
122
+ `RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
123
+ scattered 2D data.
124
+
125
+ In new code, for regular grids use `RegularGridInterpolator` instead.
126
+ For scattered data, prefer `LinearNDInterpolator` or
127
+ `CloughTocher2DInterpolator`.
128
+
129
+ For more details see
130
+ `https://scipy.github.io/devdocs/notebooks/interp_transition_guide.html
131
+ <https://scipy.github.io/devdocs/notebooks/interp_transition_guide.html>`_
132
+
133
+
134
+ Interpolate over a 2-D grid.
135
+
136
+ `x`, `y` and `z` are arrays of values used to approximate some function
137
+ f: ``z = f(x, y)`` which returns a scalar value `z`. This class returns a
138
+ function whose call method uses spline interpolation to find the value
139
+ of new points.
140
+
141
+ If `x` and `y` represent a regular grid, consider using
142
+ `RectBivariateSpline`.
143
+
144
+ If `z` is a vector value, consider using `interpn`.
145
+
146
+ Note that calling `interp2d` with NaNs present in input values, or with
147
+ decreasing values in `x` and `y` results in undefined behaviour.
148
+
149
+ Methods
150
+ -------
151
+ __call__
152
+
153
+ Parameters
154
+ ----------
155
+ x, y : array_like
156
+ Arrays defining the data point coordinates.
157
+ The data point coordinates need to be sorted by increasing order.
158
+
159
+ If the points lie on a regular grid, `x` can specify the column
160
+ coordinates and `y` the row coordinates, for example::
161
+
162
+ >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
163
+
164
+ Otherwise, `x` and `y` must specify the full coordinates for each
165
+ point, for example::
166
+
167
+ >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,4,2,5,3,6]
168
+
169
+ If `x` and `y` are multidimensional, they are flattened before use.
170
+ z : array_like
171
+ The values of the function to interpolate at the data points. If
172
+ `z` is a multidimensional array, it is flattened before use assuming
173
+ Fortran-ordering (order='F'). The length of a flattened `z` array
174
+ is either len(`x`)*len(`y`) if `x` and `y` specify the column and
175
+ row coordinates or ``len(z) == len(x) == len(y)`` if `x` and `y`
176
+ specify coordinates for each point.
177
+ kind : {'linear', 'cubic', 'quintic'}, optional
178
+ The kind of spline interpolation to use. Default is 'linear'.
179
+ copy : bool, optional
180
+ If True, the class makes internal copies of x, y and z.
181
+ If False, references may be used. The default is to copy.
182
+ bounds_error : bool, optional
183
+ If True, when interpolated values are requested outside of the
184
+ domain of the input data (x,y), a ValueError is raised.
185
+ If False, then `fill_value` is used.
186
+ fill_value : number, optional
187
+ If provided, the value to use for points outside of the
188
+ interpolation domain. If omitted (None), values outside
189
+ the domain are extrapolated via nearest-neighbor extrapolation.
190
+
191
+ See Also
192
+ --------
193
+ RectBivariateSpline :
194
+ Much faster 2-D interpolation if your input data is on a grid
195
+ bisplrep, bisplev :
196
+ Spline interpolation based on FITPACK
197
+ BivariateSpline : a more recent wrapper of the FITPACK routines
198
+ interp1d : 1-D version of this function
199
+ RegularGridInterpolator : interpolation on a regular or rectilinear grid
200
+ in arbitrary dimensions.
201
+ interpn : Multidimensional interpolation on regular grids (wraps
202
+ `RegularGridInterpolator` and `RectBivariateSpline`).
203
+
204
+ Notes
205
+ -----
206
+ The minimum number of data points required along the interpolation
207
+ axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
208
+ quintic interpolation.
209
+
210
+ The interpolator is constructed by `bisplrep`, with a smoothing factor
211
+ of 0. If more control over smoothing is needed, `bisplrep` should be
212
+ used directly.
213
+
214
+ The coordinates of the data points to interpolate `xnew` and `ynew`
215
+ have to be sorted in ascending order.
216
+ `interp2d` is legacy and is not
217
+ recommended for use in new code. New code should use
218
+ `RegularGridInterpolator` instead.
219
+
220
+ Examples
221
+ --------
222
+ Construct a 2-D grid and interpolate on it:
223
+
224
+ >>> import numpy as np
225
+ >>> from scipy import interpolate
226
+ >>> x = np.arange(-5.01, 5.01, 0.25)
227
+ >>> y = np.arange(-5.01, 5.01, 0.25)
228
+ >>> xx, yy = np.meshgrid(x, y)
229
+ >>> z = np.sin(xx**2+yy**2)
230
+ >>> f = interpolate.interp2d(x, y, z, kind='cubic')
231
+
232
+ Now use the obtained interpolation function and plot the result:
233
+
234
+ >>> import matplotlib.pyplot as plt
235
+ >>> xnew = np.arange(-5.01, 5.01, 1e-2)
236
+ >>> ynew = np.arange(-5.01, 5.01, 1e-2)
237
+ >>> znew = f(xnew, ynew)
238
+ >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
239
+ >>> plt.show()
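+
+ As a sketch of the replacement suggested in the deprecation note above
+ (not a bug-for-bug equivalent): the same gridded data can be handled by
+ `RegularGridInterpolator`, whose grid axes follow the ``(y, x)`` layout
+ of ``z``:
+
+ >>> r = interpolate.RegularGridInterpolator((y, x), z, method='cubic')
+ >>> pts = np.array([[0.5, 1.5], [-2.0, 2.5]]) # (y, x) pairs
+ >>> znew_r = r(pts)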
240
+ """
241
+
242
+ def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
243
+ fill_value=None):
244
+ warnings.warn(dep_mesg, DeprecationWarning, stacklevel=2)
245
+
246
+ x = ravel(x)
247
+ y = ravel(y)
248
+ z = asarray(z)
249
+
250
+ rectangular_grid = (z.size == len(x) * len(y))
251
+ if rectangular_grid:
252
+ if z.ndim == 2:
253
+ if z.shape != (len(y), len(x)):
254
+ raise ValueError("When on a regular grid with x.size = m "
255
+ "and y.size = n, if z.ndim == 2, then z "
256
+ "must have shape (n, m)")
257
+ if not np.all(x[1:] >= x[:-1]):
258
+ j = np.argsort(x)
259
+ x = x[j]
260
+ z = z[:, j]
261
+ if not np.all(y[1:] >= y[:-1]):
262
+ j = np.argsort(y)
263
+ y = y[j]
264
+ z = z[j, :]
265
+ z = ravel(z.T)
266
+ else:
267
+ z = ravel(z)
268
+ if len(x) != len(y):
269
+ raise ValueError(
270
+ "x and y must have equal lengths for non rectangular grid")
271
+ if len(z) != len(x):
272
+ raise ValueError(
273
+ "Invalid length for input z for non rectangular grid")
274
+
275
+ interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
276
+ try:
277
+ kx = ky = interpolation_types[kind]
278
+ except KeyError as e:
279
+ raise ValueError(
280
+ f"Unsupported interpolation type {repr(kind)}, must be "
281
+ f"either of {', '.join(map(repr, interpolation_types))}."
282
+ ) from e
283
+
284
+ if not rectangular_grid:
285
+ # TODO: surfit is really not meant for interpolation!
286
+ self.tck = _fitpack_py.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
287
+ else:
288
+ nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
289
+ x, y, z, None, None, None, None,
290
+ kx=kx, ky=ky, s=0.0)
291
+ self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
292
+ kx, ky)
293
+
294
+ self.bounds_error = bounds_error
295
+ self.fill_value = fill_value
296
+ self.x, self.y, self.z = (array(a, copy=copy) for a in (x, y, z))
297
+
298
+ self.x_min, self.x_max = np.amin(x), np.amax(x)
299
+ self.y_min, self.y_max = np.amin(y), np.amax(y)
300
+
301
+ def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
302
+ """Interpolate the function.
303
+
304
+ Parameters
305
+ ----------
306
+ x : 1-D array
307
+ x-coordinates of the mesh on which to interpolate.
308
+ y : 1-D array
309
+ y-coordinates of the mesh on which to interpolate.
310
+ dx : int >= 0, < kx
311
+ Order of partial derivatives in x.
312
+ dy : int >= 0, < ky
313
+ Order of partial derivatives in y.
314
+ assume_sorted : bool, optional
315
+ If False, values of `x` and `y` can be in any order and they are
316
+ sorted first.
317
+ If True, `x` and `y` have to be arrays of monotonically
318
+ increasing values.
319
+
320
+ Returns
321
+ -------
322
+ z : 2-D array with shape (len(y), len(x))
323
+ The interpolated values.
324
+ """
325
+ warnings.warn(dep_mesg, DeprecationWarning, stacklevel=2)
326
+
327
+ x = atleast_1d(x)
328
+ y = atleast_1d(y)
329
+
330
+ if x.ndim != 1 or y.ndim != 1:
331
+ raise ValueError("x and y should both be 1-D arrays")
332
+
333
+ if not assume_sorted:
334
+ x = np.sort(x, kind="mergesort")
335
+ y = np.sort(y, kind="mergesort")
336
+
337
+ if self.bounds_error or self.fill_value is not None:
338
+ out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
339
+ out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
340
+
341
+ any_out_of_bounds_x = np.any(out_of_bounds_x)
342
+ any_out_of_bounds_y = np.any(out_of_bounds_y)
343
+
344
+ if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
345
+ raise ValueError(
346
+ f"Values out of range; x must be in {(self.x_min, self.x_max)!r}, "
347
+ f"y in {(self.y_min, self.y_max)!r}"
348
+ )
349
+
350
+ z = _fitpack_py.bisplev(x, y, self.tck, dx, dy)
351
+ z = atleast_2d(z)
352
+ z = transpose(z)
353
+
354
+ if self.fill_value is not None:
355
+ if any_out_of_bounds_x:
356
+ z[:, out_of_bounds_x] = self.fill_value
357
+ if any_out_of_bounds_y:
358
+ z[out_of_bounds_y, :] = self.fill_value
359
+
360
+ if len(z) == 1:
361
+ z = z[0]
362
+ return array(z)
363
+
364
+
365
+ def _check_broadcast_up_to(arr_from, shape_to, name):
366
+ """Helper to check that arr_from broadcasts up to shape_to"""
367
+ shape_from = arr_from.shape
368
+ if len(shape_to) >= len(shape_from):
369
+ for t, f in zip(shape_to[::-1], shape_from[::-1]):
370
+ if f != 1 and f != t:
371
+ break
372
+ else: # all checks pass, do the upcasting that we need later
373
+ if arr_from.size != 1 and arr_from.shape != shape_to:
374
+ arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
375
+ return arr_from.ravel()
376
+ # at least one check failed
377
+ raise ValueError(f'{name} argument must be able to broadcast up '
378
+ f'to shape {shape_to} but had shape {shape_from}')
379
+
380
+
381
+ def _do_extrapolate(fill_value):
382
+ """Helper to check if fill_value == "extrapolate" without warnings"""
383
+ return (isinstance(fill_value, str) and
384
+ fill_value == 'extrapolate')
385
+
386
+
387
+ class interp1d(_Interpolator1D):
388
+ """
389
+ Interpolate a 1-D function.
390
+
391
+ .. legacy:: class
392
+
393
+ For a guide to the intended replacements for `interp1d` see
394
+ :ref:`tutorial-interpolate_1Dsection`.
395
+
396
+ `x` and `y` are arrays of values used to approximate some function f:
397
+ ``y = f(x)``. This class returns a function whose call method uses
398
+ interpolation to find the value of new points.
399
+
400
+ Parameters
401
+ ----------
402
+ x : (npoints, ) array_like
403
+ A 1-D array of real values.
404
+ y : (..., npoints, ...) array_like
405
+ A N-D array of real values. The length of `y` along the interpolation
406
+ axis must be equal to the length of `x`. Use the ``axis`` parameter
407
+ to select correct axis. Unlike other interpolators, the default
408
+ interpolation axis is the last axis of `y`.
409
+ kind : str or int, optional
410
+ Specifies the kind of interpolation as a string or as an integer
411
+ specifying the order of the spline interpolator to use.
412
+ The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
413
+ 'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
414
+ 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
415
+ zeroth, first, second or third order; 'previous' and 'next' simply
416
+ return the previous or next value of the point; 'nearest-up' and
417
+ 'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
418
+ in that 'nearest-up' rounds up and 'nearest' rounds down. Default
419
+ is 'linear'.
420
+ axis : int, optional
421
+ Axis in the ``y`` array corresponding to the x-coordinate values. Unlike
422
+ other interpolators, defaults to ``axis=-1``.
423
+ copy : bool, optional
424
+ If ``True``, the class makes internal copies of x and y. If ``False``,
425
+ references to ``x`` and ``y`` are used if possible. The default is to copy.
426
+ bounds_error : bool, optional
427
+ If True, a ValueError is raised any time interpolation is attempted on
428
+ a value outside of the range of x (where extrapolation is
429
+ necessary). If False, out of bounds values are assigned `fill_value`.
430
+ By default, an error is raised unless ``fill_value="extrapolate"``.
431
+ fill_value : array-like or (array-like, array_like) or "extrapolate", optional
432
+ - if a ndarray (or float), this value will be used to fill in for
433
+ requested points outside of the data range. If not provided, then
434
+ the default is NaN. The array-like must broadcast properly to the
435
+ dimensions of the non-interpolation axes.
436
+ - If a two-element tuple, then the first element is used as a
437
+ fill value for ``x_new < x[0]`` and the second element is used for
438
+ ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
439
+ list or ndarray, regardless of shape) is taken to be a single
440
+ array-like argument meant to be used for both bounds as
441
+ ``below, above = fill_value, fill_value``. Using a two-element tuple
442
+ or ndarray requires ``bounds_error=False``.
443
+
444
+ .. versionadded:: 0.17.0
445
+ - If "extrapolate", then points outside the data range will be
446
+ extrapolated.
447
+
448
+ .. versionadded:: 0.17.0
449
+ assume_sorted : bool, optional
450
+ If False, values of `x` can be in any order and they are sorted first.
451
+ If True, `x` has to be an array of monotonically increasing values.
452
+
453
+ Attributes
454
+ ----------
455
+ fill_value
456
+
457
+ Methods
458
+ -------
459
+ __call__
460
+
461
+ See Also
462
+ --------
463
+ splrep, splev
464
+ Spline interpolation/smoothing based on FITPACK.
465
+ UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
466
+ interp2d : 2-D interpolation
467
+
468
+ Notes
469
+ -----
470
+ Calling `interp1d` with NaNs present in input values results in
471
+ undefined behaviour.
472
+
473
+ Input values `x` and `y` must be convertible to `float` values like
474
+ `int` or `float`.
475
+
476
+ If the values in `x` are not unique, the resulting behavior is
477
+ undefined and specific to the choice of `kind`, i.e., changing
478
+ `kind` will change the behavior for duplicates.
479
+
480
+
481
+ Examples
482
+ --------
483
+ >>> import numpy as np
484
+ >>> import matplotlib.pyplot as plt
485
+ >>> from scipy import interpolate
486
+ >>> x = np.arange(0, 10)
487
+ >>> y = np.exp(-x/3.0)
488
+ >>> f = interpolate.interp1d(x, y)
489
+
490
+ >>> xnew = np.arange(0, 9, 0.1)
491
+ >>> ynew = f(xnew) # use interpolation function returned by `interp1d`
492
+ >>> plt.plot(x, y, 'o', xnew, ynew, '-')
493
+ >>> plt.show()
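+
+ A couple of further sketches of options described above: 'nearest'
+ rounds half-way points down while 'nearest-up' rounds them up, and
+ ``fill_value="extrapolate"`` allows evaluation outside the data range:
+
+ >>> f_near = interpolate.interp1d([0, 1, 2], [0, 1, 2], kind='nearest')
+ >>> f_nearup = interpolate.interp1d([0, 1, 2], [0, 1, 2], kind='nearest-up')
+ >>> float(f_near(0.5)), float(f_nearup(0.5))
+ (0.0, 1.0)
+ >>> f_extrap = interpolate.interp1d(x, y, fill_value="extrapolate")
+ >>> y_out = f_extrap(10.5) # no ValueError beyond x[-1] = 9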
494
+ """
495
+
496
+ def __init__(self, x, y, kind='linear', axis=-1,
497
+ copy=True, bounds_error=None, fill_value=np.nan,
498
+ assume_sorted=False):
499
+ """ Initialize a 1-D linear interpolation class."""
500
+ _Interpolator1D.__init__(self, x, y, axis=axis)
501
+
502
+ self.bounds_error = bounds_error # used by fill_value setter
503
+
504
+ # `copy` keyword semantics changed in NumPy 2.0, once that is
505
+ # the minimum version this can use `copy=None`.
506
+ self.copy = copy
507
+ if not copy:
508
+ self.copy = copy_if_needed
509
+
510
+ if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
511
+ order = {'zero': 0, 'slinear': 1,
512
+ 'quadratic': 2, 'cubic': 3}[kind]
513
+ kind = 'spline'
514
+ elif isinstance(kind, int):
515
+ order = kind
516
+ kind = 'spline'
517
+ elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
518
+ 'next'):
519
+ raise NotImplementedError("%s is unsupported: Use fitpack "
520
+ "routines for other types." % kind)
521
+ x = array(x, copy=self.copy)
522
+ y = array(y, copy=self.copy)
523
+
524
+ if not assume_sorted:
525
+ ind = np.argsort(x, kind="mergesort")
526
+ x = x[ind]
527
+ y = np.take(y, ind, axis=axis)
528
+
529
+ if x.ndim != 1:
530
+ raise ValueError("the x array must have exactly one dimension.")
531
+ if y.ndim == 0:
532
+ raise ValueError("the y array must have at least one dimension.")
533
+
534
+ # Force-cast y to a floating-point type, if it's not yet one
535
+ if not issubclass(y.dtype.type, np.inexact):
536
+ y = y.astype(np.float64)
537
+
538
+ # Backward compatibility
539
+ self.axis = axis % y.ndim
540
+
541
+ # Interpolation goes internally along the first axis
542
+ self.y = y
543
+ self._y = self._reshape_yi(self.y)
544
+ self.x = x
545
+ del y, x # clean up namespace to prevent misuse; use attributes
546
+ self._kind = kind
547
+
548
+ # Adjust to interpolation kind; store reference to *unbound*
549
+ # interpolation methods, in order to avoid circular references to self
550
+ # stored in the bound instance methods, and therefore delayed garbage
551
+ # collection. See: https://docs.python.org/reference/datamodel.html
552
+ if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
553
+ # Make a "view" of the y array that is rotated to the interpolation
554
+ # axis.
555
+ minval = 1
556
+ if kind == 'nearest':
557
+ # Do division before addition to prevent possible integer
558
+ # overflow
559
+ self._side = 'left'
560
+ self.x_bds = self.x / 2.0
561
+ self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
562
+
563
+ self._call = self.__class__._call_nearest
564
+ elif kind == 'nearest-up':
565
+ # Do division before addition to prevent possible integer
566
+ # overflow
567
+ self._side = 'right'
568
+ self.x_bds = self.x / 2.0
569
+ self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
570
+
571
+ self._call = self.__class__._call_nearest
572
+ elif kind == 'previous':
573
+ # Side for np.searchsorted and index for clipping
574
+ self._side = 'left'
575
+ self._ind = 0
576
+ # Move x by one floating point value to the left
577
+ self._x_shift = np.nextafter(self.x, -np.inf)
578
+ self._call = self.__class__._call_previousnext
579
+ if _do_extrapolate(fill_value):
580
+ self._check_and_update_bounds_error_for_extrapolation()
581
+ # assume y is sorted by x ascending order here.
582
+ fill_value = (np.nan, np.take(self.y, -1, axis))
583
+ elif kind == 'next':
584
+ self._side = 'right'
585
+ self._ind = 1
586
+ # Move x by one floating point value to the right
587
+ self._x_shift = np.nextafter(self.x, np.inf)
588
+ self._call = self.__class__._call_previousnext
589
+ if _do_extrapolate(fill_value):
590
+ self._check_and_update_bounds_error_for_extrapolation()
591
+ # assume y is sorted by x ascending order here.
592
+ fill_value = (np.take(self.y, 0, axis), np.nan)
593
+ else:
594
+ # Check if we can delegate to numpy.interp (2x-10x faster).
595
+ np_dtypes = (np.dtype(np.float64), np.dtype(int))
596
+ cond = self.x.dtype in np_dtypes and self.y.dtype in np_dtypes
597
+ cond = cond and self.y.ndim == 1
598
+ cond = cond and not _do_extrapolate(fill_value)
599
+
600
+ if cond:
601
+ self._call = self.__class__._call_linear_np
602
+ else:
603
+ self._call = self.__class__._call_linear
604
+ else:
605
+ minval = order + 1
606
+
607
+ rewrite_nan = False
608
+ xx, yy = self.x, self._y
609
+ if order > 1:
610
+ # Quadratic or cubic spline. If input contains even a single
611
+ # nan, then the output is all nans. We cannot just feed data
612
+ # with nans to make_interp_spline because it calls LAPACK.
613
+ # So, we make up a bogus x and y with no nans and use it
614
+ # to get the correct shape of the output, which we then fill
615
+ # with nans.
616
+ # For slinear or zero order spline, we just pass nans through.
617
+ mask = np.isnan(self.x)
618
+ if mask.any():
619
+ sx = self.x[~mask]
620
+ if sx.size == 0:
621
+ raise ValueError("`x` array is all-nan")
622
+ xx = np.linspace(np.nanmin(self.x),
623
+ np.nanmax(self.x),
624
+ len(self.x))
625
+ rewrite_nan = True
626
+ if np.isnan(self._y).any():
627
+ yy = np.ones_like(self._y)
628
+ rewrite_nan = True
629
+
630
+ self._spline = make_interp_spline(xx, yy, k=order,
631
+ check_finite=False)
632
+ if rewrite_nan:
633
+ self._call = self.__class__._call_nan_spline
634
+ else:
635
+ self._call = self.__class__._call_spline
636
+
637
+ if len(self.x) < minval:
638
+ raise ValueError("x and y arrays must have at "
639
+ "least %d entries" % minval)
640
+
641
+ self.fill_value = fill_value # calls the setter, can modify bounds_err
642
+
643
+ @property
644
+ def fill_value(self):
645
+ """The fill value."""
646
+ # backwards compat: mimic a public attribute
647
+ return self._fill_value_orig
648
+
649
+ @fill_value.setter
650
+ def fill_value(self, fill_value):
651
+ # extrapolation only works for nearest neighbor and linear methods
652
+ if _do_extrapolate(fill_value):
653
+ self._check_and_update_bounds_error_for_extrapolation()
654
+ self._extrapolate = True
655
+ else:
656
+ broadcast_shape = (self.y.shape[:self.axis] +
657
+ self.y.shape[self.axis + 1:])
658
+ if len(broadcast_shape) == 0:
659
+ broadcast_shape = (1,)
660
+ # it's either a pair (_below_range, _above_range) or a single value
661
+ # for both above and below range
662
+ if isinstance(fill_value, tuple) and len(fill_value) == 2:
663
+ below_above = [np.asarray(fill_value[0]),
664
+ np.asarray(fill_value[1])]
665
+ names = ('fill_value (below)', 'fill_value (above)')
666
+ for ii in range(2):
667
+ below_above[ii] = _check_broadcast_up_to(
668
+ below_above[ii], broadcast_shape, names[ii])
669
+ else:
670
+ fill_value = np.asarray(fill_value)
671
+ below_above = [_check_broadcast_up_to(
672
+ fill_value, broadcast_shape, 'fill_value')] * 2
673
+ self._fill_value_below, self._fill_value_above = below_above
674
+ self._extrapolate = False
675
+ if self.bounds_error is None:
676
+ self.bounds_error = True
677
+ # backwards compat: fill_value was a public attr; make it writeable
678
+ self._fill_value_orig = fill_value
679
+
680
+ def _check_and_update_bounds_error_for_extrapolation(self):
681
+ if self.bounds_error:
682
+ raise ValueError("Cannot extrapolate and raise "
683
+ "at the same time.")
684
+ self.bounds_error = False
685
+
686
+ def _call_linear_np(self, x_new):
687
+ # Note that out-of-bounds values are taken care of in self._evaluate
688
+ return np.interp(x_new, self.x, self.y)
689
+
690
+ def _call_linear(self, x_new):
691
+ # 2. Find where in the original data, the values to interpolate
692
+ # would be inserted.
693
+ # Note: If x_new[n] == x[m], then m is returned by searchsorted.
694
+ x_new_indices = searchsorted(self.x, x_new)
695
+
696
+ # 3. Clip x_new_indices so that they are within the range of
697
+ # self.x indices and at least 1. Removes mis-interpolation
698
+ # of x_new[n] = x[0]
699
+ x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
700
+
701
+ # 4. Calculate the slope of regions that each x_new value falls in.
702
+ lo = x_new_indices - 1
703
+ hi = x_new_indices
704
+
705
+ x_lo = self.x[lo]
706
+ x_hi = self.x[hi]
707
+ y_lo = self._y[lo]
708
+ y_hi = self._y[hi]
709
+
710
+ # Note that the following two expressions rely on the specifics of the
711
+ # broadcasting semantics.
712
+ slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
713
+
714
+ # 5. Calculate the actual value for each entry in x_new.
715
+ y_new = slope*(x_new - x_lo)[:, None] + y_lo
716
+
717
+ return y_new
718
+
719
+ def _call_nearest(self, x_new):
720
+ """ Find nearest neighbor interpolated y_new = f(x_new)."""
721
+
722
+ # 2. Find where in the averaged data the values to interpolate
723
+ # would be inserted.
724
+ # Note: use side='left' (right) to searchsorted() to define the
725
+ # halfway point to be nearest to the left (right) neighbor
726
+ x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
727
+
728
+ # 3. Clip x_new_indices so that they are within the range of x indices.
729
+ x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
730
+
731
+ # 4. Calculate the actual value for each entry in x_new.
732
+ y_new = self._y[x_new_indices]
733
+
734
+ return y_new
735
+
736
+ def _call_previousnext(self, x_new):
737
+ """Use previous/next neighbor of x_new, y_new = f(x_new)."""
738
+
739
+ # 1. Get index of left/right value
740
+ x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
741
+
742
+ # 2. Clip x_new_indices so that they are within the range of x indices.
743
+ x_new_indices = x_new_indices.clip(1-self._ind,
744
+ len(self.x)-self._ind).astype(intp)
745
+
746
+ # 3. Calculate the actual value for each entry in x_new.
747
+ y_new = self._y[x_new_indices+self._ind-1]
748
+
749
+ return y_new
750
+
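The lookup variants above differ only in which shifted breakpoint array is searched and on which ``side=`` ties fall. A quick comparison through the public ``kind=`` options of ``interp1d`` (hypothetical data, query points chosen away from the half-way ties):

    import numpy as np
    from scipy.interpolate import interp1d

    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([0.0, 10.0, 20.0, 30.0])
    x_new = [0.4, 0.6, 1.6]

    for kind in ("nearest", "previous", "next"):
        f = interp1d(x, y, kind=kind)
        print(kind, f(x_new))
    # nearest  [ 0. 10. 20.]
    # previous [ 0.  0. 10.]
    # next     [10. 10. 20.]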
751
+ def _call_spline(self, x_new):
752
+ return self._spline(x_new)
753
+
754
+ def _call_nan_spline(self, x_new):
755
+ out = self._spline(x_new)
756
+ out[...] = np.nan
757
+ return out
758
+
759
+ def _evaluate(self, x_new):
760
+ # 1. Handle values in x_new that are outside of x. Throw error,
761
+ # or return boolean masks indicating the out-of-bounds values.
762
+ # The behavior is set by the bounds_error variable.
763
+ x_new = asarray(x_new)
764
+ y_new = self._call(self, x_new)
765
+ if not self._extrapolate:
766
+ below_bounds, above_bounds = self._check_bounds(x_new)
767
+ if len(y_new) > 0:
768
+ # Note fill_value must be broadcast up to the proper size
769
+ # and flattened to work here
770
+ y_new[below_bounds] = self._fill_value_below
771
+ y_new[above_bounds] = self._fill_value_above
772
+ return y_new
773
+
774
+ def _check_bounds(self, x_new):
775
+ """Check the inputs for being in the bounds of the interpolated data.
776
+
777
+ Parameters
778
+ ----------
779
+ x_new : array
780
+
781
+ Returns
782
+ -------
783
+ out_of_bounds : bool array
784
+ The mask on x_new of values that are out of the bounds.
785
+ """
786
+
787
+ # If self.bounds_error is True, we raise an error if any x_new values
788
+ # fall outside the range of x. Otherwise, we return an array indicating
789
+ # which values are outside the boundary region.
790
+ below_bounds = x_new < self.x[0]
791
+ above_bounds = x_new > self.x[-1]
792
+
793
+ if self.bounds_error and below_bounds.any():
794
+ below_bounds_value = x_new[np.argmax(below_bounds)]
795
+ raise ValueError(f"A value ({below_bounds_value}) in x_new is below "
796
+ f"the interpolation range's minimum value ({self.x[0]}).")
797
+ if self.bounds_error and above_bounds.any():
798
+ above_bounds_value = x_new[np.argmax(above_bounds)]
799
+ raise ValueError(f"A value ({above_bounds_value}) in x_new is above "
800
+ f"the interpolation range's maximum value ({self.x[-1]}).")
801
+
802
+ # !! Should we emit a warning if some values are out of bounds?
803
+ # !! matlab does not.
804
+ return below_bounds, above_bounds
805
+
806
+
807
+ class _PPolyBase:
808
+ """Base class for piecewise polynomials."""
809
+ __slots__ = ('c', 'x', 'extrapolate', 'axis')
810
+
811
+ def __init__(self, c, x, extrapolate=None, axis=0):
812
+ self.c = np.asarray(c)
813
+ self.x = np.ascontiguousarray(x, dtype=np.float64)
814
+
815
+ if extrapolate is None:
816
+ extrapolate = True
817
+ elif extrapolate != 'periodic':
818
+ extrapolate = bool(extrapolate)
819
+ self.extrapolate = extrapolate
820
+
821
+ if self.c.ndim < 2:
822
+ raise ValueError("Coefficients array must be at least "
823
+ "2-dimensional.")
824
+
825
+ if not (0 <= axis < self.c.ndim - 1):
826
+ raise ValueError(f"axis={axis} must be between 0 and {self.c.ndim-1}")
827
+
828
+ self.axis = axis
829
+ if axis != 0:
830
+ # move the interpolation axis to be the first one in self.c
831
+ # More specifically, the target shape for self.c is (k, m, ...),
832
+ # and axis !=0 means that we have c.shape (..., k, m, ...)
833
+ # ^
834
+ # axis
835
+ # So we roll two of them.
836
+ self.c = np.moveaxis(self.c, axis+1, 0)
837
+ self.c = np.moveaxis(self.c, axis+1, 0)
838
+
839
+ if self.x.ndim != 1:
840
+ raise ValueError("x must be 1-dimensional")
841
+ if self.x.size < 2:
842
+ raise ValueError("at least 2 breakpoints are needed")
843
+ if self.c.ndim < 2:
844
+ raise ValueError("c must have at least 2 dimensions")
845
+ if self.c.shape[0] == 0:
846
+ raise ValueError("polynomial must be at least of order 0")
847
+ if self.c.shape[1] != self.x.size-1:
848
+ raise ValueError("number of coefficients != len(x)-1")
849
+ dx = np.diff(self.x)
850
+ if not (np.all(dx >= 0) or np.all(dx <= 0)):
851
+ raise ValueError("`x` must be strictly increasing or decreasing.")
852
+
853
+ dtype = self._get_dtype(self.c.dtype)
854
+ self.c = np.ascontiguousarray(self.c, dtype=dtype)
855
+
856
+ def _get_dtype(self, dtype):
857
+ if np.issubdtype(dtype, np.complexfloating) \
858
+ or np.issubdtype(self.c.dtype, np.complexfloating):
859
+ return np.complex128
860
+ else:
861
+ return np.float64
862
+
863
+ @classmethod
864
+ def construct_fast(cls, c, x, extrapolate=None, axis=0):
865
+ """
866
+ Construct the piecewise polynomial without making checks.
867
+
868
+ Takes the same parameters as the constructor. Input arguments
869
+ ``c`` and ``x`` must be arrays of the correct shape and type. The
870
+ ``c`` array can only be of dtypes float and complex, and ``x``
871
+ array must have dtype float.
872
+ """
873
+ self = object.__new__(cls)
874
+ self.c = c
875
+ self.x = x
876
+ self.axis = axis
877
+ if extrapolate is None:
878
+ extrapolate = True
879
+ self.extrapolate = extrapolate
880
+ return self
881
+
882
+ def _ensure_c_contiguous(self):
883
+ """
884
+ c and x may be modified by the user. The Cython code expects
885
+ that they are C contiguous.
886
+ """
887
+ if not self.x.flags.c_contiguous:
888
+ self.x = self.x.copy()
889
+ if not self.c.flags.c_contiguous:
890
+ self.c = self.c.copy()
891
+
892
+ def extend(self, c, x):
893
+ """
894
+ Add additional breakpoints and coefficients to the polynomial.
895
+
896
+ Parameters
897
+ ----------
898
+ c : ndarray, size (k, m, ...)
899
+ Additional coefficients for polynomials in intervals. Note that
900
+ the first additional interval will be formed using one of the
901
+ ``self.x`` end points.
902
+ x : ndarray, size (m,)
903
+ Additional breakpoints. Must be sorted in the same order as
904
+ ``self.x`` and either to the right or to the left of the current
905
+ breakpoints.
906
+ """
907
+
908
+ c = np.asarray(c)
909
+ x = np.asarray(x)
910
+
911
+ if c.ndim < 2:
912
+ raise ValueError("invalid dimensions for c")
913
+ if x.ndim != 1:
914
+ raise ValueError("invalid dimensions for x")
915
+ if x.shape[0] != c.shape[1]:
916
+ raise ValueError(f"Shapes of x {x.shape} and c {c.shape} are incompatible")
917
+ if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
918
+ raise ValueError("Shapes of c {} and self.c {} are incompatible"
919
+ .format(c.shape, self.c.shape))
920
+
921
+ if c.size == 0:
922
+ return
923
+
924
+ dx = np.diff(x)
925
+ if not (np.all(dx >= 0) or np.all(dx <= 0)):
926
+ raise ValueError("`x` is not sorted.")
927
+
928
+ if self.x[-1] >= self.x[0]:
929
+ if not x[-1] >= x[0]:
930
+ raise ValueError("`x` is in a different order "
931
+ "than `self.x`.")
932
+
933
+ if x[0] >= self.x[-1]:
934
+ action = 'append'
935
+ elif x[-1] <= self.x[0]:
936
+ action = 'prepend'
937
+ else:
938
+ raise ValueError("`x` is neither to the left nor to the right "
939
+ "of `self.x`.")
940
+ else:
941
+ if not x[-1] <= x[0]:
942
+ raise ValueError("`x` is in a different order "
943
+ "than `self.x`.")
944
+
945
+ if x[0] <= self.x[-1]:
946
+ action = 'append'
947
+ elif x[-1] >= self.x[0]:
948
+ action = 'prepend'
949
+ else:
950
+ raise ValueError("`x` is neither to the left nor to the right "
950
+ "of `self.x`.")
952
+
953
+ dtype = self._get_dtype(c.dtype)
954
+
955
+ k2 = max(c.shape[0], self.c.shape[0])
956
+ c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
957
+ dtype=dtype)
958
+
959
+ if action == 'append':
960
+ c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
961
+ c2[k2-c.shape[0]:, self.c.shape[1]:] = c
962
+ self.x = np.r_[self.x, x]
963
+ elif action == 'prepend':
964
+ c2[k2-self.c.shape[0]:, :c.shape[1]] = c
965
+ c2[k2-c.shape[0]:, c.shape[1]:] = self.c
966
+ self.x = np.r_[x, self.x]
967
+
968
+ self.c = c2
969
+
970
+ def __call__(self, x, nu=0, extrapolate=None):
971
+ """
972
+ Evaluate the piecewise polynomial or its derivative.
973
+
974
+ Parameters
975
+ ----------
976
+ x : array_like
977
+ Points to evaluate the interpolant at.
978
+ nu : int, optional
979
+ Order of derivative to evaluate. Must be non-negative.
980
+ extrapolate : {bool, 'periodic', None}, optional
981
+ If bool, determines whether to extrapolate to out-of-bounds points
982
+ based on first and last intervals, or to return NaNs.
983
+ If 'periodic', periodic extrapolation is used.
984
+ If None (default), use `self.extrapolate`.
985
+
986
+ Returns
987
+ -------
988
+ y : array_like
989
+ Interpolated values. Shape is determined by replacing
990
+ the interpolation axis in the original array with the shape of x.
991
+
992
+ Notes
993
+ -----
994
+ Derivatives are evaluated piecewise for each polynomial
995
+ segment, even if the polynomial is not differentiable at the
996
+ breakpoints. The polynomial intervals are considered half-open,
997
+ ``[a, b)``, except for the last interval which is closed
998
+ ``[a, b]``.
999
+ """
1000
+ if extrapolate is None:
1001
+ extrapolate = self.extrapolate
1002
+ x = np.asarray(x)
1003
+ x_shape, x_ndim = x.shape, x.ndim
1004
+ x = np.ascontiguousarray(x.ravel(), dtype=np.float64)
1005
+
1006
+ # With periodic extrapolation we map x to the segment
1007
+ # [self.x[0], self.x[-1]].
1008
+ if extrapolate == 'periodic':
1009
+ x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
1010
+ extrapolate = False
1011
+
1012
+ out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
1013
+ self._ensure_c_contiguous()
1014
+ self._evaluate(x, nu, extrapolate, out)
1015
+ out = out.reshape(x_shape + self.c.shape[2:])
1016
+ if self.axis != 0:
1017
+ # transpose to move the calculated values to the interpolation axis
1018
+ l = list(range(out.ndim))
1019
+ l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
1020
+ out = out.transpose(l)
1021
+ return out
1022
+
1023
+
1024
+ class PPoly(_PPolyBase):
1025
+ """
1026
+ Piecewise polynomial in terms of coefficients and breakpoints
1027
+
1028
+ The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
1029
+ local power basis::
1030
+
1031
+ S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
1032
+
1033
+ where ``k`` is the degree of the polynomial.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ c : ndarray, shape (k, m, ...)
1038
+ Polynomial coefficients, order `k` and `m` intervals.
1039
+ x : ndarray, shape (m+1,)
1040
+ Polynomial breakpoints. Must be sorted in either increasing or
1041
+ decreasing order.
1042
+ extrapolate : bool or 'periodic', optional
1043
+ If bool, determines whether to extrapolate to out-of-bounds points
1044
+ based on first and last intervals, or to return NaNs. If 'periodic',
1045
+ periodic extrapolation is used. Default is True.
1046
+ axis : int, optional
1047
+ Interpolation axis. Default is zero.
1048
+
1049
+ Attributes
1050
+ ----------
1051
+ x : ndarray
1052
+ Breakpoints.
1053
+ c : ndarray
1054
+ Coefficients of the polynomials. They are reshaped
1055
+ to a 3-D array with the last dimension representing
1056
+ the trailing dimensions of the original coefficient array.
1057
+ axis : int
1058
+ Interpolation axis.
1059
+
1060
+ Methods
1061
+ -------
1062
+ __call__
1063
+ derivative
1064
+ antiderivative
1065
+ integrate
1066
+ solve
1067
+ roots
1068
+ extend
1069
+ from_spline
1070
+ from_bernstein_basis
1071
+ construct_fast
1072
+
1073
+ See also
1074
+ --------
1075
+ BPoly : piecewise polynomials in the Bernstein basis
1076
+
1077
+ Notes
1078
+ -----
1079
+ High-order polynomials in the power basis can be numerically
1080
+ unstable. Precision problems can start to appear for orders
1081
+ larger than 20-30.
1082
+ """
1083
+
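A small numeric check of the local power basis written out in the docstring, using one hypothetical quadratic piece (not data from this file):

    import numpy as np
    from scipy.interpolate import PPoly

    # one interval [1, 3]; coefficients of 2*(x-1)**2 + 3*(x-1) + 4
    c = np.array([[2.0], [3.0], [4.0]])     # shape (k+1, m) = (3, 1)
    p = PPoly(c, np.array([1.0, 3.0]))

    xp = 2.5
    manual = 2.0*(xp - 1.0)**2 + 3.0*(xp - 1.0) + 4.0
    print(p(xp), manual)                    # both 13.0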
1084
+ def _evaluate(self, x, nu, extrapolate, out):
1085
+ _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1086
+ self.x, x, nu, bool(extrapolate), out)
1087
+
1088
+ def derivative(self, nu=1):
1089
+ """
1090
+ Construct a new piecewise polynomial representing the derivative.
1091
+
1092
+ Parameters
1093
+ ----------
1094
+ nu : int, optional
1095
+ Order of derivative to evaluate. Default is 1, i.e., compute the
1096
+ first derivative. If negative, the antiderivative is returned.
1097
+
1098
+ Returns
1099
+ -------
1100
+ pp : PPoly
1101
+ Piecewise polynomial of order k2 = k - n representing the derivative
1102
+ of this polynomial.
1103
+
1104
+ Notes
1105
+ -----
1106
+ Derivatives are evaluated piecewise for each polynomial
1107
+ segment, even if the polynomial is not differentiable at the
1108
+ breakpoints. The polynomial intervals are considered half-open,
1109
+ ``[a, b)``, except for the last interval which is closed
1110
+ ``[a, b]``.
1111
+ """
1112
+ if nu < 0:
1113
+ return self.antiderivative(-nu)
1114
+
1115
+ # reduce order
1116
+ if nu == 0:
1117
+ c2 = self.c.copy()
1118
+ else:
1119
+ c2 = self.c[:-nu, :].copy()
1120
+
1121
+ if c2.shape[0] == 0:
1122
+ # derivative of order 0 is zero
1123
+ c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
1124
+
1125
+ # multiply by the correct rising factorials
1126
+ factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
1127
+ c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
1128
+
1129
+ # construct a compatible polynomial
1130
+ return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
1131
+
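A sketch of ``derivative`` on the same hypothetical quadratic piece: dropping the last coefficient row and scaling by the rising factorial reproduces the analytic derivative.

    import numpy as np
    from scipy.interpolate import PPoly

    p = PPoly(np.array([[2.0], [3.0], [4.0]]), np.array([1.0, 3.0]))
    dp = p.derivative()

    print(dp.c.ravel())                      # [4. 3.]  i.e. 4*(x-1) + 3
    print(dp(2.0), 2*2.0*(2.0 - 1.0) + 3.0)  # both 7.0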
1132
+ def antiderivative(self, nu=1):
1133
+ """
1134
+ Construct a new piecewise polynomial representing the antiderivative.
1135
+
1136
+ Antiderivative is also the indefinite integral of the function,
1137
+ and derivative is its inverse operation.
1138
+
1139
+ Parameters
1140
+ ----------
1141
+ nu : int, optional
1142
+ Order of antiderivative to evaluate. Default is 1, i.e., compute
1143
+ the first integral. If negative, the derivative is returned.
1144
+
1145
+ Returns
1146
+ -------
1147
+ pp : PPoly
1148
+ Piecewise polynomial of order k2 = k + n representing
1149
+ the antiderivative of this polynomial.
1150
+
1151
+ Notes
1152
+ -----
1153
+ The antiderivative returned by this function is continuous and
1154
+ continuously differentiable to order n-1, up to floating point
1155
+ rounding error.
1156
+
1157
+ If antiderivative is computed and ``self.extrapolate='periodic'``,
1158
+ it will be set to False for the returned instance. This is done because
1159
+ the antiderivative is no longer periodic and its correct evaluation
1160
+ outside of the initially given x interval is difficult.
1161
+ """
1162
+ if nu <= 0:
1163
+ return self.derivative(-nu)
1164
+
1165
+ c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
1166
+ dtype=self.c.dtype)
1167
+ c[:-nu] = self.c
1168
+
1169
+ # divide by the correct rising factorials
1170
+ factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
1171
+ c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
1172
+
1173
+ # fix continuity of added degrees of freedom
1174
+ self._ensure_c_contiguous()
1175
+ _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
1176
+ self.x, nu - 1)
1177
+
1178
+ if self.extrapolate == 'periodic':
1179
+ extrapolate = False
1180
+ else:
1181
+ extrapolate = self.extrapolate
1182
+
1183
+ # construct a compatible polynomial
1184
+ return self.construct_fast(c, self.x, extrapolate, self.axis)
1185
+
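A sketch of the continuity fix above: the antiderivative of a piecewise polynomial with a kink is itself continuous across the breakpoint, and differentiating it recovers the original pieces (hypothetical data).

    import numpy as np
    from scipy.interpolate import PPoly

    # two linear pieces: x on [0, 1) and 2 - x on [1, 2]
    p = PPoly(np.array([[1.0, -1.0], [0.0, 1.0]]), np.array([0.0, 1.0, 2.0]))
    ip = p.antiderivative()

    xs = np.linspace(0.0, 2.0, 9)
    print(np.allclose(ip.derivative()(xs), p(xs)))   # True
    eps = 1e-12
    print(np.isclose(ip(1 - eps), ip(1 + eps)))      # True: continuous at x=1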
1186
+ def integrate(self, a, b, extrapolate=None):
1187
+ """
1188
+ Compute a definite integral over a piecewise polynomial.
1189
+
1190
+ Parameters
1191
+ ----------
1192
+ a : float
1193
+ Lower integration bound
1194
+ b : float
1195
+ Upper integration bound
1196
+ extrapolate : {bool, 'periodic', None}, optional
1197
+ If bool, determines whether to extrapolate to out-of-bounds points
1198
+ based on first and last intervals, or to return NaNs.
1199
+ If 'periodic', periodic extrapolation is used.
1200
+ If None (default), use `self.extrapolate`.
1201
+
1202
+ Returns
1203
+ -------
1204
+ ig : array_like
1205
+ Definite integral of the piecewise polynomial over [a, b]
1206
+ """
1207
+ if extrapolate is None:
1208
+ extrapolate = self.extrapolate
1209
+
1210
+ # Swap integration bounds if needed
1211
+ sign = 1
1212
+ if b < a:
1213
+ a, b = b, a
1214
+ sign = -1
1215
+
1216
+ range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
1217
+ self._ensure_c_contiguous()
1218
+
1219
+ # Compute the integral.
1220
+ if extrapolate == 'periodic':
1221
+ # Split the integral into the part over period (can be several
1222
+ # of them) and the remaining part.
1223
+
1224
+ xs, xe = self.x[0], self.x[-1]
1225
+ period = xe - xs
1226
+ interval = b - a
1227
+ n_periods, left = divmod(interval, period)
1228
+
1229
+ if n_periods > 0:
1230
+ _ppoly.integrate(
1231
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1232
+ self.x, xs, xe, False, out=range_int)
1233
+ range_int *= n_periods
1234
+ else:
1235
+ range_int.fill(0)
1236
+
1237
+ # Map a to [xs, xe], b is always a + left.
1238
+ a = xs + (a - xs) % period
1239
+ b = a + left
1240
+
1241
+ # If b <= xe then we need to integrate over [a, b], otherwise
1242
+ # over [a, xe] and from xs to what remains.
1243
+ remainder_int = np.empty_like(range_int)
1244
+ if b <= xe:
1245
+ _ppoly.integrate(
1246
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1247
+ self.x, a, b, False, out=remainder_int)
1248
+ range_int += remainder_int
1249
+ else:
1250
+ _ppoly.integrate(
1251
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1252
+ self.x, a, xe, False, out=remainder_int)
1253
+ range_int += remainder_int
1254
+
1255
+ _ppoly.integrate(
1256
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1257
+ self.x, xs, xs + left + a - xe, False, out=remainder_int)
1258
+ range_int += remainder_int
1259
+ else:
1260
+ _ppoly.integrate(
1261
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1262
+ self.x, a, b, bool(extrapolate), out=range_int)
1263
+
1264
+ # Return
1265
+ range_int *= sign
1266
+ return range_int.reshape(self.c.shape[2:])
1267
+
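The periodic branch above splits the integral into whole periods plus a wrapped remainder. A sketch on a single quadratic piece treated as 1-periodic (hypothetical data):

    import numpy as np
    from scipy.interpolate import PPoly

    p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 1.0]),
              extrapolate='periodic')          # x**2 on [0, 1], repeated

    one_period = p.integrate(0, 1)             # 1/3
    print(p.integrate(0, 2.5))                 # two full periods plus half a period
    print(2*one_period + p.integrate(0, 0.5))  # same value, assembled by hand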
1268
+ def solve(self, y=0., discontinuity=True, extrapolate=None):
1269
+ """
1270
+ Find real solutions of the equation ``pp(x) == y``.
1271
+
1272
+ Parameters
1273
+ ----------
1274
+ y : float, optional
1275
+ Right-hand side. Default is zero.
1276
+ discontinuity : bool, optional
1277
+ Whether to report sign changes across discontinuities at
1278
+ breakpoints as roots.
1279
+ extrapolate : {bool, 'periodic', None}, optional
1280
+ If bool, determines whether to return roots from the polynomial
1281
+ extrapolated based on first and last intervals, 'periodic' works
1282
+ the same as False. If None (default), use `self.extrapolate`.
1283
+
1284
+ Returns
1285
+ -------
1286
+ roots : ndarray
1287
+ Roots of the polynomial(s).
1288
+
1289
+ If the PPoly object describes multiple polynomials, the
1290
+ return value is an object array in which each element is an
1291
+ ndarray containing the roots.
1292
+
1293
+ Notes
1294
+ -----
1295
+ This routine works only on real-valued polynomials.
1296
+
1297
+ If the piecewise polynomial contains sections that are
1298
+ identically zero, the root list will contain the start point
1299
+ of the corresponding interval, followed by a ``nan`` value.
1300
+
1301
+ If the polynomial is discontinuous across a breakpoint, and
1302
+ there is a sign change across the breakpoint, this is reported
1303
+ if the `discontinuity` parameter is True.
1304
+
1305
+ Examples
1306
+ --------
1307
+
1308
+ Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
1309
+ ``[-2, 1], [1, 2]``:
1310
+
1311
+ >>> import numpy as np
1312
+ >>> from scipy.interpolate import PPoly
1313
+ >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
1314
+ >>> pp.solve()
1315
+ array([-1., 1.])
1316
+ """
1317
+ if extrapolate is None:
1318
+ extrapolate = self.extrapolate
1319
+
1320
+ self._ensure_c_contiguous()
1321
+
1322
+ if np.issubdtype(self.c.dtype, np.complexfloating):
1323
+ raise ValueError("Root finding is only for "
1324
+ "real-valued polynomials")
1325
+
1326
+ y = float(y)
1327
+ r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1328
+ self.x, y, bool(discontinuity),
1329
+ bool(extrapolate))
1330
+ if self.c.ndim == 2:
1331
+ return r[0]
1332
+ else:
1333
+ r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
1334
+ # this for-loop is equivalent to ``r2[...] = r``, but that's broken
1335
+ # in NumPy 1.6.0
1336
+ for ii, root in enumerate(r):
1337
+ r2[ii] = root
1338
+
1339
+ return r2.reshape(self.c.shape[2:])
1340
+
1341
+ def roots(self, discontinuity=True, extrapolate=None):
1342
+ """
1343
+ Find real roots of the piecewise polynomial.
1344
+
1345
+ Parameters
1346
+ ----------
1347
+ discontinuity : bool, optional
1348
+ Whether to report sign changes across discontinuities at
1349
+ breakpoints as roots.
1350
+ extrapolate : {bool, 'periodic', None}, optional
1351
+ If bool, determines whether to return roots from the polynomial
1352
+ extrapolated based on first and last intervals, 'periodic' works
1353
+ the same as False. If None (default), use `self.extrapolate`.
1354
+
1355
+ Returns
1356
+ -------
1357
+ roots : ndarray
1358
+ Roots of the polynomial(s).
1359
+
1360
+ If the PPoly object describes multiple polynomials, the
1361
+ return value is an object array in which each element is an
1362
+ ndarray containing the roots.
1363
+
1364
+ See Also
1365
+ --------
1366
+ PPoly.solve
1367
+ """
1368
+ return self.solve(0, discontinuity, extrapolate)
1369
+
1370
+ @classmethod
1371
+ def from_spline(cls, tck, extrapolate=None):
1372
+ """
1373
+ Construct a piecewise polynomial from a spline
1374
+
1375
+ Parameters
1376
+ ----------
1377
+ tck
1378
+ A spline, as returned by `splrep` or a BSpline object.
1379
+ extrapolate : bool or 'periodic', optional
1380
+ If bool, determines whether to extrapolate to out-of-bounds points
1381
+ based on first and last intervals, or to return NaNs.
1382
+ If 'periodic', periodic extrapolation is used. Default is True.
1383
+
1384
+ Examples
1385
+ --------
1386
+ Construct an interpolating spline and convert it to a `PPoly` instance
1387
+
1388
+ >>> import numpy as np
1389
+ >>> from scipy.interpolate import splrep, PPoly
1390
+ >>> x = np.linspace(0, 1, 11)
1391
+ >>> y = np.sin(2*np.pi*x)
1392
+ >>> tck = splrep(x, y, s=0)
1393
+ >>> p = PPoly.from_spline(tck)
1394
+ >>> isinstance(p, PPoly)
1395
+ True
1396
+
1397
+ Note that this function only supports 1D splines out of the box.
1398
+
1399
+ If the ``tck`` object represents a parametric spline (e.g. constructed
1400
+ by `splprep` or a `BSpline` with ``c.ndim > 1``), you will need to loop
1401
+ over the dimensions manually.
1402
+
1403
+ >>> from scipy.interpolate import splprep, splev
1404
+ >>> t = np.linspace(0, 1, 11)
1405
+ >>> x = np.sin(2*np.pi*t)
1406
+ >>> y = np.cos(2*np.pi*t)
1407
+ >>> (t, c, k), u = splprep([x, y], s=0)
1408
+
1409
+ Note that ``c`` is a list of two arrays of length 11.
1410
+
1411
+ >>> unew = np.arange(0, 1.01, 0.01)
1412
+ >>> out = splev(unew, (t, c, k))
1413
+
1414
+ To convert this spline to the power basis, we convert each
1415
+ component of the list of b-spline coefficients, ``c``, into the
1416
+ corresponding cubic polynomial.
1417
+
1418
+ >>> polys = [PPoly.from_spline((t, cj, k)) for cj in c]
1419
+ >>> polys[0].c.shape
1420
+ (4, 14)
1421
+
1422
+ Note that the coefficients of the polynomials `polys` are in the
1423
+ power basis and their dimensions reflect just that: here 4 is the order
1424
+ (degree+1), and 14 is the number of intervals---which is nothing but
1425
+ the length of the knot array of the original `tck` minus one.
1426
+
1427
+ Optionally, we can stack the components into a single `PPoly` along
1428
+ the third dimension:
1429
+
1430
+ >>> cc = np.dstack([p.c for p in polys]) # has shape = (4, 14, 2)
1431
+ >>> poly = PPoly(cc, polys[0].x)
1432
+ >>> np.allclose(poly(unew).T, # note the transpose to match `splev`
1433
+ ... out, atol=1e-15)
1434
+ True
1435
+
1436
+ """
1437
+ if isinstance(tck, BSpline):
1438
+ t, c, k = tck.tck
1439
+ if extrapolate is None:
1440
+ extrapolate = tck.extrapolate
1441
+ else:
1442
+ t, c, k = tck
1443
+
1444
+ cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
1445
+ for m in range(k, -1, -1):
1446
+ y = _fitpack_py.splev(t[:-1], tck, der=m)
1447
+ cvals[k - m, :] = y/spec.gamma(m+1)
1448
+
1449
+ return cls.construct_fast(cvals, t, extrapolate)
1450
+
1451
+ @classmethod
1452
+ def from_bernstein_basis(cls, bp, extrapolate=None):
1453
+ """
1454
+ Construct a piecewise polynomial in the power basis
1455
+ from a polynomial in Bernstein basis.
1456
+
1457
+ Parameters
1458
+ ----------
1459
+ bp : BPoly
1460
+ A Bernstein basis polynomial, as created by BPoly
1461
+ extrapolate : bool or 'periodic', optional
1462
+ If bool, determines whether to extrapolate to out-of-bounds points
1463
+ based on first and last intervals, or to return NaNs.
1464
+ If 'periodic', periodic extrapolation is used. Default is True.
1465
+ """
1466
+ if not isinstance(bp, BPoly):
1467
+ raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
1468
+ "Got %s instead." % type(bp))
1469
+
1470
+ dx = np.diff(bp.x)
1471
+ k = bp.c.shape[0] - 1 # polynomial order
1472
+
1473
+ rest = (None,)*(bp.c.ndim-2)
1474
+
1475
+ c = np.zeros_like(bp.c)
1476
+ for a in range(k+1):
1477
+ factor = (-1)**a * comb(k, a) * bp.c[a]
1478
+ for s in range(a, k+1):
1479
+ val = comb(k-a, s-a) * (-1)**s
1480
+ c[k-s] += factor * val / dx[(slice(None),)+rest]**s
1481
+
1482
+ if extrapolate is None:
1483
+ extrapolate = bp.extrapolate
1484
+
1485
+ return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
1486
+
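A sketch of the basis-conversion round trip: sending hypothetical power-basis coefficients through ``BPoly.from_power_basis`` and back through ``from_bernstein_basis`` leaves the values unchanged.

    import numpy as np
    from scipy.interpolate import PPoly, BPoly

    c = np.array([[1.0, -2.0], [0.5, 3.0], [2.0, 1.0]])   # two quadratic pieces
    p = PPoly(c, np.array([0.0, 1.0, 3.0]))

    bp = BPoly.from_power_basis(p)        # power basis -> Bernstein basis
    p2 = PPoly.from_bernstein_basis(bp)   # and back

    xs = np.linspace(0.0, 3.0, 13)
    print(np.allclose(p(xs), p2(xs)))     # True
    print(np.allclose(p.c, p2.c))         # True, up to rounding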
1487
+
1488
+ class BPoly(_PPolyBase):
1489
+ """Piecewise polynomial in terms of coefficients and breakpoints.
1490
+
1491
+ The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
1492
+ Bernstein polynomial basis::
1493
+
1494
+ S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
1495
+
1496
+ where ``k`` is the degree of the polynomial, and::
1497
+
1498
+ b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
1499
+
1500
+ with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
1501
+ coefficient.
1502
+
1503
+ Parameters
1504
+ ----------
1505
+ c : ndarray, shape (k, m, ...)
1506
+ Polynomial coefficients, order `k` and `m` intervals
1507
+ x : ndarray, shape (m+1,)
1508
+ Polynomial breakpoints. Must be sorted in either increasing or
1509
+ decreasing order.
1510
+ extrapolate : bool or 'periodic', optional
1511
+ If bool, determines whether to extrapolate to out-of-bounds points
1512
+ based on first and last intervals, or to return NaNs. If 'periodic',
1513
+ periodic extrapolation is used. Default is True.
1514
+ axis : int, optional
1515
+ Interpolation axis. Default is zero.
1516
+
1517
+ Attributes
1518
+ ----------
1519
+ x : ndarray
1520
+ Breakpoints.
1521
+ c : ndarray
1522
+ Coefficients of the polynomials. They are reshaped
1523
+ to a 3-D array with the last dimension representing
1524
+ the trailing dimensions of the original coefficient array.
1525
+ axis : int
1526
+ Interpolation axis.
1527
+
1528
+ Methods
1529
+ -------
1530
+ __call__
1531
+ extend
1532
+ derivative
1533
+ antiderivative
1534
+ integrate
1535
+ construct_fast
1536
+ from_power_basis
1537
+ from_derivatives
1538
+
1539
+ See also
1540
+ --------
1541
+ PPoly : piecewise polynomials in the power basis
1542
+
1543
+ Notes
1544
+ -----
1545
+ Properties of Bernstein polynomials are well documented in the literature,
1546
+ see for example [1]_ [2]_ [3]_.
1547
+
1548
+ References
1549
+ ----------
1550
+ .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
1551
+
1552
+ .. [2] Kenneth I. Joy, Bernstein polynomials,
1553
+ http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
1554
+
1555
+ .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
1556
+ vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
1557
+
1558
+ Examples
1559
+ --------
1560
+ >>> from scipy.interpolate import BPoly
1561
+ >>> x = [0, 1]
1562
+ >>> c = [[1], [2], [3]]
1563
+ >>> bp = BPoly(c, x)
1564
+
1565
+ This creates a 2nd order polynomial
1566
+
1567
+ .. math::
1568
+
1569
+ B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3
1570
+ \\times b_{2, 2}(x) \\\\
1571
+ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
1572
+
1573
+ """ # noqa: E501
1574
+
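A numeric check of the explicit form written out in the docstring example: evaluating the ``BPoly`` agrees with summing the three Bernstein basis functions by hand.

    from scipy.interpolate import BPoly

    bp = BPoly([[1], [2], [3]], [0, 1])
    t = 0.3
    by_hand = 1*(1 - t)**2 + 2 * 2*t*(1 - t) + 3 * t**2
    print(bp(t), by_hand)     # both 1.6 (up to rounding)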
1575
+ def _evaluate(self, x, nu, extrapolate, out):
1576
+ _ppoly.evaluate_bernstein(
1577
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
1578
+ self.x, x, nu, bool(extrapolate), out)
1579
+
1580
+ def derivative(self, nu=1):
1581
+ """
1582
+ Construct a new piecewise polynomial representing the derivative.
1583
+
1584
+ Parameters
1585
+ ----------
1586
+ nu : int, optional
1587
+ Order of derivative to evaluate. Default is 1, i.e., compute the
1588
+ first derivative. If negative, the antiderivative is returned.
1589
+
1590
+ Returns
1591
+ -------
1592
+ bp : BPoly
1593
+ Piecewise polynomial of order k - nu representing the derivative of
1594
+ this polynomial.
1595
+
1596
+ """
1597
+ if nu < 0:
1598
+ return self.antiderivative(-nu)
1599
+
1600
+ if nu > 1:
1601
+ bp = self
1602
+ for k in range(nu):
1603
+ bp = bp.derivative()
1604
+ return bp
1605
+
1606
+ # reduce order
1607
+ if nu == 0:
1608
+ c2 = self.c.copy()
1609
+ else:
1610
+ # For a polynomial
1611
+ # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
1612
+ # we use the fact that
1613
+ # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
1614
+ # which leads to
1615
+ # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
1616
+ #
1617
+ # finally, for an interval [y, y + dy] with dy != 1,
1618
+ # we need to correct for an extra power of dy
1619
+
1620
+ rest = (None,)*(self.c.ndim-2)
1621
+
1622
+ k = self.c.shape[0] - 1
1623
+ dx = np.diff(self.x)[(None, slice(None))+rest]
1624
+ c2 = k * np.diff(self.c, axis=0) / dx
1625
+
1626
+ if c2.shape[0] == 0:
1627
+ # derivative of order 0 is zero
1628
+ c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
1629
+
1630
+ # construct a compatible polynomial
1631
+ return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
1632
+
1633
+ def antiderivative(self, nu=1):
1634
+ """
1635
+ Construct a new piecewise polynomial representing the antiderivative.
1636
+
1637
+ Parameters
1638
+ ----------
1639
+ nu : int, optional
1640
+ Order of antiderivative to evaluate. Default is 1, i.e., compute
1641
+ the first integral. If negative, the derivative is returned.
1642
+
1643
+ Returns
1644
+ -------
1645
+ bp : BPoly
1646
+ Piecewise polynomial of order k + nu representing the
1647
+ antiderivative of this polynomial.
1648
+
1649
+ Notes
1650
+ -----
1651
+ If antiderivative is computed and ``self.extrapolate='periodic'``,
1652
+ it will be set to False for the returned instance. This is done because
1653
+ the antiderivative is no longer periodic and its correct evaluation
1654
+ outside of the initially given x interval is difficult.
1655
+ """
1656
+ if nu <= 0:
1657
+ return self.derivative(-nu)
1658
+
1659
+ if nu > 1:
1660
+ bp = self
1661
+ for k in range(nu):
1662
+ bp = bp.antiderivative()
1663
+ return bp
1664
+
1665
+ # Construct the indefinite integrals on individual intervals
1666
+ c, x = self.c, self.x
1667
+ k = c.shape[0]
1668
+ c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
1669
+
1670
+ c2[1:, ...] = np.cumsum(c, axis=0) / k
1671
+ delta = x[1:] - x[:-1]
1672
+ c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
1673
+
1674
+ # Now fix continuity: on the very first interval, take the integration
1675
+ # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
1676
+ # the integration constant is then equal to the jump of the `bp` at x_j.
1677
+ # The latter is given by the coefficient of B_{n+1, n+1}
1678
+ # *on the previous interval* (other B. polynomials are zero at the
1679
+ # breakpoint). Finally, use the fact that BPs form a partition of unity.
1680
+ c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
1681
+
1682
+ if self.extrapolate == 'periodic':
1683
+ extrapolate = False
1684
+ else:
1685
+ extrapolate = self.extrapolate
1686
+
1687
+ return self.construct_fast(c2, x, extrapolate, axis=self.axis)
1688
+
1689
+ def integrate(self, a, b, extrapolate=None):
1690
+ """
1691
+ Compute a definite integral over a piecewise polynomial.
1692
+
1693
+ Parameters
1694
+ ----------
1695
+ a : float
1696
+ Lower integration bound
1697
+ b : float
1698
+ Upper integration bound
1699
+ extrapolate : {bool, 'periodic', None}, optional
1700
+ Whether to extrapolate to out-of-bounds points based on first
1701
+ and last intervals, or to return NaNs. If 'periodic', periodic
1702
+ extrapolation is used. If None (default), use `self.extrapolate`.
1703
+
1704
+ Returns
1705
+ -------
1706
+ array_like
1707
+ Definite integral of the piecewise polynomial over [a, b]
1708
+
1709
+ """
1710
+ # XXX: can probably use instead the fact that
1711
+ # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
1712
+ ib = self.antiderivative()
1713
+ if extrapolate is None:
1714
+ extrapolate = self.extrapolate
1715
+
1716
+ # ib.extrapolate shouldn't be 'periodic', it is converted to
1717
+ # False for 'periodic' in the antiderivative() call.
1718
+ if extrapolate != 'periodic':
1719
+ ib.extrapolate = extrapolate
1720
+
1721
+ if extrapolate == 'periodic':
1722
+ # Split the integral into the part over period (can be several
1723
+ # of them) and the remaining part.
1724
+
1725
+ # For simplicity and clarity convert to a <= b case.
1726
+ if a <= b:
1727
+ sign = 1
1728
+ else:
1729
+ a, b = b, a
1730
+ sign = -1
1731
+
1732
+ xs, xe = self.x[0], self.x[-1]
1733
+ period = xe - xs
1734
+ interval = b - a
1735
+ n_periods, left = divmod(interval, period)
1736
+ res = n_periods * (ib(xe) - ib(xs))
1737
+
1738
+ # Map a and b to [xs, xe].
1739
+ a = xs + (a - xs) % period
1740
+ b = a + left
1741
+
1742
+ # If b <= xe then we need to integrate over [a, b], otherwise
1743
+ # over [a, xe] and from xs to what remains.
1744
+ if b <= xe:
1745
+ res += ib(b) - ib(a)
1746
+ else:
1747
+ res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
1748
+
1749
+ return sign * res
1750
+ else:
1751
+ return ib(b) - ib(a)
1752
+
1753
+ def extend(self, c, x):
1754
+ k = max(self.c.shape[0], c.shape[0])
1755
+ self.c = self._raise_degree(self.c, k - self.c.shape[0])
1756
+ c = self._raise_degree(c, k - c.shape[0])
1757
+ return _PPolyBase.extend(self, c, x)
1758
+ extend.__doc__ = _PPolyBase.extend.__doc__
1759
+
1760
+ @classmethod
1761
+ def from_power_basis(cls, pp, extrapolate=None):
1762
+ """
1763
+ Construct a piecewise polynomial in Bernstein basis
1764
+ from a power basis polynomial.
1765
+
1766
+ Parameters
1767
+ ----------
1768
+ pp : PPoly
1769
+ A piecewise polynomial in the power basis
1770
+ extrapolate : bool or 'periodic', optional
1771
+ If bool, determines whether to extrapolate to out-of-bounds points
1772
+ based on first and last intervals, or to return NaNs.
1773
+ If 'periodic', periodic extrapolation is used. Default is True.
1774
+ """
1775
+ if not isinstance(pp, PPoly):
1776
+ raise TypeError(".from_power_basis only accepts PPoly instances. "
1777
+ "Got %s instead." % type(pp))
1778
+
1779
+ dx = np.diff(pp.x)
1780
+ k = pp.c.shape[0] - 1 # polynomial order
1781
+
1782
+ rest = (None,)*(pp.c.ndim-2)
1783
+
1784
+ c = np.zeros_like(pp.c)
1785
+ for a in range(k+1):
1786
+ factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
1787
+ for j in range(k-a, k+1):
1788
+ c[j] += factor * comb(j, k-a)
1789
+
1790
+ if extrapolate is None:
1791
+ extrapolate = pp.extrapolate
1792
+
1793
+ return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
1794
+
1795
+ @classmethod
1796
+ def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
1797
+ """Construct a piecewise polynomial in the Bernstein basis,
1798
+ compatible with the specified values and derivatives at breakpoints.
1799
+
1800
+ Parameters
1801
+ ----------
1802
+ xi : array_like
1803
+ sorted 1-D array of x-coordinates
1804
+ yi : array_like or list of array_likes
1805
+ ``yi[i][j]`` is the ``j``\\ th derivative known at ``xi[i]``
1806
+ orders : None or int or array_like of ints. Default: None.
1807
+ Specifies the degree of local polynomials. If not None, some
1808
+ derivatives are ignored.
1809
+ extrapolate : bool or 'periodic', optional
1810
+ If bool, determines whether to extrapolate to out-of-bounds points
1811
+ based on first and last intervals, or to return NaNs.
1812
+ If 'periodic', periodic extrapolation is used. Default is True.
1813
+
1814
+ Notes
1815
+ -----
1816
+ If ``k`` derivatives are specified at a breakpoint ``x``, the
1817
+ constructed polynomial is exactly ``k`` times continuously
1818
+ differentiable at ``x``, unless the ``order`` is provided explicitly.
1819
+ In the latter case, the smoothness of the polynomial at
1820
+ the breakpoint is controlled by the ``order``.
1821
+
1822
+ Deduces the number of derivatives to match at each end
1823
+ from ``order`` and the number of derivatives available. If
1824
+ possible it uses the same number of derivatives from
1825
+ each end; if the number is odd it tries to take the
1826
+ extra one from y2. In any case if not enough derivatives
1827
+ are available at one end or another it draws enough to
1828
+ make up the total from the other end.
1829
+
1830
+ If the order is too high and not enough derivatives are available,
1831
+ an exception is raised.
1832
+
1833
+ Examples
1834
+ --------
1835
+
1836
+ >>> from scipy.interpolate import BPoly
1837
+ >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
1838
+
1839
+ Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
1840
+ such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
1841
+
1842
+ >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
1843
+
1844
+ Creates a piecewise polynomial `f(x)`, such that
1845
+ `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
1846
+ Based on the number of derivatives provided, the order of the
1847
+ local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
1848
+ Notice that no restriction is imposed on the derivatives at
1849
+ ``x = 1`` and ``x = 2``.
1850
+
1851
+ Indeed, the explicit form of the polynomial is::
1852
+
1853
+ f(x) = | x * (1 - x), 0 <= x < 1
1854
+ | 2 * (x - 1), 1 <= x <= 2
1855
+
1856
+ So that f'(1-0) = -1 and f'(1+0) = 2
1857
+
1858
+ """
1859
+ xi = np.asarray(xi)
1860
+ if len(xi) != len(yi):
1861
+ raise ValueError("xi and yi need to have the same length")
1862
+ if np.any(xi[1:] - xi[:-1] <= 0):
1863
+ raise ValueError("x coordinates are not in increasing order")
1864
+
1865
+ # number of intervals
1866
+ m = len(xi) - 1
1867
+
1868
+ # global poly order is k-1, local orders are <=k and can vary
1869
+ try:
1870
+ k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
1871
+ except TypeError as e:
1872
+ raise ValueError(
1873
+ "Using a 1-D array for y? Please .reshape(-1, 1)."
1874
+ ) from e
1875
+
1876
+ if orders is None:
1877
+ orders = [None] * m
1878
+ else:
1879
+ if isinstance(orders, (int, np.integer)):
1880
+ orders = [orders] * m
1881
+ k = max(k, max(orders))
1882
+
1883
+ if any(o <= 0 for o in orders):
1884
+ raise ValueError("Orders must be positive.")
1885
+
1886
+ c = []
1887
+ for i in range(m):
1888
+ y1, y2 = yi[i], yi[i+1]
1889
+ if orders[i] is None:
1890
+ n1, n2 = len(y1), len(y2)
1891
+ else:
1892
+ n = orders[i]+1
1893
+ n1 = min(n//2, len(y1))
1894
+ n2 = min(n - n1, len(y2))
1895
+ n1 = min(n - n2, len(y1))
1896
+ if n1+n2 != n:
1897
+ mesg = ("Point %g has %d derivatives, point %g"
1898
+ " has %d derivatives, but order %d requested" % (
1899
+ xi[i], len(y1), xi[i+1], len(y2), orders[i]))
1900
+ raise ValueError(mesg)
1901
+
1902
+ if not (n1 <= len(y1) and n2 <= len(y2)):
1903
+ raise ValueError("`order` input incompatible with"
1904
+ " length y1 or y2.")
1905
+
1906
+ b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
1907
+ y1[:n1], y2[:n2])
1908
+ if len(b) < k:
1909
+ b = BPoly._raise_degree(b, k - len(b))
1910
+ c.append(b)
1911
+
1912
+ c = np.asarray(c)
1913
+ return cls(c.swapaxes(0, 1), xi, extrapolate)
1914
+
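A sketch verifying the first docstring example numerically: the cubic returned by ``from_derivatives`` hits the prescribed values and first derivatives at both endpoints.

    from scipy.interpolate import BPoly

    bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
    dbp = bp.derivative()

    print(bp(0.0), dbp(0.0))   # 1.0  2.0
    print(bp(1.0), dbp(1.0))   # 3.0  4.0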
1915
+ @staticmethod
1916
+ def _construct_from_derivatives(xa, xb, ya, yb):
1917
+ r"""Compute the coefficients of a polynomial in the Bernstein basis
1918
+ given the values and derivatives at the edges.
1919
+
1920
+ Return the coefficients of a polynomial in the Bernstein basis
1921
+ defined on ``[xa, xb]`` and having the values and derivatives at the
1922
+ endpoints `xa` and `xb` as specified by `ya` and `yb`.
1923
+ The polynomial constructed is of the minimal possible degree, i.e.,
1924
+ if the lengths of `ya` and `yb` are `na` and `nb`, the degree
1925
+ of the polynomial is ``na + nb - 1``.
1926
+
1927
+ Parameters
1928
+ ----------
1929
+ xa : float
1930
+ Left-hand end point of the interval
1931
+ xb : float
1932
+ Right-hand end point of the interval
1933
+ ya : array_like
1934
+ Derivatives at `xa`. ``ya[0]`` is the value of the function, and
1935
+ ``ya[i]`` for ``i > 0`` is the value of the ``i``\ th derivative.
1936
+ yb : array_like
1937
+ Derivatives at `xb`.
1938
+
1939
+ Returns
1940
+ -------
1941
+ array
1942
+ coefficient array of a polynomial having specified derivatives
1943
+
1944
+ Notes
1945
+ -----
1946
+ This uses several facts about Bernstein basis functions.
1947
+ First of all,
1948
+
1949
+ .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
1950
+
1951
+ If B(x) is a linear combination of the form
1952
+
1953
+ .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
1954
+
1955
+ then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
1956
+ Iterating the latter one, one finds for the q-th derivative
1957
+
1958
+ .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
1959
+
1960
+ with
1961
+
1962
+ .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
1963
+
1964
+ This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
1965
+ `c_q` are found one by one by iterating `q = 0, ..., na`.
1966
+
1967
+ At ``x = xb`` it's the same with ``a = n - q``.
1968
+
1969
+ """
1970
+ ya, yb = np.asarray(ya), np.asarray(yb)
1971
+ if ya.shape[1:] != yb.shape[1:]:
1972
+ raise ValueError('Shapes of ya {} and yb {} are incompatible'
1973
+ .format(ya.shape, yb.shape))
1974
+
1975
+ dta, dtb = ya.dtype, yb.dtype
1976
+ if (np.issubdtype(dta, np.complexfloating) or
1977
+ np.issubdtype(dtb, np.complexfloating)):
1978
+ dt = np.complex128
1979
+ else:
1980
+ dt = np.float64
1981
+
1982
+ na, nb = len(ya), len(yb)
1983
+ n = na + nb
1984
+
1985
+ c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
1986
+
1987
+ # compute coefficients of a polynomial degree na+nb-1
1988
+ # walk left-to-right
1989
+ for q in range(0, na):
1990
+ c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
1991
+ for j in range(0, q):
1992
+ c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
1993
+
1994
+ # now walk right-to-left
1995
+ for q in range(0, nb):
1996
+ c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
1997
+ for j in range(0, q):
1998
+ c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
1999
+
2000
+ return c
2001
+
2002
+ @staticmethod
2003
+ def _raise_degree(c, d):
2004
+ r"""Raise a degree of a polynomial in the Bernstein basis.
2005
+
2006
+ Given the coefficients of a polynomial degree `k`, return (the
2007
+ coefficients of) the equivalent polynomial of degree `k+d`.
2008
+
2009
+ Parameters
2010
+ ----------
2011
+ c : array_like
2012
+ coefficient array, 1-D
2013
+ d : integer
2014
+
2015
+ Returns
2016
+ -------
2017
+ array
2018
+ coefficient array, 1-D array of length `c.shape[0] + d`
2019
+
2020
+ Notes
2021
+ -----
2022
+ This uses the fact that a Bernstein polynomial `b_{a, k}` can be
2023
+ identically represented as a linear combination of polynomials of
2024
+ a higher degree `k+d`:
2025
+
2026
+ .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
2027
+ comb(d, j) / comb(k+d, a+j)
2028
+
2029
+ """
2030
+ if d == 0:
2031
+ return c
2032
+
2033
+ k = c.shape[0] - 1
2034
+ out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
2035
+
2036
+ for a in range(c.shape[0]):
2037
+ f = c[a] * comb(k, a)
2038
+ for j in range(d+1):
2039
+ out[a+j] += f * comb(d, j) / comb(k+d, a+j)
2040
+ return out
2041
+
2042
+
2043
+ class NdPPoly:
2044
+ """
2045
+ Piecewise tensor product polynomial
2046
+
2047
+ The value at point ``xp = (x', y', z', ...)`` is evaluated by first
2048
+ computing the interval indices `i` such that::
2049
+
2050
+ x[0][i[0]] <= x' < x[0][i[0]+1]
2051
+ x[1][i[1]] <= y' < x[1][i[1]+1]
2052
+ ...
2053
+
2054
+ and then computing::
2055
+
2056
+ S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
2057
+ * (xp[0] - x[0][i[0]])**m0
2058
+ * ...
2059
+ * (xp[n] - x[n][i[n]])**mn
2060
+ for m0 in range(k[0]+1)
2061
+ ...
2062
+ for mn in range(k[n]+1))
2063
+
2064
+ where ``k[j]`` is the degree of the polynomial in dimension j. This
2065
+ representation is the piecewise multivariate power basis.
2066
+
2067
+ Parameters
2068
+ ----------
2069
+ c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
2070
+ Polynomial coefficients, with polynomial order `kj` and
2071
+ `mj+1` intervals for each dimension `j`.
2072
+ x : ndim-tuple of ndarrays, shapes (mj+1,)
2073
+ Polynomial breakpoints for each dimension. These must be
2074
+ sorted in increasing order.
2075
+ extrapolate : bool, optional
2076
+ Whether to extrapolate to out-of-bounds points based on first
2077
+ and last intervals, or to return NaNs. Default: True.
2078
+
2079
+ Attributes
2080
+ ----------
2081
+ x : tuple of ndarrays
2082
+ Breakpoints.
2083
+ c : ndarray
2084
+ Coefficients of the polynomials.
2085
+
2086
+ Methods
2087
+ -------
2088
+ __call__
2089
+ derivative
2090
+ antiderivative
2091
+ integrate
2092
+ integrate_1d
2093
+ construct_fast
2094
+
2095
+ See also
2096
+ --------
2097
+ PPoly : piecewise polynomials in 1D
2098
+
2099
+ Notes
2100
+ -----
2101
+ High-order polynomials in the power basis can be numerically
2102
+ unstable.
2103
+
2104
+ """
2105
+
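A sketch of the tensor-product evaluation described in the docstring: one interval and degree 1 per dimension, with the single non-zero coefficient placed so that the polynomial is ``p(x, y) = x*y`` (hypothetical data).

    import numpy as np
    from scipy.interpolate import NdPPoly

    c = np.zeros((2, 2, 1, 1))         # (k0+1, k1+1, m0, m1)
    c[0, 0, 0, 0] = 1.0                # the x**1 * y**1 term
    p = NdPPoly(c, (np.array([0.0, 1.0]), np.array([0.0, 1.0])))

    pts = np.array([[0.25, 0.5], [0.75, 0.75]])
    print(p(pts))                          # [0.125  0.5625]
    print(pts[:, 0] * pts[:, 1])           # same products, computed directly
    print(p.integrate(((0, 1), (0, 1))))   # 0.25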
2106
+ def __init__(self, c, x, extrapolate=None):
2107
+ self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
2108
+ self.c = np.asarray(c)
2109
+ if extrapolate is None:
2110
+ extrapolate = True
2111
+ self.extrapolate = bool(extrapolate)
2112
+
2113
+ ndim = len(self.x)
2114
+ if any(v.ndim != 1 for v in self.x):
2115
+ raise ValueError("x arrays must all be 1-dimensional")
2116
+ if any(v.size < 2 for v in self.x):
2117
+ raise ValueError("x arrays must all contain at least 2 points")
2118
+ if c.ndim < 2*ndim:
2119
+ raise ValueError("c must have at least 2*len(x) dimensions")
2120
+ if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
2121
+ raise ValueError("x-coordinates are not in increasing order")
2122
+ if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
2123
+ raise ValueError("x and c do not agree on the number of intervals")
2124
+
2125
+ dtype = self._get_dtype(self.c.dtype)
2126
+ self.c = np.ascontiguousarray(self.c, dtype=dtype)
2127
+
2128
+ @classmethod
2129
+ def construct_fast(cls, c, x, extrapolate=None):
2130
+ """
2131
+ Construct the piecewise polynomial without making checks.
2132
+
2133
+ Takes the same parameters as the constructor. Input arguments
2134
+ ``c`` and ``x`` must be arrays of the correct shape and type. The
2135
+ ``c`` array can only be of dtypes float and complex, and ``x``
2136
+ array must have dtype float.
2137
+
2138
+ """
2139
+ self = object.__new__(cls)
2140
+ self.c = c
2141
+ self.x = x
2142
+ if extrapolate is None:
2143
+ extrapolate = True
2144
+ self.extrapolate = extrapolate
2145
+ return self
2146
+
2147
+ def _get_dtype(self, dtype):
2148
+ if np.issubdtype(dtype, np.complexfloating) \
2149
+ or np.issubdtype(self.c.dtype, np.complexfloating):
2150
+ return np.complex128
2151
+ else:
2152
+ return np.float64
2153
+
2154
+ def _ensure_c_contiguous(self):
2155
+ if not self.c.flags.c_contiguous:
2156
+ self.c = self.c.copy()
2157
+ if not isinstance(self.x, tuple):
2158
+ self.x = tuple(self.x)
2159
+
2160
+ def __call__(self, x, nu=None, extrapolate=None):
2161
+ """
2162
+ Evaluate the piecewise polynomial or its derivative
2163
+
2164
+ Parameters
2165
+ ----------
2166
+ x : array-like
2167
+ Points to evaluate the interpolant at.
2168
+ nu : tuple, optional
2169
+ Orders of derivatives to evaluate. Each must be non-negative.
2170
+ extrapolate : bool, optional
2171
+ Whether to extrapolate to out-of-bounds points based on first
2172
+ and last intervals, or to return NaNs.
2173
+
2174
+ Returns
2175
+ -------
2176
+ y : array-like
2177
+ Interpolated values. Shape is determined by replacing
2178
+ the interpolation axis in the original array with the shape of x.
2179
+
2180
+ Notes
2181
+ -----
2182
+ Derivatives are evaluated piecewise for each polynomial
2183
+ segment, even if the polynomial is not differentiable at the
2184
+ breakpoints. The polynomial intervals are considered half-open,
2185
+ ``[a, b)``, except for the last interval which is closed
2186
+ ``[a, b]``.
2187
+
2188
+ """
2189
+ if extrapolate is None:
2190
+ extrapolate = self.extrapolate
2191
+ else:
2192
+ extrapolate = bool(extrapolate)
2193
+
2194
+ ndim = len(self.x)
2195
+
2196
+ x = _ndim_coords_from_arrays(x)
2197
+ x_shape = x.shape
2198
+ x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)
2199
+
2200
+ if nu is None:
2201
+ nu = np.zeros((ndim,), dtype=np.intc)
2202
+ else:
2203
+ nu = np.asarray(nu, dtype=np.intc)
2204
+ if nu.ndim != 1 or nu.shape[0] != ndim:
2205
+ raise ValueError("invalid number of derivative orders nu")
2206
+
2207
+ dim1 = prod(self.c.shape[:ndim])
2208
+ dim2 = prod(self.c.shape[ndim:2*ndim])
2209
+ dim3 = prod(self.c.shape[2*ndim:])
2210
+ ks = np.array(self.c.shape[:ndim], dtype=np.intc)
2211
+
2212
+ out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
2213
+ self._ensure_c_contiguous()
2214
+
2215
+ _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
2216
+ self.x,
2217
+ ks,
2218
+ x,
2219
+ nu,
2220
+ bool(extrapolate),
2221
+ out)
2222
+
2223
+ return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
2224
+
2225
+ def _derivative_inplace(self, nu, axis):
2226
+ """
2227
+ Compute 1-D derivative along a selected dimension in-place
2228
+ May result in a non-contiguous c array.
2229
+ """
2230
+ if nu < 0:
2231
+ return self._antiderivative_inplace(-nu, axis)
2232
+
2233
+ ndim = len(self.x)
2234
+ axis = axis % ndim
2235
+
2236
+ # reduce order
2237
+ if nu == 0:
2238
+ # noop
2239
+ return
2240
+ else:
2241
+ sl = [slice(None)]*ndim
2242
+ sl[axis] = slice(None, -nu, None)
2243
+ c2 = self.c[tuple(sl)]
2244
+
2245
+ if c2.shape[axis] == 0:
2246
+ # derivative of order 0 is zero
2247
+ shp = list(c2.shape)
2248
+ shp[axis] = 1
2249
+ c2 = np.zeros(shp, dtype=c2.dtype)
2250
+
2251
+ # multiply by the correct rising factorials
2252
+ factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
2253
+ sl = [None]*c2.ndim
2254
+ sl[axis] = slice(None)
2255
+ c2 *= factor[tuple(sl)]
2256
+
2257
+ self.c = c2
2258
+
2259
+ def _antiderivative_inplace(self, nu, axis):
2260
+ """
2261
+ Compute 1-D antiderivative along a selected dimension
2262
+ May result in a non-contiguous c array.
2263
+ """
2264
+ if nu <= 0:
2265
+ return self._derivative_inplace(-nu, axis)
2266
+
2267
+ ndim = len(self.x)
2268
+ axis = axis % ndim
2269
+
2270
+ perm = list(range(ndim))
2271
+ perm[0], perm[axis] = perm[axis], perm[0]
2272
+ perm = perm + list(range(ndim, self.c.ndim))
2273
+
2274
+ c = self.c.transpose(perm)
2275
+
2276
+ c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
2277
+ dtype=c.dtype)
2278
+ c2[:-nu] = c
2279
+
2280
+ # divide by the correct rising factorials
2281
+ factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
2282
+ c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
2283
+
2284
+ # fix continuity of added degrees of freedom
2285
+ perm2 = list(range(c2.ndim))
2286
+ perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
2287
+
2288
+ c2 = c2.transpose(perm2)
2289
+ c2 = c2.copy()
2290
+ _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
2291
+ self.x[axis], nu-1)
2292
+
2293
+ c2 = c2.transpose(perm2)
2294
+ c2 = c2.transpose(perm)
2295
+
2296
+ # Done
2297
+ self.c = c2
2298
+
2299
+ def derivative(self, nu):
2300
+ """
2301
+ Construct a new piecewise polynomial representing the derivative.
2302
+
2303
+ Parameters
2304
+ ----------
2305
+ nu : ndim-tuple of int
2306
+ Order of derivatives to evaluate for each dimension.
2307
+ If negative, the antiderivative is returned.
2308
+
2309
+ Returns
2310
+ -------
2311
+ pp : NdPPoly
2312
+ Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
2313
+ representing the derivative of this polynomial.
2314
+
2315
+ Notes
2316
+ -----
2317
+ Derivatives are evaluated piecewise for each polynomial
2318
+ segment, even if the polynomial is not differentiable at the
2319
+ breakpoints. The polynomial intervals in each dimension are
2320
+ considered half-open, ``[a, b)``, except for the last interval
2321
+ which is closed ``[a, b]``.
2322
+
2323
+ """
2324
+ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
2325
+
2326
+ for axis, n in enumerate(nu):
2327
+ p._derivative_inplace(n, axis)
2328
+
2329
+ p._ensure_c_contiguous()
2330
+ return p
2331
+
2332
+ def antiderivative(self, nu):
2333
+ """
2334
+ Construct a new piecewise polynomial representing the antiderivative.
2335
+
2336
+ Antiderivative is also the indefinite integral of the function,
2337
+ and derivative is its inverse operation.
2338
+
2339
+ Parameters
2340
+ ----------
2341
+ nu : ndim-tuple of int
2342
+ Order of derivatives to evaluate for each dimension.
2343
+ If negative, the derivative is returned.
2344
+
2345
+ Returns
2346
+ -------
2347
+ pp : PPoly
2348
+ Piecewise polynomial of order k2 = k + n representing
2349
+ the antiderivative of this polynomial.
2350
+
2351
+ Notes
2352
+ -----
2353
+ The antiderivative returned by this function is continuous and
2354
+ continuously differentiable to order n-1, up to floating point
2355
+ rounding error.
2356
+
2357
+ """
2358
+ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
2359
+
2360
+ for axis, n in enumerate(nu):
2361
+ p._antiderivative_inplace(n, axis)
2362
+
2363
+ p._ensure_c_contiguous()
2364
+ return p
2365
+
2366
+ def integrate_1d(self, a, b, axis, extrapolate=None):
2367
+ r"""
2368
+ Compute NdPPoly representation for one dimensional definite integral
2369
+
2370
+ The result is a piecewise polynomial representing the integral:
2371
+
2372
+ .. math::
2373
+
2374
+ p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
2375
+
2376
+ where the dimension integrated over is specified with the
2377
+ `axis` parameter.
2378
+
2379
+ Parameters
2380
+ ----------
2381
+ a, b : float
2382
+ Lower and upper bound for integration.
2383
+ axis : int
2384
+ Dimension over which to compute the 1-D integrals
2385
+ extrapolate : bool, optional
2386
+ Whether to extrapolate to out-of-bounds points based on first
2387
+ and last intervals, or to return NaNs.
2388
+
2389
+ Returns
2390
+ -------
2391
+ ig : NdPPoly or array-like
2392
+ Definite integral of the piecewise polynomial over [a, b].
2393
+ If the polynomial was 1D, an array is returned,
2394
+ otherwise, an NdPPoly object.
2395
+
2396
+ """
2397
+ if extrapolate is None:
2398
+ extrapolate = self.extrapolate
2399
+ else:
2400
+ extrapolate = bool(extrapolate)
2401
+
2402
+ ndim = len(self.x)
2403
+ axis = int(axis) % ndim
2404
+
2405
+ # reuse 1-D integration routines
2406
+ c = self.c
2407
+ swap = list(range(c.ndim))
2408
+ swap.insert(0, swap[axis])
2409
+ del swap[axis + 1]
2410
+ swap.insert(1, swap[ndim + axis])
2411
+ del swap[ndim + axis + 1]
2412
+
2413
+ c = c.transpose(swap)
2414
+ p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
2415
+ self.x[axis],
2416
+ extrapolate=extrapolate)
2417
+ out = p.integrate(a, b, extrapolate=extrapolate)
2418
+
2419
+ # Construct result
2420
+ if ndim == 1:
2421
+ return out.reshape(c.shape[2:])
2422
+ else:
2423
+ c = out.reshape(c.shape[2:])
2424
+ x = self.x[:axis] + self.x[axis+1:]
2425
+ return self.construct_fast(c, x, extrapolate=extrapolate)
2426
+
2427
+ def integrate(self, ranges, extrapolate=None):
2428
+ """
2429
+ Compute a definite integral over a piecewise polynomial.
2430
+
2431
+ Parameters
2432
+ ----------
2433
+ ranges : ndim-tuple of 2-tuples float
2434
+ Sequence of lower and upper bounds for each dimension,
2435
+ ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
2436
+ extrapolate : bool, optional
2437
+ Whether to extrapolate to out-of-bounds points based on first
2438
+ and last intervals, or to return NaNs.
2439
+
2440
+ Returns
2441
+ -------
2442
+ ig : array_like
2443
+ Definite integral of the piecewise polynomial over
2444
+ [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
2445
+
2446
+ """
2447
+
2448
+ ndim = len(self.x)
2449
+
2450
+ if extrapolate is None:
2451
+ extrapolate = self.extrapolate
2452
+ else:
2453
+ extrapolate = bool(extrapolate)
2454
+
2455
+ if not hasattr(ranges, '__len__') or len(ranges) != ndim:
2456
+ raise ValueError("Range not a sequence of correct length")
2457
+
2458
+ self._ensure_c_contiguous()
2459
+
2460
+ # Reuse 1D integration routine
2461
+ c = self.c
2462
+ for n, (a, b) in enumerate(ranges):
2463
+ swap = list(range(c.ndim))
2464
+ swap.insert(1, swap[ndim - n])
2465
+ del swap[ndim - n + 1]
2466
+
2467
+ c = c.transpose(swap)
2468
+
2469
+ p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
2470
+ out = p.integrate(a, b, extrapolate=extrapolate)
2471
+ c = out.reshape(c.shape[2:])
2472
+
2473
+ return c
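
The NdPPoly methods added above (``derivative``, ``antiderivative``, ``integrate_1d``, ``integrate``) carry no usage examples in their docstrings; the following is a minimal illustrative sketch, separate from the diff itself, using arbitrary coefficients:

    import numpy as np
    from scipy.interpolate import NdPPoly

    # 2-D piecewise polynomial: degrees (3, 2), breakpoints x and y.
    x = np.linspace(0.0, 1.0, 5)
    y = np.linspace(0.0, 1.0, 4)
    c = np.random.default_rng(0).random((4, 3, len(x) - 1, len(y) - 1))
    p = NdPPoly(c, (x, y))

    print(p.derivative((1, 1))((0.3, 0.6)))        # mixed partial d^2 p / dx dy at a point
    print(p.integrate(((0.0, 1.0), (0.0, 1.0))))   # definite integral over the unit square
    q = p.integrate_1d(0.0, 1.0, axis=0)           # integrate out x; result is piecewise in y
    print(q((0.6,)))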
venv/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py ADDED
@@ -0,0 +1,358 @@
1
+ import itertools
2
+ import functools
3
+ import operator
4
+ import numpy as np
5
+
6
+ from math import prod
7
+
8
+ from . import _bspl # type: ignore
9
+
10
+ import scipy.sparse.linalg as ssl
11
+ from scipy.sparse import csr_array
12
+
13
+ from ._bsplines import _not_a_knot
14
+
15
+ __all__ = ["NdBSpline"]
16
+
17
+
18
+ def _get_dtype(dtype):
19
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
20
+ if np.issubdtype(dtype, np.complexfloating):
21
+ return np.complex128
22
+ else:
23
+ return np.float64
24
+
25
+
26
+ class NdBSpline:
27
+ """Tensor product spline object.
28
+
29
+ The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
30
+ combination of products of one-dimensional b-splines in each of the ``N``
31
+ dimensions::
32
+
33
+ c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
34
+
35
+
36
+ Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
37
+ ``t`` evaluated at ``x``.
38
+
39
+ Parameters
40
+ ----------
41
+ t : tuple of 1D ndarrays
42
+ knot vectors in directions 1, 2, ... N,
43
+ ``len(t[i]) == n[i] + k + 1``
44
+ c : ndarray, shape (n1, n2, ..., nN, ...)
45
+ b-spline coefficients
46
+ k : int or length-d tuple of integers
47
+ spline degrees.
48
+ A single integer is interpreted as having this degree for
49
+ all dimensions.
50
+ extrapolate : bool, optional
51
+ Whether to extrapolate out-of-bounds inputs, or return `nan`.
52
+ Default is to extrapolate.
53
+
54
+ Attributes
55
+ ----------
56
+ t : tuple of ndarrays
57
+ Knots vectors.
58
+ c : ndarray
59
+ Coefficients of the tensor-product spline.
60
+ k : tuple of integers
61
+ Degrees for each dimension.
62
+ extrapolate : bool, optional
63
+ Whether to extrapolate or return nans for out-of-bounds inputs.
64
+ Defaults to true.
65
+
66
+ Methods
67
+ -------
68
+ __call__
69
+ design_matrix
70
+
71
+ See Also
72
+ --------
73
+ BSpline : a one-dimensional B-spline object
74
+ NdPPoly : an N-dimensional piecewise tensor product polynomial
75
+
76
+ """
77
+ def __init__(self, t, c, k, *, extrapolate=None):
78
+ ndim = len(t)
79
+
80
+ try:
81
+ len(k)
82
+ except TypeError:
83
+ # make k a tuple
84
+ k = (k,)*ndim
85
+
86
+ if len(k) != ndim:
87
+ raise ValueError(f"{len(t) = } != {len(k) = }.")
88
+
89
+ self.k = tuple(operator.index(ki) for ki in k)
90
+ self.t = tuple(np.ascontiguousarray(ti, dtype=float) for ti in t)
91
+ self.c = np.asarray(c)
92
+
93
+ if extrapolate is None:
94
+ extrapolate = True
95
+ self.extrapolate = bool(extrapolate)
96
+
97
+ self.c = np.asarray(c)
98
+
99
+ for d in range(ndim):
100
+ td = self.t[d]
101
+ kd = self.k[d]
102
+ n = td.shape[0] - kd - 1
103
+ if kd < 0:
104
+ raise ValueError(f"Spline degree in dimension {d} cannot be"
105
+ f" negative.")
106
+ if td.ndim != 1:
107
+ raise ValueError(f"Knot vector in dimension {d} must be"
108
+ f" one-dimensional.")
109
+ if n < kd + 1:
110
+ raise ValueError(f"Need at least {2*kd + 2} knots for degree"
111
+ f" {kd} in dimension {d}.")
112
+ if (np.diff(td) < 0).any():
113
+ raise ValueError(f"Knots in dimension {d} must be in a"
114
+ f" non-decreasing order.")
115
+ if len(np.unique(td[kd:n + 1])) < 2:
116
+ raise ValueError(f"Need at least two internal knots in"
117
+ f" dimension {d}.")
118
+ if not np.isfinite(td).all():
119
+ raise ValueError(f"Knots in dimension {d} should not have"
120
+ f" nans or infs.")
121
+ if self.c.ndim < ndim:
122
+ raise ValueError(f"Coefficients must be at least"
123
+ f" {d}-dimensional.")
124
+ if self.c.shape[d] != n:
125
+ raise ValueError(f"Knots, coefficients and degree in dimension"
126
+ f" {d} are inconsistent:"
127
+ f" got {self.c.shape[d]} coefficients for"
128
+ f" {len(td)} knots, need at least {n} for"
129
+ f" k={k}.")
130
+
131
+ dt = _get_dtype(self.c.dtype)
132
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
133
+
134
+ def __call__(self, xi, *, nu=None, extrapolate=None):
135
+ """Evaluate the tensor product b-spline at ``xi``.
136
+
137
+ Parameters
138
+ ----------
139
+ xi : array_like, shape(..., ndim)
140
+ The coordinates to evaluate the interpolator at.
141
+ This can be a list or tuple of ndim-dimensional points
142
+ or an array with the shape (num_points, ndim).
143
+ nu : array_like, optional, shape (ndim,)
144
+ Orders of derivatives to evaluate. Each must be non-negative.
145
+ Defaults to the zeroth derivative.
146
+ extrapolate : bool, optional
147
+ Whether to extrapolate based on first and last intervals in each
148
+ dimension, or return `nan`. Default is ``self.extrapolate``.
149
+
150
+ Returns
151
+ -------
152
+ values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
153
+ Interpolated values at ``xi``
154
+ """
155
+ ndim = len(self.t)
156
+
157
+ if extrapolate is None:
158
+ extrapolate = self.extrapolate
159
+ extrapolate = bool(extrapolate)
160
+
161
+ if nu is None:
162
+ nu = np.zeros((ndim,), dtype=np.intc)
163
+ else:
164
+ nu = np.asarray(nu, dtype=np.intc)
165
+ if nu.ndim != 1 or nu.shape[0] != ndim:
166
+ raise ValueError(
167
+ f"invalid number of derivative orders {nu = } for "
168
+ f"ndim = {len(self.t)}.")
169
+ if any(nu < 0):
170
+ raise ValueError(f"derivatives must be positive, got {nu = }")
171
+
172
+ # prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
173
+ xi = np.asarray(xi, dtype=float)
174
+ xi_shape = xi.shape
175
+ xi = xi.reshape(-1, xi_shape[-1])
176
+ xi = np.ascontiguousarray(xi)
177
+
178
+ if xi_shape[-1] != ndim:
179
+ raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
180
+
181
+ # prepare k & t
182
+ _k = np.asarray(self.k, dtype=np.dtype("long"))
183
+
184
+ # pack the knots into a single array
185
+ len_t = [len(ti) for ti in self.t]
186
+ _t = np.empty((ndim, max(len_t)), dtype=float)
187
+ _t.fill(np.nan)
188
+ for d in range(ndim):
189
+ _t[d, :len(self.t[d])] = self.t[d]
190
+ len_t = np.asarray(len_t, dtype=np.dtype("long"))
191
+
192
+ # tabulate the flat indices for iterating over the (k+1)**ndim subarray
193
+ shape = tuple(kd + 1 for kd in self.k)
194
+ indices = np.unravel_index(np.arange(prod(shape)), shape)
195
+ _indices_k1d = np.asarray(indices, dtype=np.intp).T
196
+
197
+ # prepare the coefficients: flatten the trailing dimensions
198
+ c1 = self.c.reshape(self.c.shape[:ndim] + (-1,))
199
+ c1r = c1.ravel()
200
+
201
+ # replacement for np.ravel_multi_index for indexing of `c1`:
202
+ _strides_c1 = np.asarray([s // c1.dtype.itemsize
203
+ for s in c1.strides], dtype=np.intp)
204
+
205
+ num_c_tr = c1.shape[-1] # # of trailing coefficients
206
+ out = np.empty(xi.shape[:-1] + (num_c_tr,), dtype=c1.dtype)
207
+
208
+ _bspl.evaluate_ndbspline(xi,
209
+ _t,
210
+ len_t,
211
+ _k,
212
+ nu,
213
+ extrapolate,
214
+ c1r,
215
+ num_c_tr,
216
+ _strides_c1,
217
+ _indices_k1d,
218
+ out,)
219
+
220
+ return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
221
+
222
+ @classmethod
223
+ def design_matrix(cls, xvals, t, k, extrapolate=True):
224
+ """Construct the design matrix as a CSR format sparse array.
225
+
226
+ Parameters
227
+ ----------
228
+ xvals : ndarray, shape(npts, ndim)
229
+ Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
230
+ ``ndim``-dimensional array.
231
+ t : tuple of 1D ndarrays, length-ndim
232
+ Knot vectors in directions 1, 2, ... ndim,
233
+ k : int
234
+ B-spline degree.
235
+ extrapolate : bool, optional
236
+ Whether to extrapolate out-of-bounds values or raise a `ValueError`.
237
+
238
+ Returns
239
+ -------
240
+ design_matrix : a CSR array
241
+ Each row of the design matrix corresponds to a value in `xvals` and
242
+ contains values of b-spline basis elements which are non-zero
243
+ at this value.
244
+
245
+ """
246
+ xvals = np.asarray(xvals, dtype=float)
247
+ ndim = xvals.shape[-1]
248
+ if len(t) != ndim:
249
+ raise ValueError(
250
+ f"Data and knots are inconsistent: len(t) = {len(t)} for "
251
+ f" {ndim = }."
252
+ )
253
+ try:
254
+ len(k)
255
+ except TypeError:
256
+ # make k a tuple
257
+ k = (k,)*ndim
258
+
259
+ kk = np.asarray(k, dtype=np.int32)
260
+ data, indices, indptr = _bspl._colloc_nd(xvals, t, kk)
261
+ return csr_array((data, indices, indptr))
262
+
263
+
264
+ def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
265
+ # work around iterative solvers not accepting multiple r.h.s.
266
+
267
+ # also work around a.dtype == float64 and b.dtype == complex128
268
+ # cf https://github.com/scipy/scipy/issues/19644
269
+ if np.issubdtype(b.dtype, np.complexfloating):
270
+ real = _iter_solve(a, b.real, solver, **solver_args)
271
+ imag = _iter_solve(a, b.imag, solver, **solver_args)
272
+ return real + 1j*imag
273
+
274
+ if b.ndim == 2 and b.shape[1] != 1:
275
+ res = np.empty_like(b)
276
+ for j in range(b.shape[1]):
277
+ res[:, j], info = solver(a, b[:, j], **solver_args)
278
+ if info != 0:
279
+ raise ValueError(f"{solver = } returns {info = } for column {j}.")
280
+ return res
281
+ else:
282
+ res, info = solver(a, b, **solver_args)
283
+ if info != 0:
284
+ raise ValueError(f"{solver = } returns {info = }.")
285
+ return res
286
+
287
+
288
+ def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
289
+ """Construct an interpolating NdBspline.
290
+
291
+ Parameters
292
+ ----------
293
+ points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
294
+ The points defining the regular grid in N dimensions. The points in
295
+ each dimension (i.e. every element of the `points` tuple) must be
296
+ strictly ascending or descending.
297
+ values : ndarray of float, shape (m1, ..., mN, ...)
298
+ The data on the regular grid in n dimensions.
299
+ k : int, optional
300
+ The spline degree. Must be odd. Default is cubic, k=3
301
+ solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
302
+ An iterative solver from `scipy.sparse.linalg` or a direct one,
303
+ `scipy.sparse.linalg.spsolve`.
304
+ Used to solve the sparse linear system
305
+ ``design_matrix @ coefficients = rhs`` for the coefficients.
306
+ Default is `scipy.sparse.linalg.gcrotmk`
307
+ solver_args : dict, optional
308
+ Additional arguments for the solver. The call signature is
309
+ ``solver(csr_array, rhs_vector, **solver_args)``
310
+
311
+ Returns
312
+ -------
313
+ spl : NdBSpline object
314
+
315
+ Notes
316
+ -----
317
+ Boundary conditions are not-a-knot in all dimensions.
318
+ """
319
+ ndim = len(points)
320
+ xi_shape = tuple(len(x) for x in points)
321
+
322
+ try:
323
+ len(k)
324
+ except TypeError:
325
+ # make k a tuple
326
+ k = (k,)*ndim
327
+
328
+ for d, point in enumerate(points):
329
+ numpts = len(np.atleast_1d(point))
330
+ if numpts <= k[d]:
331
+ raise ValueError(f"There are {numpts} points in dimension {d},"
332
+ f" but order {k[d]} requires at least "
333
+ f" {k[d]+1} points per dimension.")
334
+
335
+ t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
336
+ for d in range(ndim))
337
+ xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
338
+
339
+ # construct the collocation matrix
340
+ matr = NdBSpline.design_matrix(xvals, t, k)
341
+
342
+ # Solve for the coefficients given `values`.
343
+ # Trailing dimensions: first ndim dimensions are data, the rest are batch
344
+ # dimensions, so stack `values` into a 2D array for `spsolve` to understand.
345
+ v_shape = values.shape
346
+ vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
347
+ vals = values.reshape(vals_shape)
348
+
349
+ if solver != ssl.spsolve:
350
+ solver = functools.partial(_iter_solve, solver=solver)
351
+ if "atol" not in solver_args:
352
+ # avoid a DeprecationWarning, grumble grumble
353
+ solver_args["atol"] = 1e-6
354
+
355
+ coef = solver(matr, vals, **solver_args)
356
+ coef = coef.reshape(xi_shape + v_shape[ndim:])
357
+ return NdBSpline(t, coef, k)
358
+
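
As a quick orientation for the module added above: ``NdBSpline`` is exported, while ``make_ndbspl`` is a private helper (it is not in ``__all__``), so importing it is an implementation detail that may change between releases. A minimal sketch, assuming a SciPy version that ships this module:

    import numpy as np
    from scipy.interpolate import NdBSpline
    from scipy.interpolate._ndbspline import make_ndbspl  # private helper defined above

    # Values of a smooth function on a small 2-D grid.
    x = np.linspace(0.0, 1.0, 6)
    y = np.linspace(0.0, 1.0, 7)
    values = np.sin(2 * np.pi * x)[:, None] * np.cos(2 * np.pi * y)[None, :]

    # Interpolating tensor-product spline; boundaries are not-a-knot in both dimensions.
    spl = make_ndbspl((x, y), values, k=3)
    assert isinstance(spl, NdBSpline)

    # Evaluate at query points; the last axis of the input is the dimension axis.
    pts = np.array([[0.2, 0.3], [0.5, 0.5]])
    print(spl(pts))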
venv/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py ADDED
@@ -0,0 +1,332 @@
1
+ """
2
+ Convenience interface to N-D interpolation
3
+
4
+ .. versionadded:: 0.9
5
+
6
+ """
7
+ import numpy as np
8
+ from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
9
+ CloughTocher2DInterpolator, _ndim_coords_from_arrays
10
+ from scipy.spatial import cKDTree
11
+
12
+ __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
13
+ 'CloughTocher2DInterpolator']
14
+
15
+ #------------------------------------------------------------------------------
16
+ # Nearest-neighbor interpolation
17
+ #------------------------------------------------------------------------------
18
+
19
+
20
+ class NearestNDInterpolator(NDInterpolatorBase):
21
+ """NearestNDInterpolator(x, y).
22
+
23
+ Nearest-neighbor interpolator in N > 1 dimensions.
24
+
25
+ .. versionadded:: 0.9
26
+
27
+ Methods
28
+ -------
29
+ __call__
30
+
31
+ Parameters
32
+ ----------
33
+ x : (npoints, ndims) 2-D ndarray of floats
34
+ Data point coordinates.
35
+ y : (npoints, ) 1-D ndarray of float or complex
36
+ Data values.
37
+ rescale : boolean, optional
38
+ Rescale points to unit cube before performing interpolation.
39
+ This is useful if some of the input dimensions have
40
+ incommensurable units and differ by many orders of magnitude.
41
+
42
+ .. versionadded:: 0.14.0
43
+ tree_options : dict, optional
44
+ Options passed to the underlying ``cKDTree``.
45
+
46
+ .. versionadded:: 0.17.0
47
+
48
+ See Also
49
+ --------
50
+ griddata :
51
+ Interpolate unstructured D-D data.
52
+ LinearNDInterpolator :
53
+ Piecewise linear interpolator in N dimensions.
54
+ CloughTocher2DInterpolator :
55
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
56
+ interpn : Interpolation on a regular grid or rectilinear grid.
57
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
58
+ in arbitrary dimensions (`interpn` wraps this
59
+ class).
60
+
61
+ Notes
62
+ -----
63
+ Uses ``scipy.spatial.cKDTree``
64
+
65
+ .. note:: For data on a regular grid use `interpn` instead.
66
+
67
+ Examples
68
+ --------
69
+ We can interpolate values on a 2D plane:
70
+
71
+ >>> from scipy.interpolate import NearestNDInterpolator
72
+ >>> import numpy as np
73
+ >>> import matplotlib.pyplot as plt
74
+ >>> rng = np.random.default_rng()
75
+ >>> x = rng.random(10) - 0.5
76
+ >>> y = rng.random(10) - 0.5
77
+ >>> z = np.hypot(x, y)
78
+ >>> X = np.linspace(min(x), max(x))
79
+ >>> Y = np.linspace(min(y), max(y))
80
+ >>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
81
+ >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
82
+ >>> Z = interp(X, Y)
83
+ >>> plt.pcolormesh(X, Y, Z, shading='auto')
84
+ >>> plt.plot(x, y, "ok", label="input point")
85
+ >>> plt.legend()
86
+ >>> plt.colorbar()
87
+ >>> plt.axis("equal")
88
+ >>> plt.show()
89
+
90
+ """
91
+
92
+ def __init__(self, x, y, rescale=False, tree_options=None):
93
+ NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
94
+ need_contiguous=False,
95
+ need_values=False)
96
+ if tree_options is None:
97
+ tree_options = dict()
98
+ self.tree = cKDTree(self.points, **tree_options)
99
+ self.values = np.asarray(y)
100
+
101
+ def __call__(self, *args, **query_options):
102
+ """
103
+ Evaluate interpolator at given points.
104
+
105
+ Parameters
106
+ ----------
107
+ x1, x2, ... xn : array-like of float
108
+ Points where to interpolate data at.
109
+ x1, x2, ... xn can be array-like of float with broadcastable shape.
110
+ or x1 can be array-like of float with shape ``(..., ndim)``
111
+ **query_options
112
+ This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
113
+ being passed to the cKDTree's query function to be explicitly set.
114
+ See `scipy.spatial.cKDTree.query` for an overview of the different options.
115
+
116
+ .. versionadded:: 1.12.0
117
+
118
+ """
119
+ # For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
120
+ # some operations which are not required by NearestNDInterpolator.__call__,
121
+ # hence here we operate on xi directly, without calling a parent class function.
122
+ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
123
+ xi = self._check_call_shape(xi)
124
+ xi = self._scale_x(xi)
125
+
126
+ # We need to handle two important cases:
127
+ # (1) the case where xi has trailing dimensions (..., ndim), and
128
+ # (2) the case where y has trailing dimensions
129
+ # We will first flatten xi to deal with case (1),
130
+ # do the computation in flattened array while retaining y's dimensionality,
131
+ # and then reshape the interpolated values back to match xi's shape.
132
+
133
+ # Flatten xi for the query
134
+ xi_flat = xi.reshape(-1, xi.shape[-1])
135
+ original_shape = xi.shape
136
+ flattened_shape = xi_flat.shape
137
+
138
+ # if distance_upper_bound is set to not be infinite,
139
+ # then we need to consider the case where cKDtree
140
+ # does not find any points within distance_upper_bound to return.
141
+ # It marks those points as having infinite distance, which is what will be used
142
+ # below to mask the array and return only the points that were deemed
143
+ # to have a close enough neighbor to return something useful.
144
+ dist, i = self.tree.query(xi_flat, **query_options)
145
+ valid_mask = np.isfinite(dist)
146
+
147
+ # create a holder interp_values array and fill with nans.
148
+ if self.values.ndim > 1:
149
+ interp_shape = flattened_shape[:-1] + self.values.shape[1:]
150
+ else:
151
+ interp_shape = flattened_shape[:-1]
152
+
153
+ if np.issubdtype(self.values.dtype, np.complexfloating):
154
+ interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
155
+ else:
156
+ interp_values = np.full(interp_shape, np.nan)
157
+
158
+ interp_values[valid_mask] = self.values[i[valid_mask], ...]
159
+
160
+ if self.values.ndim > 1:
161
+ new_shape = original_shape[:-1] + self.values.shape[1:]
162
+ else:
163
+ new_shape = original_shape[:-1]
164
+ interp_values = interp_values.reshape(new_shape)
165
+
166
+ return interp_values
167
+
168
+
169
+ #------------------------------------------------------------------------------
170
+ # Convenience interface function
171
+ #------------------------------------------------------------------------------
172
+
173
+
174
+ def griddata(points, values, xi, method='linear', fill_value=np.nan,
175
+ rescale=False):
176
+ """
177
+ Interpolate unstructured D-D data.
178
+
179
+ Parameters
180
+ ----------
181
+ points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
182
+ Data point coordinates.
183
+ values : ndarray of float or complex, shape (n,)
184
+ Data values.
185
+ xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
186
+ Points at which to interpolate data.
187
+ method : {'linear', 'nearest', 'cubic'}, optional
188
+ Method of interpolation. One of
189
+
190
+ ``nearest``
191
+ return the value at the data point closest to
192
+ the point of interpolation. See `NearestNDInterpolator` for
193
+ more details.
194
+
195
+ ``linear``
196
+ tessellate the input point set to N-D
197
+ simplices, and interpolate linearly on each simplex. See
198
+ `LinearNDInterpolator` for more details.
199
+
200
+ ``cubic`` (1-D)
201
+ return the value determined from a cubic
202
+ spline.
203
+
204
+ ``cubic`` (2-D)
205
+ return the value determined from a
206
+ piecewise cubic, continuously differentiable (C1), and
207
+ approximately curvature-minimizing polynomial surface. See
208
+ `CloughTocher2DInterpolator` for more details.
209
+ fill_value : float, optional
210
+ Value used to fill in for requested points outside of the
211
+ convex hull of the input points. If not provided, then the
212
+ default is ``nan``. This option has no effect for the
213
+ 'nearest' method.
214
+ rescale : bool, optional
215
+ Rescale points to unit cube before performing interpolation.
216
+ This is useful if some of the input dimensions have
217
+ incommensurable units and differ by many orders of magnitude.
218
+
219
+ .. versionadded:: 0.14.0
220
+
221
+ Returns
222
+ -------
223
+ ndarray
224
+ Array of interpolated values.
225
+
226
+ See Also
227
+ --------
228
+ LinearNDInterpolator :
229
+ Piecewise linear interpolator in N dimensions.
230
+ NearestNDInterpolator :
231
+ Nearest-neighbor interpolator in N dimensions.
232
+ CloughTocher2DInterpolator :
233
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
234
+ interpn : Interpolation on a regular grid or rectilinear grid.
235
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
236
+ in arbitrary dimensions (`interpn` wraps this
237
+ class).
238
+
239
+ Notes
240
+ -----
241
+
242
+ .. versionadded:: 0.9
243
+
244
+ .. note:: For data on a regular grid use `interpn` instead.
245
+
246
+ Examples
247
+ --------
248
+
249
+ Suppose we want to interpolate the 2-D function
250
+
251
+ >>> import numpy as np
252
+ >>> def func(x, y):
253
+ ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
254
+
255
+ on a grid in [0, 1]x[0, 1]
256
+
257
+ >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
258
+
259
+ but we only know its values at 1000 data points:
260
+
261
+ >>> rng = np.random.default_rng()
262
+ >>> points = rng.random((1000, 2))
263
+ >>> values = func(points[:,0], points[:,1])
264
+
265
+ This can be done with `griddata` -- below we try out all of the
266
+ interpolation methods:
267
+
268
+ >>> from scipy.interpolate import griddata
269
+ >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
270
+ >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
271
+ >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
272
+
273
+ One can see that the exact result is reproduced by all of the
274
+ methods to some degree, but for this smooth function the piecewise
275
+ cubic interpolant gives the best results:
276
+
277
+ >>> import matplotlib.pyplot as plt
278
+ >>> plt.subplot(221)
279
+ >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
280
+ >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
281
+ >>> plt.title('Original')
282
+ >>> plt.subplot(222)
283
+ >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
284
+ >>> plt.title('Nearest')
285
+ >>> plt.subplot(223)
286
+ >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
287
+ >>> plt.title('Linear')
288
+ >>> plt.subplot(224)
289
+ >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
290
+ >>> plt.title('Cubic')
291
+ >>> plt.gcf().set_size_inches(6, 6)
292
+ >>> plt.show()
293
+
294
+ """ # numpy/numpydoc#87 # noqa: E501
295
+
296
+ points = _ndim_coords_from_arrays(points)
297
+
298
+ if points.ndim < 2:
299
+ ndim = points.ndim
300
+ else:
301
+ ndim = points.shape[-1]
302
+
303
+ if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
304
+ from ._interpolate import interp1d
305
+ points = points.ravel()
306
+ if isinstance(xi, tuple):
307
+ if len(xi) != 1:
308
+ raise ValueError("invalid number of dimensions in xi")
309
+ xi, = xi
310
+ # Sort points/values together, necessary as input for interp1d
311
+ idx = np.argsort(points)
312
+ points = points[idx]
313
+ values = values[idx]
314
+ if method == 'nearest':
315
+ fill_value = 'extrapolate'
316
+ ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
317
+ fill_value=fill_value)
318
+ return ip(xi)
319
+ elif method == 'nearest':
320
+ ip = NearestNDInterpolator(points, values, rescale=rescale)
321
+ return ip(xi)
322
+ elif method == 'linear':
323
+ ip = LinearNDInterpolator(points, values, fill_value=fill_value,
324
+ rescale=rescale)
325
+ return ip(xi)
326
+ elif method == 'cubic' and ndim == 2:
327
+ ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
328
+ rescale=rescale)
329
+ return ip(xi)
330
+ else:
331
+ raise ValueError("Unknown interpolation method %r for "
332
+ "%d dimensional data" % (method, ndim))
venv/lib/python3.10/site-packages/scipy/interpolate/_pade.py ADDED
@@ -0,0 +1,67 @@
1
+ from numpy import zeros, asarray, eye, poly1d, hstack, r_
2
+ from scipy import linalg
3
+
4
+ __all__ = ["pade"]
5
+
6
+ def pade(an, m, n=None):
7
+ """
8
+ Return Pade approximation to a polynomial as the ratio of two polynomials.
9
+
10
+ Parameters
11
+ ----------
12
+ an : (N,) array_like
13
+ Taylor series coefficients.
14
+ m : int
15
+ The order of the returned approximating polynomial `q`.
16
+ n : int, optional
17
+ The order of the returned approximating polynomial `p`. By default,
18
+ the order is ``len(an)-1-m``.
19
+
20
+ Returns
21
+ -------
22
+ p, q : Polynomial class
23
+ The Pade approximation of the polynomial defined by `an` is
24
+ ``p(x)/q(x)``.
25
+
26
+ Examples
27
+ --------
28
+ >>> import numpy as np
29
+ >>> from scipy.interpolate import pade
30
+ >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
31
+ >>> p, q = pade(e_exp, 2)
32
+
33
+ >>> e_exp.reverse()
34
+ >>> e_poly = np.poly1d(e_exp)
35
+
36
+ Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``
37
+
38
+ >>> e_poly(1)
39
+ 2.7166666666666668
40
+
41
+ >>> p(1)/q(1)
42
+ 2.7179487179487181
43
+
44
+ """
45
+ an = asarray(an)
46
+ if n is None:
47
+ n = len(an) - 1 - m
48
+ if n < 0:
49
+ raise ValueError("Order of q <m> must be smaller than len(an)-1.")
50
+ if n < 0:
51
+ raise ValueError("Order of p <n> must be greater than 0.")
52
+ N = m + n
53
+ if N > len(an)-1:
54
+ raise ValueError("Order of q+p <m+n> must be smaller than len(an).")
55
+ an = an[:N+1]
56
+ Akj = eye(N+1, n+1, dtype=an.dtype)
57
+ Bkj = zeros((N+1, m), dtype=an.dtype)
58
+ for row in range(1, m+1):
59
+ Bkj[row,:row] = -(an[:row])[::-1]
60
+ for row in range(m+1, N+1):
61
+ Bkj[row,:] = -(an[row-m:row])[::-1]
62
+ C = hstack((Akj, Bkj))
63
+ pq = linalg.solve(C, an)
64
+ p = pq[:n+1]
65
+ q = r_[1.0, pq[n+1:]]
66
+ return poly1d(p[::-1]), poly1d(q[::-1])
67
+
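
The defining property of the Padé approximant ``p/q`` computed above is that its Taylor expansion matches the input coefficients through order ``m + n``. A small numerical check, separate from the module source, reusing the exp(x) coefficients from the docstring:

    import numpy as np
    from scipy.interpolate import pade

    an = [1.0, 1.0, 1/2, 1/6, 1/24, 1/120]    # Taylor coefficients of exp(x)
    p, q = pade(an, 2)                        # deg q = 2, deg p = len(an) - 1 - 2 = 3

    x = 1e-3                                  # near zero the two should agree closely
    taylor = sum(a * x**i for i, a in enumerate(an))
    print(p(x) / q(x), taylor)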
venv/lib/python3.10/site-packages/scipy/interpolate/_polyint.py ADDED
@@ -0,0 +1,938 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ from scipy.special import factorial
5
+ from scipy._lib._util import _asarray_validated, float_factorial, check_random_state
6
+
7
+
8
+ __all__ = ["KroghInterpolator", "krogh_interpolate",
9
+ "BarycentricInterpolator", "barycentric_interpolate",
10
+ "approximate_taylor_polynomial"]
11
+
12
+
13
+ def _isscalar(x):
14
+ """Check whether x is if a scalar type, or 0-dim"""
15
+ return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
16
+
17
+
18
+ class _Interpolator1D:
19
+ """
20
+ Common features in univariate interpolation
21
+
22
+ Deal with input data type and interpolation axis rolling. The
23
+ actual interpolator can assume the y-data is of shape (n, r) where
24
+ `n` is the number of x-points, and `r` the number of variables,
25
+ and use self.dtype as the y-data type.
26
+
27
+ Attributes
28
+ ----------
29
+ _y_axis
30
+ Axis along which the interpolation goes in the original array
31
+ _y_extra_shape
32
+ Additional trailing shape of the input arrays, excluding
33
+ the interpolation axis.
34
+ dtype
35
+ Dtype of the y-data arrays. Can be set via _set_dtype, which
36
+ forces it to be float or complex.
37
+
38
+ Methods
39
+ -------
40
+ __call__
41
+ _prepare_x
42
+ _finish_y
43
+ _reshape_yi
44
+ _set_yi
45
+ _set_dtype
46
+ _evaluate
47
+
48
+ """
49
+
50
+ __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
51
+
52
+ def __init__(self, xi=None, yi=None, axis=None):
53
+ self._y_axis = axis
54
+ self._y_extra_shape = None
55
+ self.dtype = None
56
+ if yi is not None:
57
+ self._set_yi(yi, xi=xi, axis=axis)
58
+
59
+ def __call__(self, x):
60
+ """
61
+ Evaluate the interpolant
62
+
63
+ Parameters
64
+ ----------
65
+ x : array_like
66
+ Point or points at which to evaluate the interpolant.
67
+
68
+ Returns
69
+ -------
70
+ y : array_like
71
+ Interpolated values. Shape is determined by replacing
72
+ the interpolation axis in the original array with the shape of `x`.
73
+
74
+ Notes
75
+ -----
76
+ Input values `x` must be convertible to `float` values like `int`
77
+ or `float`.
78
+
79
+ """
80
+ x, x_shape = self._prepare_x(x)
81
+ y = self._evaluate(x)
82
+ return self._finish_y(y, x_shape)
83
+
84
+ def _evaluate(self, x):
85
+ """
86
+ Actually evaluate the value of the interpolator.
87
+ """
88
+ raise NotImplementedError()
89
+
90
+ def _prepare_x(self, x):
91
+ """Reshape input x array to 1-D"""
92
+ x = _asarray_validated(x, check_finite=False, as_inexact=True)
93
+ x_shape = x.shape
94
+ return x.ravel(), x_shape
95
+
96
+ def _finish_y(self, y, x_shape):
97
+ """Reshape interpolated y back to an N-D array similar to initial y"""
98
+ y = y.reshape(x_shape + self._y_extra_shape)
99
+ if self._y_axis != 0 and x_shape != ():
100
+ nx = len(x_shape)
101
+ ny = len(self._y_extra_shape)
102
+ s = (list(range(nx, nx + self._y_axis))
103
+ + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
104
+ y = y.transpose(s)
105
+ return y
106
+
107
+ def _reshape_yi(self, yi, check=False):
108
+ yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
109
+ if check and yi.shape[1:] != self._y_extra_shape:
110
+ ok_shape = "{!r} + (N,) + {!r}".format(self._y_extra_shape[-self._y_axis:],
111
+ self._y_extra_shape[:-self._y_axis])
112
+ raise ValueError("Data must be of shape %s" % ok_shape)
113
+ return yi.reshape((yi.shape[0], -1))
114
+
115
+ def _set_yi(self, yi, xi=None, axis=None):
116
+ if axis is None:
117
+ axis = self._y_axis
118
+ if axis is None:
119
+ raise ValueError("no interpolation axis specified")
120
+
121
+ yi = np.asarray(yi)
122
+
123
+ shape = yi.shape
124
+ if shape == ():
125
+ shape = (1,)
126
+ if xi is not None and shape[axis] != len(xi):
127
+ raise ValueError("x and y arrays must be equal in length along "
128
+ "interpolation axis.")
129
+
130
+ self._y_axis = (axis % yi.ndim)
131
+ self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
132
+ self.dtype = None
133
+ self._set_dtype(yi.dtype)
134
+
135
+ def _set_dtype(self, dtype, union=False):
136
+ if np.issubdtype(dtype, np.complexfloating) \
137
+ or np.issubdtype(self.dtype, np.complexfloating):
138
+ self.dtype = np.complex128
139
+ else:
140
+ if not union or self.dtype != np.complex128:
141
+ self.dtype = np.float64
142
+
143
+
144
+ class _Interpolator1DWithDerivatives(_Interpolator1D):
145
+ def derivatives(self, x, der=None):
146
+ """
147
+ Evaluate several derivatives of the polynomial at the point `x`
148
+
149
+ Produce an array of derivatives evaluated at the point `x`.
150
+
151
+ Parameters
152
+ ----------
153
+ x : array_like
154
+ Point or points at which to evaluate the derivatives
155
+ der : int or list or None, optional
156
+ How many derivatives to evaluate, or None for all potentially
157
+ nonzero derivatives (that is, a number equal to the number
158
+ of points), or a list of derivatives to evaluate. This number
159
+ includes the function value as the '0th' derivative.
160
+
161
+ Returns
162
+ -------
163
+ d : ndarray
164
+ Array with derivatives; ``d[j]`` contains the jth derivative.
165
+ Shape of ``d[j]`` is determined by replacing the interpolation
166
+ axis in the original array with the shape of `x`.
167
+
168
+ Examples
169
+ --------
170
+ >>> from scipy.interpolate import KroghInterpolator
171
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
172
+ array([1.0,2.0,3.0])
173
+ >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
174
+ array([[1.0,1.0],
175
+ [2.0,2.0],
176
+ [3.0,3.0]])
177
+
178
+ """
179
+ x, x_shape = self._prepare_x(x)
180
+ y = self._evaluate_derivatives(x, der)
181
+
182
+ y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
183
+ if self._y_axis != 0 and x_shape != ():
184
+ nx = len(x_shape)
185
+ ny = len(self._y_extra_shape)
186
+ s = ([0] + list(range(nx+1, nx + self._y_axis+1))
187
+ + list(range(1, nx+1)) +
188
+ list(range(nx+1+self._y_axis, nx+ny+1)))
189
+ y = y.transpose(s)
190
+ return y
191
+
192
+ def derivative(self, x, der=1):
193
+ """
194
+ Evaluate a single derivative of the polynomial at the point `x`.
195
+
196
+ Parameters
197
+ ----------
198
+ x : array_like
199
+ Point or points at which to evaluate the derivatives
200
+
201
+ der : integer, optional
202
+ Which derivative to evaluate (default: first derivative).
203
+ This number includes the function value as 0th derivative.
204
+
205
+ Returns
206
+ -------
207
+ d : ndarray
208
+ Derivative interpolated at the x-points. Shape of `d` is
209
+ determined by replacing the interpolation axis in the
210
+ original array with the shape of `x`.
211
+
212
+ Notes
213
+ -----
214
+ This may be computed by evaluating all derivatives up to the desired
215
+ one (using self.derivatives()) and then discarding the rest.
216
+
217
+ """
218
+ x, x_shape = self._prepare_x(x)
219
+ y = self._evaluate_derivatives(x, der+1)
220
+ return self._finish_y(y[der], x_shape)
221
+
222
+ def _evaluate_derivatives(self, x, der=None):
223
+ """
224
+ Actually evaluate the derivatives.
225
+
226
+ Parameters
227
+ ----------
228
+ x : array_like
229
+ 1D array of points at which to evaluate the derivatives
230
+ der : integer, optional
231
+ The number of derivatives to evaluate, from 'order 0' (der=1)
232
+ to order der-1. If omitted, return all possibly-non-zero
233
+ derivatives, i.e., 0 to order n-1.
234
+
235
+ Returns
236
+ -------
237
+ d : ndarray
238
+ Array of shape ``(der, x.size, self.yi.shape[1])`` containing
239
+ the derivatives from 0 to der-1
240
+ """
241
+ raise NotImplementedError()
242
+
243
+
244
+ class KroghInterpolator(_Interpolator1DWithDerivatives):
245
+ """
246
+ Interpolating polynomial for a set of points.
247
+
248
+ The polynomial passes through all the pairs ``(xi, yi)``. One may
249
+ additionally specify a number of derivatives at each point `xi`;
250
+ this is done by repeating the value `xi` and specifying the
251
+ derivatives as successive `yi` values.
252
+
253
+ Allows evaluation of the polynomial and all its derivatives.
254
+ For reasons of numerical stability, this function does not compute
255
+ the coefficients of the polynomial, although they can be obtained
256
+ by evaluating all the derivatives.
257
+
258
+ Parameters
259
+ ----------
260
+ xi : array_like, shape (npoints, )
261
+ Known x-coordinates. Must be sorted in increasing order.
262
+ yi : array_like, shape (..., npoints, ...)
263
+ Known y-coordinates. When an xi occurs two or more times in
264
+ a row, the corresponding yi's represent derivative values. The length of `yi`
265
+ along the interpolation axis must be equal to the length of `xi`. Use the
266
+ `axis` parameter to select the correct axis.
267
+ axis : int, optional
268
+ Axis in the `yi` array corresponding to the x-coordinate values. Defaults to
269
+ ``axis=0``.
270
+
271
+ Notes
272
+ -----
273
+ Be aware that the algorithms implemented here are not necessarily
274
+ the most numerically stable known. Moreover, even in a world of
275
+ exact computation, unless the x coordinates are chosen very
276
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
277
+ polynomial interpolation itself is a very ill-conditioned process
278
+ due to the Runge phenomenon. In general, even with well-chosen
279
+ x values, degrees higher than about thirty cause problems with
280
+ numerical instability in this code.
281
+
282
+ Based on [1]_.
283
+
284
+ References
285
+ ----------
286
+ .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
287
+ and Numerical Differentiation", 1970.
288
+
289
+ Examples
290
+ --------
291
+ To produce a polynomial that is zero at 0 and 1 and has
292
+ derivative 2 at 0, call
293
+
294
+ >>> from scipy.interpolate import KroghInterpolator
295
+ >>> KroghInterpolator([0,0,1],[0,2,0])
296
+
297
+ This constructs the quadratic :math:`2x^2-2x`. The derivative condition
298
+ is indicated by the repeated zero in the `xi` array; the corresponding
299
+ yi values are 0, the function value, and 2, the derivative value.
300
+
301
+ For another example, given `xi`, `yi`, and a derivative `ypi` for each
302
+ point, appropriate arrays can be constructed as:
303
+
304
+ >>> import numpy as np
305
+ >>> rng = np.random.default_rng()
306
+ >>> xi = np.linspace(0, 1, 5)
307
+ >>> yi, ypi = rng.random((2, 5))
308
+ >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
309
+ >>> KroghInterpolator(xi_k, yi_k)
310
+
311
+ To produce a vector-valued polynomial, supply a higher-dimensional
312
+ array for `yi`:
313
+
314
+ >>> KroghInterpolator([0,1],[[2,3],[4,5]])
315
+
316
+ This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
317
+
318
+ """
319
+
320
+ def __init__(self, xi, yi, axis=0):
321
+ super().__init__(xi, yi, axis)
322
+
323
+ self.xi = np.asarray(xi)
324
+ self.yi = self._reshape_yi(yi)
325
+ self.n, self.r = self.yi.shape
326
+
327
+ if (deg := self.xi.size) > 30:
328
+ warnings.warn(f"{deg} degrees provided, degrees higher than about"
329
+ " thirty cause problems with numerical instability "
330
+ "with 'KroghInterpolator'", stacklevel=2)
331
+
332
+ c = np.zeros((self.n+1, self.r), dtype=self.dtype)
333
+ c[0] = self.yi[0]
334
+ Vk = np.zeros((self.n, self.r), dtype=self.dtype)
335
+ for k in range(1, self.n):
336
+ s = 0
337
+ while s <= k and xi[k-s] == xi[k]:
338
+ s += 1
339
+ s -= 1
340
+ Vk[0] = self.yi[k]/float_factorial(s)
341
+ for i in range(k-s):
342
+ if xi[i] == xi[k]:
343
+ raise ValueError("Elements of `xi` can't be equal.")
344
+ if s == 0:
345
+ Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
346
+ else:
347
+ Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
348
+ c[k] = Vk[k-s]
349
+ self.c = c
350
+
351
+ def _evaluate(self, x):
352
+ pi = 1
353
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
354
+ p += self.c[0,np.newaxis,:]
355
+ for k in range(1, self.n):
356
+ w = x - self.xi[k-1]
357
+ pi = w*pi
358
+ p += pi[:,np.newaxis] * self.c[k]
359
+ return p
360
+
361
+ def _evaluate_derivatives(self, x, der=None):
362
+ n = self.n
363
+ r = self.r
364
+
365
+ if der is None:
366
+ der = self.n
367
+
368
+ pi = np.zeros((n, len(x)))
369
+ w = np.zeros((n, len(x)))
370
+ pi[0] = 1
371
+ p = np.zeros((len(x), self.r), dtype=self.dtype)
372
+ p += self.c[0, np.newaxis, :]
373
+
374
+ for k in range(1, n):
375
+ w[k-1] = x - self.xi[k-1]
376
+ pi[k] = w[k-1] * pi[k-1]
377
+ p += pi[k, :, np.newaxis] * self.c[k]
378
+
379
+ cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
380
+ cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
381
+ cn[0] = p
382
+ for k in range(1, n):
383
+ for i in range(1, n-k+1):
384
+ pi[i] = w[k+i-1]*pi[i-1] + pi[i]
385
+ cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
386
+ cn[k] *= float_factorial(k)
387
+
388
+ cn[n, :, :] = 0
389
+ return cn[:der]
390
+
391
+
392
+ def krogh_interpolate(xi, yi, x, der=0, axis=0):
393
+ """
394
+ Convenience function for polynomial interpolation.
395
+
396
+ See `KroghInterpolator` for more details.
397
+
398
+ Parameters
399
+ ----------
400
+ xi : array_like
401
+ Interpolation points (known x-coordinates).
402
+ yi : array_like
403
+ Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
404
+ vectors of length R, or scalars if R=1.
405
+ x : array_like
406
+ Point or points at which to evaluate the derivatives.
407
+ der : int or list or None, optional
408
+ How many derivatives to evaluate, or None for all potentially
409
+ nonzero derivatives (that is, a number equal to the number
410
+ of points), or a list of derivatives to evaluate. This number
411
+ includes the function value as the '0th' derivative.
412
+ axis : int, optional
413
+ Axis in the `yi` array corresponding to the x-coordinate values.
414
+
415
+ Returns
416
+ -------
417
+ d : ndarray
418
+ If the interpolator's values are R-D then the
419
+ returned array will be the number of derivatives by N by R.
420
+ If `x` is a scalar, the middle dimension will be dropped; if
421
+ the `yi` are scalars then the last dimension will be dropped.
422
+
423
+ See Also
424
+ --------
425
+ KroghInterpolator : Krogh interpolator
426
+
427
+ Notes
428
+ -----
429
+ Construction of the interpolating polynomial is a relatively expensive
430
+ process. If you want to evaluate it repeatedly consider using the class
431
+ KroghInterpolator (which is what this function uses).
432
+
433
+ Examples
434
+ --------
435
+ We can interpolate 2D observed data using Krogh interpolation:
436
+
437
+ >>> import numpy as np
438
+ >>> import matplotlib.pyplot as plt
439
+ >>> from scipy.interpolate import krogh_interpolate
440
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
441
+ >>> y_observed = np.sin(x_observed)
442
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
443
+ >>> y = krogh_interpolate(x_observed, y_observed, x)
444
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
445
+ >>> plt.plot(x, y, label="krogh interpolation")
446
+ >>> plt.legend()
447
+ >>> plt.show()
448
+ """
449
+
450
+ P = KroghInterpolator(xi, yi, axis=axis)
451
+ if der == 0:
452
+ return P(x)
453
+ elif _isscalar(der):
454
+ return P.derivative(x, der=der)
455
+ else:
456
+ return P.derivatives(x, der=np.amax(der)+1)[der]
457
+
458
+
459
+ def approximate_taylor_polynomial(f,x,degree,scale,order=None):
460
+ """
461
+ Estimate the Taylor polynomial of f at x by polynomial fitting.
462
+
463
+ Parameters
464
+ ----------
465
+ f : callable
466
+ The function whose Taylor polynomial is sought. Should accept
467
+ a vector of `x` values.
468
+ x : scalar
469
+ The point at which the polynomial is to be evaluated.
470
+ degree : int
471
+ The degree of the Taylor polynomial
472
+ scale : scalar
473
+ The width of the interval to use to evaluate the Taylor polynomial.
474
+ Function values spread over a range this wide are used to fit the
475
+ polynomial. Must be chosen carefully.
476
+ order : int or None, optional
477
+ The order of the polynomial to be used in the fitting; `f` will be
478
+ evaluated ``order+1`` times. If None, use `degree`.
479
+
480
+ Returns
481
+ -------
482
+ p : poly1d instance
483
+ The Taylor polynomial (translated to the origin, so that
484
+ for example p(0)=f(x)).
485
+
486
+ Notes
487
+ -----
488
+ The appropriate choice of "scale" is a trade-off; too large and the
489
+ function differs from its Taylor polynomial too much to get a good
490
+ answer, too small and round-off errors overwhelm the higher-order terms.
491
+ The algorithm used becomes numerically unstable around order 30 even
492
+ under ideal circumstances.
493
+
494
+ Choosing order somewhat larger than degree may improve the higher-order
495
+ terms.
496
+
497
+ Examples
498
+ --------
499
+ We can calculate Taylor approximation polynomials of sin function with
500
+ various degrees:
501
+
502
+ >>> import numpy as np
503
+ >>> import matplotlib.pyplot as plt
504
+ >>> from scipy.interpolate import approximate_taylor_polynomial
505
+ >>> x = np.linspace(-10.0, 10.0, num=100)
506
+ >>> plt.plot(x, np.sin(x), label="sin curve")
507
+ >>> for degree in np.arange(1, 15, step=2):
508
+ ... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
509
+ ... order=degree + 2)
510
+ ... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
511
+ >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
512
+ ... borderaxespad=0.0, shadow=True)
513
+ >>> plt.tight_layout()
514
+ >>> plt.axis([-10, 10, -10, 10])
515
+ >>> plt.show()
516
+
517
+ """
518
+ if order is None:
519
+ order = degree
520
+
521
+ n = order+1
522
+ # Choose n points that cluster near the endpoints of the interval in
523
+ # a way that avoids the Runge phenomenon. Ensure, by including the
524
+ # endpoint or not as appropriate, that one point always falls at x
525
+ # exactly.
526
+ xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
527
+
528
+ P = KroghInterpolator(xs, f(xs))
529
+ d = P.derivatives(x,der=degree+1)
530
+
531
+ return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
532
+
533
+
534
+ class BarycentricInterpolator(_Interpolator1DWithDerivatives):
535
+ r"""Interpolating polynomial for a set of points.
536
+
537
+ Constructs a polynomial that passes through a given set of points.
538
+ Allows evaluation of the polynomial and all its derivatives,
539
+ efficient changing of the y-values to be interpolated,
540
+ and updating by adding more x- and y-values.
541
+
542
+ For reasons of numerical stability, this function does not compute
543
+ the coefficients of the polynomial.
544
+
545
+ The values `yi` need to be provided before the function is
546
+ evaluated, but none of the preprocessing depends on them, so rapid
547
+ updates are possible.
548
+
549
+ Parameters
550
+ ----------
551
+ xi : array_like, shape (npoints, )
552
+ 1-D array of x coordinates of the points the polynomial
553
+ should pass through
554
+ yi : array_like, shape (..., npoints, ...), optional
555
+ N-D array of y coordinates of the points the polynomial should pass through.
556
+ If None, the y values will be supplied later via the `set_y` method.
557
+ The length of `yi` along the interpolation axis must be equal to the length
558
+ of `xi`. Use the ``axis`` parameter to select correct axis.
559
+ axis : int, optional
560
+ Axis in the yi array corresponding to the x-coordinate values. Defaults
561
+ to ``axis=0``.
562
+ wi : array_like, optional
563
+ The barycentric weights for the chosen interpolation points `xi`.
564
+ If absent or None, the weights will be computed from `xi` (default).
565
+ This allows for the reuse of the weights `wi` if several interpolants
566
+ are being calculated using the same nodes `xi`, without re-computation.
567
+ random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
568
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
569
+ singleton is used.
570
+ If `seed` is an int, a new ``RandomState`` instance is used,
571
+ seeded with `seed`.
572
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
573
+ that instance is used.
574
+
575
+ Notes
576
+ -----
577
+ This class uses a "barycentric interpolation" method that treats
578
+ the problem as a special case of rational function interpolation.
579
+ This algorithm is quite stable, numerically, but even in a world of
580
+ exact computation, unless the x coordinates are chosen very
581
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
582
+ polynomial interpolation itself is a very ill-conditioned process
583
+ due to the Runge phenomenon.
584
+
585
+ Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
586
+
587
+ Examples
588
+ --------
589
+ To produce a quintic barycentric interpolant approximating the function
590
+ :math:`\sin x`, and its first four derivatives, using six randomly-spaced
591
+ nodes in :math:`(0, \frac{\pi}{2})`:
592
+
593
+ >>> import numpy as np
594
+ >>> import matplotlib.pyplot as plt
595
+ >>> from scipy.interpolate import BarycentricInterpolator
596
+ >>> rng = np.random.default_rng()
597
+ >>> xi = rng.random(6) * np.pi/2
598
+ >>> f, f_d1, f_d2, f_d3, f_d4 = np.sin, np.cos, lambda x: -np.sin(x), lambda x: -np.cos(x), np.sin
599
+ >>> P = BarycentricInterpolator(xi, f(xi), random_state=rng)
600
+ >>> fig, axs = plt.subplots(5, 1, sharex=True, layout='constrained', figsize=(7,10))
601
+ >>> x = np.linspace(0, np.pi, 100)
602
+ >>> axs[0].plot(x, P(x), 'r:', x, f(x), 'k--', xi, f(xi), 'xk')
603
+ >>> axs[1].plot(x, P.derivative(x), 'r:', x, f_d1(x), 'k--', xi, f_d1(xi), 'xk')
604
+ >>> axs[2].plot(x, P.derivative(x, 2), 'r:', x, f_d2(x), 'k--', xi, f_d2(xi), 'xk')
605
+ >>> axs[3].plot(x, P.derivative(x, 3), 'r:', x, f_d3(x), 'k--', xi, f_d3(xi), 'xk')
606
+ >>> axs[4].plot(x, P.derivative(x, 4), 'r:', x, f_d4(x), 'k--', xi, f_d4(xi), 'xk')
607
+ >>> axs[0].set_xlim(0, np.pi)
608
+ >>> axs[4].set_xlabel(r"$x$")
609
+ >>> axs[4].set_xticks([i * np.pi / 4 for i in range(5)],
610
+ ... ["0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"])
611
+ >>> axs[0].set_ylabel("$f(x)$")
612
+ >>> axs[1].set_ylabel("$f'(x)$")
613
+ >>> axs[2].set_ylabel("$f''(x)$")
614
+ >>> axs[3].set_ylabel("$f^{(3)}(x)$")
615
+ >>> axs[4].set_ylabel("$f^{(4)}(x)$")
616
+ >>> labels = ['Interpolation nodes', 'True function $f$', 'Barycentric interpolation']
617
+ >>> axs[0].legend(axs[0].get_lines()[::-1], labels, bbox_to_anchor=(0., 1.02, 1., .102),
618
+ ... loc='lower left', ncols=3, mode="expand", borderaxespad=0., frameon=False)
619
+ >>> plt.show()
620
+ """ # numpy/numpydoc#87 # noqa: E501
621
+
622
+ def __init__(self, xi, yi=None, axis=0, *, wi=None, random_state=None):
623
+ super().__init__(xi, yi, axis)
624
+
625
+ random_state = check_random_state(random_state)
626
+
627
+ self.xi = np.asarray(xi, dtype=np.float64)
628
+ self.set_yi(yi)
629
+ self.n = len(self.xi)
630
+
631
+ # cache derivative object to avoid re-computing the weights with every call.
632
+ self._diff_cij = None
633
+
634
+ if wi is not None:
635
+ self.wi = wi
636
+ else:
637
+ # See page 510 of Berrut and Trefethen 2004 for an explanation of the
638
+ # capacity scaling and the suggestion of using a random permutation of
639
+ # the input factors.
640
+ # At the moment, the permutation is not performed for xi that are
641
+ # appended later through the add_xi interface. It's not clear to me how
642
+ # to implement that and it seems that most situations that require
643
+ # these numerical stability improvements will be able to provide all
644
+ # the points to the constructor.
645
+ self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
646
+ permute = random_state.permutation(self.n, )
647
+ inv_permute = np.zeros(self.n, dtype=np.int32)
648
+ inv_permute[permute] = np.arange(self.n)
649
+ self.wi = np.zeros(self.n)
650
+
651
+ for i in range(self.n):
652
+ dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
653
+ dist[inv_permute[i]] = 1.0
654
+ prod = np.prod(dist)
655
+ if prod == 0.0:
656
+ raise ValueError("Interpolation points xi must be"
657
+ " distinct.")
658
+ self.wi[i] = 1.0 / prod
659
+
660
+ def set_yi(self, yi, axis=None):
661
+ """
662
+ Update the y values to be interpolated
663
+
664
+ The barycentric interpolation algorithm requires the calculation
665
+ of weights, but these depend only on the `xi`. The `yi` can be changed
666
+ at any time.
667
+
668
+ Parameters
669
+ ----------
670
+ yi : array_like
671
+ The y-coordinates of the points the polynomial will pass through.
672
+ If None, the y values must be supplied later.
673
+ axis : int, optional
674
+ Axis in the `yi` array corresponding to the x-coordinate values.
675
+
676
+ """
677
+ if yi is None:
678
+ self.yi = None
679
+ return
680
+ self._set_yi(yi, xi=self.xi, axis=axis)
681
+ self.yi = self._reshape_yi(yi)
682
+ self.n, self.r = self.yi.shape
683
+ self._diff_baryint = None
684
+
685
+ def add_xi(self, xi, yi=None):
686
+ """
687
+ Add more x values to the set to be interpolated
688
+
689
+ The barycentric interpolation algorithm allows easy updating by
690
+ adding more points for the polynomial to pass through.
691
+
692
+ Parameters
693
+ ----------
694
+ xi : array_like
695
+ The x coordinates of the points that the polynomial should pass
696
+ through.
697
+ yi : array_like, optional
698
+ The y coordinates of the points the polynomial should pass through.
699
+ Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
700
+ vector-valued.
701
+ If `yi` is not given, the y values will be supplied later. `yi`
702
+ should be given if and only if the interpolator has y values
703
+ specified.
704
+
705
+ Notes
706
+ -----
707
+ The new points added by `add_xi` are not randomly permuted
708
+ so there is potential for numerical instability,
709
+ especially for a large number of points. If this
710
+ happens, please reconstruct interpolation from scratch instead.
711
+ """
712
+ if yi is not None:
713
+ if self.yi is None:
714
+ raise ValueError("No previous yi value to update!")
715
+ yi = self._reshape_yi(yi, check=True)
716
+ self.yi = np.vstack((self.yi, yi))
717
+ else:
718
+ if self.yi is not None:
719
+ raise ValueError("No update to yi provided!")
720
+ old_n = self.n
721
+ self.xi = np.concatenate((self.xi, xi))
722
+ self.n = len(self.xi)
723
+ self.wi **= -1
724
+ old_wi = self.wi
725
+ self.wi = np.zeros(self.n)
726
+ self.wi[:old_n] = old_wi
727
+ for j in range(old_n, self.n):
728
+ self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
729
+ self.wi[j] = np.multiply.reduce(
730
+ self._inv_capacity * (self.xi[:j]-self.xi[j])
731
+ )
732
+ self.wi **= -1
733
+ self._diff_cij = None
734
+ self._diff_baryint = None
735
+
736
+ def __call__(self, x):
737
+ """Evaluate the interpolating polynomial at the points x
738
+
739
+ Parameters
740
+ ----------
741
+ x : array_like
742
+ Point or points at which to evaluate the interpolant.
743
+
744
+ Returns
745
+ -------
746
+ y : array_like
747
+ Interpolated values. Shape is determined by replacing
748
+ the interpolation axis in the original array with the shape of `x`.
749
+
750
+ Notes
751
+ -----
752
+ Currently the code computes an outer product between `x` and the
753
+ weights, that is, it constructs an intermediate array of size
754
+ ``(N, len(x))``, where N is the degree of the polynomial.
755
+ """
756
+ return _Interpolator1D.__call__(self, x)
757
+
758
+ def _evaluate(self, x):
759
+ if x.size == 0:
760
+ p = np.zeros((0, self.r), dtype=self.dtype)
761
+ else:
762
+ c = x[..., np.newaxis] - self.xi
763
+ z = c == 0
764
+ c[z] = 1
765
+ c = self.wi / c
766
+ with np.errstate(divide='ignore'):
767
+ p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
768
+ # Now fix where x==some xi
769
+ r = np.nonzero(z)
770
+ if len(r) == 1: # evaluation at a scalar
771
+ if len(r[0]) > 0: # equals one of the points
772
+ p = self.yi[r[0][0]]
773
+ else:
774
+ p[r[:-1]] = self.yi[r[-1]]
775
+ return p
776
+
777
+ def derivative(self, x, der=1):
778
+ """
779
+ Evaluate a single derivative of the polynomial at the point x.
780
+
781
+ Parameters
782
+ ----------
783
+ x : array_like
784
+ Point or points at which to evaluate the derivatives
785
+ der : integer, optional
786
+ Which derivative to evaluate (default: first derivative).
787
+ This number includes the function value as 0th derivative.
788
+
789
+ Returns
790
+ -------
791
+ d : ndarray
792
+ Derivative interpolated at the x-points. Shape of `d` is
793
+ determined by replacing the interpolation axis in the
794
+ original array with the shape of `x`.
795
+ """
796
+ x, x_shape = self._prepare_x(x)
797
+ y = self._evaluate_derivatives(x, der+1, all_lower=False)
798
+ return self._finish_y(y, x_shape)
799
+
800
+ def _evaluate_derivatives(self, x, der=None, all_lower=True):
801
+ # NB: der here is not the order of the highest derivative;
802
+ # instead, it is the size of the derivatives matrix that
803
+ # would be returned with all_lower=True, including the
804
+ # '0th' derivative (the undifferentiated function).
805
+ # E.g. to evaluate the 5th derivative alone, call
806
+ # _evaluate_derivatives(x, der=6, all_lower=False).
807
+
808
+ if (not all_lower) and (x.size == 0 or self.r == 0):
809
+ return np.zeros((0, self.r), dtype=self.dtype)
810
+
811
+ if (not all_lower) and der == 1:
812
+ return self._evaluate(x)
813
+
814
+ if (not all_lower) and (der > self.n):
815
+ return np.zeros((len(x), self.r), dtype=self.dtype)
816
+
817
+ if der is None:
818
+ der = self.n
819
+
820
+ if all_lower and (x.size == 0 or self.r == 0):
821
+ return np.zeros((der, len(x), self.r), dtype=self.dtype)
822
+
823
+ if self._diff_cij is None:
824
+ # c[i,j] = xi[i] - xi[j]
825
+ c = self.xi[:, np.newaxis] - self.xi
826
+
827
+ # avoid division by 0 (diagonal entries are so far zero by construction)
828
+ np.fill_diagonal(c, 1)
829
+
830
+ # c[i,j] = (w[j] / w[i]) / (xi[i] - xi[j]) (equation 9.4)
831
+ c = self.wi / (c * self.wi[..., np.newaxis])
832
+
833
+ # fill in correct diagonal entries: each column sums to 0
834
+ np.fill_diagonal(c, 0)
835
+
836
+ # calculate diagonal
837
+ # c[j,j] = -sum_{i != j} c[i,j] (equation 9.5)
838
+ d = -c.sum(axis=1)
839
+ # c[i,j] = l_j(x_i)
840
+ np.fill_diagonal(c, d)
841
+
842
+ self._diff_cij = c
843
+
844
+ if self._diff_baryint is None:
845
+ # initialise and cache derivative interpolator and cijs;
846
+ # reuse weights wi (which depend only on interpolation points xi),
847
+ # to avoid unnecessary re-computation
848
+ self._diff_baryint = BarycentricInterpolator(xi=self.xi,
849
+ yi=self._diff_cij @ self.yi,
850
+ wi=self.wi)
851
+ self._diff_baryint._diff_cij = self._diff_cij
852
+
853
+ if all_lower:
854
+ # assemble matrix of derivatives from order 0 to order der-1,
855
+ # in the format required by _Interpolator1DWithDerivatives.
856
+ cn = np.zeros((der, len(x), self.r), dtype=self.dtype)
857
+ for d in range(der):
858
+ cn[d, :, :] = self._evaluate_derivatives(x, d+1, all_lower=False)
859
+ return cn
860
+
861
+ # recursively evaluate only the derivative requested
862
+ return self._diff_baryint._evaluate_derivatives(x, der-1, all_lower=False)
863
+
864
+
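Editorial usage sketch (not part of the scipy source; the sample data are arbitrary): the class above can be exercised through the constructor, `set_yi`, `add_xi`, and `derivative` documented in the methods.

import numpy as np
from scipy.interpolate import BarycentricInterpolator

xi = np.linspace(0.0, 2.0 * np.pi, 9)
P = BarycentricInterpolator(xi, np.sin(xi))
print(P(1.0))              # close to sin(1.0)
print(P.derivative(1.0))   # close to cos(1.0)

P.set_yi(np.cos(xi))               # reuse the cached weights for new y data
P.add_xi([7.0], [np.cos(7.0)])     # append one node; the weights are updated incrementally
print(P(7.0))                      # exactly cos(7.0), since 7.0 is now a node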
865
+ def barycentric_interpolate(xi, yi, x, axis=0, *, der=0):
866
+ """
867
+ Convenience function for polynomial interpolation.
868
+
869
+ Constructs a polynomial that passes through a given set of points,
870
+ then evaluates the polynomial. For reasons of numerical stability,
871
+ this function does not compute the coefficients of the polynomial.
872
+
873
+ This function uses a "barycentric interpolation" method that treats
874
+ the problem as a special case of rational function interpolation.
875
+ This algorithm is quite stable, numerically, but even in a world of
876
+ exact computation, unless the `x` coordinates are chosen very
877
+ carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
878
+ polynomial interpolation itself is a very ill-conditioned process
879
+ due to the Runge phenomenon.
880
+
881
+ Parameters
882
+ ----------
883
+ xi : array_like
884
+ 1-D array of x coordinates of the points the polynomial should
885
+ pass through
886
+ yi : array_like
887
+ The y coordinates of the points the polynomial should pass through.
888
+ x : scalar or array_like
889
+ Point or points at which to evaluate the interpolant.
890
+ der : int or list or None, optional
891
+ How many derivatives to evaluate, or None for all potentially
892
+ nonzero derivatives (that is, a number equal to the number
893
+ of points), or a list of derivatives to evaluate. This number
894
+ includes the function value as the '0th' derivative.
895
+ axis : int, optional
896
+ Axis in the `yi` array corresponding to the x-coordinate values.
897
+
898
+ Returns
899
+ -------
900
+ y : scalar or array_like
901
+ Interpolated values. Shape is determined by replacing
902
+ the interpolation axis in the original array with the shape of `x`.
903
+
904
+ See Also
905
+ --------
906
+ BarycentricInterpolator : Barycentric interpolator
907
+
908
+ Notes
909
+ -----
910
+ Construction of the interpolation weights is a relatively slow process.
911
+ If you want to call this many times with the same xi (but possibly
912
+ varying yi or x) you should use the class `BarycentricInterpolator`.
913
+ This is what this function uses internally.
914
+
915
+ Examples
916
+ --------
917
+ We can interpolate 2D observed data using barycentric interpolation:
918
+
919
+ >>> import numpy as np
920
+ >>> import matplotlib.pyplot as plt
921
+ >>> from scipy.interpolate import barycentric_interpolate
922
+ >>> x_observed = np.linspace(0.0, 10.0, 11)
923
+ >>> y_observed = np.sin(x_observed)
924
+ >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
925
+ >>> y = barycentric_interpolate(x_observed, y_observed, x)
926
+ >>> plt.plot(x_observed, y_observed, "o", label="observation")
927
+ >>> plt.plot(x, y, label="barycentric interpolation")
928
+ >>> plt.legend()
929
+ >>> plt.show()
930
+
931
+ """
932
+ P = BarycentricInterpolator(xi, yi, axis=axis)
933
+ if der == 0:
934
+ return P(x)
935
+ elif _isscalar(der):
936
+ return P.derivative(x, der=der)
937
+ else:
938
+ return P.derivatives(x, der=np.amax(der)+1)[der]
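Editorial aside (not part of the scipy source): because the final branch above falls through to ``P.derivatives(x, der=np.amax(der)+1)[der]``, passing a list of orders returns the matching stack of values. A minimal sketch:

import numpy as np
from scipy.interpolate import barycentric_interpolate

x_obs = np.linspace(0.0, 10.0, 11)
y_obs = np.sin(x_obs)
x = np.linspace(0.0, 10.0, 5)

# `val` approximates sin(x) and `slope` approximates cos(x)
val, slope = barycentric_interpolate(x_obs, y_obs, x, der=[0, 1])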
venv/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (470 kB).
 
venv/lib/python3.10/site-packages/scipy/interpolate/_rbf.py ADDED
@@ -0,0 +1,290 @@
1
+ """rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
2
+
3
+ Written by John Travers <[email protected]>, February 2007
4
+ Based closely on Matlab code by Alex Chirokov
5
+ Additional, large, improvements by Robert Hetland
6
+ Some additional alterations by Travis Oliphant
7
+ Interpolation with multi-dimensional target domain by Josua Sassen
8
+
9
+ Permission to use, modify, and distribute this software is given under the
10
+ terms of the SciPy (BSD style) license. See LICENSE.txt that came with
11
+ this distribution for specifics.
12
+
13
+ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
14
+
15
+ Copyright (c) 2006-2007, Robert Hetland <[email protected]>
16
+ Copyright (c) 2007, John Travers <[email protected]>
17
+
18
+ Redistribution and use in source and binary forms, with or without
19
+ modification, are permitted provided that the following conditions are
20
+ met:
21
+
22
+ * Redistributions of source code must retain the above copyright
23
+ notice, this list of conditions and the following disclaimer.
24
+
25
+ * Redistributions in binary form must reproduce the above
26
+ copyright notice, this list of conditions and the following
27
+ disclaimer in the documentation and/or other materials provided
28
+ with the distribution.
29
+
30
+ * Neither the name of Robert Hetland nor the names of any
31
+ contributors may be used to endorse or promote products derived
32
+ from this software without specific prior written permission.
33
+
34
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45
+ """
46
+ import numpy as np
47
+
48
+ from scipy import linalg
49
+ from scipy.special import xlogy
50
+ from scipy.spatial.distance import cdist, pdist, squareform
51
+
52
+ __all__ = ['Rbf']
53
+
54
+
55
+ class Rbf:
56
+ """
57
+ Rbf(*args, **kwargs)
58
+
59
+ A class for radial basis function interpolation of functions from
60
+ N-D scattered data to an M-D domain.
61
+
62
+ .. legacy:: class
63
+
64
+ `Rbf` is legacy code, for new usage please use `RBFInterpolator`
65
+ instead.
66
+
67
+ Parameters
68
+ ----------
69
+ *args : arrays
70
+ x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
71
+ and d is the array of values at the nodes
72
+ function : str or callable, optional
73
+ The radial basis function, based on the radius, r, given by the norm
74
+ (default is Euclidean distance); the default is 'multiquadric'::
75
+
76
+ 'multiquadric': sqrt((r/self.epsilon)**2 + 1)
77
+ 'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
78
+ 'gaussian': exp(-(r/self.epsilon)**2)
79
+ 'linear': r
80
+ 'cubic': r**3
81
+ 'quintic': r**5
82
+ 'thin_plate': r**2 * log(r)
83
+
84
+ If callable, then it must take 2 arguments (self, r). The epsilon
85
+ parameter will be available as self.epsilon. Other keyword
86
+ arguments passed in will be available as well.
87
+
88
+ epsilon : float, optional
89
+ Adjustable constant for gaussian or multiquadrics functions
90
+ - defaults to approximate average distance between nodes (which is
91
+ a good start).
92
+ smooth : float, optional
93
+ Values greater than zero increase the smoothness of the
94
+ approximation. 0 is for interpolation (default), the function will
95
+ always go through the nodal points in this case.
96
+ norm : str, callable, optional
97
+ A function that returns the 'distance' between two points, with
98
+ inputs as arrays of positions (x, y, z, ...), and an output as an
99
+ array of distance. E.g., the default: 'euclidean', such that the result
100
+ is a matrix of the distances from each point in ``x1`` to each point in
101
+ ``x2``. For more options, see documentation of
102
+ `scipy.spatial.distance.cdist`.
103
+ mode : str, optional
104
+ Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
105
+ '1-D' the data `d` will be considered as 1-D and flattened
106
+ internally. When it is 'N-D' the data `d` is assumed to be an array of
107
+ shape (n_samples, m), where m is the dimension of the target domain.
108
+
109
+
110
+ Attributes
111
+ ----------
112
+ N : int
113
+ The number of data points (as determined by the input arrays).
114
+ di : ndarray
115
+ The 1-D array of data values at each of the data coordinates `xi`.
116
+ xi : ndarray
117
+ The 2-D array of data coordinates.
118
+ function : str or callable
119
+ The radial basis function. See description under Parameters.
120
+ epsilon : float
121
+ Parameter used by gaussian or multiquadrics functions. See Parameters.
122
+ smooth : float
123
+ Smoothing parameter. See description under Parameters.
124
+ norm : str or callable
125
+ The distance function. See description under Parameters.
126
+ mode : str
127
+ Mode of the interpolation. See description under Parameters.
128
+ nodes : ndarray
129
+ A 1-D array of node values for the interpolation.
130
+ A : internal property, do not use
131
+
132
+ See Also
133
+ --------
134
+ RBFInterpolator
135
+
136
+ Examples
137
+ --------
138
+ >>> import numpy as np
139
+ >>> from scipy.interpolate import Rbf
140
+ >>> rng = np.random.default_rng()
141
+ >>> x, y, z, d = rng.random((4, 50))
142
+ >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
143
+ >>> xi = yi = zi = np.linspace(0, 1, 20)
144
+ >>> di = rbfi(xi, yi, zi) # interpolated values
145
+ >>> di.shape
146
+ (20,)
147
+
148
+ """
149
+ # Available radial basis functions that can be selected as strings;
150
+ # they all start with _h_ (self._init_function relies on that)
151
+ def _h_multiquadric(self, r):
152
+ return np.sqrt((1.0/self.epsilon*r)**2 + 1)
153
+
154
+ def _h_inverse_multiquadric(self, r):
155
+ return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
156
+
157
+ def _h_gaussian(self, r):
158
+ return np.exp(-(1.0/self.epsilon*r)**2)
159
+
160
+ def _h_linear(self, r):
161
+ return r
162
+
163
+ def _h_cubic(self, r):
164
+ return r**3
165
+
166
+ def _h_quintic(self, r):
167
+ return r**5
168
+
169
+ def _h_thin_plate(self, r):
170
+ return xlogy(r**2, r)
171
+
172
+ # Setup self._function and do smoke test on initial r
173
+ def _init_function(self, r):
174
+ if isinstance(self.function, str):
175
+ self.function = self.function.lower()
176
+ _mapped = {'inverse': 'inverse_multiquadric',
177
+ 'inverse multiquadric': 'inverse_multiquadric',
178
+ 'thin-plate': 'thin_plate'}
179
+ if self.function in _mapped:
180
+ self.function = _mapped[self.function]
181
+
182
+ func_name = "_h_" + self.function
183
+ if hasattr(self, func_name):
184
+ self._function = getattr(self, func_name)
185
+ else:
186
+ functionlist = [x[3:] for x in dir(self)
187
+ if x.startswith('_h_')]
188
+ raise ValueError("function must be a callable or one of " +
189
+ ", ".join(functionlist))
190
+ self._function = getattr(self, "_h_"+self.function)
191
+ elif callable(self.function):
192
+ allow_one = False
193
+ if hasattr(self.function, 'func_code') or \
194
+ hasattr(self.function, '__code__'):
195
+ val = self.function
196
+ allow_one = True
197
+ elif hasattr(self.function, "__call__"):
198
+ val = self.function.__call__.__func__
199
+ else:
200
+ raise ValueError("Cannot determine number of arguments to "
201
+ "function")
202
+
203
+ argcount = val.__code__.co_argcount
204
+ if allow_one and argcount == 1:
205
+ self._function = self.function
206
+ elif argcount == 2:
207
+ self._function = self.function.__get__(self, Rbf)
208
+ else:
209
+ raise ValueError("Function argument must take 1 or 2 "
210
+ "arguments.")
211
+
212
+ a0 = self._function(r)
213
+ if a0.shape != r.shape:
214
+ raise ValueError("Callable must take array and return array of "
215
+ "the same shape")
216
+ return a0
217
+
218
+ def __init__(self, *args, **kwargs):
219
+ # `args` can be a variable number of arrays; we flatten them and store
220
+ # them as a single 2-D array `xi` of shape (n_args-1, array_size),
221
+ # plus a 1-D array `di` for the values.
222
+ # All arrays must have the same number of elements
223
+ self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
224
+ for a in args[:-1]])
225
+ self.N = self.xi.shape[-1]
226
+
227
+ self.mode = kwargs.pop('mode', '1-D')
228
+
229
+ if self.mode == '1-D':
230
+ self.di = np.asarray(args[-1]).flatten()
231
+ self._target_dim = 1
232
+ elif self.mode == 'N-D':
233
+ self.di = np.asarray(args[-1])
234
+ self._target_dim = self.di.shape[-1]
235
+ else:
236
+ raise ValueError("Mode has to be 1-D or N-D.")
237
+
238
+ if not all([x.size == self.di.shape[0] for x in self.xi]):
239
+ raise ValueError("All arrays must be equal length.")
240
+
241
+ self.norm = kwargs.pop('norm', 'euclidean')
242
+ self.epsilon = kwargs.pop('epsilon', None)
243
+ if self.epsilon is None:
244
+ # default epsilon is the "the average distance between nodes" based
245
+ # on a bounding hypercube
246
+ ximax = np.amax(self.xi, axis=1)
247
+ ximin = np.amin(self.xi, axis=1)
248
+ edges = ximax - ximin
249
+ edges = edges[np.nonzero(edges)]
250
+ self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
251
+
252
+ self.smooth = kwargs.pop('smooth', 0.0)
253
+ self.function = kwargs.pop('function', 'multiquadric')
254
+
255
+ # attach anything left in kwargs to self for use by any user-callable
256
+ # function or to save on the object returned.
257
+ for item, value in kwargs.items():
258
+ setattr(self, item, value)
259
+
260
+ # Compute weights
261
+ if self._target_dim > 1: # If we have more than one target dimension,
262
+ # we first factorize the matrix
263
+ self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
264
+ lu, piv = linalg.lu_factor(self.A)
265
+ for i in range(self._target_dim):
266
+ self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
267
+ else:
268
+ self.nodes = linalg.solve(self.A, self.di)
269
+
270
+ @property
271
+ def A(self):
272
+ # this only exists for backwards compatibility: self.A was available
273
+ # and, at least technically, public.
274
+ r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
275
+ return self._init_function(r) - np.eye(self.N)*self.smooth
276
+
277
+ def _call_norm(self, x1, x2):
278
+ return cdist(x1.T, x2.T, self.norm)
279
+
280
+ def __call__(self, *args):
281
+ args = [np.asarray(x) for x in args]
282
+ if not all([x.shape == y.shape for x in args for y in args]):
283
+ raise ValueError("Array lengths must be equal")
284
+ if self._target_dim > 1:
285
+ shp = args[0].shape + (self._target_dim,)
286
+ else:
287
+ shp = args[0].shape
288
+ xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
289
+ r = self._call_norm(xa, self.xi)
290
+ return np.dot(self._function(r), self.nodes).reshape(shp)
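Editorial usage sketch (not part of the scipy source): the `function` parameter documented above also accepts a callable of the form ``(self, r)``; ``self.epsilon`` (and any extra keyword arguments passed to the constructor) is available on the instance inside it.

import numpy as np
from scipy.interpolate import Rbf

def bump(self, r):
    # a Gaussian-like kernel; `self.epsilon` is filled in by Rbf if not given
    return np.exp(-(r / self.epsilon) ** 2)

rng = np.random.default_rng(0)
x, y, d = rng.random((3, 20))
rbfi = Rbf(x, y, d, function=bump)
print(rbfi(0.5, 0.5))   # interpolated value at (0.5, 0.5)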
venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py ADDED
@@ -0,0 +1,550 @@
1
+ """Module for RBF interpolation."""
2
+ import warnings
3
+ from itertools import combinations_with_replacement
4
+
5
+ import numpy as np
6
+ from numpy.linalg import LinAlgError
7
+ from scipy.spatial import KDTree
8
+ from scipy.special import comb
9
+ from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
10
+
11
+ from ._rbfinterp_pythran import (_build_system,
12
+ _build_evaluation_coefficients,
13
+ _polynomial_matrix)
14
+
15
+
16
+ __all__ = ["RBFInterpolator"]
17
+
18
+
19
+ # These RBFs are implemented.
20
+ _AVAILABLE = {
21
+ "linear",
22
+ "thin_plate_spline",
23
+ "cubic",
24
+ "quintic",
25
+ "multiquadric",
26
+ "inverse_multiquadric",
27
+ "inverse_quadratic",
28
+ "gaussian"
29
+ }
30
+
31
+
32
+ # The shape parameter does not need to be specified when using these RBFs.
33
+ _SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
34
+
35
+
36
+ # For RBFs that are conditionally positive definite of order m, the interpolant
37
+ # should include polynomial terms with degree >= m - 1. Define the minimum
38
+ # degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
39
+ # Approximation Methods with MATLAB". The RBFs that are not in this dictionary
40
+ # are positive definite and do not need polynomial terms.
41
+ _NAME_TO_MIN_DEGREE = {
42
+ "multiquadric": 0,
43
+ "linear": 0,
44
+ "thin_plate_spline": 1,
45
+ "cubic": 1,
46
+ "quintic": 2
47
+ }
48
+
49
+
50
+ def _monomial_powers(ndim, degree):
51
+ """Return the powers for each monomial in a polynomial.
52
+
53
+ Parameters
54
+ ----------
55
+ ndim : int
56
+ Number of variables in the polynomial.
57
+ degree : int
58
+ Degree of the polynomial.
59
+
60
+ Returns
61
+ -------
62
+ (nmonos, ndim) int ndarray
63
+ Array where each row contains the powers for each variable in a
64
+ monomial.
65
+
66
+ """
67
+ nmonos = comb(degree + ndim, ndim, exact=True)
68
+ out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
69
+ count = 0
70
+ for deg in range(degree + 1):
71
+ for mono in combinations_with_replacement(range(ndim), deg):
72
+ # `mono` is a tuple of variables in the current monomial with
73
+ # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
74
+ for var in mono:
75
+ out[count, var] += 1
76
+
77
+ count += 1
78
+
79
+ return out
80
+
81
+
82
+ def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
83
+ """Build and solve the RBF interpolation system of equations.
84
+
85
+ Parameters
86
+ ----------
87
+ y : (P, N) float ndarray
88
+ Data point coordinates.
89
+ d : (P, S) float ndarray
90
+ Data values at `y`.
91
+ smoothing : (P,) float ndarray
92
+ Smoothing parameter for each data point.
93
+ kernel : str
94
+ Name of the RBF.
95
+ epsilon : float
96
+ Shape parameter.
97
+ powers : (R, N) int ndarray
98
+ The exponents for each monomial in the polynomial.
99
+
100
+ Returns
101
+ -------
102
+ coeffs : (P + R, S) float ndarray
103
+ Coefficients for each RBF and monomial.
104
+ shift : (N,) float ndarray
105
+ Domain shift used to create the polynomial matrix.
106
+ scale : (N,) float ndarray
107
+ Domain scaling used to create the polynomial matrix.
108
+
109
+ """
110
+ lhs, rhs, shift, scale = _build_system(
111
+ y, d, smoothing, kernel, epsilon, powers
112
+ )
113
+ _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
114
+ if info < 0:
115
+ raise ValueError(f"The {-info}-th argument had an illegal value.")
116
+ elif info > 0:
117
+ msg = "Singular matrix."
118
+ nmonos = powers.shape[0]
119
+ if nmonos > 0:
120
+ pmat = _polynomial_matrix((y - shift)/scale, powers)
121
+ rank = np.linalg.matrix_rank(pmat)
122
+ if rank < nmonos:
123
+ msg = (
124
+ "Singular matrix. The matrix of monomials evaluated at "
125
+ "the data point coordinates does not have full column "
126
+ f"rank ({rank}/{nmonos})."
127
+ )
128
+
129
+ raise LinAlgError(msg)
130
+
131
+ return shift, scale, coeffs
132
+
133
+
134
+ class RBFInterpolator:
135
+ """Radial basis function (RBF) interpolation in N dimensions.
136
+
137
+ Parameters
138
+ ----------
139
+ y : (npoints, ndims) array_like
140
+ 2-D array of data point coordinates.
141
+ d : (npoints, ...) array_like
142
+ N-D array of data values at `y`. The length of `d` along the first
143
+ axis must be equal to the length of `y`. Unlike some interpolators, the
144
+ interpolation axis cannot be changed.
145
+ neighbors : int, optional
146
+ If specified, the value of the interpolant at each evaluation point
147
+ will be computed using only this many nearest data points. All the data
148
+ points are used by default.
149
+ smoothing : float or (npoints, ) array_like, optional
150
+ Smoothing parameter. The interpolant perfectly fits the data when this
151
+ is set to 0. For large values, the interpolant approaches a least
152
+ squares fit of a polynomial with the specified degree. Default is 0.
153
+ kernel : str, optional
154
+ Type of RBF. This should be one of
155
+
156
+ - 'linear' : ``-r``
157
+ - 'thin_plate_spline' : ``r**2 * log(r)``
158
+ - 'cubic' : ``r**3``
159
+ - 'quintic' : ``-r**5``
160
+ - 'multiquadric' : ``-sqrt(1 + r**2)``
161
+ - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
162
+ - 'inverse_quadratic' : ``1/(1 + r**2)``
163
+ - 'gaussian' : ``exp(-r**2)``
164
+
165
+ Default is 'thin_plate_spline'.
166
+ epsilon : float, optional
167
+ Shape parameter that scales the input to the RBF. If `kernel` is
168
+ 'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
169
+ 1 and can be ignored because it has the same effect as scaling the
170
+ smoothing parameter. Otherwise, this must be specified.
171
+ degree : int, optional
172
+ Degree of the added polynomial. For some RBFs the interpolant may not
173
+ be well-posed if the polynomial degree is too small. Those RBFs and
174
+ their corresponding minimum degrees are
175
+
176
+ - 'multiquadric' : 0
177
+ - 'linear' : 0
178
+ - 'thin_plate_spline' : 1
179
+ - 'cubic' : 1
180
+ - 'quintic' : 2
181
+
182
+ The default value is the minimum degree for `kernel` or 0 if there is
183
+ no minimum degree. Set this to -1 for no added polynomial.
184
+
185
+ Notes
186
+ -----
187
+ An RBF is a scalar valued function in N-dimensional space whose value at
188
+ :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
189
+ is the center of the RBF.
190
+
191
+ An RBF interpolant for the vector of data values :math:`d`, which are from
192
+ locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
193
+ plus a polynomial with a specified degree. The RBF interpolant is written
194
+ as
195
+
196
+ .. math::
197
+ f(x) = K(x, y) a + P(x) b,
198
+
199
+ where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
200
+ evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
201
+ monomials, which span polynomials with the specified degree, evaluated at
202
+ :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
203
+ linear equations
204
+
205
+ .. math::
206
+ (K(y, y) + \\lambda I) a + P(y) b = d
207
+
208
+ and
209
+
210
+ .. math::
211
+ P(y)^T a = 0,
212
+
213
+ where :math:`\\lambda` is a non-negative smoothing parameter that controls
214
+ how well we want to fit the data. The data are fit exactly when the
215
+ smoothing parameter is 0.
216
+
217
+ The above system is uniquely solvable if the following requirements are
218
+ met:
219
+
220
+ - :math:`P(y)` must have full column rank. :math:`P(y)` always has full
221
+ column rank when `degree` is -1 or 0. When `degree` is 1,
222
+ :math:`P(y)` has full column rank if the data point locations are not
223
+ all collinear (N=2), coplanar (N=3), etc.
224
+ - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
225
+ 'cubic', or 'quintic', then `degree` must not be lower than the
226
+ minimum value listed above.
227
+ - If `smoothing` is 0, then each data point location must be distinct.
228
+
229
+ When using an RBF that is not scale invariant ('multiquadric',
230
+ 'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
231
+ shape parameter must be chosen (e.g., through cross validation). Smaller
232
+ values for the shape parameter correspond to wider RBFs. The problem can
233
+ become ill-conditioned or singular when the shape parameter is too small.
234
+
235
+ The memory required to solve for the RBF interpolation coefficients
236
+ increases quadratically with the number of data points, which can become
237
+ impractical when interpolating more than about a thousand data points.
238
+ To overcome memory limitations for large interpolation problems, the
239
+ `neighbors` argument can be specified to compute an RBF interpolant for
240
+ each evaluation point using only the nearest data points.
241
+
242
+ .. versionadded:: 1.7.0
243
+
244
+ See Also
245
+ --------
246
+ NearestNDInterpolator
247
+ LinearNDInterpolator
248
+ CloughTocher2DInterpolator
249
+
250
+ References
251
+ ----------
252
+ .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
253
+ World Scientific Publishing Co.
254
+
255
+ .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
256
+
257
+ .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
258
+
259
+ .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
260
+
261
+ Examples
262
+ --------
263
+ Demonstrate interpolating scattered data to a grid in 2-D.
264
+
265
+ >>> import numpy as np
266
+ >>> import matplotlib.pyplot as plt
267
+ >>> from scipy.interpolate import RBFInterpolator
268
+ >>> from scipy.stats.qmc import Halton
269
+
270
+ >>> rng = np.random.default_rng()
271
+ >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
272
+ >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
273
+
274
+ >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
275
+ >>> xflat = xgrid.reshape(2, -1).T
276
+ >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
277
+ >>> ygrid = yflat.reshape(50, 50)
278
+
279
+ >>> fig, ax = plt.subplots()
280
+ >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
281
+ >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
282
+ >>> fig.colorbar(p)
283
+ >>> plt.show()
284
+
285
+ """
286
+
287
+ def __init__(self, y, d,
288
+ neighbors=None,
289
+ smoothing=0.0,
290
+ kernel="thin_plate_spline",
291
+ epsilon=None,
292
+ degree=None):
293
+ y = np.asarray(y, dtype=float, order="C")
294
+ if y.ndim != 2:
295
+ raise ValueError("`y` must be a 2-dimensional array.")
296
+
297
+ ny, ndim = y.shape
298
+
299
+ d_dtype = complex if np.iscomplexobj(d) else float
300
+ d = np.asarray(d, dtype=d_dtype, order="C")
301
+ if d.shape[0] != ny:
302
+ raise ValueError(
303
+ f"Expected the first axis of `d` to have length {ny}."
304
+ )
305
+
306
+ d_shape = d.shape[1:]
307
+ d = d.reshape((ny, -1))
308
+ # If `d` is complex, convert it to a float array with twice as many
309
+ # columns. Otherwise, the LHS matrix would need to be converted to
310
+ # complex and take up 2x more memory than necessary.
311
+ d = d.view(float)
312
+
313
+ if np.isscalar(smoothing):
314
+ smoothing = np.full(ny, smoothing, dtype=float)
315
+ else:
316
+ smoothing = np.asarray(smoothing, dtype=float, order="C")
317
+ if smoothing.shape != (ny,):
318
+ raise ValueError(
319
+ "Expected `smoothing` to be a scalar or have shape "
320
+ f"({ny},)."
321
+ )
322
+
323
+ kernel = kernel.lower()
324
+ if kernel not in _AVAILABLE:
325
+ raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
326
+
327
+ if epsilon is None:
328
+ if kernel in _SCALE_INVARIANT:
329
+ epsilon = 1.0
330
+ else:
331
+ raise ValueError(
332
+ "`epsilon` must be specified if `kernel` is not one of "
333
+ f"{_SCALE_INVARIANT}."
334
+ )
335
+ else:
336
+ epsilon = float(epsilon)
337
+
338
+ min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
339
+ if degree is None:
340
+ degree = max(min_degree, 0)
341
+ else:
342
+ degree = int(degree)
343
+ if degree < -1:
344
+ raise ValueError("`degree` must be at least -1.")
345
+ elif -1 < degree < min_degree:
346
+ warnings.warn(
347
+ f"`degree` should not be below {min_degree} except -1 "
348
+ f"when `kernel` is '{kernel}'."
349
+ f"The interpolant may not be uniquely "
350
+ f"solvable, and the smoothing parameter may have an "
351
+ f"unintuitive effect.",
352
+ UserWarning, stacklevel=2
353
+ )
354
+
355
+ if neighbors is None:
356
+ nobs = ny
357
+ else:
358
+ # Make sure the number of nearest neighbors used for interpolation
359
+ # does not exceed the number of observations.
360
+ neighbors = int(min(neighbors, ny))
361
+ nobs = neighbors
362
+
363
+ powers = _monomial_powers(ndim, degree)
364
+ # The polynomial matrix must have full column rank in order for the
365
+ # interpolant to be well-posed, which is not possible if there are
366
+ # fewer observations than monomials.
367
+ if powers.shape[0] > nobs:
368
+ raise ValueError(
369
+ f"At least {powers.shape[0]} data points are required when "
370
+ f"`degree` is {degree} and the number of dimensions is {ndim}."
371
+ )
372
+
373
+ if neighbors is None:
374
+ shift, scale, coeffs = _build_and_solve_system(
375
+ y, d, smoothing, kernel, epsilon, powers
376
+ )
377
+
378
+ # Make these attributes private since they do not always exist.
379
+ self._shift = shift
380
+ self._scale = scale
381
+ self._coeffs = coeffs
382
+
383
+ else:
384
+ self._tree = KDTree(y)
385
+
386
+ self.y = y
387
+ self.d = d
388
+ self.d_shape = d_shape
389
+ self.d_dtype = d_dtype
390
+ self.neighbors = neighbors
391
+ self.smoothing = smoothing
392
+ self.kernel = kernel
393
+ self.epsilon = epsilon
394
+ self.powers = powers
395
+
396
+ def _chunk_evaluator(
397
+ self,
398
+ x,
399
+ y,
400
+ shift,
401
+ scale,
402
+ coeffs,
403
+ memory_budget=1000000
404
+ ):
405
+ """
406
+ Evaluate the interpolation while controlling memory consumption.
407
+ We chunk the input if we need more memory than specified.
408
+
409
+ Parameters
410
+ ----------
411
+ x : (Q, N) float ndarray
412
+ array of points on which to evaluate
413
+ y: (P, N) float ndarray
414
+ array of points on which we know function values
415
+ shift: (N, ) ndarray
416
+ Domain shift used to create the polynomial matrix.
417
+ scale : (N,) float ndarray
418
+ Domain scaling used to create the polynomial matrix.
419
+ coeffs: (P+R, S) float ndarray
420
+ Coefficients in front of basis functions
421
+ memory_budget: int
422
+ Total amount of memory (in units of sizeof(float)) we wish
423
+ to devote for storing the array of coefficients for
424
+ interpolated points. If we need more memory than that, we
425
+ chunk the input.
426
+
427
+ Returns
428
+ -------
429
+ (Q, S) float ndarray
430
+ Interpolated array
431
+ """
432
+ nx, ndim = x.shape
433
+ if self.neighbors is None:
434
+ nnei = len(y)
435
+ else:
436
+ nnei = self.neighbors
437
+ # in each chunk we consume the same space we already occupy
438
+ chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
439
+ if chunksize <= nx:
440
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
441
+ for i in range(0, nx, chunksize):
442
+ vec = _build_evaluation_coefficients(
443
+ x[i:i + chunksize, :],
444
+ y,
445
+ self.kernel,
446
+ self.epsilon,
447
+ self.powers,
448
+ shift,
449
+ scale)
450
+ out[i:i + chunksize, :] = np.dot(vec, coeffs)
451
+ else:
452
+ vec = _build_evaluation_coefficients(
453
+ x,
454
+ y,
455
+ self.kernel,
456
+ self.epsilon,
457
+ self.powers,
458
+ shift,
459
+ scale)
460
+ out = np.dot(vec, coeffs)
461
+ return out
462
+
463
+ def __call__(self, x):
464
+ """Evaluate the interpolant at `x`.
465
+
466
+ Parameters
467
+ ----------
468
+ x : (Q, N) array_like
469
+ Evaluation point coordinates.
470
+
471
+ Returns
472
+ -------
473
+ (Q, ...) ndarray
474
+ Values of the interpolant at `x`.
475
+
476
+ """
477
+ x = np.asarray(x, dtype=float, order="C")
478
+ if x.ndim != 2:
479
+ raise ValueError("`x` must be a 2-dimensional array.")
480
+
481
+ nx, ndim = x.shape
482
+ if ndim != self.y.shape[1]:
483
+ raise ValueError("Expected the second axis of `x` to have length "
484
+ f"{self.y.shape[1]}.")
485
+
486
+ # Our memory budget for storing RBF coefficients is
487
+ # based on how many floats in memory we already occupy
488
+ # If this number is below 1e6 we just use 1e6
489
+ # This memory budget is used to decide how we chunk
490
+ # the inputs
491
+ memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
492
+
493
+ if self.neighbors is None:
494
+ out = self._chunk_evaluator(
495
+ x,
496
+ self.y,
497
+ self._shift,
498
+ self._scale,
499
+ self._coeffs,
500
+ memory_budget=memory_budget)
501
+ else:
502
+ # Get the indices of the k nearest observation points to each
503
+ # evaluation point.
504
+ _, yindices = self._tree.query(x, self.neighbors)
505
+ if self.neighbors == 1:
506
+ # `KDTree` squeezes the output when neighbors=1.
507
+ yindices = yindices[:, None]
508
+
509
+ # Multiple evaluation points may have the same neighborhood of
510
+ # observation points. Make the neighborhoods unique so that we only
511
+ # compute the interpolation coefficients once for each
512
+ # neighborhood.
513
+ yindices = np.sort(yindices, axis=1)
514
+ yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
515
+ inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
516
+ # `inv` tells us which neighborhood will be used by each evaluation
517
+ # point. Now we find which evaluation points will be using each
518
+ # neighborhood.
519
+ xindices = [[] for _ in range(len(yindices))]
520
+ for i, j in enumerate(inv):
521
+ xindices[j].append(i)
522
+
523
+ out = np.empty((nx, self.d.shape[1]), dtype=float)
524
+ for xidx, yidx in zip(xindices, yindices):
525
+ # `yidx` are the indices of the observations in this
526
+ # neighborhood. `xidx` are the indices of the evaluation points
527
+ # that are using this neighborhood.
528
+ xnbr = x[xidx]
529
+ ynbr = self.y[yidx]
530
+ dnbr = self.d[yidx]
531
+ snbr = self.smoothing[yidx]
532
+ shift, scale, coeffs = _build_and_solve_system(
533
+ ynbr,
534
+ dnbr,
535
+ snbr,
536
+ self.kernel,
537
+ self.epsilon,
538
+ self.powers,
539
+ )
540
+ out[xidx] = self._chunk_evaluator(
541
+ xnbr,
542
+ ynbr,
543
+ shift,
544
+ scale,
545
+ coeffs,
546
+ memory_budget=memory_budget)
547
+
548
+ out = out.view(self.d_dtype)
549
+ out = out.reshape((nx, ) + self.d_shape)
550
+ return out
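Editorial usage sketch (not part of the scipy source): as the class Notes explain, memory for the global system grows quadratically with the number of data points, and the ``neighbors`` argument switches to local interpolants built from the nearest observations.

import numpy as np
from scipy.interpolate import RBFInterpolator

rng = np.random.default_rng(0)
yobs = rng.uniform(-1, 1, (2000, 2))                  # scattered 2-D observations
dobs = np.sum(yobs, axis=1) * np.exp(-6 * np.sum(yobs**2, axis=1))
xeval = rng.uniform(-1, 1, (200, 2))

dense = RBFInterpolator(yobs, dobs)(xeval)             # one global dense system
local = RBFInterpolator(yobs, dobs, neighbors=50)(xeval)
print(np.max(np.abs(dense - local)))                   # the two usually agree closely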