diff --git a/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8aa47e2015a8deb249a751213955398737b3320b
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a322f1416dfe5118ab829ae03b1ae52eff67c1b1047fdadd230dfcde4661b26
+size 9372
diff --git a/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..66c65face8d81db7c219506b535f442858010ae8
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/11.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbc015ef43b7e35900effc49169eba511c5ed150c7a219254ba6ccde443569d5
+size 9387
diff --git a/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d8df74b597a9d196321a1d1357658233f78adae0
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/5.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6f6b67a50f3e2aa755f42e43f2799c14c2150c5a818dc1600d9c109135e5787
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcefe1e7b69d0e53b5bd2c6cc9fdd88c77813fc9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..263b3297c28274e4825d42e8a462553c73a54393
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abe440eaacca5556842e108be3fc3da617b13e06
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7064cf2b78062271bd3c65aa5502e6ae978758a3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8db6563ed639b8c530066843b8c9014a8afcbc4d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55ce64159a2656edf9c7ef95e2f5508f7a2407cd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..413dbfb550c965863b2c490b83164f86cf2f0e0e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1378e473b136aa04f28db815950be5ec3b98181
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..a73512d35eef168f625a1942a87d248e73a71aa2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py
@@ -0,0 +1,202 @@
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+from scipy.cluster.hierarchy import DisjointSet
+import string
+
+
+def generate_random_token():
+ k = len(string.ascii_letters)
+ tokens = list(np.arange(k, dtype=int))
+ tokens += list(np.arange(k, dtype=float))
+ tokens += list(string.ascii_letters)
+ tokens += [None for i in range(k)]
+ tokens = np.array(tokens, dtype=object)
+ rng = np.random.RandomState(seed=0)
+
+ while 1:
+ size = rng.randint(1, 3)
+ element = rng.choice(tokens, size)
+ if size == 1:
+ yield element[0]
+ else:
+ yield tuple(element)
+
+
+def get_elements(n):
+ # dict is deterministic without difficulty of comparing numpy ints
+ elements = {}
+ for element in generate_random_token():
+ if element not in elements:
+ elements[element] = len(elements)
+ if len(elements) >= n:
+ break
+ return list(elements.keys())
+
+
+def test_init():
+ n = 10
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+ assert dis.n_subsets == n
+ assert list(dis) == elements
+
+
+def test_len():
+ n = 10
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+ assert len(dis) == n
+
+ dis.add("dummy")
+ assert len(dis) == n + 1
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_contains(n):
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+ for x in elements:
+ assert x in dis
+
+ assert "dummy" not in dis
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_add(n):
+ elements = get_elements(n)
+ dis1 = DisjointSet(elements)
+
+ dis2 = DisjointSet()
+ for i, x in enumerate(elements):
+ dis2.add(x)
+ assert len(dis2) == i + 1
+
+ # test idempotency by adding element again
+ dis2.add(x)
+ assert len(dis2) == i + 1
+
+ assert list(dis1) == list(dis2)
+
+
+def test_element_not_present():
+ elements = get_elements(n=10)
+ dis = DisjointSet(elements)
+
+ with assert_raises(KeyError):
+ dis["dummy"]
+
+ with assert_raises(KeyError):
+ dis.merge(elements[0], "dummy")
+
+ with assert_raises(KeyError):
+ dis.connected(elements[0], "dummy")
+
+
+@pytest.mark.parametrize("direction", ["forwards", "backwards"])
+@pytest.mark.parametrize("n", [10, 100])
+def test_linear_union_sequence(n, direction):
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+ assert elements == list(dis)
+
+ indices = list(range(n - 1))
+ if direction == "backwards":
+ indices = indices[::-1]
+
+ for it, i in enumerate(indices):
+ assert not dis.connected(elements[i], elements[i + 1])
+ assert dis.merge(elements[i], elements[i + 1])
+ assert dis.connected(elements[i], elements[i + 1])
+ assert dis.n_subsets == n - 1 - it
+
+ roots = [dis[i] for i in elements]
+ if direction == "forwards":
+ assert all(elements[0] == r for r in roots)
+ else:
+ assert all(elements[-2] == r for r in roots)
+ assert not dis.merge(elements[0], elements[-1])
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_self_unions(n):
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+
+ for x in elements:
+ assert dis.connected(x, x)
+ assert not dis.merge(x, x)
+ assert dis.connected(x, x)
+ assert dis.n_subsets == len(elements)
+
+ assert elements == list(dis)
+ roots = [dis[x] for x in elements]
+ assert elements == roots
+
+
+@pytest.mark.parametrize("order", ["ab", "ba"])
+@pytest.mark.parametrize("n", [10, 100])
+def test_equal_size_ordering(n, order):
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+
+ rng = np.random.RandomState(seed=0)
+ indices = np.arange(n)
+ rng.shuffle(indices)
+
+ for i in range(0, len(indices), 2):
+ a, b = elements[indices[i]], elements[indices[i + 1]]
+ if order == "ab":
+ assert dis.merge(a, b)
+ else:
+ assert dis.merge(b, a)
+
+ expected = elements[min(indices[i], indices[i + 1])]
+ assert dis[a] == expected
+ assert dis[b] == expected
+
+
+@pytest.mark.parametrize("kmax", [5, 10])
+def test_binary_tree(kmax):
+ n = 2**kmax
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+ rng = np.random.RandomState(seed=0)
+
+ for k in 2**np.arange(kmax):
+ for i in range(0, n, 2 * k):
+ r1, r2 = rng.randint(0, k, size=2)
+ a, b = elements[i + r1], elements[i + k + r2]
+ assert not dis.connected(a, b)
+ assert dis.merge(a, b)
+ assert dis.connected(a, b)
+
+ assert elements == list(dis)
+ roots = [dis[i] for i in elements]
+ expected_indices = np.arange(n) - np.arange(n) % (2 * k)
+ expected = [elements[i] for i in expected_indices]
+ assert roots == expected
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_subsets(n):
+ elements = get_elements(n)
+ dis = DisjointSet(elements)
+
+ rng = np.random.RandomState(seed=0)
+ for i, j in rng.randint(0, n, (n, 2)):
+ x = elements[i]
+ y = elements[j]
+
+ expected = {element for element in dis if {dis[element]} == {dis[x]}}
+ assert dis.subset_size(x) == len(dis.subset(x))
+ assert expected == dis.subset(x)
+
+ expected = {dis[element]: set() for element in dis}
+ for element in dis:
+ expected[dis[element]].add(element)
+ expected = list(expected.values())
+ assert expected == dis.subsets()
+
+ dis.merge(x, y)
+ assert dis.subset(x) == dis.subset(y)
diff --git a/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py
new file mode 100644
index 0000000000000000000000000000000000000000..10b1416b83b44a3f5f4c8e9af396fab21f6ae1f3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py
@@ -0,0 +1,421 @@
+import warnings
+import sys
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import (
+ assert_array_equal, assert_equal, assert_, suppress_warnings
+)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
+ ClusterError, _krandinit)
+from scipy.cluster import _vq
+from scipy.conftest import array_api_compatible
+from scipy.sparse._sputils import matrix
+
+from scipy._lib._array_api import (
+ SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal
+)
+
+pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
+skip_if_array_api = pytest.mark.skip_if_array_api
+
+TESTDATA_2D = np.array([
+ -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
+ -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
+ 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
+ -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
+ -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
+ -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
+ 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
+ -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
+ -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
+ -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
+ 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
+ -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
+ 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
+ -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
+ 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
+ -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
+ 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
+ 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
+ -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
+ 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
+ -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
+ -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
+ -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
+ 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
+ -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
+ 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
+ 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
+ -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
+ 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
+ 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
+ -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
+ -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
+ 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
+ -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
+ -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
+ -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
+ 2.11]).reshape((200, 2))
+
+
+# Global data
+X = np.array([[3.0, 3], [4, 3], [4, 2],
+ [9, 2], [5, 1], [6, 2], [9, 4],
+ [5, 2], [5, 4], [7, 4], [6, 5]])
+
+CODET1 = np.array([[3.0000, 3.0000],
+ [6.2000, 4.0000],
+ [5.8000, 1.8000]])
+
+CODET2 = np.array([[11.0/3, 8.0/3],
+ [6.7500, 4.2500],
+ [6.2500, 1.7500]])
+
+LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
+
+
+class TestWhiten:
+
+ def test_whiten(self, xp):
+ desired = xp.asarray([[5.08738849, 2.97091878],
+ [3.19909255, 0.69660580],
+ [4.51041982, 0.02640918],
+ [4.38567074, 0.95120889],
+ [2.32191480, 1.63195503]])
+
+ obs = xp.asarray([[0.98744510, 0.82766775],
+ [0.62093317, 0.19406729],
+ [0.87545741, 0.00735733],
+ [0.85124403, 0.26499712],
+ [0.45067590, 0.45464607]])
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
+
+ def test_whiten_zero_std(self, xp):
+ desired = xp.asarray([[0., 1.0, 2.86666544],
+ [0., 1.0, 1.32460034],
+ [0., 1.0, 3.74382172]])
+
+ obs = xp.asarray([[0., 1., 0.74109533],
+ [0., 1., 0.34243798],
+ [0., 1., 0.96785929]])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
+
+ assert_equal(len(w), 1)
+ assert_(issubclass(w[-1].category, RuntimeWarning))
+
+ def test_whiten_not_finite(self, xp):
+ for bad_value in xp.nan, xp.inf, -xp.inf:
+ obs = xp.asarray([[0.98744510, bad_value],
+ [0.62093317, 0.19406729],
+ [0.87545741, 0.00735733],
+ [0.85124403, 0.26499712],
+ [0.45067590, 0.45464607]])
+ assert_raises(ValueError, whiten, obs)
+
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
+ reason='`np.matrix` unsupported in array API mode')
+ def test_whiten_not_finite_matrix(self, xp):
+ for bad_value in np.nan, np.inf, -np.inf:
+ obs = matrix([[0.98744510, bad_value],
+ [0.62093317, 0.19406729],
+ [0.87545741, 0.00735733],
+ [0.85124403, 0.26499712],
+ [0.45067590, 0.45464607]])
+ assert_raises(ValueError, whiten, obs)
+
+
+class TestVq:
+
+ @skip_if_array_api(cpu_only=True)
+ def test_py_vq(self, xp):
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ # label1.dtype varies between int32 and int64 over platforms
+ label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0]
+ xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64),
+ check_dtype=False)
+
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
+ reason='`np.matrix` unsupported in array API mode')
+ def test_py_vq_matrix(self, xp):
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ # label1.dtype varies between int32 and int64 over platforms
+ label1 = py_vq(matrix(X), matrix(initc))[0]
+ assert_array_equal(label1, LABEL1)
+
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
+ def test_vq(self, xp):
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc))
+ assert_array_equal(label1, LABEL1)
+ _, _ = vq(xp.asarray(X), xp.asarray(initc))
+
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
+ reason='`np.matrix` unsupported in array API mode')
+ def test_vq_matrix(self, xp):
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ label1, _ = _vq.vq(matrix(X), matrix(initc))
+ assert_array_equal(label1, LABEL1)
+ _, _ = vq(matrix(X), matrix(initc))
+
+ @skip_if_array_api(cpu_only=True)
+ def test_vq_1d(self, xp):
+ # Test special rank 1 vq algo, python implementation.
+ data = X[:, 0]
+ initc = data[:3]
+ a, b = _vq.vq(data, initc)
+ data = xp.asarray(data)
+ initc = xp.asarray(initc)
+ ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
+ # ta.dtype varies between int32 and int64 over platforms
+ xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False)
+ xp_assert_equal(tb, xp.asarray(b))
+
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
+ def test__vq_sametype(self, xp):
+ a = xp.asarray([1.0, 2.0], dtype=xp.float64)
+ b = a.astype(xp.float32)
+ assert_raises(TypeError, _vq.vq, a, b)
+
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
+ def test__vq_invalid_type(self, xp):
+ a = xp.asarray([1, 2], dtype=int)
+ assert_raises(TypeError, _vq.vq, a, a)
+
+ @skip_if_array_api(cpu_only=True)
+ def test_vq_large_nfeat(self, xp):
+ X = np.random.rand(20, 20)
+ code_book = np.random.rand(3, 20)
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(
+ xp.asarray(X), xp.asarray(code_book)
+ )
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
+ # codes1.dtype varies between int32 and int64 over platforms
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
+
+ X = X.astype(np.float32)
+ code_book = code_book.astype(np.float32)
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(
+ xp.asarray(X), xp.asarray(code_book)
+ )
+ xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5)
+ # codes1.dtype varies between int32 and int64 over platforms
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
+
+ @skip_if_array_api(cpu_only=True)
+ def test_vq_large_features(self, xp):
+ X = np.random.rand(10, 5) * 1000000
+ code_book = np.random.rand(2, 5) * 1000000
+
+ codes0, dis0 = _vq.vq(X, code_book)
+ codes1, dis1 = py_vq(
+ xp.asarray(X), xp.asarray(code_book)
+ )
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
+ # codes1.dtype varies between int32 and int64 over platforms
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
+
+
+# Whole class skipped on GPU for now;
+# once pdist/cdist are hooked up for CuPy, more tests will work
+@skip_if_array_api(cpu_only=True)
+class TestKMean:
+
+ def test_large_features(self, xp):
+ # Generate a data set with large values, and run kmeans on it to
+ # (regression for 1077).
+ d = 300
+ n = 100
+
+ m1 = np.random.randn(d)
+ m2 = np.random.randn(d)
+ x = 10000 * np.random.randn(n, d) - 20000 * m1
+ y = 10000 * np.random.randn(n, d) + 20000 * m2
+
+ data = np.empty((x.shape[0] + y.shape[0], d), np.float64)
+ data[:x.shape[0]] = x
+ data[x.shape[0]:] = y
+
+ kmeans(xp.asarray(data), 2)
+
+ def test_kmeans_simple(self, xp):
+ np.random.seed(54321)
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0]
+ xp_assert_close(code1, xp.asarray(CODET2))
+
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
+ reason='`np.matrix` unsupported in array API mode')
+ def test_kmeans_simple_matrix(self, xp):
+ np.random.seed(54321)
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+ code1 = kmeans(matrix(X), matrix(initc), iter=1)[0]
+ xp_assert_close(code1, CODET2)
+
+ def test_kmeans_lost_cluster(self, xp):
+ # This will cause kmeans to have a cluster with no points.
+ data = xp.asarray(TESTDATA_2D)
+ initk = xp.asarray([[-1.8127404, -0.67128041],
+ [2.04621601, 0.07401111],
+ [-2.31149087, -0.05160469]])
+
+ kmeans(data, initk)
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ "One of the clusters is empty. Re-run kmeans with a "
+ "different initialization")
+ kmeans2(data, initk, missing='warn')
+
+ assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
+
+ def test_kmeans2_simple(self, xp):
+ np.random.seed(12345678)
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
+ arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
+ for tp in arrays:
+ code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
+ code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
+
+ xp_assert_close(code1, xp.asarray(CODET1))
+ xp_assert_close(code2, xp.asarray(CODET2))
+
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
+ reason='`np.matrix` unsupported in array API mode')
+ def test_kmeans2_simple_matrix(self, xp):
+ np.random.seed(12345678)
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
+ code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0]
+ code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0]
+
+ xp_assert_close(code1, CODET1)
+ xp_assert_close(code2, CODET2)
+
+ def test_kmeans2_rank1(self, xp):
+ data = xp.asarray(TESTDATA_2D)
+ data1 = data[:, 0]
+
+ initc = data1[:3]
+ code = copy(initc, xp=xp)
+ kmeans2(data1, code, iter=1)[0]
+ kmeans2(data1, code, iter=2)[0]
+
+ def test_kmeans2_rank1_2(self, xp):
+ data = xp.asarray(TESTDATA_2D)
+ data1 = data[:, 0]
+ kmeans2(data1, 2, iter=1)
+
+ def test_kmeans2_high_dim(self, xp):
+ # test kmeans2 when the number of dimensions exceeds the number
+ # of input points
+ data = xp.asarray(TESTDATA_2D)
+ data = xp.reshape(data, (20, 20))[:10, :]
+ kmeans2(data, 2)
+
+ def test_kmeans2_init(self, xp):
+ np.random.seed(12345)
+ data = xp.asarray(TESTDATA_2D)
+ k = 3
+
+ kmeans2(data, k, minit='points')
+ kmeans2(data[:, 1], k, minit='points') # special case (1-D)
+
+ kmeans2(data, k, minit='++')
+ kmeans2(data[:, 1], k, minit='++') # special case (1-D)
+
+ # minit='random' can give warnings, filter those
+ with suppress_warnings() as sup:
+ sup.filter(message="One of the clusters is empty. Re-run.")
+ kmeans2(data, k, minit='random')
+ kmeans2(data[:, 1], k, minit='random') # special case (1-D)
+
+ @pytest.mark.skipif(sys.platform == 'win32',
+ reason='Fails with MemoryError in Wine.')
+ def test_krandinit(self, xp):
+ data = xp.asarray(TESTDATA_2D)
+ datas = [xp.reshape(data, (200, 2)),
+ xp.reshape(data, (20, 20))[:10, :]]
+ k = int(1e6)
+ for data in datas:
+ rng = np.random.default_rng(1234)
+ init = _krandinit(data, k, rng, xp)
+ orig_cov = cov(data.T)
+ init_cov = cov(init.T)
+ xp_assert_close(orig_cov, init_cov, atol=1e-2)
+
+ def test_kmeans2_empty(self, xp):
+ # Regression test for gh-1032.
+ assert_raises(ValueError, kmeans2, xp.asarray([]), 2)
+
+ def test_kmeans_0k(self, xp):
+ # Regression test for gh-1073: fail when k arg is 0.
+ assert_raises(ValueError, kmeans, xp.asarray(X), 0)
+ assert_raises(ValueError, kmeans2, xp.asarray(X), 0)
+ assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([]))
+
+ def test_kmeans_large_thres(self, xp):
+ # Regression test for gh-1774
+ x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64)
+ res = kmeans(x, 1, thresh=1e16)
+ xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64))
+ xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()])
+
+ def test_kmeans2_kpp_low_dim(self, xp):
+ # Regression test for gh-11462
+ prev_res = xp.asarray([[-1.95266667, 0.898],
+ [-3.153375, 3.3945]], dtype=xp.float64)
+ np.random.seed(42)
+ res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++')
+ xp_assert_close(res, prev_res)
+
+ def test_kmeans2_kpp_high_dim(self, xp):
+ # Regression test for gh-11462
+ n_dim = 100
+ size = 10
+ centers = np.vstack([5 * np.ones(n_dim),
+ -5 * np.ones(n_dim)])
+ np.random.seed(42)
+ data = np.vstack([
+ np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
+ np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
+ ])
+
+ data = xp.asarray(data)
+ res, _ = kmeans2(data, 2, minit='++')
+ xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers)))
+
+ def test_kmeans_diff_convergence(self, xp):
+ # Regression test for gh-8727
+ obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64)
+ res = kmeans(obs, xp.asarray([-3., 0.99]))
+ xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64))
+ xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()])
+
+ def test_kmeans_and_kmeans2_random_seed(self, xp):
+
+ seed_list = [
+ 1234, np.random.RandomState(1234), np.random.default_rng(1234)
+ ]
+
+ for seed in seed_list:
+ seed1 = deepcopy(seed)
+ seed2 = deepcopy(seed)
+ data = xp.asarray(TESTDATA_2D)
+ # test for kmeans
+ res1, _ = kmeans(data, 2, seed=seed1)
+ res2, _ = kmeans(data, 2, seed=seed2)
+ xp_assert_close(res1, res2, xp=xp) # should be same results
+ # test for kmeans2
+ for minit in ["random", "points", "++"]:
+ res1, _ = kmeans2(data, 2, minit=minit, seed=seed1)
+ res2, _ = kmeans2(data, 2, minit=minit, seed=seed2)
+ xp_assert_close(res1, res2, xp=xp) # should be same results
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__init__.py b/venv/lib/python3.10/site-packages/scipy/interpolate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ea5ba9d88b91c252e7533249aba47998a24610d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/__init__.py
@@ -0,0 +1,201 @@
+"""
+========================================
+Interpolation (:mod:`scipy.interpolate`)
+========================================
+
+.. currentmodule:: scipy.interpolate
+
+Sub-package for objects used in interpolation.
+
+As listed below, this sub-package contains spline functions and classes,
+1-D and multidimensional (univariate and multivariate)
+interpolation classes, Lagrange and Taylor polynomial interpolators, and
+wrappers for `FITPACK `__
+and DFITPACK functions.
+
+Univariate interpolation
+========================
+
+.. autosummary::
+ :toctree: generated/
+
+ interp1d
+ BarycentricInterpolator
+ KroghInterpolator
+ barycentric_interpolate
+ krogh_interpolate
+ pchip_interpolate
+ CubicHermiteSpline
+ PchipInterpolator
+ Akima1DInterpolator
+ CubicSpline
+ PPoly
+ BPoly
+
+
+Multivariate interpolation
+==========================
+
+Unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ griddata
+ LinearNDInterpolator
+ NearestNDInterpolator
+ CloughTocher2DInterpolator
+ RBFInterpolator
+ Rbf
+ interp2d
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ interpn
+ RegularGridInterpolator
+ RectBivariateSpline
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`
+
+Tensor product polynomials:
+
+.. autosummary::
+ :toctree: generated/
+
+ NdPPoly
+ NdBSpline
+
+1-D Splines
+===========
+
+.. autosummary::
+ :toctree: generated/
+
+ BSpline
+ make_interp_spline
+ make_lsq_spline
+ make_smoothing_spline
+
+Functional interface to FITPACK routines:
+
+.. autosummary::
+ :toctree: generated/
+
+ splrep
+ splprep
+ splev
+ splint
+ sproot
+ spalde
+ splder
+ splantider
+ insert
+
+Object-oriented FITPACK interface:
+
+.. autosummary::
+ :toctree: generated/
+
+ UnivariateSpline
+ InterpolatedUnivariateSpline
+ LSQUnivariateSpline
+
+
+
+2-D Splines
+===========
+
+For data on a grid:
+
+.. autosummary::
+ :toctree: generated/
+
+ RectBivariateSpline
+ RectSphereBivariateSpline
+
+For unstructured data:
+
+.. autosummary::
+ :toctree: generated/
+
+ BivariateSpline
+ SmoothBivariateSpline
+ SmoothSphereBivariateSpline
+ LSQBivariateSpline
+ LSQSphereBivariateSpline
+
+Low-level interface to FITPACK functions:
+
+.. autosummary::
+ :toctree: generated/
+
+ bisplrep
+ bisplev
+
+Additional tools
+================
+
+.. autosummary::
+ :toctree: generated/
+
+ lagrange
+ approximate_taylor_polynomial
+ pade
+
+.. seealso::
+
+ `scipy.ndimage.map_coordinates`,
+ `scipy.ndimage.spline_filter`,
+ `scipy.signal.resample`,
+ `scipy.signal.bspline`,
+ `scipy.signal.gauss_spline`,
+ `scipy.signal.qspline1d`,
+ `scipy.signal.cspline1d`,
+ `scipy.signal.qspline1d_eval`,
+ `scipy.signal.cspline1d_eval`,
+ `scipy.signal.qspline2d`,
+ `scipy.signal.cspline2d`.
+
+``pchip`` is an alias of `PchipInterpolator` for backward compatibility
+(should not be used in new code).
+"""
+from ._interpolate import *
+from ._fitpack_py import *
+
+# New interface to fitpack library:
+from ._fitpack2 import *
+
+from ._rbf import Rbf
+
+from ._rbfinterp import *
+
+from ._polyint import *
+
+from ._cubic import *
+
+from ._ndgriddata import *
+
+from ._bsplines import *
+
+from ._pade import *
+
+from ._rgi import *
+
+from ._ndbspline import NdBSpline
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+# Backward compatibility
+pchip = PchipInterpolator
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf3c9c3348b45479ef9e34dda7a7ec7b8e0ad63d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64fda7b7c44e75b2e0cb24ea9430e8855a69a18a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b73f8bffb2cea41f878d8ce61ba82870c6dde9f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5df5bab933786cfc15f3ebb444c94ccf948ad053
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec91c30181fe0980a169e134b43dfcdbbe39f817
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd575bf4327f326a17954fd83ef3f40403a64020
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a89ba7890fa24a19645bc3c22c99d6afb61f5e40
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..337e11b45f8c8fb725244fd394f9a3b064ac3875
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndbspline.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c10852a2866b3c2e11f63253cba5cdf866cb4429
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1129b96bb78c26319af0044716be04515b9e4387
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_pade.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1e8f7a117565b6ce2a47f413fafd989b2416db4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c6f4a6e2196b06d4afe898732281cc56034032c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2744c51e0c50af3db4a9e64e316b4df8de78a28
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40e5666ffcf69e58100b7521283ae4a009a19f28
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8159eaa3282903acbf60df83994f68edcb27d8a8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09de26665698ae6d036e27cef1131ec7e1ebd771
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..665c41a6b476a93d06e77e3126593a024023f56c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/interpolate.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a217efe17224ff173527c7ee958ceccf29b42bf9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19d4c7420fc42bb2cc2d5c116571f4448f8c4d17
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/polyint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59e4aae18882c53742c677bcc8ef54b54e527a45
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/__pycache__/rbf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0b236497fc7c2bb5ca9350180744ccf392779b4e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_bsplines.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_bsplines.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cc6eb5411ede7cc7ca1b597c41125b2c0220af4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_bsplines.py
@@ -0,0 +1,2215 @@
+import operator
+from math import prod
+
+import numpy as np
+from scipy._lib._util import normalize_axis_index
+from scipy.linalg import (get_lapack_funcs, LinAlgError,
+ cholesky_banded, cho_solve_banded,
+ solve, solve_banded)
+from scipy.optimize import minimize_scalar
+from . import _bspl
+from . import _fitpack_impl
+from scipy.sparse import csr_array
+from scipy.special import poch
+from itertools import combinations
+
+__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline",
+ "make_smoothing_spline"]
+
+
+def _get_dtype(dtype):
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
+ if np.issubdtype(dtype, np.complexfloating):
+ return np.complex128
+ else:
+ return np.float64
+
+
+def _as_float_array(x, check_finite=False):
+ """Convert the input into a C contiguous float array.
+
+ NB: Upcasts half- and single-precision floats to double precision.
+ """
+ x = np.ascontiguousarray(x)
+ dtyp = _get_dtype(x.dtype)
+ x = x.astype(dtyp, copy=False)
+ if check_finite and not np.isfinite(x).all():
+ raise ValueError("Array must not contain infs or nans.")
+ return x
+
+
+def _dual_poly(j, k, t, y):
+ """
+ Dual polynomial of the B-spline B_{j,k,t} -
+ polynomial which is associated with B_{j,k,t}:
+ $p_{j,k}(y) = (y - t_{j+1})(y - t_{j+2})...(y - t_{j+k})$
+ """
+ if k == 0:
+ return 1
+ return np.prod([(y - t[j + i]) for i in range(1, k + 1)])
+
+
+def _diff_dual_poly(j, k, y, d, t):
+ """
+ d-th derivative of the dual polynomial $p_{j,k}(y)$
+ """
+ if d == 0:
+ return _dual_poly(j, k, t, y)
+ if d == k:
+ return poch(1, k)
+ comb = list(combinations(range(j + 1, j + k + 1), d))
+ res = 0
+ for i in range(len(comb) * len(comb[0])):
+ res += np.prod([(y - t[j + p]) for p in range(1, k + 1)
+ if (j + p) not in comb[i//d]])
+ return res
+
+
+class BSpline:
+ r"""Univariate spline in the B-spline basis.
+
+ .. math::
+
+ S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
+
+ where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
+ and knots `t`.
+
+ Parameters
+ ----------
+ t : ndarray, shape (n+k+1,)
+ knots
+ c : ndarray, shape (>=n, ...)
+ spline coefficients
+ k : int
+ B-spline degree
+ extrapolate : bool or 'periodic', optional
+ whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
+ or to return nans.
+ If True, extrapolates the first and last polynomial pieces of b-spline
+ functions active on the base interval.
+ If 'periodic', periodic extrapolation is used.
+ Default is True.
+ axis : int, optional
+ Interpolation axis. Default is zero.
+
+ Attributes
+ ----------
+ t : ndarray
+ knot vector
+ c : ndarray
+ spline coefficients
+ k : int
+ spline degree
+ extrapolate : bool
+ If True, extrapolates the first and last polynomial pieces of b-spline
+ functions active on the base interval.
+ axis : int
+ Interpolation axis.
+ tck : tuple
+ A read-only equivalent of ``(self.t, self.c, self.k)``
+
+ Methods
+ -------
+ __call__
+ basis_element
+ derivative
+ antiderivative
+ integrate
+ insert_knot
+ construct_fast
+ design_matrix
+ from_power_basis
+
+ Notes
+ -----
+ B-spline basis elements are defined via
+
+ .. math::
+
+ B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
+
+ B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
+
+ **Implementation details**
+
+ - At least ``k+1`` coefficients are required for a spline of degree `k`,
+ so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
+ ``j > n``, are ignored.
+
+ - B-spline basis elements of degree `k` form a partition of unity on the
+ *base interval*, ``t[k] <= x <= t[n]``.
+
+
+ Examples
+ --------
+
+ Translating the recursive definition of B-splines into Python code, we have:
+
+ >>> def B(x, k, i, t):
+ ... if k == 0:
+ ... return 1.0 if t[i] <= x < t[i+1] else 0.0
+ ... if t[i+k] == t[i]:
+ ... c1 = 0.0
+ ... else:
+ ... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
+ ... if t[i+k+1] == t[i+1]:
+ ... c2 = 0.0
+ ... else:
+ ... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
+ ... return c1 + c2
+
+ >>> def bspline(x, t, c, k):
+ ... n = len(t) - k - 1
+ ... assert (n >= k+1) and (len(c) >= n)
+ ... return sum(c[i] * B(x, k, i, t) for i in range(n))
+
+ Note that this is an inefficient (if straightforward) way to
+ evaluate B-splines --- this spline class does it in an equivalent,
+ but much more efficient way.
+
+ Here we construct a quadratic spline function on the base interval
+ ``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
+
+ >>> from scipy.interpolate import BSpline
+ >>> k = 2
+ >>> t = [0, 1, 2, 3, 4, 5, 6]
+ >>> c = [-1, 2, 0, -1]
+ >>> spl = BSpline(t, c, k)
+ >>> spl(2.5)
+ array(1.375)
+ >>> bspline(2.5, t, c, k)
+ 1.375
+
+ Note that outside of the base interval results differ. This is because
+ `BSpline` extrapolates the first and last polynomial pieces of B-spline
+ functions active on the base interval.
+
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+ >>> fig, ax = plt.subplots()
+ >>> xx = np.linspace(1.5, 4.5, 50)
+ >>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
+ >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
+ >>> ax.grid(True)
+ >>> ax.legend(loc='best')
+ >>> plt.show()
+
+
+ References
+ ----------
+ .. [1] Tom Lyche and Knut Morken, Spline methods,
+ http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
+ .. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
+
+ """
+
+ def __init__(self, t, c, k, extrapolate=True, axis=0):
+ super().__init__()
+
+ self.k = operator.index(k)
+ self.c = np.asarray(c)
+ self.t = np.ascontiguousarray(t, dtype=np.float64)
+
+ if extrapolate == 'periodic':
+ self.extrapolate = extrapolate
+ else:
+ self.extrapolate = bool(extrapolate)
+
+ n = self.t.shape[0] - self.k - 1
+
+ axis = normalize_axis_index(axis, self.c.ndim)
+
+ # Note that the normalized axis is stored in the object.
+ self.axis = axis
+ if axis != 0:
+ # roll the interpolation axis to be the first one in self.c
+ # More specifically, the target shape for self.c is (n, ...),
+ # and axis !=0 means that we have c.shape (..., n, ...)
+ # ^
+ # axis
+ self.c = np.moveaxis(self.c, axis, 0)
+
+ if k < 0:
+ raise ValueError("Spline order cannot be negative.")
+ if self.t.ndim != 1:
+ raise ValueError("Knot vector must be one-dimensional.")
+ if n < self.k + 1:
+ raise ValueError("Need at least %d knots for degree %d" %
+ (2*k + 2, k))
+ if (np.diff(self.t) < 0).any():
+ raise ValueError("Knots must be in a non-decreasing order.")
+ if len(np.unique(self.t[k:n+1])) < 2:
+ raise ValueError("Need at least two internal knots.")
+ if not np.isfinite(self.t).all():
+ raise ValueError("Knots should not have nans or infs.")
+ if self.c.ndim < 1:
+ raise ValueError("Coefficients must be at least 1-dimensional.")
+ if self.c.shape[0] < n:
+ raise ValueError("Knots, coefficients and degree are inconsistent.")
+
+ dt = _get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+ @classmethod
+ def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
+ """Construct a spline without making checks.
+
+ Accepts same parameters as the regular constructor. Input arrays
+ `t` and `c` must of correct shape and dtype.
+ """
+ self = object.__new__(cls)
+ self.t, self.c, self.k = t, c, k
+ self.extrapolate = extrapolate
+ self.axis = axis
+ return self
+
+ @property
+ def tck(self):
+ """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
+ """
+ return self.t, self.c, self.k
+
+ @classmethod
+ def basis_element(cls, t, extrapolate=True):
+ """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
+
+ Parameters
+ ----------
+ t : ndarray, shape (k+2,)
+ internal knots
+ extrapolate : bool or 'periodic', optional
+ whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
+ or to return nans.
+ If 'periodic', periodic extrapolation is used.
+ Default is True.
+
+ Returns
+ -------
+ basis_element : callable
+ A callable representing a B-spline basis element for the knot
+ vector `t`.
+
+ Notes
+ -----
+ The degree of the B-spline, `k`, is inferred from the length of `t` as
+ ``len(t)-2``. The knot vector is constructed by appending and prepending
+ ``k+1`` elements to internal knots `t`.
+
+ Examples
+ --------
+
+ Construct a cubic B-spline:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import BSpline
+ >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
+ >>> k = b.k
+ >>> b.t[k:-k]
+ array([ 0., 1., 2., 3., 4.])
+ >>> k
+ 3
+
+ Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare
+ to its explicit form:
+
+ >>> t = [0, 1, 1, 2]
+ >>> b = BSpline.basis_element(t)
+ >>> def f(x):
+ ... return np.where(x < 1, x*x, (2. - x)**2)
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> x = np.linspace(0, 2, 51)
+ >>> ax.plot(x, b(x), 'g', lw=3)
+ >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
+ >>> ax.grid(True)
+ >>> plt.show()
+
+ """
+ k = len(t) - 2
+ t = _as_float_array(t)
+ t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
+ c = np.zeros_like(t)
+ c[k] = 1.
+ return cls.construct_fast(t, c, k, extrapolate)
+
+ @classmethod
+ def design_matrix(cls, x, t, k, extrapolate=False):
+ """
+ Returns a design matrix as a CSR format sparse array.
+
+ Parameters
+ ----------
+ x : array_like, shape (n,)
+ Points to evaluate the spline at.
+ t : array_like, shape (nt,)
+ Sorted 1D array of knots.
+ k : int
+ B-spline degree.
+ extrapolate : bool or 'periodic', optional
+ Whether to extrapolate based on the first and last intervals
+ or raise an error. If 'periodic', periodic extrapolation is used.
+ Default is False.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ design_matrix : `csr_array` object
+ Sparse matrix in CSR format where each row contains all the basis
+ elements of the input row (first row = basis elements of x[0],
+ ..., last row = basis elements x[-1]).
+
+ Examples
+ --------
+ Construct a design matrix for a B-spline
+
+ >>> from scipy.interpolate import make_interp_spline, BSpline
+ >>> import numpy as np
+ >>> x = np.linspace(0, np.pi * 2, 4)
+ >>> y = np.sin(x)
+ >>> k = 3
+ >>> bspl = make_interp_spline(x, y, k=k)
+ >>> design_matrix = bspl.design_matrix(x, bspl.t, k)
+ >>> design_matrix.toarray()
+ [[1. , 0. , 0. , 0. ],
+ [0.2962963 , 0.44444444, 0.22222222, 0.03703704],
+ [0.03703704, 0.22222222, 0.44444444, 0.2962963 ],
+ [0. , 0. , 0. , 1. ]]
+
+ Construct a design matrix for some vector of knots
+
+ >>> k = 2
+ >>> t = [-1, 0, 1, 2, 3, 4, 5, 6]
+ >>> x = [1, 2, 3, 4]
+ >>> design_matrix = BSpline.design_matrix(x, t, k).toarray()
+ >>> design_matrix
+ [[0.5, 0.5, 0. , 0. , 0. ],
+ [0. , 0.5, 0.5, 0. , 0. ],
+ [0. , 0. , 0.5, 0.5, 0. ],
+ [0. , 0. , 0. , 0.5, 0.5]]
+
+ This result is equivalent to the one created in the sparse format
+
+ >>> c = np.eye(len(t) - k - 1)
+ >>> design_matrix_gh = BSpline(t, c, k)(x)
+ >>> np.allclose(design_matrix, design_matrix_gh, atol=1e-14)
+ True
+
+ Notes
+ -----
+ .. versionadded:: 1.8.0
+
+ In each row of the design matrix all the basis elements are evaluated
+ at the certain point (first row - x[0], ..., last row - x[-1]).
+
+ `nt` is a length of the vector of knots: as far as there are
+ `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2`
+ to have at least `k + 1` basis element.
+
+ Out of bounds `x` raises a ValueError.
+ """
+ x = _as_float_array(x, True)
+ t = _as_float_array(t, True)
+
+ if extrapolate != 'periodic':
+ extrapolate = bool(extrapolate)
+
+ if k < 0:
+ raise ValueError("Spline order cannot be negative.")
+ if t.ndim != 1 or np.any(t[1:] < t[:-1]):
+ raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
+ f"got t={t}.")
+ # There are `nt - k - 1` basis elements in a BSpline built on the
+ # vector of knots with length `nt`, so to have at least `k + 1` basis
+ # elements we need to have at least `2 * k + 2` elements in the vector
+ # of knots.
+ if len(t) < 2 * k + 2:
+ raise ValueError(f"Length t is not enough for k={k}.")
+
+ if extrapolate == 'periodic':
+ # With periodic extrapolation we map x to the segment
+ # [t[k], t[n]].
+ n = t.size - k - 1
+ x = t[k] + (x - t[k]) % (t[n] - t[k])
+ extrapolate = False
+ elif not extrapolate and (
+ (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
+ ):
+ # Checks from `find_interval` function
+ raise ValueError(f'Out of bounds w/ x = {x}.')
+
+ # Compute number of non-zeros of final CSR array in order to determine
+ # the dtype of indices and indptr of the CSR array.
+ n = x.shape[0]
+ nnz = n * (k + 1)
+ if nnz < np.iinfo(np.int32).max:
+ int_dtype = np.int32
+ else:
+ int_dtype = np.int64
+ # Preallocate indptr and indices
+ indices = np.empty(n * (k + 1), dtype=int_dtype)
+ indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
+
+ # indptr is not passed to Cython as it is already fully computed
+ data, indices = _bspl._make_design_matrix(
+ x, t, k, extrapolate, indices
+ )
+ return csr_array(
+ (data, indices, indptr),
+ shape=(x.shape[0], t.shape[0] - k - 1)
+ )
+
+ def __call__(self, x, nu=0, extrapolate=None):
+ """
+ Evaluate a spline function.
+
+ Parameters
+ ----------
+ x : array_like
+ points to evaluate the spline at.
+ nu : int, optional
+ derivative to evaluate (default is 0).
+ extrapolate : bool or 'periodic', optional
+ whether to extrapolate based on the first and last intervals
+ or return nans. If 'periodic', periodic extrapolation is used.
+ Default is `self.extrapolate`.
+
+ Returns
+ -------
+ y : array_like
+ Shape is determined by replacing the interpolation axis
+ in the coefficient array with the shape of `x`.
+
+ """
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ x = np.asarray(x)
+ x_shape, x_ndim = x.shape, x.ndim
+ x = np.ascontiguousarray(x.ravel(), dtype=np.float64)
+
+ # With periodic extrapolation we map x to the segment
+ # [self.t[k], self.t[n]].
+ if extrapolate == 'periodic':
+ n = self.t.size - self.k - 1
+ x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
+ self.t[self.k])
+ extrapolate = False
+
+ out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
+ self._ensure_c_contiguous()
+ self._evaluate(x, nu, extrapolate, out)
+ out = out.reshape(x_shape + self.c.shape[1:])
+ if self.axis != 0:
+ # transpose to move the calculated values to the interpolation axis
+ l = list(range(out.ndim))
+ l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
+ out = out.transpose(l)
+ return out
+
+ def _evaluate(self, xp, nu, extrapolate, out):
+ _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
+ self.k, xp, nu, extrapolate, out)
+
+ def _ensure_c_contiguous(self):
+ """
+ c and t may be modified by the user. The Cython code expects
+ that they are C contiguous.
+
+ """
+ if not self.t.flags.c_contiguous:
+ self.t = self.t.copy()
+ if not self.c.flags.c_contiguous:
+ self.c = self.c.copy()
+
+ def derivative(self, nu=1):
+ """Return a B-spline representing the derivative.
+
+ Parameters
+ ----------
+ nu : int, optional
+ Derivative order.
+ Default is 1.
+
+ Returns
+ -------
+ b : BSpline object
+ A new instance representing the derivative.
+
+ See Also
+ --------
+ splder, splantider
+
+ """
+ c = self.c
+ # pad the c array if needed
+ ct = len(self.t) - len(c)
+ if ct > 0:
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+ tck = _fitpack_impl.splder((self.t, c, self.k), nu)
+ return self.construct_fast(*tck, extrapolate=self.extrapolate,
+ axis=self.axis)
+
+ def antiderivative(self, nu=1):
+ """Return a B-spline representing the antiderivative.
+
+ Parameters
+ ----------
+ nu : int, optional
+ Antiderivative order. Default is 1.
+
+ Returns
+ -------
+ b : BSpline object
+ A new instance representing the antiderivative.
+
+ Notes
+ -----
+ If antiderivative is computed and ``self.extrapolate='periodic'``,
+ it will be set to False for the returned instance. This is done because
+ the antiderivative is no longer periodic and its correct evaluation
+ outside of the initially given x interval is difficult.
+
+ See Also
+ --------
+ splder, splantider
+
+ """
+ c = self.c
+ # pad the c array if needed
+ ct = len(self.t) - len(c)
+ if ct > 0:
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+ tck = _fitpack_impl.splantider((self.t, c, self.k), nu)
+
+ if self.extrapolate == 'periodic':
+ extrapolate = False
+ else:
+ extrapolate = self.extrapolate
+
+ return self.construct_fast(*tck, extrapolate=extrapolate,
+ axis=self.axis)
+
+ def integrate(self, a, b, extrapolate=None):
+ """Compute a definite integral of the spline.
+
+ Parameters
+ ----------
+ a : float
+ Lower limit of integration.
+ b : float
+ Upper limit of integration.
+ extrapolate : bool or 'periodic', optional
+ whether to extrapolate beyond the base interval,
+ ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
+ base interval. If 'periodic', periodic extrapolation is used.
+ If None (default), use `self.extrapolate`.
+
+ Returns
+ -------
+ I : array_like
+ Definite integral of the spline over the interval ``[a, b]``.
+
+ Examples
+ --------
+ Construct the linear spline ``x if x < 1 else 2 - x`` on the base
+ interval :math:`[0, 2]`, and integrate it
+
+ >>> from scipy.interpolate import BSpline
+ >>> b = BSpline.basis_element([0, 1, 2])
+ >>> b.integrate(0, 1)
+ array(0.5)
+
+ If the integration limits are outside of the base interval, the result
+ is controlled by the `extrapolate` parameter
+
+ >>> b.integrate(-1, 1)
+ array(0.0)
+ >>> b.integrate(-1, 1, extrapolate=False)
+ array(0.5)
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> ax.grid(True)
+ >>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval
+ >>> ax.axvline(2, c='r', lw=5, alpha=0.5)
+ >>> xx = [-1, 1, 2]
+ >>> ax.plot(xx, b(xx))
+ >>> plt.show()
+
+ """
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+
+ # Prepare self.t and self.c.
+ self._ensure_c_contiguous()
+
+ # Swap integration bounds if needed.
+ sign = 1
+ if b < a:
+ a, b = b, a
+ sign = -1
+ n = self.t.size - self.k - 1
+
+ if extrapolate != "periodic" and not extrapolate:
+ # Shrink the integration interval, if needed.
+ a = max(a, self.t[self.k])
+ b = min(b, self.t[n])
+
+ if self.c.ndim == 1:
+ # Fast path: use FITPACK's routine
+ # (cf _fitpack_impl.splint).
+ integral = _fitpack_impl.splint(a, b, self.tck)
+ return integral * sign
+
+ out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)
+
+ # Compute the antiderivative.
+ c = self.c
+ ct = len(self.t) - len(c)
+ if ct > 0:
+ c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+ ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)
+
+ if extrapolate == 'periodic':
+ # Split the integral into the part over period (can be several
+ # of them) and the remaining part.
+
+ ts, te = self.t[self.k], self.t[n]
+ period = te - ts
+ interval = b - a
+ n_periods, left = divmod(interval, period)
+
+ if n_periods > 0:
+ # Evaluate the difference of antiderivatives.
+ x = np.asarray([ts, te], dtype=np.float64)
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+ ka, x, 0, False, out)
+ integral = out[1] - out[0]
+ integral *= n_periods
+ else:
+ integral = np.zeros((1, prod(self.c.shape[1:])),
+ dtype=self.c.dtype)
+
+ # Map a to [ts, te], b is always a + left.
+ a = ts + (a - ts) % period
+ b = a + left
+
+ # If b <= te then we need to integrate over [a, b], otherwise
+ # over [a, te] and from xs to what is remained.
+ if b <= te:
+ x = np.asarray([a, b], dtype=np.float64)
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+ ka, x, 0, False, out)
+ integral += out[1] - out[0]
+ else:
+ x = np.asarray([a, te], dtype=np.float64)
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+ ka, x, 0, False, out)
+ integral += out[1] - out[0]
+
+ x = np.asarray([ts, ts + b - te], dtype=np.float64)
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+ ka, x, 0, False, out)
+ integral += out[1] - out[0]
+ else:
+ # Evaluate the difference of antiderivatives.
+ x = np.asarray([a, b], dtype=np.float64)
+ _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+ ka, x, 0, extrapolate, out)
+ integral = out[1] - out[0]
+
+ integral *= sign
+ return integral.reshape(ca.shape[1:])
+
+ @classmethod
+ def from_power_basis(cls, pp, bc_type='not-a-knot'):
+ r"""
+ Construct a polynomial in the B-spline basis
+ from a piecewise polynomial in the power basis.
+
+ For now, accepts ``CubicSpline`` instances only.
+
+ Parameters
+ ----------
+ pp : CubicSpline
+ A piecewise polynomial in the power basis, as created
+ by ``CubicSpline``
+ bc_type : string, optional
+ Boundary condition type as in ``CubicSpline``: one of the
+ ``not-a-knot``, ``natural``, ``clamped``, or ``periodic``.
+ Necessary for construction an instance of ``BSpline`` class.
+ Default is ``not-a-knot``.
+
+ Returns
+ -------
+ b : BSpline object
+ A new instance representing the initial polynomial
+ in the B-spline basis.
+
+ Notes
+ -----
+ .. versionadded:: 1.8.0
+
+ Accepts only ``CubicSpline`` instances for now.
+
+ The algorithm follows from differentiation
+ the Marsden's identity [1]: each of coefficients of spline
+ interpolation function in the B-spline basis is computed as follows:
+
+ .. math::
+
+ c_j = \sum_{m=0}^{k} \frac{(k-m)!}{k!}
+ c_{m,i} (-1)^{k-m} D^m p_{j,k}(x_i)
+
+ :math:`c_{m, i}` - a coefficient of CubicSpline,
+ :math:`D^m p_{j, k}(x_i)` - an m-th defivative of a dual polynomial
+ in :math:`x_i`.
+
+ ``k`` always equals 3 for now.
+
+ First ``n - 2`` coefficients are computed in :math:`x_i = x_j`, e.g.
+
+ .. math::
+
+ c_1 = \sum_{m=0}^{k} \frac{(k-1)!}{k!} c_{m,1} D^m p_{j,3}(x_1)
+
+ Last ``nod + 2`` coefficients are computed in ``x[-2]``,
+ ``nod`` - number of derivatives at the ends.
+
+ For example, consider :math:`x = [0, 1, 2, 3, 4]`,
+ :math:`y = [1, 1, 1, 1, 1]` and bc_type = ``natural``
+
+ The coefficients of CubicSpline in the power basis:
+
+ :math:`[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]`
+
+ The knot vector: :math:`t = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]`
+
+ In this case
+
+ .. math::
+
+ c_j = \frac{0!}{k!} c_{3, i} k! = c_{3, i} = 1,~j = 0, ..., 6
+
+ References
+ ----------
+ .. [1] Tom Lyche and Knut Morken, Spline Methods, 2005, Section 3.1.2
+
+ """
+ from ._cubic import CubicSpline
+ if not isinstance(pp, CubicSpline):
+ raise NotImplementedError("Only CubicSpline objects are accepted"
+ "for now. Got %s instead." % type(pp))
+ x = pp.x
+ coef = pp.c
+ k = pp.c.shape[0] - 1
+ n = x.shape[0]
+
+ if bc_type == 'not-a-knot':
+ t = _not_a_knot(x, k)
+ elif bc_type == 'natural' or bc_type == 'clamped':
+ t = _augknt(x, k)
+ elif bc_type == 'periodic':
+ t = _periodic_knots(x, k)
+ else:
+ raise TypeError('Unknown boundary condition: %s' % bc_type)
+
+ nod = t.shape[0] - (n + k + 1) # number of derivatives at the ends
+ c = np.zeros(n + nod, dtype=pp.c.dtype)
+ for m in range(k + 1):
+ for i in range(n - 2):
+ c[i] += poch(k + 1, -m) * coef[m, i]\
+ * np.power(-1, k - m)\
+ * _diff_dual_poly(i, k, x[i], m, t)
+ for j in range(n - 2, n + nod):
+ c[j] += poch(k + 1, -m) * coef[m, n - 2]\
+ * np.power(-1, k - m)\
+ * _diff_dual_poly(j, k, x[n - 2], m, t)
+ return cls.construct_fast(t, c, k, pp.extrapolate, pp.axis)
+
    def insert_knot(self, x, m=1):
        """Insert a new knot at `x` of multiplicity `m`.

        Given the knots and coefficients of a B-spline representation, create a
        new B-spline with a knot inserted `m` times at point `x`.

        Parameters
        ----------
        x : float
            The position of the new knot
        m : int, optional
            The number of times to insert the given knot (its multiplicity).
            Default is 1.

        Returns
        -------
        spl : BSpline object
            A new BSpline object with the new knot inserted.

        Notes
        -----
        Based on algorithms from [1]_ and [2]_.

        In case of a periodic spline (``self.extrapolate == "periodic"``)
        there must be either at least k interior knots t(j) satisfying
        ``t(k+1) < x <= t(j) <= t(n-k)`` or at least k interior knots t(j)
        satisfying ``t(k+1) <= t(j) <= x < t(n-k)``.
        (NOTE(review): this condition was reconstructed from a garbled
        docstring — confirm against the upstream documentation.)

        References
        ----------
        .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
            Computer Aided Design, 12, p.199 (1980).
        .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs
            on Numerical Analysis", Oxford University Press, 1993.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.interpolate import BSpline, make_interp_spline
        >>> x = np.linspace(0, 10, 5)
        >>> y = np.sin(x)
        >>> spl = make_interp_spline(x, y, k=3)
        >>> spl.t
        array([ 0.,  0.,  0.,  0.,  5., 10., 10., 10., 10.])

        Insert a single knot

        >>> spl_1 = spl.insert_knot(3)
        >>> spl_1.t
        array([ 0.,  0.,  0.,  0.,  3.,  5., 10., 10., 10., 10.])

        Insert a multiple knot

        >>> spl_2 = spl.insert_knot(8, m=3)
        >>> spl_2.t
        array([ 0.,  0.,  0.,  0.,  5.,  8.,  8.,  8., 10., 10., 10., 10.])

        """
        # the new knot must lie inside the base interval [t[k], t[n]]
        if x < self.t[self.k] or x > self.t[-self.k-1]:
            raise ValueError(f"Cannot insert a knot at {x}.")
        if m <= 0:
            raise ValueError(f"`m` must be positive, got {m = }.")

        # flatten trailing dims so the C helper works on a 2-D coefficient array
        extradim = self.c.shape[1:]
        num_extra = prod(extradim)

        tt = self.t.copy()
        cc = self.c.copy()
        cc = cc.reshape(-1, num_extra)

        # insert the knot one multiplicity at a time; the last argument tells
        # the helper to apply the periodic wrap-around treatment
        for _ in range(m):
            tt, cc = _bspl.insert(x, tt, cc, self.k, self.extrapolate == "periodic")

        return self.construct_fast(
            tt, cc.reshape((-1,) + extradim), self.k, self.extrapolate, self.axis
        )
+
+
+#################################
+# Interpolating spline helpers #
+#################################
+
+def _not_a_knot(x, k):
+ """Given data x, construct the knot vector w/ not-a-knot BC.
+ cf de Boor, XIII(12)."""
+ x = np.asarray(x)
+ if k % 2 != 1:
+ raise ValueError("Odd degree for now only. Got %s." % k)
+
+ m = (k - 1) // 2
+ t = x[m+1:-m-1]
+ t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
+ return t
+
+
+def _augknt(x, k):
+ """Construct a knot vector appropriate for the order-k interpolation."""
+ return np.r_[(x[0],)*k, x, (x[-1],)*k]
+
+
+def _convert_string_aliases(deriv, target_shape):
+ if isinstance(deriv, str):
+ if deriv == "clamped":
+ deriv = [(1, np.zeros(target_shape))]
+ elif deriv == "natural":
+ deriv = [(2, np.zeros(target_shape))]
+ else:
+ raise ValueError("Unknown boundary condition : %s" % deriv)
+ return deriv
+
+
+def _process_deriv_spec(deriv):
+ if deriv is not None:
+ try:
+ ords, vals = zip(*deriv)
+ except TypeError as e:
+ msg = ("Derivatives, `bc_type`, should be specified as a pair of "
+ "iterables of pairs of (order, value).")
+ raise ValueError(msg) from e
+ else:
+ ords, vals = [], []
+ return np.atleast_1d(ords, vals)
+
+
def _woodbury_algorithm(A, ur, ll, b, k):
    '''
    Solve a cyclic banded linear system with upper right
    and lower left blocks of size ``(k-1) / 2`` using
    the Woodbury formula

    Parameters
    ----------
    A : 2-D array, shape(k, n)
        Matrix of diagonals of original matrix (see
        ``solve_banded`` documentation).
    ur : 2-D array, shape(bs, bs)
        Upper right block matrix.
    ll : 2-D array, shape(bs, bs)
        Lower left block matrix.
    b : 1-D array, shape(n,)
        Vector of constant terms of the system of linear equations.
    k : int
        B-spline degree.

    Returns
    -------
    c : 1-D array, shape(n,)
        Solution of the original system of linear equations.

    Notes
    -----
    This algorithm works only for systems with banded matrix A plus
    a correction term U @ V.T, where the matrix U @ V.T gives upper right
    and lower left block of A
    The system is solved with the following steps:
        1.  New systems of linear equations are constructed:
            A @ z_i = u_i,
            u_i - column vector of U,
            i = 1, ..., k - 1
        2.  Matrix Z is formed from vectors z_i:
            Z = [ z_1 | z_2 | ... | z_{k - 1} ]
        3.  Matrix H = (1 + V.T @ Z)^{-1}
        4.  The system A' @ y = b is solved
        5.  x = y - Z @ (H @ V.T @ y)
    Also, ``n`` should be greater than ``k``, otherwise corner block
    elements will intersect with diagonals.

    Examples
    --------
    Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).
    The matrix of a system:       U:          V:
      x  x  x  *  *  a  b         a b 0 0     0 0 1 0
      x  x  x  x  *  *  c         0 c 0 0     0 0 0 1
      x  x  x  x  x  *  *         0 0 0 0     0 0 0 0
      *  x  x  x  x  x  *         0 0 0 0     0 0 0 0
      *  *  x  x  x  x  x         0 0 0 0     0 0 0 0
      d  *  *  x  x  x  x         d 0 0 0     1 0 0 0
      e  f  *  *  x  x  x         e f 0 0     0 1 0 0

    References
    ----------
    .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling
           and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3

    '''
    # rank of the low-rank correction U @ V.T (even; two corner blocks)
    k_mod = k - k % 2
    # size of each corner block
    bs = int((k - 1) / 2) + (k + 1) % 2

    n = A.shape[1] + 1
    U = np.zeros((n - 1, k_mod))
    VT = np.zeros((k_mod, n - 1))  # V transpose

    # upper right block: the 1s in VT select the last `bs` columns of A
    U[:bs, :bs] = ur
    VT[np.arange(bs), np.arange(bs) - bs] = 1

    # lower left block: the 1s in VT select the first `bs` columns of A
    U[-bs:, -bs:] = ll
    VT[np.arange(bs) - bs, np.arange(bs)] = 1

    # step 1-2: Z = A^{-1} U, solved column-by-column on the banded core
    Z = solve_banded((bs, bs), A, U)

    # step 3: small "capacitance" matrix of the Woodbury identity
    H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))

    # steps 4-5: banded solve plus low-rank correction
    y = solve_banded((bs, bs), A, b)
    c = y - Z @ (H @ (VT @ y))

    return c
+
+
+def _periodic_knots(x, k):
+ '''
+ returns vector of nodes on circle
+ '''
+ xc = np.copy(x)
+ n = len(xc)
+ if k % 2 == 0:
+ dx = np.diff(xc)
+ xc[1: -1] -= dx[:-1] / 2
+ dx = np.diff(xc)
+ t = np.zeros(n + 2 * k)
+ t[k: -k] = xc
+ for i in range(0, k):
+ # filling first `k` elements in descending order
+ t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]
+ # filling last `k` elements in ascending order
+ t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]
+ return t
+
+
def _make_interp_per_full_matr(x, y, t, k):
    '''
    Returns a solution of a system for B-spline interpolation with periodic
    boundary conditions. First ``k - 1`` rows of matrix are conditions of
    periodicity (continuity of ``k - 1`` derivatives at the boundary points).
    Last ``n`` rows are interpolation conditions.
    RHS is ``k - 1`` zeros and ``n`` ordinates in this case.

    Parameters
    ----------
    x : 1-D array, shape (n,)
        Values of x - coordinate of a given set of points.
    y : 1-D array, shape (n,)
        Values of y - coordinate of a given set of points.
    t : 1-D array, shape(n+2*k,)
        Vector of knots.
    k : int
        The maximum degree of spline.

    Returns
    -------
    c : 1-D array, shape (n+k-1,)
        B-spline coefficients.

    Notes
    -----
    ``t`` is supposed to be taken on circle.

    '''

    x, y, t = map(np.asarray, (x, y, t))

    n = x.size
    # LHS: the collocation matrix + derivatives at edges
    matr = np.zeros((n + k - 1, n + k - 1))

    # derivatives at x[0] and x[-1]: each periodicity row states
    # S^{(i+1)}(x[0]) - S^{(i+1)}(x[-1]) = 0
    for i in range(k - 1):
        bb = _bspl.evaluate_all_bspl(t, k, x[0], k, nu=i + 1)
        matr[i, : k + 1] += bb
        bb = _bspl.evaluate_all_bspl(t, k, x[-1], n + k - 1, nu=i + 1)[:-1]
        matr[i, -k:] -= bb

    # collocation matrix
    for i in range(n):
        xval = x[i]
        # find interval: index of the knot span containing xval
        if xval == t[k]:
            left = k
        else:
            left = np.searchsorted(t, xval) - 1

        # fill a row with the k+1 B-splines that are nonzero at xval
        bb = _bspl.evaluate_all_bspl(t, k, xval, left)
        matr[i + k - 1, left-k:left+1] = bb

    # RHS: zeros for the periodicity rows, ordinates for interpolation rows
    b = np.r_[[0] * (k - 1), y]

    c = solve(matr, b)
    return c
+
+
def _make_periodic_spline(x, y, t, k, axis):
    '''
    Compute the (coefficients of) interpolating B-spline with periodic
    boundary conditions.

    Parameters
    ----------
    x : array_like, shape (n,)
        Abscissas.
    y : array_like, shape (n,)
        Ordinates.
    k : int
        B-spline degree.
    t : array_like, shape (n + 2 * k,).
        Knots taken on a circle, ``k`` on the left and ``k`` on the right
        of the vector ``x``.

    Returns
    -------
    b : a BSpline object of the degree ``k`` and with knots ``t``.

    Notes
    -----
    The original system is formed by ``n + k - 1`` equations where the first
    ``k - 1`` of them stand for the ``k - 1`` derivatives continuity on the
    edges while the other equations correspond to an interpolating case
    (matching all the input points). Due to a special form of knot vector, it
    can be proved that in the original system the first and last ``k``
    coefficients of a spline function are the same, respectively. It follows
    from the fact that all ``k - 1`` derivatives are equal term by term at ends
    and that the matrix of the original system of linear equations is
    non-degenerate. So, we can reduce the number of equations to ``n - 1``
    (first ``k - 1`` equations could be reduced). Another trick of this
    implementation is cyclic shift of values of B-splines due to equality of
    ``k`` unknown coefficients. With this we can receive matrix of the system
    with upper right and lower left blocks, and ``k`` diagonals. It allows
    to use Woodbury formula to optimize the computations.

    '''
    n = y.shape[0]

    # flatten trailing dims; each column is solved independently
    extradim = prod(y.shape[1:])
    y_new = y.reshape(n, extradim)
    c = np.zeros((n + k - 1, extradim))

    # n <= k case is solved with full matrix
    if n <= k:
        for i in range(extradim):
            c[:, i] = _make_interp_per_full_matr(x, y_new[:, i], t, k)
        c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
        return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)

    nt = len(t) - k - 1

    # size of block elements
    kul = int(k / 2)

    # kl = ku = k
    ab = np.zeros((3 * k + 1, nt), dtype=np.float64, order='F')

    # upper right and lower left blocks
    ur = np.zeros((kul, kul))
    ll = np.zeros_like(ur)

    # `offset` is made to shift all the non-zero elements to the end of the
    # matrix
    _bspl._colloc(x, t, k, ab, offset=k)

    # remove zeros before the matrix
    ab = ab[-k - (k + 1) % 2:, :]

    # The least elements in rows (except repetitions) are diagonals
    # of block matrices. Upper right matrix is an upper triangular
    # matrix while lower left is a lower triangular one.
    for i in range(kul):
        ur += np.diag(ab[-i - 1, i: kul], k=i)
        ll += np.diag(ab[i, -kul - (k % 2): n - 1 + 2 * kul - i], k=-i)

    # remove elements that occur in the last point
    # (first and last points are equivalent)
    A = ab[:, kul: -k + kul]

    # solve the reduced cyclic system per column, then restore the repeated
    # coefficients implied by periodicity
    for i in range(extradim):
        cc = _woodbury_algorithm(A, ur, ll, y_new[:, i][:-1], k)
        c[:, i] = np.concatenate((cc[-kul:], cc, cc[:kul + k % 2]))
    c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
    return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
+
+
def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
                       check_finite=True):
    """Compute the (coefficients of) interpolating B-spline.

    Parameters
    ----------
    x : array_like, shape (n,)
        Abscissas.
    y : array_like, shape (n, ...)
        Ordinates.
    k : int, optional
        B-spline degree. Default is cubic, ``k = 3``.
    t : array_like, shape (nt + k + 1,), optional.
        Knots.
        The number of knots needs to agree with the number of data points and
        the number of derivatives at the edges. Specifically, ``nt - n`` must
        equal ``len(deriv_l) + len(deriv_r)``.
    bc_type : 2-tuple or None
        Boundary conditions.
        Default is None, which means choosing the boundary conditions
        automatically. Otherwise, it must be a length-two tuple where the first
        element (``deriv_l``) sets the boundary conditions at ``x[0]`` and
        the second element (``deriv_r``) sets the boundary conditions at
        ``x[-1]``. Each of these must be an iterable of pairs
        ``(order, value)`` which gives the values of derivatives of specified
        orders at the given edge of the interpolation interval.
        Alternatively, the following string aliases are recognized:

        * ``"clamped"``: The first derivatives at the ends are zero. This is
          equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.
        * ``"natural"``: The second derivatives at ends are zero. This is
          equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.
        * ``"not-a-knot"`` (default): The first and second segments are the
          same polynomial. This is equivalent to having ``bc_type=None``.
        * ``"periodic"``: The values and the first ``k-1`` derivatives at the
          ends are equivalent.

    axis : int, optional
        Interpolation axis. Default is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default is True.

    Returns
    -------
    b : a BSpline object of the degree ``k`` and with knots ``t``.

    See Also
    --------
    BSpline : base class representing the B-spline objects
    CubicSpline : a cubic spline in the polynomial basis
    make_lsq_spline : a similar factory function for spline fitting
    UnivariateSpline : a wrapper over FITPACK spline fitting routines
    splrep : a wrapper over FITPACK spline fitting routines

    Examples
    --------

    Use cubic interpolation on Chebyshev nodes:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> def cheb_nodes(N):
    ...     jj = 2.*np.arange(N) + 1
    ...     x = np.cos(np.pi * jj / 2 / N)[::-1]
    ...     return x

    >>> x = cheb_nodes(20)
    >>> y = np.sqrt(1 - x**2)

    >>> from scipy.interpolate import BSpline, make_interp_spline
    >>> b = make_interp_spline(x, y)
    >>> np.allclose(b(x), y)
    True

    Note that the default is a cubic spline with a not-a-knot boundary condition

    >>> b.k
    3

    Here we use a 'natural' spline, with zero 2nd derivatives at edges:

    >>> l, r = [(2, 0.0)], [(2, 0.0)]
    >>> b_n = make_interp_spline(x, y, bc_type=(l, r))  # or, bc_type="natural"
    >>> np.allclose(b_n(x), y)
    True
    >>> x0, x1 = x[0], x[-1]
    >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
    True

    Interpolation of parametric curves is also supported. As an example, we
    compute a discretization of a snail curve in polar coordinates

    >>> phi = np.linspace(0, 2.*np.pi, 40)
    >>> r = 0.3 + np.cos(phi)
    >>> x, y = r*np.cos(phi), r*np.sin(phi)  # convert to Cartesian coordinates

    Build an interpolating curve, parameterizing it by the angle

    >>> spl = make_interp_spline(phi, np.c_[x, y])

    Evaluate the interpolant on a finer grid (note that we transpose the result
    to unpack it into a pair of x- and y-arrays)

    >>> phi_new = np.linspace(0, 2.*np.pi, 100)
    >>> x_new, y_new = spl(phi_new).T

    Plot the result

    >>> plt.plot(x, y, 'o')
    >>> plt.plot(x_new, y_new, '-')
    >>> plt.show()

    Build a B-spline curve with 2 dimensional y

    >>> x = np.linspace(0, 2*np.pi, 10)
    >>> y = np.array([np.sin(x), np.cos(x)])

    Periodic condition is satisfied because y coordinates of points on the ends
    are equivalent

    >>> ax = plt.axes(projection='3d')
    >>> xx = np.linspace(0, 2*np.pi, 100)
    >>> bspl = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
    >>> ax.plot3D(xx, *bspl(xx))
    >>> ax.scatter3D(x, *y, color='red')
    >>> plt.show()

    """
    # convert string aliases for the boundary conditions
    if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic':
        deriv_l, deriv_r = None, None
    elif isinstance(bc_type, str):
        # "clamped"/"natural": same alias applied to both ends; converted to
        # explicit (order, value) pairs further below
        deriv_l, deriv_r = bc_type, bc_type
    else:
        try:
            deriv_l, deriv_r = bc_type
        except TypeError as e:
            raise ValueError("Unknown boundary condition: %s" % bc_type) from e

    y = np.asarray(y)

    axis = normalize_axis_index(axis, y.ndim)

    x = _as_float_array(x, check_finite)
    y = _as_float_array(y, check_finite)

    y = np.moveaxis(y, axis, 0)    # now internally interp axis is zero

    # sanity check the input; periodic endpoints must match up to rounding
    if bc_type == 'periodic' and not np.allclose(y[0], y[-1], atol=1e-15):
        raise ValueError("First and last points does not match while "
                         "periodic case expected")
    if x.size != y.shape[0]:
        raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
    if np.any(x[1:] == x[:-1]):
        raise ValueError("Expect x to not have duplicates")
    if x.ndim != 1 or np.any(x[1:] < x[:-1]):
        raise ValueError("Expect x to be a 1D strictly increasing sequence.")

    # special-case k=0 right away: piecewise-constant interpolant
    if k == 0:
        if any(_ is not None for _ in (t, deriv_l, deriv_r)):
            raise ValueError("Too much info for k=0: t and bc_type can only "
                             "be None.")
        t = np.r_[x, x[-1]]
        c = np.asarray(y)
        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
        return BSpline.construct_fast(t, c, k, axis=axis)

    # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16)): broken line
    if k == 1 and t is None:
        if not (deriv_l is None and deriv_r is None):
            raise ValueError("Too much info for k=1: bc_type can only be None.")
        t = np.r_[x[0], x, x[-1]]
        c = np.asarray(y)
        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
        return BSpline.construct_fast(t, c, k, axis=axis)

    k = operator.index(k)

    if bc_type == 'periodic' and t is not None:
        raise NotImplementedError("For periodic case t is constructed "
                                  "automatically and can not be passed "
                                  "manually")

    # come up with a sensible knot vector, if needed
    if t is None:
        if deriv_l is None and deriv_r is None:
            if bc_type == 'periodic':
                t = _periodic_knots(x, k)
            elif k == 2:
                # OK, it's a bit ad hoc: Greville sites + omit
                # 2nd and 2nd-to-last points, a la not-a-knot
                t = (x[1:] + x[:-1]) / 2.
                t = np.r_[(x[0],)*(k+1),
                          t[1:-1],
                          (x[-1],)*(k+1)]
            else:
                t = _not_a_knot(x, k)
        else:
            t = _augknt(x, k)

    t = _as_float_array(t, check_finite)

    if k < 0:
        raise ValueError("Expect non-negative k.")
    if t.ndim != 1 or np.any(t[1:] < t[:-1]):
        raise ValueError("Expect t to be a 1-D sorted array_like.")
    if t.size < x.size + k + 1:
        raise ValueError('Got %d knots, need at least %d.' %
                         (t.size, x.size + k + 1))
    if (x[0] < t[k]) or (x[-1] > t[-k]):
        raise ValueError('Out of bounds w/ x = %s.' % x)

    if bc_type == 'periodic':
        return _make_periodic_spline(x, y, t, k, axis)

    # Here : deriv_l, r = [(nu, value), ...]
    deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
    deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)
    nleft = deriv_l_ords.shape[0]

    deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
    deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)
    nright = deriv_r_ords.shape[0]

    # have `n` conditions for `nt` coefficients; need nt-n derivatives
    n = x.size
    nt = t.size - k - 1

    if nt - n != nleft + nright:
        raise ValueError("The number of derivatives at boundaries does not "
                         f"match: expected {nt-n}, got {nleft}+{nright}")

    # bail out if the `y` array is zero-sized
    if y.size == 0:
        c = np.zeros((nt,) + y.shape[1:], dtype=float)
        return BSpline.construct_fast(t, c, k, axis=axis)

    # set up the LHS: the collocation matrix + derivatives at boundaries
    # (banded storage for LAPACK's gbsv; `offset` leaves room for the
    # derivative rows at the top)
    kl = ku = k
    ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float64, order='F')
    _bspl._colloc(x, t, k, ab, offset=nleft)
    if nleft > 0:
        _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku,
                                      deriv_l_ords.astype(np.dtype("long")))
    if nright > 0:
        _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku,
                                      deriv_r_ords.astype(np.dtype("long")),
                                      offset=nt-nright)

    # set up the RHS: values to interpolate (+ derivative values, if any)
    extradim = prod(y.shape[1:])
    rhs = np.empty((nt, extradim), dtype=y.dtype)
    if nleft > 0:
        rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
    rhs[nleft:nt - nright] = y.reshape(-1, extradim)
    if nright > 0:
        rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)

    # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
    if check_finite:
        ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
    gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
    lu, piv, c, info = gbsv(kl, ku, ab, rhs,
                            overwrite_ab=True, overwrite_b=True)

    if info > 0:
        raise LinAlgError("Collocation matrix is singular.")
    elif info < 0:
        raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)

    c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
    return BSpline.construct_fast(t, c, k, axis=axis)
+
+
def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
    r"""Compute the (coefficients of) an LSQ (Least SQuared) based
    fitting B-spline.

    The result is a linear combination

    .. math::

            S(x) = \sum_j c_j B_j(x; t)

    of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes

    .. math::

        \sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2

    Parameters
    ----------
    x : array_like, shape (m,)
        Abscissas.
    y : array_like, shape (m, ...)
        Ordinates.
    t : array_like, shape (n + k + 1,).
        Knots.
        Knots and data points must satisfy Schoenberg-Whitney conditions.
    k : int, optional
        B-spline degree. Default is cubic, ``k = 3``.
    w : array_like, shape (m,), optional
        Weights for spline fitting. Must be positive. If ``None``,
        then weights are all equal.
        Default is ``None``.
    axis : int, optional
        Interpolation axis. Default is zero.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default is True.

    Returns
    -------
    b : a BSpline object of the degree ``k`` with knots ``t``.

    See Also
    --------
    BSpline : base class representing the B-spline objects
    make_interp_spline : a similar factory function for interpolating splines
    LSQUnivariateSpline : a FITPACK-based spline fitting routine
    splrep : a FITPACK-based fitting routine

    Notes
    -----
    The number of data points must be larger than the spline degree ``k``.

    Knots ``t`` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    Examples
    --------
    Generate some noisy data:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)

    Now fit a smoothing cubic spline with a pre-defined internal knots.
    Here we make the knot vector (k+1)-regular by adding boundary knots:

    >>> from scipy.interpolate import make_lsq_spline, BSpline
    >>> t = [-1, 0, 1]
    >>> k = 3
    >>> t = np.r_[(x[0],)*(k+1),
    ...           t,
    ...           (x[-1],)*(k+1)]
    >>> spl = make_lsq_spline(x, y, t, k)

    For comparison, we also construct an interpolating spline for the same
    set of data:

    >>> from scipy.interpolate import make_interp_spline
    >>> spl_i = make_interp_spline(x, y)

    Plot both:

    >>> xs = np.linspace(-3, 3, 100)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
    >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
    >>> plt.legend(loc='best')
    >>> plt.show()

    **NaN handling**: If the input arrays contain ``nan`` values, the result is
    not useful since the underlying spline fitting routines cannot deal with
    ``nan``. A workaround is to use zero weights for not-a-number data points:

    >>> y[8] = np.nan
    >>> w = np.isnan(y)
    >>> y[w] = 0.
    >>> tck = make_lsq_spline(x, y, t, w=~w)

    Notice the need to replace a ``nan`` by a numerical value (precise value
    does not matter as long as the corresponding weight is zero.)

    """
    x = _as_float_array(x, check_finite)
    y = _as_float_array(y, check_finite)
    t = _as_float_array(t, check_finite)
    if w is not None:
        w = _as_float_array(w, check_finite)
    else:
        # unweighted fit: all points contribute equally
        w = np.ones_like(x)
    k = operator.index(k)

    axis = normalize_axis_index(axis, y.ndim)

    y = np.moveaxis(y, axis, 0)    # now internally interp axis is zero

    if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
        raise ValueError("Expect x to be a 1-D sorted array_like.")
    if x.shape[0] < k+1:
        raise ValueError("Need more x points.")
    if k < 0:
        raise ValueError("Expect non-negative k.")
    if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
        raise ValueError("Expect t to be a 1-D sorted array_like.")
    if x.size != y.shape[0]:
        raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
    if k > 0 and np.any((x < t[k]) | (x > t[-k])):
        raise ValueError('Out of bounds w/ x = %s.' % x)
    if x.size != w.size:
        raise ValueError(f'Shapes of x {x.shape} and w {w.shape} are incompatible')

    # number of coefficients
    n = t.size - k - 1

    # construct A.T @ A and rhs with A the collocation matrix, and
    # rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
    lower = True
    extradim = prod(y.shape[1:])
    ab = np.zeros((k+1, n), dtype=np.float64, order='F')
    rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
    # fills `ab` (normal-equations matrix, banded) and `rhs` in place
    _bspl._norm_eq_lsq(x, t, k,
                       y.reshape(-1, extradim),
                       w,
                       ab, rhs)
    rhs = rhs.reshape((n,) + y.shape[1:])

    # have observation matrix & rhs, can solve the LSQ problem
    # (A.T @ A is symmetric positive definite and banded: Cholesky applies)
    cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
                                 check_finite=check_finite)
    c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
                         check_finite=check_finite)

    c = np.ascontiguousarray(c)
    return BSpline.construct_fast(t, c, k, axis=axis)
+
+
+#############################
+# Smoothing spline helpers #
+#############################
+
def _compute_optimal_gcv_parameter(X, wE, y, w):
    """
    Returns an optimal regularization parameter from the GCV criteria [1].

    Parameters
    ----------
    X : array, shape (5, n)
        5 bands of the design matrix ``X`` stored in LAPACK banded storage.
    wE : array, shape (5, n)
        5 bands of the penalty matrix :math:`W^{-1} E` stored in LAPACK banded
        storage.
    y : array, shape (n,)
        Ordinates.
    w : array, shape (n,)
        Vector of weights.

    Returns
    -------
    lam : float
        An optimal from the GCV criteria point of view regularization
        parameter.

    Notes
    -----
    No checks are performed.

    References
    ----------
    .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
        for observational data, Philadelphia, Pennsylvania: Society for
        Industrial and Applied Mathematics, 1990, pp. 45-65.
        :doi:`10.1137/1.9781611970128`

    """

    def compute_banded_symmetric_XT_W_Y(X, w, Y):
        """
        Assuming that the product :math:`X^T W Y` is symmetric and both ``X``
        and ``Y`` are 5-banded, compute the unique bands of the product.

        Parameters
        ----------
        X : array, shape (5, n)
            5 bands of the matrix ``X`` stored in LAPACK banded storage.
        w : array, shape (n,)
            Array of weights
        Y : array, shape (5, n)
            5 bands of the matrix ``Y`` stored in LAPACK banded storage.

        Returns
        -------
        res : array, shape (4, n)
            The result of the product :math:`X^T Y` stored in the banded way.

        Notes
        -----
        As far as the matrices ``X`` and ``Y`` are 5-banded, their product
        :math:`X^T W Y` is 7-banded. It is also symmetric, so we can store
        only unique diagonals.

        """
        # compute W Y: scale each banded row by the weights, with the
        # appropriate offset for the off-diagonal bands
        W_Y = np.copy(Y)

        W_Y[2] *= w
        for i in range(2):
            W_Y[i, 2 - i:] *= w[:-2 + i]
            W_Y[3 + i, :-1 - i] *= w[1 + i:]

        n = X.shape[1]
        res = np.zeros((4, n))
        for i in range(n):
            for j in range(min(n-i, 4)):
                # dot product of the i-th column of X with the (i+j)-th
                # column of W_Y, using only the overlapping band entries
                res[-j-1, i + j] = sum(X[j:, i] * W_Y[:5-j, i + j])
        return res

    def compute_b_inv(A):
        """
        Inverse 3 central bands of matrix :math:`A=U^T D^{-1} U` assuming that
        ``U`` is a unit upper triangular banded matrix using an algorithm
        proposed in [1].

        Parameters
        ----------
        A : array, shape (4, n)
            Matrix to inverse, stored in LAPACK banded storage.

        Returns
        -------
        B : array, shape (4, n)
            3 unique bands of the symmetric matrix that is an inverse to ``A``.
            The first row is filled with zeros.

        Notes
        -----
        The algorithm is based on the cholesky decomposition and, therefore,
        in case matrix ``A`` is close to not positive defined, the function
        raises LinAlgError.

        Both matrices ``A`` and ``B`` are stored in LAPACK banded storage.

        References
        ----------
        .. [1] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
            spline functions," Numerische Mathematik, vol. 47, no. 1,
            pp. 99-106, 1985.
            :doi:`10.1007/BF01389878`

        """

        def find_b_inv_elem(i, j, U, D, B):
            # back-substitution step: fill element (i, i+j) of the banded
            # inverse from already-computed entries below/right of it
            rng = min(3, n - i - 1)
            rng_sum = 0.
            if j == 0:
                # use 2-nd formula from [1]
                for k in range(1, rng + 1):
                    rng_sum -= U[-k - 1, i + k] * B[-k - 1, i + k]
                rng_sum += D[i]
                B[-1, i] = rng_sum
            else:
                # use 1-st formula from [1]
                for k in range(1, rng + 1):
                    diag = abs(k - j)
                    ind = i + min(k, j)
                    rng_sum -= U[-k - 1, i + k] * B[-diag - 1, ind + diag]
                B[-j - 1, i + j] = rng_sum

        # factor A = U^T D^{-1} U: normalize the Cholesky factor to a unit
        # upper triangular U plus a diagonal D
        U = cholesky_banded(A)
        for i in range(2, 5):
            U[-i, i-1:] /= U[-1, :-i+1]
        D = 1. / (U[-1])**2
        U[-1] /= U[-1]

        n = U.shape[1]

        # fill the banded inverse from the bottom-right corner backwards
        B = np.zeros(shape=(4, n))
        for i in range(n - 1, -1, -1):
            for j in range(min(3, n - i - 1), -1, -1):
                find_b_inv_elem(i, j, U, D, B)
        # the first row contains garbage and should be removed
        B[0] = [0.] * n
        return B

    def _gcv(lam, X, XtWX, wE, XtE):
        r"""
        Computes the generalized cross-validation criteria [1].

        Parameters
        ----------
        lam : float, (:math:`\lambda \geq 0`)
            Regularization parameter.
        X : array, shape (5, n)
            Matrix is stored in LAPACK banded storage.
        XtWX : array, shape (4, n)
            Product :math:`X^T W X` stored in LAPACK banded storage.
        wE : array, shape (5, n)
            Matrix :math:`W^{-1} E` stored in LAPACK banded storage.
        XtE : array, shape (4, n)
            Product :math:`X^T E` stored in LAPACK banded storage.

        Returns
        -------
        res : float
            Value of the GCV criteria with the regularization parameter
            :math:`\lambda`.

        Notes
        -----
        Criteria is computed from the formula (1.3.2) [3]:

        .. math::

            GCV(\lambda) = \dfrac{1}{n} \sum\limits_{k = 1}^{n} \dfrac{
            \left( y_k - f_{\lambda}(x_k) \right)^2}{
            \left( 1 - \operatorname{Tr}(A)/n \right)^2}

        The criteria is discussed in section 1.3 [3].

        The numerator is computed using (2.2.4) [3] and the denominator is
        computed using an algorithm from [2] (see in the ``compute_b_inv``
        function).

        References
        ----------
        .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
            for observational data, Philadelphia, Pennsylvania: Society for
            Industrial and Applied Mathematics, 1990, pp. 45-65.
            :doi:`10.1137/1.9781611970128`
        .. [2] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
            spline functions," Numerische Mathematik, vol. 47, no. 1,
            pp. 99-106, 1985.
            :doi:`10.1007/BF01389878`
        .. [3] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
            BSc thesis, 2022. Might be available (in Russian).

        """
        # Compute the numerator from (2.2.4) [3]
        n = X.shape[1]
        c = solve_banded((2, 2), X + lam * wE, y)
        res = np.zeros(n)
        # compute ``W^{-1} E c`` with respect to banded-storage of ``E``
        tmp = wE * c
        for i in range(n):
            for j in range(max(0, i - n + 3), min(5, i + 3)):
                res[i] += tmp[j, i + 2 - j]
        numer = np.linalg.norm(lam * res)**2 / n

        # compute the denominator
        lhs = XtWX + lam * XtE
        try:
            b_banded = compute_b_inv(lhs)
            # compute the trace of the product b_banded @ XtX
            # (off-diagonal bands count twice by symmetry)
            tr = b_banded * XtWX
            tr[:-1] *= 2
            # find the denominator
            denom = (1 - sum(sum(tr)) / n)**2
        except LinAlgError:
            # cholesky decomposition cannot be performed
            raise ValueError('Seems like the problem is ill-posed')

        res = numer / denom

        return res

    n = X.shape[1]

    # precompute the lambda-independent banded products once
    XtWX = compute_banded_symmetric_XT_W_Y(X, w, X)
    XtE = compute_banded_symmetric_XT_W_Y(X, w, wE)

    def fun(lam):
        return _gcv(lam, X, XtWX, wE, XtE)

    # minimize GCV over lambda in [0, n]
    gcv_est = minimize_scalar(fun, bounds=(0, n), method='Bounded')
    if gcv_est.success:
        return gcv_est.x
    raise ValueError(f"Unable to find minimum of the GCV "
                     f"function: {gcv_est.message}")
+
+
+def _coeff_of_divided_diff(x):
+ """
+ Returns the coefficients of the divided difference.
+
+ Parameters
+ ----------
+ x : array, shape (n,)
+ Array which is used for the computation of divided difference.
+
+ Returns
+ -------
+ res : array_like, shape (n,)
+ Coefficients of the divided difference.
+
+ Notes
+ -----
+ Vector ``x`` should have unique elements, otherwise an error division by
+ zero might be raised.
+
+ No checks are performed.
+
+ """
+ n = x.shape[0]
+ res = np.zeros(n)
+ for i in range(n):
+ pp = 1.
+ for k in range(n):
+ if k != i:
+ pp *= (x[i] - x[k])
+ res[i] = 1. / pp
+ return res
+
+
def make_smoothing_spline(x, y, w=None, lam=None):
    r"""
    Compute the (coefficients of) smoothing cubic spline function using
    ``lam`` to control the tradeoff between the amount of smoothness of the
    curve and its proximity to the data. In case ``lam`` is None, using the
    GCV criteria [1] to find it.

    A smoothing spline is found as a solution to the regularized weighted
    linear regression problem:

    .. math::

        \sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2 +
        \lambda\int\limits_{x_1}^{x_n} (f^{(2)}(u))^2 d u

    where :math:`f` is a spline function, :math:`w` is a vector of weights and
    :math:`\lambda` is a regularization parameter.

    If ``lam`` is None, we use the GCV criteria to find an optimal
    regularization parameter, otherwise we solve the regularized weighted
    linear regression problem with given parameter. The parameter controls
    the tradeoff in the following way: the larger the parameter becomes, the
    smoother the function gets.

    Parameters
    ----------
    x : array_like, shape (n,)
        Abscissas. `n` must be at least 5.
    y : array_like, shape (n,)
        Ordinates. `n` must be at least 5.
    w : array_like, shape (n,), optional
        Vector of weights. Default is ``np.ones_like(x)``.
    lam : float, (:math:`\lambda \geq 0`), optional
        Regularization parameter. If ``lam`` is None, then it is found from
        the GCV criteria. Default is None.

    Returns
    -------
    func : a BSpline object.
        A callable representing a spline in the B-spline basis
        as a solution of the problem of smoothing splines using
        the GCV criteria [1] in case ``lam`` is None, otherwise using the
        given parameter ``lam``.

    Raises
    ------
    ValueError
        If ``x`` and ``y`` are not 1-D of the same size, if ``x`` is not
        strictly ascending, if fewer than 5 points are given, if ``w`` is
        not a positive vector of matching length, or if ``lam`` is negative.

    Notes
    -----
    This algorithm is a clean room reimplementation of the algorithm
    introduced by Woltring in FORTRAN [2]. The original version cannot be used
    in SciPy source code because of the license issues. The details of the
    reimplementation are discussed here (available only in Russian) [4].

    If the vector of weights ``w`` is None, we assume that all the points are
    equal in terms of weights, and vector of weights is vector of ones.

    Note that in weighted residual sum of squares, weights are not squared:
    :math:`\sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2` while in
    ``splrep`` the sum is built from the squared weights.

    In cases when the initial problem is ill-posed (for example, the product
    :math:`X^T W X` where :math:`X` is a design matrix is not a positive
    defined matrix) a ValueError is raised.

    References
    ----------
    .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models for
        observational data, Philadelphia, Pennsylvania: Society for Industrial
        and Applied Mathematics, 1990, pp. 45-65.
        :doi:`10.1137/1.9781611970128`
    .. [2] H. J. Woltring, A Fortran package for generalized, cross-validatory
        spline smoothing and differentiation, Advances in Engineering
        Software, vol. 8, no. 2, pp. 104-113, 1986.
        :doi:`10.1016/0141-1195(86)90098-7`
    .. [3] T. Hastie, J. Friedman, and R. Tisbshirani, "Smoothing Splines" in
        The elements of Statistical Learning: Data Mining, Inference, and
        prediction, New York: Springer, 2017, pp. 241-249.
        :doi:`10.1007/978-0-387-84858-7`
    .. [4] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
        BSc thesis, 2022 (in Russian).

    Examples
    --------
    Generate some noisy data

    >>> import numpy as np
    >>> np.random.seed(1234)
    >>> n = 200
    >>> def func(x):
    ...     return x**3 + x**2 * np.sin(4 * x)
    >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
    >>> y = func(x) + np.random.normal(scale=1.5, size=n)

    Make a smoothing spline function

    >>> from scipy.interpolate import make_smoothing_spline
    >>> spl = make_smoothing_spline(x, y)

    Plot both

    >>> import matplotlib.pyplot as plt
    >>> grid = np.linspace(x[0], x[-1], 400)
    >>> plt.plot(grid, spl(grid), label='Spline')
    >>> plt.plot(grid, func(grid), label='Original function')
    >>> plt.scatter(x, y, marker='.')
    >>> plt.legend(loc='best')
    >>> plt.show()

    """

    x = np.ascontiguousarray(x, dtype=float)
    y = np.ascontiguousarray(y, dtype=float)

    # Validate dimensionality and sizes first: checking monotonicity on a
    # 2-D ``x`` would otherwise raise an ambiguous-truth-value error instead
    # of a clear message.
    if x.ndim != 1 or y.ndim != 1 or x.shape[0] != y.shape[0]:
        raise ValueError('``x`` and ``y`` should be one dimensional and the'
                         ' same size')

    n = x.shape[0]
    if n <= 4:
        raise ValueError('``x`` and ``y`` length must be at least 5')

    if np.any(x[1:] - x[:-1] <= 0):
        raise ValueError('``x`` should be an ascending array')

    if w is None:
        w = np.ones(len(x))
    else:
        w = np.ascontiguousarray(w)
        # Weights must form a strictly positive vector aligned with ``x``.
        if w.ndim != 1 or w.shape[0] != n or np.any(w <= 0):
            raise ValueError('Invalid vector of weights')

    # Cubic B-spline knot vector: boundary knots repeated 3 extra times.
    t = np.r_[[x[0]] * 3, x, [x[-1]] * 3]

    # It is known that the solution to the stated minimization problem exists
    # and is a natural cubic spline with vector of knots equal to the unique
    # elements of ``x`` [3], so we will solve the problem in the basis of
    # natural splines.

    # create design matrix in the B-spline basis
    X_bspl = BSpline.design_matrix(x, t, 3)
    # move from B-spline basis to the basis of natural splines using equations
    # (2.1.7) [4]
    # central elements
    X = np.zeros((5, n))
    for i in range(1, 4):
        X[i, 2: -2] = X_bspl[i: i - 4, 3: -3][np.diag_indices(n - 4)]

    # first elements
    X[1, 1] = X_bspl[0, 0]
    X[2, :2] = ((x[2] + x[1] - 2 * x[0]) * X_bspl[0, 0],
                X_bspl[1, 1] + X_bspl[1, 2])
    X[3, :2] = ((x[2] - x[0]) * X_bspl[1, 1], X_bspl[2, 2])

    # last elements
    X[1, -2:] = (X_bspl[-3, -3], (x[-1] - x[-3]) * X_bspl[-2, -2])
    X[2, -2:] = (X_bspl[-2, -3] + X_bspl[-2, -2],
                 (2 * x[-1] - x[-2] - x[-3]) * X_bspl[-1, -1])
    X[3, -2] = X_bspl[-1, -1]

    # create penalty matrix and divide it by vector of weights: W^{-1} E
    wE = np.zeros((5, n))
    wE[2:, 0] = _coeff_of_divided_diff(x[:3]) / w[:3]
    wE[1:, 1] = _coeff_of_divided_diff(x[:4]) / w[:4]
    for j in range(2, n - 2):
        wE[:, j] = (x[j+2] - x[j-2]) * _coeff_of_divided_diff(x[j-2:j+3]) \
                   / w[j-2: j+3]

    wE[:-1, -2] = -_coeff_of_divided_diff(x[-4:]) / w[-4:]
    wE[:-2, -1] = _coeff_of_divided_diff(x[-3:]) / w[-3:]
    wE *= 6

    if lam is None:
        lam = _compute_optimal_gcv_parameter(X, wE, y, w)
    elif lam < 0.:
        raise ValueError('Regularization parameter should be non-negative')

    # solve the initial problem in the basis of natural splines
    c = solve_banded((2, 2), X + lam * wE, y)
    # move back to B-spline basis using equations (2.2.10) [4]
    c_ = np.r_[c[0] * (t[5] + t[4] - 2 * t[3]) + c[1],
               c[0] * (t[5] - t[3]) + c[1],
               c[1: -1],
               c[-1] * (t[-4] - t[-6]) + c[-2],
               c[-1] * (2 * t[-4] - t[-5] - t[-6]) + c[-2]]

    return BSpline.construct_fast(t, c_, 3)
+
+
+########################
+# FITPACK look-alikes #
+########################
+
def fpcheck(x, t, k):
    """ Check consistency of the data vector `x` and the knot vector `t`.

    Return None if inputs are consistent, raises a ValueError otherwise.
    """
    # Python translation of the FITPACK routine `fpchec`,
    # https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
    # With Fortran 1-based indexing, the conditions verified are:
    #   1) k+1 <= n-k-1 <= m
    #   2) t(1) <= t(2) <= ... <= t(k+1)
    #      t(n-k) <= t(n-k+1) <= ... <= t(n)
    #   3) t(k+1) < t(k+2) < ... < t(n-k)
    #   4) t(k+1) <= x(i) <= t(n-k)
    #   5) the Schoenberg-Whitney conditions hold for at least one subset
    #      of the data points: t(j) < y(j) < t(j+k+1), j = 1, ..., n-k-1.
    x = np.asarray(x)
    t = np.asarray(t)

    if x.ndim != 1 or t.ndim != 1:
        raise ValueError(f"Expect `x` and `t` be 1D sequences. Got {x = } and {t = }")

    m = x.shape[0]
    n = t.shape[0]
    nk1 = n - k - 1

    # condition 1: k+1 <= n-k-1 <= m
    if not (k + 1 <= nk1 <= m):
        raise ValueError(f"Need k+1 <= n-k-1 <= m. Got {m = }, {n = } and {k = }.")

    # condition 2: boundary knots must be non-decreasing on both ends
    if (t[:k+1] > t[1:k+2]).any():
        raise ValueError(f"First k knots must be ordered; got {t = }.")
    if (t[nk1:] < t[nk1-1:-1]).any():
        raise ValueError(f"Last k knots must be ordered; got {t = }.")

    # condition 3: interior knots must be strictly increasing
    if (t[k+1:n-k] <= t[k:n-k-1]).any():
        raise ValueError(f"Internal knots must be distinct. Got {t = }.")

    # condition 4: data must lie inside [t(k+1), t(n-k)].
    # NB: like FITPACK's fpchec, only the extreme data points are inspected.
    if (x[0] < t[k]) or (x[-1] > t[n-k-1]):
        raise ValueError(f"Out of bounds: {x = } and {t = }.")

    # condition 5: Schoenberg-Whitney — first the end conditions, then a
    # sweep over the interior knots mirroring the Fortran loop.
    mesg = f"Schoenberg-Whitney condition is violated with {t = } and {x =}."

    if (x[0] >= t[k+1]) or (x[-1] <= t[n-k-2]):
        raise ValueError(mesg)

    nk3 = n - k - 3
    if nk3 < 2:
        return
    upper = k + 1
    for j in range(1, nk3 + 1):
        lower_knot = t[j]
        upper += 1
        upper_knot = t[upper]
        # index of the first data point strictly right of t[j]
        i = np.argmax(x > lower_knot)
        if i >= m - 1 or x[i] >= upper_knot:
            raise ValueError(mesg)
    return
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_cubic.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_cubic.py
new file mode 100644
index 0000000000000000000000000000000000000000..997776150afa44ff3f6d14356f2b1bc3b1fb5e5e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_cubic.py
@@ -0,0 +1,970 @@
+"""Interpolation algorithms using piecewise cubic polynomials."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import warnings
+
+import numpy as np
+
+from scipy.linalg import solve, solve_banded
+
+from . import PPoly
+from ._polyint import _isscalar
+
+if TYPE_CHECKING:
+ from typing import Literal
+
+__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
+ "Akima1DInterpolator", "CubicSpline"]
+
+
def prepare_input(x, y, axis, dydx=None):
    """Prepare input for cubic spline interpolators.

    All data are converted to numpy arrays and checked for correctness.
    Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
    axis. The value of `axis` is converted to lie in
    [0, number of dimensions of `y`).
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if np.issubdtype(x.dtype, np.complexfloating):
        raise ValueError("`x` must contain real values.")
    x = x.astype(float)

    # Result dtype becomes complex as soon as either y or dydx is complex.
    dtype = complex if np.issubdtype(y.dtype, np.complexfloating) else float

    if dydx is not None:
        dydx = np.asarray(dydx)
        if y.shape != dydx.shape:
            raise ValueError("The shapes of `y` and `dydx` must be identical.")
        if np.issubdtype(dydx.dtype, np.complexfloating):
            dtype = complex
        dydx = dydx.astype(dtype, copy=False)

    y = y.astype(dtype, copy=False)
    axis = axis % y.ndim

    if x.ndim != 1:
        raise ValueError("`x` must be 1-dimensional.")
    if x.shape[0] < 2:
        raise ValueError("`x` must contain at least 2 elements.")
    if x.shape[0] != y.shape[axis]:
        raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
                         "match the length of `x`")

    if not np.all(np.isfinite(x)):
        raise ValueError("`x` must contain only finite values.")
    if not np.all(np.isfinite(y)):
        raise ValueError("`y` must contain only finite values.")
    if dydx is not None and not np.all(np.isfinite(dydx)):
        raise ValueError("`dydx` must contain only finite values.")

    dx = np.diff(x)
    if np.any(dx <= 0):
        raise ValueError("`x` must be strictly increasing sequence.")

    # Put the interpolation axis first so downstream code can index axis 0.
    y = np.moveaxis(y, axis, 0)
    if dydx is not None:
        dydx = np.moveaxis(dydx, axis, 0)

    return x, dx, y, axis, dydx
+
+
class CubicHermiteSpline(PPoly):
    """Piecewise-cubic interpolator matching values and first derivatives.

    The result is represented as a `PPoly` instance.

    Parameters
    ----------
    x : array_like, shape (n,)
        1-D array containing values of the independent variable.
        Values must be real, finite and in strictly increasing order.
    y : array_like
        Array containing values of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    dydx : array_like
        Array containing derivatives of the dependent variable. It can have
        arbitrary number of dimensions, but the length along ``axis``
        (see below) must match the length of ``x``. Values must be finite.
    axis : int, optional
        Axis along which `y` is assumed to be varying. Meaning that for
        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
        Default is 0.
    extrapolate : {bool, 'periodic', None}, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. If None (default), it is set to True.

    Attributes
    ----------
    x : ndarray, shape (n,)
        Breakpoints. The same ``x`` which was passed to the constructor.
    c : ndarray, shape (4, n-1, ...)
        Coefficients of the polynomials on each segment. The trailing
        dimensions match the dimensions of `y`, excluding ``axis``.
        For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
    axis : int
        Interpolation axis. The same axis which was passed to the
        constructor.

    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots

    See Also
    --------
    Akima1DInterpolator : Akima 1D interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    If you want to create a higher-order spline matching higher-order
    derivatives, use `BPoly.from_derivatives`.

    References
    ----------
    .. [1] `Cubic Hermite spline
           <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
           on Wikipedia.
    """

    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
        extrapolate = True if extrapolate is None else extrapolate

        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)

        # Interval widths reshaped to broadcast against trailing axes of y.
        dx_b = dx.reshape((dx.shape[0],) + (1,) * (y.ndim - 1))
        secant = np.diff(y, axis=0) / dx_b
        # Deviation of the endpoint derivatives from the secant slope.
        excess = (dydx[:-1] + dydx[1:] - 2 * secant) / dx_b

        # Standard cubic Hermite coefficients in PPoly layout:
        # c[0]*(x-xi)^3 + c[1]*(x-xi)^2 + c[2]*(x-xi) + c[3].
        coeffs = np.empty((4, len(x) - 1) + y.shape[1:], dtype=excess.dtype)
        coeffs[0] = excess / dx_b
        coeffs[1] = (secant - dydx[:-1]) / dx_b - excess
        coeffs[2] = dydx[:-1]
        coeffs[3] = y[:-1]

        super().__init__(coeffs, x, extrapolate=extrapolate)
        self.axis = axis
+
+
class PchipInterpolator(CubicHermiteSpline):
    r"""PCHIP 1-D monotonic cubic interpolation.

    ``x`` and ``y`` are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).

    Parameters
    ----------
    x : ndarray, shape (npoints, )
        A 1-D array of monotonically increasing real values. ``x`` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray, shape (..., npoints, ...)
        A N-D array of real values. ``y``'s length along the interpolation
        axis must be equal to the length of ``x``. Use the ``axis``
        parameter to select the interpolation axis.

        .. deprecated:: 1.13.0
            Complex data is deprecated and will raise an error in SciPy 1.15.0.
            If you are trying to use the real components of the passed array,
            use ``np.real`` on ``y``.

    axis : int, optional
        Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
        to ``axis=0``.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Methods
    -------
    __call__
    derivative
    antiderivative
    roots

    See Also
    --------
    CubicHermiteSpline : Piecewise-cubic interpolator.
    Akima1DInterpolator : Akima 1D interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.

    Notes
    -----
    The interpolator preserves monotonicity in the interpolation data and does
    not overshoot if the data is not smooth.

    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at :math:`x_k`.

    Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using PCHIP algorithm [1]_.

    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    are the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean

    .. math::

        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}

    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.

    The end slopes are set using a one-sided scheme [2]_.


    References
    ----------
    .. [1] F. N. Fritsch and J. Butland,
           A method for constructing local
           monotone piecewise cubic interpolants,
           SIAM J. Sci. Comput., 5(2), 300-304 (1984).
           :doi:`10.1137/0905021`.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           :doi:`10.1137/1.9780898717952`

    """

    def __init__(self, x, y, axis=0, extrapolate=None):
        # prepare_input moves the interpolation axis of y to position 0
        # and returns the normalized axis index.
        x, _, y, axis, _ = prepare_input(x, y, axis)
        if np.iscomplexobj(y):
            msg = ("`PchipInterpolator` only works with real values for `y`. "
                   "Passing an array with a complex dtype for `y` is deprecated "
                   "and will raise an error in SciPy 1.15.0. If you are trying to "
                   "use the real components of the passed array, use `np.real` on "
                   "the array before passing to `PchipInterpolator`.")
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
        # Reshape x so it broadcasts against the trailing dimensions of y.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        # y already has the interpolation axis first, so build with axis=0
        # and record the user's original axis afterwards.
        super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
        self.axis = axis

    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # one-sided three-point estimate for the derivative
        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)

        # try to preserve shape:
        # - if the estimate disagrees in sign with the adjacent slope,
        #   flatten the endpoint derivative to 0;
        # - if the two slopes disagree in sign and the estimate is more
        #   than 3x the adjacent slope, cap it at 3*m0 (cf. Moler,
        #   pchiptx.m) to avoid overshoot.
        mask = np.sign(d) != np.sign(m0)
        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
        mmm = (~mask) & mask2

        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]

        return d

    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        # PCHIP algorithm is:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
        # where h_k is the spacing between x_k and x_{k+1}
        # (this matches the class docstring and the computation below).
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]

        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk

        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = np.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)

        smk = np.sign(mk)
        # interior points where the derivative must be zero: a sign change
        # between adjacent segment slopes, or a zero slope on either side
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)

        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]

        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore', invalid='ignore'):
            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)

        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0 / whmean[~condition]

        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])

        return dk.reshape(y_shape)
+
+
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for pchip interpolation.

    xi and yi are arrays of values used to approximate some function f,
    with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
    to find the value of new points x and the derivatives there.

    See `scipy.interpolate.PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.

        .. deprecated:: 1.13.0
            Complex data is deprecated and will raise an error in
            SciPy 1.15.0. If you are trying to use the real components of
            the passed array, use ``np.real`` on `yi`.

    x : scalar or array_like
        Of length M.
    der : int or list, optional
        Derivatives to extract. The 0th derivative can be included to
        return the function value.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R.

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.

    Examples
    --------
    We can interpolate 2D observed data using pchip interpolation:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import pchip_interpolate
    >>> x_observed = np.linspace(0.0, 10.0, 11)
    >>> y_observed = np.sin(x_observed)
    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
    >>> y = pchip_interpolate(x_observed, y_observed, x)
    >>> plt.plot(x_observed, y_observed, "o", label="observation")
    >>> plt.plot(x, y, label="pchip interpolation")
    >>> plt.legend()
    >>> plt.show()

    """
    interp = PchipInterpolator(xi, yi, axis=axis)

    # A scalar `der` selects a single derivative order (0 means the
    # function value itself); a sequence yields one result per order.
    if der == 0:
        return interp(x)
    if _isscalar(der):
        return interp.derivative(der)(x)
    return [interp.derivative(nu)(x) for nu in der]
+
+
class Akima1DInterpolator(CubicHermiteSpline):
    r"""
    Akima interpolator

    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.

    Parameters
    ----------
    x : ndarray, shape (npoints, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (..., npoints, ...)
        N-D array of real values. The length of ``y`` along the interpolation axis
        must be equal to the length of ``x``. Use the ``axis`` parameter to
        select the interpolation axis.

        .. deprecated:: 1.13.0
            Complex data is deprecated and will raise an error in SciPy 1.15.0.
            If you are trying to use the real components of the passed array,
            use ``np.real`` on ``y``.

    axis : int, optional
        Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
        to ``axis=0``.
    method : {'akima', 'makima'}, optional
        If ``"makima"``, use the modified Akima interpolation [2]_.
        Defaults to ``"akima"``, use the Akima interpolation [1]_.

        .. versionadded:: 1.13.0

    Methods
    -------
    __call__
    derivative
    antiderivative
    roots

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    CubicSpline : Cubic spline data interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    .. versionadded:: 0.14

    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.

    Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
    the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
    :math:`x_i` is defined as:

    .. math::

        d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i

    In the Akima interpolation [1]_ (``method="akima"``), the weights are:

    .. math::

        \begin{aligned}
        w_1 &= |\delta_{i+1} - \delta_i| \\
        w_2 &= |\delta_{i-1} - \delta_{i-2}|
        \end{aligned}

    In the modified Akima interpolation [2]_ (``method="makima"``),
    to eliminate overshoot and avoid edge cases of both numerator and
    denominator being equal to 0, the weights are modified as follows:

    .. math::

        \begin{align*}
        w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
        w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
        \end{align*}

    Examples
    --------
    Comparison of ``method="akima"`` and ``method="makima"``:

    >>> import numpy as np
    >>> from scipy.interpolate import Akima1DInterpolator
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(1, 7, 7)
    >>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
    >>> xs = np.linspace(min(x), max(x), num=100)
    >>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
    >>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)

    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y, "o", label="data")
    >>> ax.plot(xs, y_akima, label="akima")
    >>> ax.plot(xs, y_makima, label="makima")
    >>> ax.legend()
    >>> fig.show()

    The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.

    References
    ----------
    .. [1] A new method of interpolation and smooth curve fitting based
           on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
           589-602. :doi:`10.1145/321607.321609`
    .. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
           https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/

    """

    def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima"):
        if method not in {"akima", "makima"}:
            raise NotImplementedError(f"`method`={method} is unsupported.")
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
        x, dx, y, axis, _ = prepare_input(x, y, axis)

        if np.iscomplexobj(y):
            msg = ("`Akima1DInterpolator` only works with real values for `y`. "
                   "Passing an array with a complex dtype for `y` is deprecated "
                   "and will raise an error in SciPy 1.15.0. If you are trying to "
                   "use the real components of the passed array, use `np.real` on "
                   "the array before passing to `Akima1DInterpolator`.")
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

        # determine slopes between breakpoints
        # m holds the n-1 segment slopes in m[2:-2], padded with two
        # linearly-extrapolated slopes on each side (filled just below).
        m = np.empty((x.size + 3, ) + y.shape[1:])
        # reshape dx so it broadcasts against the trailing dimensions of y
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx

        # add two additional points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]

        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
        # defined. This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        if method == "makima":
            # modified Akima weights include |sum of slopes| / 2 terms
            pm = np.abs(m[1:] + m[:-1])
            f1 = dm[2:] + 0.5 * pm[2:]
            f2 = dm[:-2] + 0.5 * pm[:-2]
        else:
            f1 = dm[2:]
            f2 = dm[:-2]
        f12 = f1 + f2
        # These are the mask of where the slope at breakpoint is defined:
        # the weight sum must be non-negligible relative to its maximum
        # (guards the 0/0 case); elsewhere the fill value in t is kept.
        ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
        x_ind, y_ind = ind[0], ind[1:]
        # Set the slope at breakpoint
        t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
                  f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]

        # y already has the interpolation axis first; record the user's
        # original axis after construction.
        super().__init__(x, y, t, axis=0, extrapolate=False)
        self.axis = axis

    def extend(self, c, x, right=True):
        raise NotImplementedError("Extending a 1-D Akima interpolator is not "
                                  "yet implemented")

    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")

    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
+
+
+class CubicSpline(CubicHermiteSpline):
+ """Cubic spline data interpolator.
+
+ Interpolate data with a piecewise cubic polynomial which is twice
+ continuously differentiable [1]_. The result is represented as a `PPoly`
+ instance with breakpoints matching the given data.
+
+ Parameters
+ ----------
+ x : array_like, shape (n,)
+ 1-D array containing values of the independent variable.
+ Values must be real, finite and in strictly increasing order.
+ y : array_like
+ Array containing values of the dependent variable. It can have
+ arbitrary number of dimensions, but the length along ``axis``
+ (see below) must match the length of ``x``. Values must be finite.
+ axis : int, optional
+ Axis along which `y` is assumed to be varying. Meaning that for
+ ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+ Default is 0.
+ bc_type : string or 2-tuple, optional
+ Boundary condition type. Two additional equations, given by the
+ boundary conditions, are required to determine all coefficients of
+ polynomials on each segment [2]_.
+
+ If `bc_type` is a string, then the specified condition will be applied
+ at both ends of a spline. Available conditions are:
+
+ * 'not-a-knot' (default): The first and second segment at a curve end
+ are the same polynomial. It is a good default when there is no
+ information on boundary conditions.
+ * 'periodic': The interpolated functions is assumed to be periodic
+ of period ``x[-1] - x[0]``. The first and last value of `y` must be
+ identical: ``y[0] == y[-1]``. This boundary condition will result in
+ ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
+ * 'clamped': The first derivative at curves ends are zero. Assuming
+ a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
+ * 'natural': The second derivative at curve ends are zero. Assuming
+ a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
+
+ If `bc_type` is a 2-tuple, the first and the second value will be
+ applied at the curve start and end respectively. The tuple values can
+ be one of the previously mentioned strings (except 'periodic') or a
+ tuple `(order, deriv_values)` allowing to specify arbitrary
+ derivatives at curve ends:
+
+ * `order`: the derivative order, 1 or 2.
+ * `deriv_value`: array_like containing derivative values, shape must
+ be the same as `y`, excluding ``axis`` dimension. For example, if
+ `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
+ the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
+ and have the shape (n0, n1).
+ extrapolate : {bool, 'periodic', None}, optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. If None (default), ``extrapolate`` is
+ set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
+
+ Attributes
+ ----------
+ x : ndarray, shape (n,)
+ Breakpoints. The same ``x`` which was passed to the constructor.
+ c : ndarray, shape (4, n-1, ...)
+ Coefficients of the polynomials on each segment. The trailing
+ dimensions match the dimensions of `y`, excluding ``axis``.
+ For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
+ ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
+ axis : int
+ Interpolation axis. The same axis which was passed to the
+ constructor.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ integrate
+ roots
+
+ See Also
+ --------
+ Akima1DInterpolator : Akima 1D interpolator.
+ PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+ PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
+
+ Notes
+ -----
+ Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
+ former controls only construction of a spline, and the latter only
+ evaluation.
+
+ When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
+ a condition that the first derivative is equal to the linear interpolant
+ slope. When both boundary conditions are 'not-a-knot' and n = 3, the
+ solution is sought as a parabola passing through given points.
+
+ When 'not-a-knot' boundary conditions is applied to both ends, the
+ resulting spline will be the same as returned by `splrep` (with ``s=0``)
+ and `InterpolatedUnivariateSpline`, but these two methods use a
+ representation in B-spline basis.
+
+ .. versionadded:: 0.18.0
+
+ Examples
+ --------
+ In this example the cubic spline is used to interpolate a sampled sinusoid.
+ You can see that the spline continuity property holds for the first and
+ second derivatives and violates only for the third derivative.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import CubicSpline
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+ >>> cs = CubicSpline(x, y)
+ >>> xs = np.arange(-0.5, 9.6, 0.1)
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+ >>> ax.plot(x, y, 'o', label='data')
+ >>> ax.plot(xs, np.sin(xs), label='true')
+ >>> ax.plot(xs, cs(xs), label="S")
+ >>> ax.plot(xs, cs(xs, 1), label="S'")
+ >>> ax.plot(xs, cs(xs, 2), label="S''")
+ >>> ax.plot(xs, cs(xs, 3), label="S'''")
+ >>> ax.set_xlim(-0.5, 9.5)
+ >>> ax.legend(loc='lower left', ncol=2)
+ >>> plt.show()
+
+ In the second example, the unit circle is interpolated with a spline. A
+ periodic boundary condition is used. You can see that the first derivative
+ values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
+ computed. Note that a circle cannot be exactly represented by a cubic
+ spline. To increase precision, more breakpoints would be required.
+
+ >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
+ >>> y = np.c_[np.cos(theta), np.sin(theta)]
+ >>> cs = CubicSpline(theta, y, bc_type='periodic')
+ >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
+ ds/dx=0.0 ds/dy=1.0
+ >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
+ >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+ >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
+ >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
+ >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
+ >>> ax.axes.set_aspect('equal')
+ >>> ax.legend(loc='center')
+ >>> plt.show()
+
+ The third example is the interpolation of a polynomial y = x**3 on the
+ interval 0 <= x<= 1. A cubic spline can represent this function exactly.
+ To achieve that we need to specify values and first derivatives at
+ endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
+ y'(1) = 3.
+
+ >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
+ >>> x = np.linspace(0, 1)
+ >>> np.allclose(x**3, cs(x))
+ True
+
+ References
+ ----------
    .. [1] `Cubic Spline Interpolation
            <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
           on Wikiversity.
+ .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
+ """
+
    def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
        """Build the cubic spline.

        Validates the inputs, solves a (mostly tridiagonal) linear system
        for the first derivatives ``s`` at every breakpoint implied by C2
        continuity plus the boundary conditions, then delegates to the
        parent class, which is constructed from ``(x, y, s)``.
        Parameters are documented on the class.
        """
        # prepare_input (defined elsewhere in this module) normalizes
        # dtypes/axis and returns dx = np.diff(x).
        x, dx, y, axis, _ = prepare_input(x, y, axis)
        n = len(x)

        bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)

        if extrapolate is None:
            # Periodic boundary conditions default to periodic evaluation.
            if bc[0] == 'periodic':
                extrapolate = 'periodic'
            else:
                extrapolate = True

        if y.size == 0:
            # bail out early for zero-sized arrays
            s = np.zeros_like(y)
        else:
            # dxr broadcasts dx against the trailing dimensions of y.
            dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
            slope = np.diff(y, axis=0) / dxr

            # If bc is 'not-a-knot' this change is just a convention.
            # If bc is 'periodic' then we already checked that y[0] == y[-1],
            # and the spline is just a constant, we handle this case in the
            # same way by setting the first derivatives to slope, which is 0.
            if n == 2:
                if bc[0] in ['not-a-knot', 'periodic']:
                    bc[0] = (1, slope[0])
                if bc[1] in ['not-a-knot', 'periodic']:
                    bc[1] = (1, slope[0])

            # This is a special case, when both conditions are 'not-a-knot'
            # and n == 3. In this case 'not-a-knot' can't be handled regularly
            # as the both conditions are identical. We handle this case by
            # constructing a parabola passing through given points.
            if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
                A = np.zeros((3, 3))  # This is a standard matrix.
                b = np.empty((3,) + y.shape[1:], dtype=y.dtype)

                A[0, 0] = 1
                A[0, 1] = 1
                A[1, 0] = dx[1]
                A[1, 1] = 2 * (dx[0] + dx[1])
                A[1, 2] = dx[0]
                A[2, 1] = 1
                A[2, 2] = 1

                b[0] = 2 * slope[0]
                b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
                b[2] = 2 * slope[1]

                s = solve(A, b, overwrite_a=True, overwrite_b=True,
                          check_finite=False)
            elif n == 3 and bc[0] == 'periodic':
                # In case when number of points is 3 we compute the derivatives
                # manually
                t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
                s = np.broadcast_to(t, (n,) + y.shape[1:])
            else:
                # Find derivative values at each x[i] by solving a tridiagonal
                # system.
                A = np.zeros((3, n))  # This is a banded matrix representation.
                b = np.empty((n,) + y.shape[1:], dtype=y.dtype)

                # Filling the system for i=1..n-2
                #   (x[i-1] - x[i]) * s[i-1] +
                #   2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +
                #   (x[i] - x[i-1]) * s[i+1] =
                #   3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +
                #        (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))

                A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])  # The diagonal
                A[0, 2:] = dx[:-1]                   # The upper diagonal
                A[-1, :-2] = dx[1:]                  # The lower diagonal

                b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])

                bc_start, bc_end = bc

                if bc_start == 'periodic':
                    # Due to the periodicity, and because y[-1] = y[0], the
                    # linear system has (n-1) unknowns/equations instead of n:
                    A = A[:, 0:-1]
                    A[1, 0] = 2 * (dx[-1] + dx[0])
                    A[0, 1] = dx[-1]

                    b = b[:-1]

                    # Also, due to the periodicity, the system is not
                    # tri-diagonal. We need to compute a "condensed" matrix of
                    # shape (n-2, n-2). See
                    # https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
                    # for more explanations.
                    # The condensed matrix is obtained by removing the last
                    # column and last row of the (n-1, n-1) system matrix. The
                    # removed values are saved in scalar variables with the
                    # (n-1, n-1) system matrix indices forming their names:
                    a_m1_0 = dx[-2]  # lower left corner value: A[-1, 0]
                    a_m1_m2 = dx[-1]
                    a_m1_m1 = 2 * (dx[-1] + dx[-2])
                    a_m2_m1 = dx[-3]
                    a_0_m1 = dx[0]

                    b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
                    b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])

                    Ac = A[:, :-1]
                    b1 = b[:-1]
                    b2 = np.zeros_like(b1)
                    b2[0] = -a_0_m1
                    b2[-1] = -a_m2_m1

                    # s1 and s2 are the solutions of (n-2, n-2) system
                    s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
                                      overwrite_b=False, check_finite=False)

                    s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
                                      overwrite_b=False, check_finite=False)

                    # computing the s[n-2] solution:
                    s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
                            (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))

                    # s is the solution of the (n, n) system:
                    s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
                    s[:-2] = s1 + s_m1 * s2
                    s[-2] = s_m1
                    s[-1] = s[0]
                else:
                    if bc_start == 'not-a-knot':
                        A[1, 0] = dx[1]
                        A[0, 1] = x[2] - x[0]
                        d = x[2] - x[0]
                        b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
                                dxr[0]**2 * slope[1]) / d
                    elif bc_start[0] == 1:
                        # Prescribed first derivative at the left end.
                        A[1, 0] = 1
                        A[0, 1] = 0
                        b[0] = bc_start[1]
                    elif bc_start[0] == 2:
                        # Prescribed second derivative at the left end.
                        A[1, 0] = 2 * dx[0]
                        A[0, 1] = dx[0]
                        b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])

                    if bc_end == 'not-a-knot':
                        A[1, -1] = dx[-2]
                        A[-1, -2] = x[-1] - x[-3]
                        d = x[-1] - x[-3]
                        b[-1] = ((dxr[-1]**2*slope[-2] +
                                  (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
                    elif bc_end[0] == 1:
                        # Prescribed first derivative at the right end.
                        A[1, -1] = 1
                        A[-1, -2] = 0
                        b[-1] = bc_end[1]
                    elif bc_end[0] == 2:
                        # Prescribed second derivative at the right end.
                        A[1, -1] = 2 * dx[-1]
                        A[-1, -2] = dx[-1]
                        b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])

                    s = solve_banded((1, 1), A, b, overwrite_ab=True,
                                     overwrite_b=True, check_finite=False)

        # The PPoly-style base is always built along axis 0 (prepare_input
        # already moved the interpolation axis there); record the caller's
        # original axis afterwards.
        super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
        self.axis = axis
+
+ @staticmethod
+ def _validate_bc(bc_type, y, expected_deriv_shape, axis):
+ """Validate and prepare boundary conditions.
+
+ Returns
+ -------
+ validated_bc : 2-tuple
+ Boundary conditions for a curve start and end.
+ y : ndarray
+ y casted to complex dtype if one of the boundary conditions has
+ complex dtype.
+ """
+ if isinstance(bc_type, str):
+ if bc_type == 'periodic':
+ if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
+ raise ValueError(
+ f"The first and last `y` point along axis {axis} must "
+ "be identical (within machine precision) when "
+ "bc_type='periodic'.")
+
+ bc_type = (bc_type, bc_type)
+
+ else:
+ if len(bc_type) != 2:
+ raise ValueError("`bc_type` must contain 2 elements to "
+ "specify start and end conditions.")
+
+ if 'periodic' in bc_type:
+ raise ValueError("'periodic' `bc_type` is defined for both "
+ "curve ends and cannot be used with other "
+ "boundary conditions.")
+
+ validated_bc = []
+ for bc in bc_type:
+ if isinstance(bc, str):
+ if bc == 'clamped':
+ validated_bc.append((1, np.zeros(expected_deriv_shape)))
+ elif bc == 'natural':
+ validated_bc.append((2, np.zeros(expected_deriv_shape)))
+ elif bc in ['not-a-knot', 'periodic']:
+ validated_bc.append(bc)
+ else:
+ raise ValueError(f"bc_type={bc} is not allowed.")
+ else:
+ try:
+ deriv_order, deriv_value = bc
+ except Exception as e:
+ raise ValueError(
+ "A specified derivative value must be "
+ "given in the form (order, value)."
+ ) from e
+
+ if deriv_order not in [1, 2]:
+ raise ValueError("The specified derivative order must "
+ "be 1 or 2.")
+
+ deriv_value = np.asarray(deriv_value)
+ if deriv_value.shape != expected_deriv_shape:
+ raise ValueError(
+ "`deriv_value` shape {} is not the expected one {}."
+ .format(deriv_value.shape, expected_deriv_shape))
+
+ if np.issubdtype(deriv_value.dtype, np.complexfloating):
+ y = y.astype(complex, copy=False)
+
+ validated_bc.append((deriv_order, deriv_value))
+
+ return validated_bc, y
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..cc527980099835534aad06ab0dbcb4dc4f8c7364
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack2.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack2.py
new file mode 100644
index 0000000000000000000000000000000000000000..88e916287c98ea12f75d06871dc4a493082493a2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack2.py
@@ -0,0 +1,2362 @@
+"""
+fitpack --- curve and surface fitting with splines
+
+fitpack is based on a collection of Fortran routines DIERCKX
+by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
+to double routines by Pearu Peterson.
+"""
+# Created by Pearu Peterson, June,August 2003
+__all__ = [
+ 'UnivariateSpline',
+ 'InterpolatedUnivariateSpline',
+ 'LSQUnivariateSpline',
+ 'BivariateSpline',
+ 'LSQBivariateSpline',
+ 'SmoothBivariateSpline',
+ 'LSQSphereBivariateSpline',
+ 'SmoothSphereBivariateSpline',
+ 'RectBivariateSpline',
+ 'RectSphereBivariateSpline']
+
+
+import warnings
+
+from numpy import zeros, concatenate, ravel, diff, array, ones # noqa:F401
+import numpy as np
+
+from . import _fitpack_impl
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+# ############### Univariate spline ####################
+
+_curfit_messages = {1: """
+The required storage space exceeds the available storage space, as
+specified by the parameter nest: nest too small. If nest is already
+large (say nest > m/2), it may also indicate that s is too small.
+The approximation returned is the weighted least-squares spline
+according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
+gives the corresponding weighted sum of squared residuals (fp>s).
+""",
+ 2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+ 3: """
+The maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+ 10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
  xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
                    }


# NOTE: reconstructed span — the extraction stripped angle-bracketed text,
# swallowing the end of message 10, the ext-mode table and the class header.
_extrap_modes = {0: 0, 'extrapolate': 0,
                 1: 1, 'zeros': 1,
                 2: 2, 'raise': 2,
                 3: 3, 'const': 3}


class UnivariateSpline:
    """
    1-D smoothing spline fit to a given set of data points.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
    `s` specifies the number of knots by specifying a smoothing condition.

    Parameters
    ----------
    x : (N,) array_like
        1-D array of independent input data. Must be increasing;
        must be strictly increasing if `s` is 0.
    y : (N,) array_like
        1-D array of dependent input data, of the same length as `x`.
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If `w` is None,
        weights are all 1. Default is None.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        `bbox` is None, ``bbox=[x[0], x[-1]]``. Default is None.
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
        ``k = 3`` is a cubic spline. Default is 3.
    s : float or None, optional
        Positive smoothing factor used to choose the number of knots. Number
        of knots will be increased until the smoothing condition is
        satisfied::

            sum((w[i] * (y[i] - spl(x[i])))**2, axis=0) <= s

        If `s` is None, ``s = len(w)`` which should be a good value if
        ``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
        If 0, spline will interpolate through all data points. Default is
        None.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        Default is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    See Also
    --------
    BivariateSpline :
        a base class for bivariate splines.
    InterpolatedUnivariateSpline :
        a interpolating univariate spline for a given set of data points.
    LSQUnivariateSpline :
        a spline for which knots are user-selected
    splrep :
        a function to find the B-spline representation of a 1-D curve
    splev :
        a function to evaluate a B-spline or its derivatives

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    **NaN handling**: If the input arrays contain ``nan`` values, the result
    is not useful, since the underlying spline fitting routines cannot deal
    with ``nan``. A workaround is to use zero weights for not-a-number
    data points:

    >>> import numpy as np
+ >>> from scipy.interpolate import UnivariateSpline
+ >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
+ >>> w = np.isnan(y)
+ >>> y[w] = 0.
+ >>> spl = UnivariateSpline(x, y, w=~w)
+
+ Notice the need to replace a ``nan`` by a numerical value (precise value
+ does not matter as long as the corresponding weight is zero.)
+
+ References
+ ----------
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+ integration of experimental data using spline functions",
+ J.Comp.Appl.Maths 1 (1975) 165-184.
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+ 1286-1304.
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import UnivariateSpline
+ >>> rng = np.random.default_rng()
+ >>> x = np.linspace(-3, 3, 50)
+ >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
+ >>> plt.plot(x, y, 'ro', ms=5)
+
+ Use the default value for the smoothing parameter:
+
+ >>> spl = UnivariateSpline(x, y)
+ >>> xs = np.linspace(-3, 3, 1000)
+ >>> plt.plot(xs, spl(xs), 'g', lw=3)
+
+ Manually change the amount of smoothing:
+
+ >>> spl.set_smoothing_factor(0.5)
+ >>> plt.plot(xs, spl(xs), 'b', lw=3)
+ >>> plt.show()
+
+ """
+
    def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
                 ext=0, check_finite=False):
        """Fit the smoothing spline via FITPACK's curfit (``fpcurf0``)."""
        # NOTE: ``bbox=[None]*2`` is a mutable default shared across calls;
        # it is only read here, never mutated, so this is safe.
        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, s, ext,
                                                      check_finite)

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
                                xe=bbox[1], s=s)
        if data[-1] == 1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        # Cache tck and possibly retag self as an Interpolated/LSQ spline,
        # depending on FITPACK's exit code.
        self._reset_class()
+
+ @staticmethod
+ def validate_input(x, y, w, bbox, k, s, ext, check_finite):
+ x, y, bbox = np.asarray(x), np.asarray(y), np.asarray(bbox)
+ if w is not None:
+ w = np.asarray(w)
+ if check_finite:
+ w_finite = np.isfinite(w).all() if w is not None else True
+ if (not np.isfinite(x).all() or not np.isfinite(y).all() or
+ not w_finite):
+ raise ValueError("x and y array must not contain "
+ "NaNs or infs.")
+ if s is None or s > 0:
+ if not np.all(diff(x) >= 0.0):
+ raise ValueError("x must be increasing if s > 0")
+ else:
+ if not np.all(diff(x) > 0.0):
+ raise ValueError("x must be strictly increasing if s = 0")
+ if x.size != y.size:
+ raise ValueError("x and y should have a same length")
+ elif w is not None and not x.size == y.size == w.size:
+ raise ValueError("x, y, and w should have a same length")
+ elif bbox.shape != (2,):
+ raise ValueError("bbox shape should be (2,)")
+ elif not (1 <= k <= 5):
+ raise ValueError("k should be 1 <= k <= 5")
+ elif s is not None and not s >= 0.0:
+ raise ValueError("s should be s >= 0.0")
+
+ try:
+ ext = _extrap_modes[ext]
+ except KeyError as e:
+ raise ValueError("Unknown extrapolation mode %s." % ext) from e
+
+ return x, y, w, bbox, ext
+
+ @classmethod
+ def _from_tck(cls, tck, ext=0):
+ """Construct a spline object from given tck"""
+ self = cls.__new__(cls)
+ t, c, k = tck
+ self._eval_args = tck
+ # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+ self._data = (None, None, None, None, None, k, None, len(t), t,
+ c, None, None, None, None)
+ self.ext = ext
+ return self
+
    def _reset_class(self):
        """Cache the (t, c, k) evaluation tuple from ``_data`` and retag
        ``self`` according to FITPACK's exit code ``ier``."""
        data = self._data
        # _data layout: slot 5 is k, 7 is n, 8 is t, 9 is c, last is ier.
        n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
        self._eval_args = t[:n], c[:n], k
        if ier == 0:
            # the spline returned has a residual sum of squares fp
            # such that abs(fp-s)/s <= tol with tol a relative
            # tolerance set to 0.001 by the program
            pass
        elif ier == -1:
            # the spline returned is an interpolating spline
            self._set_class(InterpolatedUnivariateSpline)
        elif ier == -2:
            # the spline returned is the weighted least-squares
            # polynomial of degree k. In this extreme case fp gives
            # the upper bound fp0 for the smoothing factor s.
            self._set_class(LSQUnivariateSpline)
        else:
            # error exit: ier == 1 still yields a usable LSQ spline;
            # all error codes produce a warning with FITPACK's message.
            if ier == 1:
                self._set_class(LSQUnivariateSpline)
            message = _curfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message, stacklevel=3)
+
    def _set_class(self, cls):
        """Record the spline class implied by the fit outcome and, for the
        three stock classes, retag ``self`` in place."""
        self._spline_class = cls
        if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
                              LSQUnivariateSpline):
            self.__class__ = cls
        else:
            # It's an unknown subclass -- don't change class. cf. #731
            pass
+
    def _reset_nest(self, data, nest=None):
        """Re-run FITPACK's ``fpcurf1`` with a larger knot workspace.

        ``data`` is the 14-tuple produced by ``fpcurf0``/``fpcurf1``;
        this is called when the previous fit signalled ``ier == 1``
        (workspace ``nest`` too small).
        """
        # NOTE(review): despite the layout comment elsewhere naming slot 10
        # "fp", this value is forwarded below in the position fpcurf1
        # expects — verify against dfitpack's generated signature.
        n = data[10]
        if nest is None:
            k, m = data[5], len(data[0])
            nest = m+k+1  # this is the maximum bound for nest
        else:
            if not n <= nest:
                raise ValueError("`nest` can only be increased")
        # Grow the knot (8), coefficient (9), fpint (11) and nrdata (12)
        # arrays to the new workspace size.
        t, c, fpint, nrdata = (np.resize(data[j], nest) for j in
                               [8, 9, 11, 12])

        args = data[:8] + (t, c, n, fpint, nrdata, data[13])
        data = dfitpack.fpcurf1(*args)
        return data
+
+ def set_smoothing_factor(self, s):
+ """ Continue spline computation with the given smoothing
+ factor s and with the knots found at the last call.
+
+ This routine modifies the spline in place.
+
+ """
+ data = self._data
+ if data[6] == -1:
+ warnings.warn('smoothing factor unchanged for'
+ 'LSQ spline with fixed knots',
+ stacklevel=2)
+ return
+ args = data[:6] + (s,) + data[7:]
+ data = dfitpack.fpcurf1(*args)
+ if data[-1] == 1:
+ # nest too small, setting to maximum bound
+ data = self._reset_nest(data)
+ self._data = data
+ self._reset_class()
+
    def __call__(self, x, nu=0, ext=None):
        """
        Evaluate spline (or its nu-th derivative) at positions x.

        Parameters
        ----------
        x : array_like
            A 1-D array of points at which to return the value of the smoothed
            spline or its derivatives. Note: `x` can be unordered but the
            evaluation is more efficient if `x` is (partially) ordered.
        nu : int
            The order of derivative of the spline to compute.
        ext : int
            Controls the value returned for elements of `x` not in the
            interval defined by the knot sequence.

            * if ext=0 or 'extrapolate', return the extrapolated value.
            * if ext=1 or 'zeros', return 0
            * if ext=2 or 'raise', raise a ValueError
            * if ext=3 or 'const', return the boundary value.

            The default value is 0, passed from the initialization of
            UnivariateSpline.

        """
        x = np.asarray(x)
        # empty input yields empty output
        if x.size == 0:
            return array([])
        if ext is None:
            # Fall back to the extrapolation mode fixed at construction.
            ext = self.ext
        else:
            # Accept both integer codes and their string aliases.
            try:
                ext = _extrap_modes[ext]
            except KeyError as e:
                raise ValueError("Unknown extrapolation mode %s." % ext) from e
        return _fitpack_impl.splev(x, self._eval_args, der=nu, ext=ext)
+
+ def get_knots(self):
+ """ Return positions of interior knots of the spline.
+
+ Internally, the knot vector contains ``2*k`` additional boundary knots.
+ """
+ data = self._data
+ k, n = data[5], data[7]
+ return data[8][k:n-k]
+
+ def get_coeffs(self):
+ """Return spline coefficients."""
+ data = self._data
+ k, n = data[5], data[7]
+ return data[9][:n-k-1]
+
    def get_residual(self):
        """Return weighted sum of squared residuals of the spline approximation.

        This is equivalent to::

            sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)

        """
        # Slot 10 of the FITPACK data tuple holds fp, the achieved weighted
        # residual sum of squares.
        return self._data[10]
+
    def integral(self, a, b):
        """ Return definite integral of the spline between two given points.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.

        Returns
        -------
        integral : float
            The value of the definite integral of the spline between limits.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 3, 11)
        >>> y = x**2
        >>> spl = UnivariateSpline(x, y)
        >>> spl.integral(0, 3)
        9.0

        which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
        of 0 and 3.

        A caveat is that this routine assumes the spline to be zero outside of
        the data limits:

        >>> spl.integral(-1, 4)
        9.0
        >>> spl.integral(-1, 0)
        0.0

        """
        # splint integrates the tck representation; per the caveat above it
        # treats the spline as zero outside the interval of definition.
        return _fitpack_impl.splint(a, b, self._eval_args)
+
    def derivatives(self, x):
        """ Return all derivatives of the spline at the point x.

        Parameters
        ----------
        x : float
            The point to evaluate the derivatives at.

        Returns
        -------
        der : ndarray, shape(k+1,)
            Derivatives of the orders 0 to k.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 3, 11)
        >>> y = x**2
        >>> spl = UnivariateSpline(x, y)
        >>> spl.derivatives(1.5)
        array([2.25, 3.0, 2.0, 0])

        """
        # spalde evaluates derivatives of orders 0..k at the single point x.
        return _fitpack_impl.spalde(x, self._eval_args)
+
    def roots(self):
        """ Return the zeros of the spline.

        Notes
        -----
        Restriction: only cubic splines are supported by FITPACK. For non-cubic
        splines, use `PPoly.root` (see below for an example).

        Examples
        --------

        For some data, this method may miss a root. This happens when one of
        the spline knots (which FITPACK places automatically) happens to
        coincide with the true root. A workaround is to convert to `PPoly`,
        which uses a different root-finding algorithm.

        For example,

        >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
        >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
        ...      4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
        ...      6.520430e-03, 8.170770e-03]
        >>> from scipy.interpolate import UnivariateSpline
        >>> spl = UnivariateSpline(x, y, s=0)
        >>> spl.roots()
        array([], dtype=float64)

        Converting to a PPoly object does find the roots at `x=2`:

        >>> from scipy.interpolate import splrep, PPoly
        >>> tck = splrep(x, y, s=0)
        >>> ppoly = PPoly.from_spline(tck)
        >>> ppoly.roots(extrapolate=False)
        array([2.])

        See Also
        --------
        sproot
        PPoly.roots

        """
        k = self._data[5]  # spline degree
        if k == 3:
            t = self._eval_args[0]
            # Workspace estimate handed to sproot — presumably an upper
            # bound of 3 roots per interior knot span; TODO confirm against
            # FITPACK's sproot requirements.
            mest = 3 * (len(t) - 7)
            return _fitpack_impl.sproot(self._eval_args, mest=mest)
        raise NotImplementedError('finding roots unsupported for '
                                  'non-cubic splines')
+
    def derivative(self, n=1):
        """
        Construct a new spline representing the derivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of derivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k-n representing the derivative of this
            spline.

        See Also
        --------
        splder, antiderivative

        Notes
        -----

        .. versionadded:: 0.13.0

        Examples
        --------
        This can be used for finding maxima of a curve:

        >>> import numpy as np
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 10, 70)
        >>> y = np.sin(x)
        >>> spl = UnivariateSpline(x, y, k=4, s=0)

        Now, differentiate the spline and find the zeros of the
        derivative. (NB: `sproot` only works for order 3 splines, so we
        fit an order 4 spline):

        >>> spl.derivative().roots() / np.pi
        array([ 0.50000001,  1.5       ,  2.49999998])

        This agrees well with roots :math:`\\pi/2 + n\\pi` of
        :math:`\\cos(x) = \\sin'(x)`.

        """
        # splder lowers the spline degree by n (see docstring above).
        tck = _fitpack_impl.splder(self._eval_args, n)
        # if self.ext is 'const', derivative.ext will be 'zeros'
        # (a constant extension differentiates to zero).
        ext = 1 if self.ext == 3 else self.ext
        return UnivariateSpline._from_tck(tck, ext=ext)
+
    def antiderivative(self, n=1):
        """
        Construct a new spline representing the antiderivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of antiderivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k+n representing the antiderivative of this
            spline.

        Notes
        -----

        .. versionadded:: 0.13.0

        See Also
        --------
        splantider, derivative

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, np.pi/2, 70)
        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
        >>> spl = UnivariateSpline(x, y, s=0)

        The derivative is the inverse operation of the antiderivative,
        although some floating point error accumulates:

        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
        (array(2.1565429877197317), array(2.1565429877201865))

        Antiderivative can be used to evaluate definite integrals:

        >>> ispl = spl.antiderivative()
        >>> ispl(np.pi/2) - ispl(0)
        2.2572053588768486

        This is indeed an approximation to the complete elliptic integral
        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

        >>> from scipy.special import ellipk
        >>> ellipk(0.8)
        2.2572053268208538

        """
        # splantider raises the spline degree by n (see docstring above);
        # the extrapolation mode is inherited unchanged.
        tck = _fitpack_impl.splantider(self._eval_args, n)
        return UnivariateSpline._from_tck(tck, self.ext)
+
+
class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    1-D interpolating spline for a given set of data points.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
    Spline function passes through all provided points. Equivalent to
    `UnivariateSpline` with `s` = 0.

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be strictly increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all 1.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
        ``k = 3``, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.

    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    See Also
    --------
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    LSQUnivariateSpline :
        a spline for which knots are user-selected
    SmoothBivariateSpline :
        a smoothing bivariate spline through the given points
    LSQBivariateSpline :
        a bivariate spline using weighted least-squares fitting
    splrep :
        a function to find the B-spline representation of a 1-D curve
    splev :
        a function to evaluate a B-spline or its derivatives
    sproot :
        a function to find the roots of a cubic B-spline
    splint :
        a function to evaluate the definite integral of a B-spline between two
        given points
    spalde :
        a function to evaluate all derivatives of a B-spline

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> rng = np.random.default_rng()
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
    >>> spl = InterpolatedUnivariateSpline(x, y)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
    >>> plt.show()

    Notice that the ``spl(x)`` interpolates `y`:

    >>> spl.get_residual()
    0.0

    """

    def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):

        # Shared validation with UnivariateSpline; passing s=None there only
        # enforces non-decreasing x, so strict monotonicity is re-checked.
        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
                                                      ext, check_finite)
        if not np.all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        # s=0 forces FITPACK to interpolate through every data point.
        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
                                      xe=bbox[1], s=0)
        self._reset_class()
+
+
+_fpchec_error_string = """The input parameters have been rejected by fpchec. \
+This means that at least one of the following conditions is violated:
+
+1) k+1 <= n-k-1 <= m
+2) t(1) <= t(2) <= ... <= t(k+1)
+ t(n-k) <= t(n-k+1) <= ... <= t(n)
+3) t(k+1) < t(k+2) < ... < t(n-k)
+4) t(k+1) <= x(i) <= t(n-k)
+5) The conditions specified by Schoenberg and Whitney must hold
+ for at least one subset of data points, i.e., there must be a
+ subset of data points y(j) such that
+ t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
+"""
+
+
class LSQUnivariateSpline(UnivariateSpline):
    """
    1-D spline with explicit internal knots.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
    specifies the internal knots of the spline

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        Input dimension of data points
    t : (M,) array_like
        interior knots of the spline. Must be in ascending order and::

            bbox[0] < t[0] < ... < t[-1] < bbox[-1]

    w : (N,) array_like, optional
        weights for spline fitting. Must be positive. If None (default),
        weights are all 1.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox = [x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
        Default is `k` = 3, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.

    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    Raises
    ------
    ValueError
        If the interior knots do not satisfy the Schoenberg-Whitney conditions

    See Also
    --------
    UnivariateSpline :
        a smooth univariate spline to fit a given set of data points.
    InterpolatedUnivariateSpline :
        a interpolating univariate spline for a given set of data points.
    splrep :
        a function to find the B-spline representation of a 1-D curve
    splev :
        a function to evaluate a B-spline or its derivatives
    sproot :
        a function to find the roots of a cubic B-spline
    splint :
        a function to evaluate the definite integral of a B-spline between two
        given points
    spalde :
        a function to evaluate all derivatives of a B-spline

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)

    Fit a smoothing spline with a pre-defined internal knots:

    >>> t = [-1, 0, 1]
    >>> spl = LSQUnivariateSpline(x, y, t)

    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> plt.plot(xs, spl(xs), 'g-', lw=3)
    >>> plt.show()

    Check the knot vector:

    >>> spl.get_knots()
    array([-3., -1., 0., 1., 3.])

    Constructing lsq spline using the knots from another spline:

    >>> x = np.arange(10)
    >>> s = UnivariateSpline(x, x, s=0)
    >>> s.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
    >>> knt = s.get_knots()
    >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1])    # Chop 1st and last knot
    >>> s1.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])

    """

    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):

        # Shared validation; least-squares fitting with fixed knots only
        # requires non-decreasing abscissae.
        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
                                                      ext, check_finite)
        if not np.all(diff(x) >= 0.0):
            raise ValueError('x must be increasing')

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        # Augment the interior knots with k+1 copies of each boundary knot
        # to form the full clamped knot vector FITPACK expects.
        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
        n = len(t)
        # Interior knots must be strictly increasing (necessary for the
        # Schoenberg-Whitney conditions to be satisfiable).
        if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        # fpchec performs FITPACK's full admissibility check of (x, t, k).
        if not dfitpack.fpchec(x, t, k) == 0:
            raise ValueError(_fpchec_error_string)
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        # Replace the two workspace slots before ier with None — presumably
        # fpcurfm1's values there are not meaningful for fixed knots (TODO
        # confirm) — keeping the common 14-slot _data layout with ier last.
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()
+
+
+# ############### Bivariate spline ####################
+
+class _BivariateSplineBase:
+ """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
+ [xb,xe] x [yb, ye] calculated from a given set of data points
+ (x,y,z).
+
+ See Also
+ --------
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+ BivariateSpline :
+ a base class for bivariate splines.
+ SphereBivariateSpline :
+ a bivariate spline on a spherical grid
+ """
+
+ @classmethod
+ def _from_tck(cls, tck):
+ """Construct a spline object from given tck and degree"""
+ self = cls.__new__(cls)
+ if len(tck) != 5:
+ raise ValueError("tck should be a 5 element tuple of tx,"
+ " ty, c, kx, ky")
+ self.tck = tck[:3]
+ self.degrees = tck[3:]
+ return self
+
+ def get_residual(self):
+ """ Return weighted sum of squared residuals of the spline
+ approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
+ """
+ return self.fp
+
+ def get_knots(self):
+ """ Return a tuple (tx,ty) where tx,ty contain knots positions
+ of the spline with respect to x-, y-variable, respectively.
+ The position of interior and additional knots are given as
+ t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
+ """
+ return self.tck[:2]
+
+ def get_coeffs(self):
+ """ Return spline coefficients."""
+ return self.tck[2]
+
+ def __call__(self, x, y, dx=0, dy=0, grid=True):
+ """
+ Evaluate the spline or its derivatives at given positions.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Input coordinates.
+
+ If `grid` is False, evaluate the spline at points ``(x[i],
+ y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
+ is obeyed.
+
+ If `grid` is True: evaluate spline at the grid points
+ defined by the coordinate arrays x, y. The arrays must be
+ sorted to increasing order.
+
+ The ordering of axes is consistent with
+ ``np.meshgrid(..., indexing="ij")`` and inconsistent with the
+ default ordering ``np.meshgrid(..., indexing="xy")``.
+ dx : int
+ Order of x-derivative
+
+ .. versionadded:: 0.14.0
+ dy : int
+ Order of y-derivative
+
+ .. versionadded:: 0.14.0
+ grid : bool
+ Whether to evaluate the results on a grid spanned by the
+ input arrays, or at points specified by the input arrays.
+
+ .. versionadded:: 0.14.0
+
+ Examples
+ --------
+ Suppose that we want to bilinearly interpolate an exponentially decaying
+ function in 2 dimensions.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import RectBivariateSpline
+
+ We sample the function on a coarse grid. Note that the default indexing="xy"
+ of meshgrid would result in an unexpected (transposed) result after
+ interpolation.
+
+ >>> xarr = np.linspace(-3, 3, 100)
+ >>> yarr = np.linspace(-3, 3, 100)
+ >>> xgrid, ygrid = np.meshgrid(xarr, yarr, indexing="ij")
+
+ The function to interpolate decays faster along one axis than the other.
+
+ >>> zdata = np.exp(-np.sqrt((xgrid / 2) ** 2 + ygrid**2))
+
+ Next we sample on a finer grid using interpolation (kx=ky=1 for bilinear).
+
+ >>> rbs = RectBivariateSpline(xarr, yarr, zdata, kx=1, ky=1)
+ >>> xarr_fine = np.linspace(-3, 3, 200)
+ >>> yarr_fine = np.linspace(-3, 3, 200)
+ >>> xgrid_fine, ygrid_fine = np.meshgrid(xarr_fine, yarr_fine, indexing="ij")
+ >>> zdata_interp = rbs(xgrid_fine, ygrid_fine, grid=False)
+
+ And check that the result agrees with the input by plotting both.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(1, 2, 1, aspect="equal")
+ >>> ax2 = fig.add_subplot(1, 2, 2, aspect="equal")
+ >>> ax1.imshow(zdata)
+ >>> ax2.imshow(zdata_interp)
+ >>> plt.show()
+ """
+ x = np.asarray(x)
+ y = np.asarray(y)
+
+ tx, ty, c = self.tck[:3]
+ kx, ky = self.degrees
+ if grid:
+ if x.size == 0 or y.size == 0:
+ return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
+
+ if (x.size >= 2) and (not np.all(np.diff(x) >= 0.0)):
+ raise ValueError("x must be strictly increasing when `grid` is True")
+ if (y.size >= 2) and (not np.all(np.diff(y) >= 0.0)):
+ raise ValueError("y must be strictly increasing when `grid` is True")
+
+ if dx or dy:
+ z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
+ if not ier == 0:
+ raise ValueError("Error code returned by parder: %s" % ier)
+ else:
+ z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
+ if not ier == 0:
+ raise ValueError("Error code returned by bispev: %s" % ier)
+ else:
+ # standard Numpy broadcasting
+ if x.shape != y.shape:
+ x, y = np.broadcast_arrays(x, y)
+
+ shape = x.shape
+ x = x.ravel()
+ y = y.ravel()
+
+ if x.size == 0 or y.size == 0:
+ return np.zeros(shape, dtype=self.tck[2].dtype)
+
+ if dx or dy:
+ z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
+ if not ier == 0:
+ raise ValueError("Error code returned by pardeu: %s" % ier)
+ else:
+ z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
+ if not ier == 0:
+ raise ValueError("Error code returned by bispeu: %s" % ier)
+
+ z = z.reshape(shape)
+ return z
+
+ def partial_derivative(self, dx, dy):
+ """Construct a new spline representing a partial derivative of this
+ spline.
+
+ Parameters
+ ----------
+ dx, dy : int
+ Orders of the derivative in x and y respectively. They must be
+ non-negative integers and less than the respective degree of the
+ original spline (self) in that direction (``kx``, ``ky``).
+
+ Returns
+ -------
+ spline :
+ A new spline of degrees (``kx - dx``, ``ky - dy``) representing the
+ derivative of this spline.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.9.0
+
+ """
+ if dx == 0 and dy == 0:
+ return self
+ else:
+ kx, ky = self.degrees
+ if not (dx >= 0 and dy >= 0):
+ raise ValueError("order of derivative must be positive or"
+ " zero")
+ if not (dx < kx and dy < ky):
+ raise ValueError("order of derivative must be less than"
+ " degree of spline")
+ tx, ty, c = self.tck[:3]
+ newc, ier = dfitpack.pardtc(tx, ty, c, kx, ky, dx, dy)
+ if ier != 0:
+ # This should not happen under normal conditions.
+ raise ValueError("Unexpected error code returned by"
+ " pardtc: %d" % ier)
+ nx = len(tx)
+ ny = len(ty)
+ newtx = tx[dx:nx - dx]
+ newty = ty[dy:ny - dy]
+ newkx, newky = kx - dx, ky - dy
+ newclen = (nx - dx - kx - 1) * (ny - dy - ky - 1)
+ return _DerivedBivariateSpline._from_tck((newtx, newty,
+ newc[:newclen],
+ newkx, newky))
+
+
+_surfit_messages = {1: """
+The required storage space exceeds the available storage space: nxest
+or nyest too small, or s too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small or
+badly chosen eps.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+ 3: """
+the maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached:
+s too small.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+ 4: """
+No more knots can be added because the number of b-spline coefficients
+(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
+either s or m too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 5: """
+No more knots can be added because the additional knot would (quasi)
+coincide with an old one: s too small or too large a weight to an
+inaccurate data point.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+ 10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
+If iopt==-1, then
+ xb>> import numpy as np
+ >>> from scipy.interpolate import RectBivariateSpline
+ >>> def f(x, y):
+ ... return np.exp(-np.sqrt((x / 2) ** 2 + y**2))
+
+ We sample the function on a coarse grid and set up the interpolator. Note that
+ the default ``indexing="xy"`` of meshgrid would result in an unexpected
+ (transposed) result after interpolation.
+
+ >>> xarr = np.linspace(-3, 3, 21)
+ >>> yarr = np.linspace(-3, 3, 21)
+ >>> xgrid, ygrid = np.meshgrid(xarr, yarr, indexing="ij")
+ >>> zdata = f(xgrid, ygrid)
+ >>> rbs = RectBivariateSpline(xarr, yarr, zdata, kx=1, ky=1)
+
+ Next we sample the function along a diagonal slice through the coordinate space
+ on a finer grid using interpolation.
+
+ >>> xinterp = np.linspace(-3, 3, 201)
+ >>> yinterp = np.linspace(3, -3, 201)
+ >>> zinterp = rbs.ev(xinterp, yinterp)
+
+ And check that the interpolation passes through the function evaluations as a
+ function of the distance from the origin along the slice.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(1, 1, 1)
+ >>> ax1.plot(np.sqrt(xarr**2 + yarr**2), np.diag(zdata), "or")
+ >>> ax1.plot(np.sqrt(xinterp**2 + yinterp**2), zinterp, "-b")
+ >>> plt.show()
+ """
+ return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
+
+ def integral(self, xa, xb, ya, yb):
+ """
+ Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
+
+ Parameters
+ ----------
+ xa, xb : float
+ The end-points of the x integration interval.
+ ya, yb : float
+ The end-points of the y integration interval.
+
+ Returns
+ -------
+ integ : float
+ The value of the resulting integral.
+
+ """
+ tx, ty, c = self.tck[:3]
+ kx, ky = self.degrees
+ return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
+
+ @staticmethod
+ def _validate_input(x, y, z, w, kx, ky, eps):
+ x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
+ if not x.size == y.size == z.size:
+ raise ValueError('x, y, and z should have a same length')
+
+ if w is not None:
+ w = np.asarray(w)
+ if x.size != w.size:
+ raise ValueError('x, y, z, and w should have a same length')
+ elif not np.all(w >= 0.0):
+ raise ValueError('w should be positive')
+ if (eps is not None) and (not 0.0 < eps < 1.0):
+ raise ValueError('eps should be between (0, 1)')
+ if not x.size >= (kx + 1) * (ky + 1):
+ raise ValueError('The length of x, y and z should be at least'
+ ' (kx+1) * (ky+1)')
+ return x, y, z, w
+
+
+class _DerivedBivariateSpline(_BivariateSplineBase):
+ """Bivariate spline constructed from the coefficients and knots of another
+ spline.
+
+ Notes
+ -----
+ The class is not meant to be instantiated directly from the data to be
+ interpolated or smoothed. As a result, its ``fp`` attribute and
+ ``get_residual`` method are inherited but overridden; ``AttributeError`` is
+ raised when they are accessed.
+
+ The other inherited attributes can be used as usual.
+ """
+ _invalid_why = ("is unavailable, because _DerivedBivariateSpline"
+ " instance is not constructed from data that are to be"
+ " interpolated or smoothed, but derived from the"
+ " underlying knots and coefficients of another spline"
+ " object")
+
+ @property
+ def fp(self):
+ raise AttributeError("attribute \"fp\" %s" % self._invalid_why)
+
+ def get_residual(self):
+ raise AttributeError("method \"get_residual\" %s" % self._invalid_why)
+
+
+class SmoothBivariateSpline(BivariateSpline):
+ """
+ Smooth bivariate spline approximation.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ 1-D sequences of data points (order is not important).
+ w : array_like, optional
+ Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
+ bbox : array_like, optional
+ Sequence of length 4 specifying the boundary of the rectangular
+ approximation domain. By default,
+ ``bbox=[min(x), max(x), min(y), max(y)]``.
+ kx, ky : ints, optional
+ Degrees of the bivariate spline. Default is 3.
+ s : float, optional
+ Positive smoothing factor defined for estimation condition:
+ ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
+ Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
+ estimate of the standard deviation of ``z[i]``.
+ eps : float, optional
+ A threshold for determining the effective rank of an over-determined
+ linear system of equations. `eps` should have a value within the open
+ interval ``(0, 1)``, the default is 1e-16.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ LSQBivariateSpline :
+ a bivariate spline using weighted least-squares fitting
+ RectSphereBivariateSpline :
+ a bivariate spline over a rectangular mesh on a sphere
+ SmoothSphereBivariateSpline :
+ a smoothing bivariate spline in spherical coordinates
+ LSQSphereBivariateSpline :
+ a bivariate spline in spherical coordinates using weighted
+ least-squares fitting
+ RectBivariateSpline :
+ a bivariate spline over a rectangular mesh
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+ The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolating.
+
+ This routine constructs spline knot vectors automatically via the FITPACK
+ algorithm. The spline knots may be placed away from the data points. For
+ some data sets, this routine may fail to construct an interpolating spline,
+ even if one is requested via ``s=0`` parameter. In such situations, it is
+ recommended to use `bisplrep` / `bisplev` directly instead of this routine
+ and, if needed, increase the values of ``nxest`` and ``nyest`` parameters
+ of `bisplrep`.
+
+ For linear interpolation, prefer `LinearNDInterpolator`.
+ See ``https://gist.github.com/ev-br/8544371b40f414b7eaf3fe6217209bff``
+ for discussion.
+
+ """
+
+ def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
+ eps=1e-16):
+
+ x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
+ bbox = ravel(bbox)
+ if not bbox.shape == (4,):
+ raise ValueError('bbox shape should be (4,)')
+ if s is not None and not s >= 0.0:
+ raise ValueError("s should be s >= 0.0")
+
+ xb, xe, yb, ye = bbox
+ nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+ xb, xe, yb,
+ ye, kx, ky,
+ s=s, eps=eps,
+ lwrk2=1)
+ if ier > 10: # lwrk2 was too small, re-run
+ nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+ xb, xe, yb,
+ ye, kx, ky,
+ s=s,
+ eps=eps,
+ lwrk2=ier)
+ if ier in [0, -1, -2]: # normal return
+ pass
+ else:
+ message = _surfit_messages.get(ier, 'ier=%s' % (ier))
+ warnings.warn(message, stacklevel=2)
+
+ self.fp = fp
+ self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
+ self.degrees = kx, ky
+
+
+class LSQBivariateSpline(BivariateSpline):
+ """
+ Weighted least-squares bivariate spline approximation.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ 1-D sequences of data points (order is not important).
+ tx, ty : array_like
+ Strictly ordered 1-D sequences of knots coordinates.
+ w : array_like, optional
+ Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
+ bbox : (4,) array_like, optional
+ Sequence of length 4 specifying the boundary of the rectangular
+ approximation domain. By default,
+ ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
+ kx, ky : ints, optional
+ Degrees of the bivariate spline. Default is 3.
+ eps : float, optional
+ A threshold for determining the effective rank of an over-determined
+ linear system of equations. `eps` should have a value within the open
+ interval ``(0, 1)``, the default is 1e-16.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ RectSphereBivariateSpline :
+ a bivariate spline over a rectangular mesh on a sphere
+ SmoothSphereBivariateSpline :
+ a smoothing bivariate spline in spherical coordinates
+ LSQSphereBivariateSpline :
+ a bivariate spline in spherical coordinates using weighted
+ least-squares fitting
+ RectBivariateSpline :
+ a bivariate spline over a rectangular mesh.
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+ The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolating.
+
+ """
+
+ def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
+ eps=None):
+
+ x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
+ bbox = ravel(bbox)
+ if not bbox.shape == (4,):
+ raise ValueError('bbox shape should be (4,)')
+
+ nx = 2*kx+2+len(tx)
+ ny = 2*ky+2+len(ty)
+ # The Fortran subroutine "surfit" (called as dfitpack.surfit_lsq)
+ # requires that the knot arrays passed as input should be "real
+ # array(s) of dimension nmax" where "nmax" refers to the greater of nx
+ # and ny. We pad the tx1/ty1 arrays here so that this is satisfied, and
+ # slice them to the desired sizes upon return.
+ nmax = max(nx, ny)
+ tx1 = zeros((nmax,), float)
+ ty1 = zeros((nmax,), float)
+ tx1[kx+1:nx-kx-1] = tx
+ ty1[ky+1:ny-ky-1] = ty
+
+ xb, xe, yb, ye = bbox
+ tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, nx, tx1, ny, ty1,
+ w, xb, xe, yb, ye,
+ kx, ky, eps, lwrk2=1)
+ if ier > 10:
+ tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z,
+ nx, tx1, ny, ty1, w,
+ xb, xe, yb, ye,
+ kx, ky, eps, lwrk2=ier)
+ if ier in [0, -1, -2]: # normal return
+ pass
+ else:
+ if ier < -2:
+ deficiency = (nx-kx-1)*(ny-ky-1)+ier
+ message = _surfit_messages.get(-3) % (deficiency)
+ else:
+ message = _surfit_messages.get(ier, 'ier=%s' % (ier))
+ warnings.warn(message, stacklevel=2)
+ self.fp = fp
+ self.tck = tx1[:nx], ty1[:ny], c
+ self.degrees = kx, ky
+
+
+class RectBivariateSpline(BivariateSpline):
+ """
+ Bivariate spline approximation over a rectangular mesh.
+
+ Can be used for both smoothing and interpolating data.
+
+ Parameters
+ ----------
+ x,y : array_like
+ 1-D arrays of coordinates in strictly ascending order.
+ Evaluated points outside the data range will be extrapolated.
+ z : array_like
+ 2-D array of data with shape (x.size,y.size).
+ bbox : array_like, optional
+ Sequence of length 4 specifying the boundary of the rectangular
+ approximation domain, which means the start and end spline knots of
+ each dimension are set by these values. By default,
+ ``bbox=[min(x), max(x), min(y), max(y)]``.
+ kx, ky : ints, optional
+ Degrees of the bivariate spline. Default is 3.
+ s : float, optional
+ Positive smoothing factor defined for estimation condition:
+ ``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
+ function. Default is ``s=0``, which is for interpolation.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ LSQBivariateSpline :
+ a bivariate spline using weighted least-squares fitting
+ RectSphereBivariateSpline :
+ a bivariate spline over a rectangular mesh on a sphere
+ SmoothSphereBivariateSpline :
+ a smoothing bivariate spline in spherical coordinates
+ LSQSphereBivariateSpline :
+ a bivariate spline in spherical coordinates using weighted
+ least-squares fitting
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolating.
+
+ """
+
+ def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
+ x, y, bbox = ravel(x), ravel(y), ravel(bbox)
+ z = np.asarray(z)
+ if not np.all(diff(x) > 0.0):
+ raise ValueError('x must be strictly increasing')
+ if not np.all(diff(y) > 0.0):
+ raise ValueError('y must be strictly increasing')
+ if not x.size == z.shape[0]:
+ raise ValueError('x dimension of z must have same number of '
+ 'elements as x')
+ if not y.size == z.shape[1]:
+ raise ValueError('y dimension of z must have same number of '
+ 'elements as y')
+ if not bbox.shape == (4,):
+ raise ValueError('bbox shape should be (4,)')
+ if s is not None and not s >= 0.0:
+ raise ValueError("s should be s >= 0.0")
+
+ z = ravel(z)
+ xb, xe, yb, ye = bbox
+ nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
+ ye, kx, ky, s)
+
+ if ier not in [0, -1, -2]:
+ msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
+ raise ValueError(msg)
+
+ self.fp = fp
+ self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
+ self.degrees = kx, ky
+
+
+_spherefit_messages = _surfit_messages.copy()
+_spherefit_messages[10] = """
+ERROR. On entry, the input data are controlled on validity. The following
+ restrictions must be satisfied:
+ -1<=iopt<=1, m>=2, ntest>=8, npest>=8, 0<eps<1, 0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
+ lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
+ kwrk >= m+(ntest-7)*(npest-7)
+ if iopt=-1: 8<=nt<=ntest, 9<=np<=npest, 0<tt(5)<...<tt(nt-4)<pi, 0<tp(5)<...<tp(np-4)<2*pi
+ if iopt>=0: s>=0
+ if one of these conditions is found to be violated,control
+ is immediately repassed to the calling program. in that
+ case there is no approximation returned."""
+_spherefit_messages[-3] = """
+WARNING. The coefficients of the spline returned have been computed as the
+ minimal norm least-squares solution of a (numerically) rank
+ deficient system (deficiency=%i, rank=%i). Especially if the rank
+ deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
+ the results may be inaccurate. They could also seriously depend on
+ the value of eps."""
+
+
+class SphereBivariateSpline(_BivariateSplineBase):
+ """
+ Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
+ given set of data points (theta,phi,r).
+
+ .. versionadded:: 0.11.0
+
+ See Also
+ --------
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ LSQUnivariateSpline :
+ a univariate spline using weighted least-squares fitting
+ """
+
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+ """
+ Evaluate the spline or its derivatives at given positions.
+
+ Parameters
+ ----------
+ theta, phi : array_like
+ Input coordinates.
+
+ If `grid` is False, evaluate the spline at points
+ ``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
+ Numpy broadcasting is obeyed.
+
+ If `grid` is True: evaluate spline at the grid points
+ defined by the coordinate arrays theta, phi. The arrays
+ must be sorted to increasing order.
+ The ordering of axes is consistent with
+ ``np.meshgrid(..., indexing="ij")`` and inconsistent with the
+ default ordering ``np.meshgrid(..., indexing="xy")``.
+ dtheta : int, optional
+ Order of theta-derivative
+
+ .. versionadded:: 0.14.0
+ dphi : int
+ Order of phi-derivative
+
+ .. versionadded:: 0.14.0
+ grid : bool
+ Whether to evaluate the results on a grid spanned by the
+ input arrays, or at points specified by the input arrays.
+
+ .. versionadded:: 0.14.0
+
+ Examples
+ --------
+
+ Suppose that we want to use splines to interpolate a bivariate function on a
+ sphere. The value of the function is known on a grid of longitudes and
+ colatitudes.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import RectSphereBivariateSpline
+ >>> def f(theta, phi):
+ ... return np.sin(theta) * np.cos(phi)
+
+ We evaluate the function on the grid. Note that the default indexing="xy"
+ of meshgrid would result in an unexpected (transposed) result after
+ interpolation.
+
+ >>> thetaarr = np.linspace(0, np.pi, 22)[1:-1]
+ >>> phiarr = np.linspace(0, 2 * np.pi, 21)[:-1]
+ >>> thetagrid, phigrid = np.meshgrid(thetaarr, phiarr, indexing="ij")
+ >>> zdata = f(thetagrid, phigrid)
+
+ We next set up the interpolator and use it to evaluate the function
+ on a finer grid.
+
+ >>> rsbs = RectSphereBivariateSpline(thetaarr, phiarr, zdata)
+ >>> thetaarr_fine = np.linspace(0, np.pi, 200)
+ >>> phiarr_fine = np.linspace(0, 2 * np.pi, 200)
+ >>> zdata_fine = rsbs(thetaarr_fine, phiarr_fine)
+
+ Finally we plot the coarsely-sampled input data alongside the
+ finely-sampled interpolated data to check that they agree.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(1, 2, 1)
+ >>> ax2 = fig.add_subplot(1, 2, 2)
+ >>> ax1.imshow(zdata)
+ >>> ax2.imshow(zdata_fine)
+ >>> plt.show()
+ """
+ theta = np.asarray(theta)
+ phi = np.asarray(phi)
+
+ if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
+ raise ValueError("requested theta out of bounds.")
+
+ return _BivariateSplineBase.__call__(self, theta, phi,
+ dx=dtheta, dy=dphi, grid=grid)
+
+ def ev(self, theta, phi, dtheta=0, dphi=0):
+ """
+ Evaluate the spline at points
+
+ Returns the interpolated value at ``(theta[i], phi[i]),
+ i=0,...,len(theta)-1``.
+
+ Parameters
+ ----------
+ theta, phi : array_like
+ Input coordinates. Standard Numpy broadcasting is obeyed.
+ The ordering of axes is consistent with
+ np.meshgrid(..., indexing="ij") and inconsistent with the
+ default ordering np.meshgrid(..., indexing="xy").
+ dtheta : int, optional
+ Order of theta-derivative
+
+ .. versionadded:: 0.14.0
+ dphi : int, optional
+ Order of phi-derivative
+
+ .. versionadded:: 0.14.0
+
+ Examples
+ --------
+ Suppose that we want to use splines to interpolate a bivariate function on a
+ sphere. The value of the function is known on a grid of longitudes and
+ colatitudes.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import RectSphereBivariateSpline
+ >>> def f(theta, phi):
+ ... return np.sin(theta) * np.cos(phi)
+
+ We evaluate the function on the grid. Note that the default indexing="xy"
+ of meshgrid would result in an unexpected (transposed) result after
+ interpolation.
+
+ >>> thetaarr = np.linspace(0, np.pi, 22)[1:-1]
+ >>> phiarr = np.linspace(0, 2 * np.pi, 21)[:-1]
+ >>> thetagrid, phigrid = np.meshgrid(thetaarr, phiarr, indexing="ij")
+ >>> zdata = f(thetagrid, phigrid)
+
+ We next set up the interpolator and use it to evaluate the function
+ at points not on the original grid.
+
+ >>> rsbs = RectSphereBivariateSpline(thetaarr, phiarr, zdata)
+ >>> thetainterp = np.linspace(thetaarr[0], thetaarr[-1], 200)
+ >>> phiinterp = np.linspace(phiarr[0], phiarr[-1], 200)
+ >>> zinterp = rsbs.ev(thetainterp, phiinterp)
+
+ Finally we plot the original data for a diagonal slice through the
+ initial grid, and the spline approximation along the same slice.
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(1, 1, 1)
+ >>> ax1.plot(np.sin(thetaarr) * np.sin(phiarr), np.diag(zdata), "or")
+ >>> ax1.plot(np.sin(thetainterp) * np.sin(phiinterp), zinterp, "-b")
+ >>> plt.show()
+ """
+ return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
+
+
+class SmoothSphereBivariateSpline(SphereBivariateSpline):
+ """
+ Smooth bivariate spline approximation in spherical coordinates.
+
+ .. versionadded:: 0.11.0
+
+ Parameters
+ ----------
+ theta, phi, r : array_like
+ 1-D sequences of data points (order is not important). Coordinates
+ must be given in radians. Theta must lie within the interval
+ ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
+ w : array_like, optional
+ Positive 1-D sequence of weights.
+ s : float, optional
+ Positive smoothing factor defined for estimation condition:
+ ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
+ Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
+ estimate of the standard deviation of ``r[i]``.
+ eps : float, optional
+ A threshold for determining the effective rank of an over-determined
+ linear system of equations. `eps` should have a value within the open
+ interval ``(0, 1)``, the default is 1e-16.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ LSQBivariateSpline :
+ a bivariate spline using weighted least-squares fitting
+ RectSphereBivariateSpline :
+ a bivariate spline over a rectangular mesh on a sphere
+ LSQSphereBivariateSpline :
+ a bivariate spline in spherical coordinates using weighted
+ least-squares fitting
+ RectBivariateSpline :
+ a bivariate spline over a rectangular mesh.
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+ For more information, see the FITPACK_ site about this function.
+
+ .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
+
+ Examples
+ --------
+ Suppose we have global data on a coarse grid (the input data does not
+ have to be on a grid):
+
+ >>> import numpy as np
+ >>> theta = np.linspace(0., np.pi, 7)
+ >>> phi = np.linspace(0., 2*np.pi, 9)
+ >>> data = np.empty((theta.shape[0], phi.shape[0]))
+ >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
+ >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
+ >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
+ >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
+ >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
+ >>> data[3,3:-2] = 3.
+ >>> data = np.roll(data, 4, 1)
+
+ We need to set up the interpolator object
+
+ >>> lats, lons = np.meshgrid(theta, phi)
+ >>> from scipy.interpolate import SmoothSphereBivariateSpline
+ >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ ... data.T.ravel(), s=3.5)
+
+ As a first test, we'll see what the algorithm returns when run on the
+ input coordinates
+
+ >>> data_orig = lut(theta, phi)
+
+ Finally we interpolate the data to a finer grid
+
+ >>> fine_lats = np.linspace(0., np.pi, 70)
+ >>> fine_lons = np.linspace(0., 2 * np.pi, 90)
+
+ >>> data_smth = lut(fine_lats, fine_lons)
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(131)
+ >>> ax1.imshow(data, interpolation='nearest')
+ >>> ax2 = fig.add_subplot(132)
+ >>> ax2.imshow(data_orig, interpolation='nearest')
+ >>> ax3 = fig.add_subplot(133)
+ >>> ax3.imshow(data_smth, interpolation='nearest')
+ >>> plt.show()
+
+ """
+
+ def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
+
+ theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
+
+ # input validation
+ if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
+ raise ValueError('theta should be between [0, pi]')
+ if not ((0.0 <= phi).all() and (phi <= 2.0 * np.pi).all()):
+ raise ValueError('phi should be between [0, 2pi]')
+ if w is not None:
+ w = np.asarray(w)
+ if not (w >= 0.0).all():
+ raise ValueError('w should be positive')
+ if not s >= 0.0:
+ raise ValueError('s should be positive')
+ if not 0.0 < eps < 1.0:
+ raise ValueError('eps should be between (0, 1)')
+
+ nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
+ r, w=w, s=s,
+ eps=eps)
+ if ier not in [0, -1, -2]:
+ message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
+ raise ValueError(message)
+
+ self.fp = fp
+ self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
+ self.degrees = (3, 3)
+
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+ theta = np.asarray(theta)
+ phi = np.asarray(phi)
+
+ if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
+ raise ValueError("requested phi out of bounds.")
+
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+ dphi=dphi, grid=grid)
+
+
+class LSQSphereBivariateSpline(SphereBivariateSpline):
+ """
+ Weighted least-squares bivariate spline approximation in spherical
+ coordinates.
+
+ Determines a smoothing bicubic spline according to a given
+ set of knots in the `theta` and `phi` directions.
+
+ .. versionadded:: 0.11.0
+
+ Parameters
+ ----------
+ theta, phi, r : array_like
+ 1-D sequences of data points (order is not important). Coordinates
+ must be given in radians. Theta must lie within the interval
+ ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
+ tt, tp : array_like
+ Strictly ordered 1-D sequences of knots coordinates.
+ Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
+ w : array_like, optional
+ Positive 1-D sequence of weights, of the same length as `theta`, `phi`
+ and `r`.
+ eps : float, optional
+ A threshold for determining the effective rank of an over-determined
+ linear system of equations. `eps` should have a value within the
+ open interval ``(0, 1)``, the default is 1e-16.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ LSQBivariateSpline :
+ a bivariate spline using weighted least-squares fitting
+ RectSphereBivariateSpline :
+ a bivariate spline over a rectangular mesh on a sphere
+ SmoothSphereBivariateSpline :
+ a smoothing bivariate spline in spherical coordinates
+ RectBivariateSpline :
+ a bivariate spline over a rectangular mesh.
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+ For more information, see the FITPACK_ site about this function.
+
+ .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
+
+ Examples
+ --------
+ Suppose we have global data on a coarse grid (the input data does not
+ have to be on a grid):
+
+ >>> from scipy.interpolate import LSQSphereBivariateSpline
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+
+ >>> theta = np.linspace(0, np.pi, num=7)
+ >>> phi = np.linspace(0, 2*np.pi, num=9)
+ >>> data = np.empty((theta.shape[0], phi.shape[0]))
+ >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
+ >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
+ >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
+ >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
+ >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
+ >>> data[3,3:-2] = 3.
+ >>> data = np.roll(data, 4, 1)
+
+ We need to set up the interpolator object. Here, we must also specify the
+ coordinates of the knots to use.
+
+ >>> lats, lons = np.meshgrid(theta, phi)
+ >>> knotst, knotsp = theta.copy(), phi.copy()
+ >>> knotst[0] += .0001
+ >>> knotst[-1] -= .0001
+ >>> knotsp[0] += .0001
+ >>> knotsp[-1] -= .0001
+ >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ ... data.T.ravel(), knotst, knotsp)
+
+ As a first test, we'll see what the algorithm returns when run on the
+ input coordinates
+
+ >>> data_orig = lut(theta, phi)
+
+ Finally we interpolate the data to a finer grid
+
+ >>> fine_lats = np.linspace(0., np.pi, 70)
+ >>> fine_lons = np.linspace(0., 2*np.pi, 90)
+ >>> data_lsq = lut(fine_lats, fine_lons)
+
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(131)
+ >>> ax1.imshow(data, interpolation='nearest')
+ >>> ax2 = fig.add_subplot(132)
+ >>> ax2.imshow(data_orig, interpolation='nearest')
+ >>> ax3 = fig.add_subplot(133)
+ >>> ax3.imshow(data_lsq, interpolation='nearest')
+ >>> plt.show()
+
+ """
+
+ def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
+
+ theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
+ tt, tp = np.asarray(tt), np.asarray(tp)
+
+ if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
+ raise ValueError('theta should be between [0, pi]')
+ if not ((0.0 <= phi).all() and (phi <= 2*np.pi).all()):
+ raise ValueError('phi should be between [0, 2pi]')
+ if not ((0.0 < tt).all() and (tt < np.pi).all()):
+ raise ValueError('tt should be between (0, pi)')
+ if not ((0.0 < tp).all() and (tp < 2*np.pi).all()):
+ raise ValueError('tp should be between (0, 2pi)')
+ if w is not None:
+ w = np.asarray(w)
+ if not (w >= 0.0).all():
+ raise ValueError('w should be positive')
+ if not 0.0 < eps < 1.0:
+ raise ValueError('eps should be between (0, 1)')
+
+ nt_, np_ = 8 + len(tt), 8 + len(tp)
+ tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
+ tt_[4:-4], tp_[4:-4] = tt, tp
+ tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
+ tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
+ w=w, eps=eps)
+ if ier > 0:
+ message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
+ raise ValueError(message)
+
+ self.fp = fp
+ self.tck = tt_, tp_, c
+ self.degrees = (3, 3)
+
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+ theta = np.asarray(theta)
+ phi = np.asarray(phi)
+
+ if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
+ raise ValueError("requested phi out of bounds.")
+
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+ dphi=dphi, grid=grid)
+
+
+_spfit_messages = _surfit_messages.copy()
+_spfit_messages[10] = """
+ERROR: on entry, the input data are controlled on validity
+ the following restrictions must be satisfied.
+ -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
+ -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
+ -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
+ mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
+ kwrk>=5+mu+mv+nuest+nvest,
+ lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
+ 0< u(i-1)<u(i)< pi,i=2,..,mu,
+ -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
+ if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
+ 0<tu(5)<tu(6)<...<tu(nu-4)< pi
+ 8<=nv<=min(nvest,mv+7)
+ v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
+ the schoenberg-whitney conditions, i.e. there must be
+ subset of grid co-ordinates uu(p) and vv(q) such that
+ tu(p)<uu(p)<tu(p+4) ,p=1,...,nu-4
+ (iopt(2)=1 and iopt(3)=1 also count for a uu-value
+ tv(q)<vv(q)<tv(q+4) ,q=1,...,nv-4
+ (vv(q) is either a value v(j) or v(j)+2*pi)
+ if iopt(1)>=0: s>=0
+ if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
+ if one of these conditions is found to be violated,control is
+ immediately repassed to the calling program. in that case there is no
+ approximation returned."""
+
+
+class RectSphereBivariateSpline(SphereBivariateSpline):
+ """
+ Bivariate spline approximation over a rectangular mesh on a sphere.
+
+ Can be used for smoothing data.
+
+ .. versionadded:: 0.11.0
+
+ Parameters
+ ----------
+ u : array_like
+ 1-D array of colatitude coordinates in strictly ascending order.
+ Coordinates must be given in radians and lie within the open interval
+ ``(0, pi)``.
+ v : array_like
+ 1-D array of longitude coordinates in strictly ascending order.
+ Coordinates must be given in radians. First element (``v[0]``) must lie
+ within the interval ``[-pi, pi)``. Last element (``v[-1]``) must satisfy
+ ``v[-1] <= v[0] + 2*pi``.
+ r : array_like
+ 2-D array of data with shape ``(u.size, v.size)``.
+ s : float, optional
+ Positive smoothing factor defined for estimation condition
+ (``s=0`` is for interpolation).
+ pole_continuity : bool or (bool, bool), optional
+ Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
+ ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
+ will be 1 or 0 when this is True or False, respectively.
+ Defaults to False.
+ pole_values : float or (float, float), optional
+ Data values at the poles ``u=0`` and ``u=pi``. Either the whole
+ parameter or each individual element can be None. Defaults to None.
+ pole_exact : bool or (bool, bool), optional
+ Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
+ value is considered to be the right function value, and it will be
+ fitted exactly. If False, the value will be considered to be a data
+ value just like the other data values. Defaults to False.
+ pole_flat : bool or (bool, bool), optional
+ For the poles at ``u=0`` and ``u=pi``, specify whether or not the
+ approximation has vanishing derivatives. Defaults to False.
+
+ See Also
+ --------
+ BivariateSpline :
+ a base class for bivariate splines.
+ UnivariateSpline :
+ a smooth univariate spline to fit a given set of data points.
+ SmoothBivariateSpline :
+ a smoothing bivariate spline through the given points
+ LSQBivariateSpline :
+ a bivariate spline using weighted least-squares fitting
+ SmoothSphereBivariateSpline :
+ a smoothing bivariate spline in spherical coordinates
+ LSQSphereBivariateSpline :
+ a bivariate spline in spherical coordinates using weighted
+ least-squares fitting
+ RectBivariateSpline :
+ a bivariate spline over a rectangular mesh.
+ bisplrep :
+ a function to find a bivariate B-spline representation of a surface
+ bisplev :
+ a function to evaluate a bivariate B-spline and its derivatives
+
+ Notes
+ -----
+ Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
+ ``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
+ least-squares spline approximation is not implemented yet.
+
+ When actually performing the interpolation, the requested `v` values must
+ lie within the same length 2pi interval that the original `v` values were
+ chosen from.
+
+ For more information, see the FITPACK_ site about this function.
+
+ .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
+
+ Examples
+ --------
+ Suppose we have global data on a coarse grid
+
+ >>> import numpy as np
+ >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
+ >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
+ >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ ... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ We want to interpolate it to a global one-degree grid
+
+ >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
+ >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
+ >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
+
+ We need to set up the interpolator object
+
+ >>> from scipy.interpolate import RectSphereBivariateSpline
+ >>> lut = RectSphereBivariateSpline(lats, lons, data)
+
+ Finally we interpolate the data. The `RectSphereBivariateSpline` object
+ only takes 1-D arrays as input, therefore we need to do some reshaping.
+
+ >>> data_interp = lut.ev(new_lats.ravel(),
+ ... new_lons.ravel()).reshape((360, 180)).T
+
+ Looking at the original and the interpolated data, one can see that the
+ interpolant reproduces the original data very well:
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(211)
+ >>> ax1.imshow(data, interpolation='nearest')
+ >>> ax2 = fig.add_subplot(212)
+ >>> ax2.imshow(data_interp, interpolation='nearest')
+ >>> plt.show()
+
+ Choosing the optimal value of ``s`` can be a delicate task. Recommended
+ values for ``s`` depend on the accuracy of the data values. If the user
+ has an idea of the statistical errors on the data, she can also find a
+ proper estimate for ``s``. By assuming that, if she specifies the
+ right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
+ reproduces the function underlying the data, she can evaluate
+ ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
+ For example, if she knows that the statistical errors on her
+ ``r(i,j)``-values are not greater than 0.1, she may expect that a good
+ ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
+
+ If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
+ be determined by trial and error. The best is then to start with a very
+ large value of ``s`` (to determine the least-squares polynomial and the
+ corresponding upper bound ``fp0`` for ``s``) and then to progressively
+ decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
+ ``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
+ shows more detail) to obtain closer fits.
+
+ The interpolation results for different values of ``s`` give some insight
+ into this process:
+
+ >>> fig2 = plt.figure()
+ >>> s = [3e9, 2e9, 1e9, 1e8]
+ >>> for idx, sval in enumerate(s, 1):
+ ... lut = RectSphereBivariateSpline(lats, lons, data, s=sval)
+ ... data_interp = lut.ev(new_lats.ravel(),
+ ... new_lons.ravel()).reshape((360, 180)).T
+ ... ax = fig2.add_subplot(2, 2, idx)
+ ... ax.imshow(data_interp, interpolation='nearest')
+ ... ax.set_title(f"s = {sval:g}")
+ >>> plt.show()
+
+ """
+
+ def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
+ pole_exact=False, pole_flat=False):
+ iopt = np.array([0, 0, 0], dtype=dfitpack_int)
+ ider = np.array([-1, 0, -1, 0], dtype=dfitpack_int)
+ if pole_values is None:
+ pole_values = (None, None)
+ elif isinstance(pole_values, (float, np.float32, np.float64)):
+ pole_values = (pole_values, pole_values)
+ if isinstance(pole_continuity, bool):
+ pole_continuity = (pole_continuity, pole_continuity)
+ if isinstance(pole_exact, bool):
+ pole_exact = (pole_exact, pole_exact)
+ if isinstance(pole_flat, bool):
+ pole_flat = (pole_flat, pole_flat)
+
+ r0, r1 = pole_values
+ iopt[1:] = pole_continuity
+ if r0 is None:
+ ider[0] = -1
+ else:
+ ider[0] = pole_exact[0]
+
+ if r1 is None:
+ ider[2] = -1
+ else:
+ ider[2] = pole_exact[1]
+
+ ider[1], ider[3] = pole_flat
+
+ u, v = np.ravel(u), np.ravel(v)
+ r = np.asarray(r)
+
+ if not (0.0 < u[0] and u[-1] < np.pi):
+ raise ValueError('u should be between (0, pi)')
+ if not -np.pi <= v[0] < np.pi:
+ raise ValueError('v[0] should be between [-pi, pi)')
+ if not v[-1] <= v[0] + 2*np.pi:
+ raise ValueError('v[-1] should be v[0] + 2pi or less ')
+
+ if not np.all(np.diff(u) > 0.0):
+ raise ValueError('u must be strictly increasing')
+ if not np.all(np.diff(v) > 0.0):
+ raise ValueError('v must be strictly increasing')
+
+ if not u.size == r.shape[0]:
+ raise ValueError('u dimension of r must have same number of '
+ 'elements as u')
+ if not v.size == r.shape[1]:
+ raise ValueError('v dimension of r must have same number of '
+ 'elements as v')
+
+ if pole_continuity[1] is False and pole_flat[1] is True:
+ raise ValueError('if pole_continuity is False, so must be '
+ 'pole_flat')
+ if pole_continuity[0] is False and pole_flat[0] is True:
+ raise ValueError('if pole_continuity is False, so must be '
+ 'pole_flat')
+
+ if not s >= 0.0:
+ raise ValueError('s should be positive')
+
+ r = np.ravel(r)
+ nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
+ u.copy(),
+ v.copy(),
+ r.copy(),
+ r0, r1, s)
+
+ if ier not in [0, -1, -2]:
+ msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
+ raise ValueError(msg)
+
+ self.fp = fp
+ self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
+ self.degrees = (3, 3)
+ self.v0 = v[0]
+
+ def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+ theta = np.asarray(theta)
+ phi = np.asarray(phi)
+
+ return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+ dphi=dphi, grid=grid)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c660e2b183dca3f3f1d594bbbe6936a5eb9d7a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_impl.py
@@ -0,0 +1,805 @@
+"""
+fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
+ FITPACK is a collection of FORTRAN programs for curve and surface
+ fitting with splines and tensor product splines.
+
+See
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
+or
+ http://www.netlib.org/dierckx/
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+TODO: Make interfaces to the following fitpack functions:
+ For univariate splines: cocosp, concon, fourco, insert
+ For bivariate splines: profil, regrid, parsur, surev
+"""
+
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+import warnings
+import numpy as np
+from . import _fitpack
+from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
+ empty, iinfo, asarray)
+
+# Try to replace _fitpack interface with
+# f2py-generated version
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+def _int_overflow(x, exception, msg=None):
+ """Cast the value to an dfitpack_int and raise an OverflowError if the value
+ cannot fit.
+ """
+ if x > iinfo(dfitpack_int).max:
+ if msg is None:
+ msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
+ raise exception(msg)
+ return dfitpack_int.type(x)
+
+
+_iermess = {
+ 0: ["The spline has a residual sum of squares fp such that "
+ "abs(fp-s)/s<=0.001", None],
+ -1: ["The spline is an interpolating spline (fp=0)", None],
+ -2: ["The spline is weighted least-squares polynomial of degree k.\n"
+ "fp gives the upper bound fp0 for the smoothing factor s", None],
+ 1: ["The required storage space exceeds the available storage space.\n"
+ "Probable causes: data (x,y) size is too small or smoothing parameter"
+ "\ns is too small (fp>s).", ValueError],
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
+ "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
+ ValueError],
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 10: ["Error on input data", ValueError],
+ 'unknown': ["An error occurred", TypeError]
+}
+
+_iermess2 = {
+ 0: ["The spline has a residual sum of squares fp such that "
+ "abs(fp-s)/s<=0.001", None],
+ -1: ["The spline is an interpolating spline (fp=0)", None],
+ -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
+ "\nfp gives the upper bound fp0 for the smoothing factor s", None],
+ -3: ["Warning. The coefficients of the spline have been computed as the\n"
+ "minimal norm least-squares solution of a rank deficient system.",
+ None],
+ 1: ["The required storage space exceeds the available storage space.\n"
+ "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
+ ValueError],
+ 2: ["A theoretically impossible result when finding a smoothing spline\n"
+ "with fp = s. Probable causes: s too small or badly chosen eps.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+ "spline with fp=s has been reached. Probable cause: s too small.\n"
+ "(abs(fp-s)/s>0.001)", ValueError],
+ 4: ["No more knots can be added because the number of B-spline\n"
+ "coefficients already exceeds the number of data points m.\n"
+ "Probable causes: either s or m too small. (fp>s)", ValueError],
+ 5: ["No more knots can be added because the additional knot would\n"
+ "coincide with an old one. Probable cause: s too small or too large\n"
+ "a weight to an inaccurate data point. (fp>s)", ValueError],
+ 10: ["Error on input data", ValueError],
+ 11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
+ "the minimal least-squares solution of a rank deficient system of\n"
+ "linear equations.", ValueError],
+ 'unknown': ["An error occurred", TypeError]
+}
+
+_parcur_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
+ 'ub': 0, 'ue': 1}
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+ full_output=0, nest=None, per=0, quiet=1):
+ # see the docstring of `_fitpack_py/splprep`
+ if task <= 0:
+ _parcur_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int), 'u': array([], float),
+ 'ub': 0, 'ue': 1}
+ x = atleast_1d(x)
+ idim, m = x.shape
+ if per:
+ for i in range(idim):
+ if x[i][0] != x[i][-1]:
+ if not quiet:
+ warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
+ (i, m, i)),
+ stacklevel=2)
+ x[i][-1] = x[i][0]
+ if not 0 < idim < 11:
+ raise TypeError('0 < idim < 11 must hold')
+ if w is None:
+ w = ones(m, float)
+ else:
+ w = atleast_1d(w)
+ ipar = (u is not None)
+ if ipar:
+ _parcur_cache['u'] = u
+ if ub is None:
+ _parcur_cache['ub'] = u[0]
+ else:
+ _parcur_cache['ub'] = ub
+ if ue is None:
+ _parcur_cache['ue'] = u[-1]
+ else:
+ _parcur_cache['ue'] = ue
+ else:
+ _parcur_cache['u'] = zeros(m, float)
+ if not (1 <= k <= 5):
+ raise TypeError('1 <= k= %d <=5 must hold' % k)
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
+ raise TypeError('Mismatch of input dimensions')
+ if s is None:
+ s = m - sqrt(2*m)
+ if t is None and task == -1:
+ raise TypeError('Knots must be given for task=-1')
+ if t is not None:
+ _parcur_cache['t'] = atleast_1d(t)
+ n = len(_parcur_cache['t'])
+ if task == -1 and n < 2*k + 2:
+ raise TypeError('There must be at least 2*k+2 knots for task=-1')
+ if m <= k:
+ raise TypeError('m > k must hold')
+ if nest is None:
+ nest = m + 2*k
+
+ if (task >= 0 and s == 0) or (nest < 0):
+ if per:
+ nest = m + 2*k
+ else:
+ nest = m + k + 1
+ nest = max(nest, 2*k + 3)
+ u = _parcur_cache['u']
+ ub = _parcur_cache['ub']
+ ue = _parcur_cache['ue']
+ t = _parcur_cache['t']
+ wrk = _parcur_cache['wrk']
+ iwrk = _parcur_cache['iwrk']
+ t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
+ task, ipar, s, t, nest, wrk, iwrk, per)
+ _parcur_cache['u'] = o['u']
+ _parcur_cache['ub'] = o['ub']
+ _parcur_cache['ue'] = o['ue']
+ _parcur_cache['t'] = t
+ _parcur_cache['wrk'] = o['wrk']
+ _parcur_cache['iwrk'] = o['iwrk']
+ ier = o['ier']
+ fp = o['fp']
+ n = len(t)
+ u = o['u']
+ c.shape = idim, n - k - 1
+ tcku = [t, list(c), k], u
+ if ier <= 0 and not quiet:
+ warnings.warn(RuntimeWarning(_iermess[ier][0] +
+ "\tk=%d n=%d m=%d fp=%f s=%f" %
+ (k, len(t), m, fp, s)),
+ stacklevel=2)
+ if ier > 0 and not full_output:
+ if ier in [1, 2, 3]:
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
+ else:
+ try:
+ raise _iermess[ier][1](_iermess[ier][0])
+ except KeyError as e:
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+ if full_output:
+ try:
+ return tcku, fp, ier, _iermess[ier][0]
+ except KeyError:
+ return tcku, fp, ier, _iermess['unknown'][0]
+ else:
+ return tcku
+
+
+_curfit_cache = {'t': array([], float), 'wrk': array([], float),
+ 'iwrk': array([], dfitpack_int)}
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+ full_output=0, per=0, quiet=1):
+ # see the docstring of `_fitpack_py/splrep`
+ if task <= 0:
+ _curfit_cache = {}
+ x, y = map(atleast_1d, [x, y])
+ m = len(x)
+ if w is None:
+ w = ones(m, float)
+ if s is None:
+ s = 0.0
+ else:
+ w = atleast_1d(w)
+ if s is None:
+ s = m - sqrt(2*m)
+ if not len(w) == m:
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+ if (m != len(y)) or (m != len(w)):
+ raise TypeError('Lengths of the first three arguments (x,y,w) must '
+ 'be equal')
+ if not (1 <= k <= 5):
+ raise TypeError('Given degree of the spline (k=%d) is not supported. '
+ '(1<=k<=5)' % k)
+ if m <= k:
+ raise TypeError('m > k must hold')
+ if xb is None:
+ xb = x[0]
+ if xe is None:
+ xe = x[-1]
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if t is not None:
+ task = -1
+ if task == -1:
+ if t is None:
+ raise TypeError('Knots must be given for task=-1')
+ numknots = len(t)
+ _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
+ _curfit_cache['t'][k+1:-k-1] = t
+ nest = len(_curfit_cache['t'])
+ elif task == 0:
+ if per:
+ nest = max(m + 2*k, 2*k + 3)
+ else:
+ nest = max(m + k + 1, 2*k + 3)
+ t = empty((nest,), float)
+ _curfit_cache['t'] = t
+ if task <= 0:
+ if per:
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
+ else:
+ _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
+ _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
+ try:
+ t = _curfit_cache['t']
+ wrk = _curfit_cache['wrk']
+ iwrk = _curfit_cache['iwrk']
+ except KeyError as e:
+ raise TypeError("must call with task=1 only after"
+ " call with task=0,-1") from e
+ if not per:
+ n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
+ xb, xe, k, s)
+ else:
+ n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
+ tck = (t[:n], c[:n], k)
+ if ier <= 0 and not quiet:
+ _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
+ (k, len(t), m, fp, s))
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
+ if ier > 0 and not full_output:
+ if ier in [1, 2, 3]:
+ warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
+ else:
+ try:
+ raise _iermess[ier][1](_iermess[ier][0])
+ except KeyError as e:
+ raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+ if full_output:
+ try:
+ return tck, fp, ier, _iermess[ier][0]
+ except KeyError:
+ return tck, fp, ier, _iermess['unknown'][0]
+ else:
+ return tck
+
+
+def splev(x, tck, der=0, ext=0):
+ # see the docstring of `_fitpack_py/splev`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, x=x, t=t, k=k, der=der:
+ splev(x, [t, c, k], der, ext), c))
+ else:
+ if not (0 <= der <= k):
+ raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
+ if ext not in (0, 1, 2, 3):
+ raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
+
+ x = asarray(x)
+ shape = x.shape
+ x = atleast_1d(x).ravel()
+ if der == 0:
+ y, ier = dfitpack.splev(t, c, k, x, ext)
+ else:
+ y, ier = dfitpack.splder(t, c, k, x, der, ext)
+
+ if ier == 10:
+ raise ValueError("Invalid input data")
+ if ier == 1:
+ raise ValueError("Found x value not in the domain")
+ if ier:
+ raise TypeError("An error occurred")
+
+ return y.reshape(shape)
+
+
+def splint(a, b, tck, full_output=0):
+ # see the docstring of `_fitpack_py/splint`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, a=a, b=b, t=t, k=k:
+ splint(a, b, [t, c, k]), c))
+ else:
+ aint, wrk = dfitpack.splint(t, c, k, a, b)
+ if full_output:
+ return aint, wrk
+ else:
+ return aint
+
+
+def sproot(tck, mest=10):
+ # see the docstring of `_fitpack_py/sproot`
+ t, c, k = tck
+ if k != 3:
+ raise ValueError("sproot works only for cubic (k=3) splines")
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, t=t, k=k, mest=mest:
+ sproot([t, c, k], mest), c))
+ else:
+ if len(t) < 8:
+ raise TypeError("The number of knots %d>=8" % len(t))
+ z, m, ier = dfitpack.sproot(t, c, mest)
+ if ier == 10:
+ raise TypeError("Invalid input data. "
+ "t1<=..<=t4<x<tn-3<=..<=tn must hold.")
+ if ier == 0:
+ return z[:m]
+ if ier == 1:
+ warnings.warn(RuntimeWarning("The number of zeros exceeds mest"),
+ stacklevel=2)
+ return z[:m]
+ raise TypeError("Unknown error")
+
+
+def spalde(x, tck):
+ # see the docstring of `_fitpack_py/spalde`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ return list(map(lambda c, x=x, t=t, k=k:
+ spalde(x, [t, c, k]), c))
+ else:
+ x = atleast_1d(x)
+ if len(x) > 1:
+ return list(map(lambda x, tck=tck: spalde(x, tck), x))
+ d, ier = dfitpack.spalde(t, c, k+1, x[0])
+ if ier == 0:
+ return d
+ if ier == 10:
+ raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
+ raise TypeError("Unknown error")
+
+# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
+# full_output=0,nest=None,per=0,quiet=1):
+
+
+_surfit_cache = {'tx': array([], float), 'ty': array([], float),
+ 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
+
+
+def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
+ kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
+ full_output=0, nxest=None, nyest=None, quiet=1):
+ """
+ Find a bivariate B-spline representation of a surface.
+
+ Given a set of data points (x[i], y[i], z[i]) representing a surface
+ z=f(x,y), compute a B-spline representation of the surface. Based on
+ the routine SURFIT from FITPACK.
+
+ Parameters
+ ----------
+ x, y, z : ndarray
+ Rank-1 arrays of data points.
+ w : ndarray, optional
+ Rank-1 array of weights. By default ``w=np.ones(len(x))``.
+ xb, xe : float, optional
+ End points of approximation interval in `x`.
+ By default ``xb = x.min(), xe=x.max()``.
+ yb, ye : float, optional
+ End points of approximation interval in `y`.
+ By default ``yb=y.min(), ye = y.max()``.
+ kx, ky : int, optional
+ The degrees of the spline (1 <= kx, ky <= 5).
+ Third order (kx=ky=3) is recommended.
+ task : int, optional
+ If task=0, find knots in x and y and coefficients for a given
+ smoothing factor, s.
+ If task=1, find knots and coefficients for another value of the
+ smoothing factor, s. bisplrep must have been previously called
+ with task=0 or task=1.
+ If task=-1, find coefficients for a given set of knots tx, ty.
+ s : float, optional
+ A non-negative smoothing factor. If weights correspond
+ to the inverse of the standard-deviation of the errors in z,
+ then a good s-value should be found in the range
+ ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
+ eps : float, optional
+ A threshold for determining the effective rank of an
+ over-determined linear system of equations (0 < eps < 1).
+ `eps` is not likely to need changing.
+ tx, ty : ndarray, optional
+ Rank-1 arrays of the knots of the spline for task=-1
+ full_output : int, optional
+ Non-zero to return optional outputs.
+ nxest, nyest : int, optional
+ Over-estimates of the total number of knots. If None then
+ ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
+ ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
+ quiet : int, optional
+ Non-zero to suppress printing of messages.
+
+ Returns
+ -------
+ tck : array_like
+ A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
+ coefficients (c) of the bivariate B-spline representation of the
+ surface along with the degree of the spline.
+ fp : ndarray
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int
+ An integer flag about splrep success. Success is indicated if
+ ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+ Otherwise an error is raised.
+ msg : str
+ A message corresponding to the integer flag, ier.
+
+ See Also
+ --------
+ splprep, splrep, splint, sproot, splev
+ UnivariateSpline, BivariateSpline
+
+ Notes
+ -----
+ See `bisplev` to evaluate the value of the B-spline given its tck
+ representation.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolation.
+
+ References
+ ----------
+ .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
+ Ima J. Numer. Anal. 1 (1981) 267-283.
+ .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
+ report tw50, Dept. Computer Science,K.U.Leuven, 1980.
+ .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
+
+ """
+ x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
+ m = len(x)
+ if not (m == len(y) == len(z)):
+ raise TypeError('len(x)==len(y)==len(z) must hold.')
+ if w is None:
+ w = ones(m, float)
+ else:
+ w = atleast_1d(w)
+ if not len(w) == m:
+ raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+ if xb is None:
+ xb = x.min()
+ if xe is None:
+ xe = x.max()
+ if yb is None:
+ yb = y.min()
+ if ye is None:
+ ye = y.max()
+ if not (-1 <= task <= 1):
+ raise TypeError('task must be -1, 0 or 1')
+ if s is None:
+ s = m - sqrt(2*m)
+ if tx is None and task == -1:
+ raise TypeError('Knots_x must be given for task=-1')
+ if tx is not None:
+ _surfit_cache['tx'] = atleast_1d(tx)
+ nx = len(_surfit_cache['tx'])
+ if ty is None and task == -1:
+ raise TypeError('Knots_y must be given for task=-1')
+ if ty is not None:
+ _surfit_cache['ty'] = atleast_1d(ty)
+ ny = len(_surfit_cache['ty'])
+ if task == -1 and nx < 2*kx+2:
+ raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
+ if task == -1 and ny < 2*ky+2:
+ raise TypeError('There must be at least 2*ky+2 knots_x for task=-1')
+ if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
+ raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
+ 'supported. (1<=k<=5)' % (kx, ky))
+ if m < (kx + 1)*(ky + 1):
+ raise TypeError('m >= (kx+1)(ky+1) must hold')
+ if nxest is None:
+ nxest = int(kx + sqrt(m/2))
+ if nyest is None:
+ nyest = int(ky + sqrt(m/2))
+ nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
+ if task >= 0 and s == 0:
+ nxest = int(kx + sqrt(3*m))
+ nyest = int(ky + sqrt(3*m))
+ if task == -1:
+ _surfit_cache['tx'] = atleast_1d(tx)
+ _surfit_cache['ty'] = atleast_1d(ty)
+ tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
+ wrk = _surfit_cache['wrk']
+ u = nxest - kx - 1
+ v = nyest - ky - 1
+ km = max(kx, ky) + 1
+ ne = max(nxest, nyest)
+ bx, by = kx*v + ky + 1, ky*u + kx + 1
+ b1, b2 = bx, bx + v - ky
+ if bx > by:
+ b1, b2 = by, by + u - kx
+ msg = "Too many data points to interpolate"
+ lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
+ 2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
+ OverflowError,
+ msg=msg)
+ lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
+ tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
+ task, s, eps, tx, ty, nxest, nyest,
+ wrk, lwrk1, lwrk2)
+ _curfit_cache['tx'] = tx
+ _curfit_cache['ty'] = ty
+ _curfit_cache['wrk'] = o['wrk']
+ ier, fp = o['ier'], o['fp']
+ tck = [tx, ty, c, kx, ky]
+
+ ierm = min(11, max(-3, ier))
+ if ierm <= 0 and not quiet:
+ _mess = (_iermess2[ierm][0] +
+ "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+ (kx, ky, len(tx), len(ty), m, fp, s))
+ warnings.warn(RuntimeWarning(_mess), stacklevel=2)
+ if ierm > 0 and not full_output:
+ if ier in [1, 2, 3, 4, 5]:
+ _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+ (kx, ky, len(tx), len(ty), m, fp, s))
+ warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
+ else:
+ try:
+ raise _iermess2[ierm][1](_iermess2[ierm][0])
+ except KeyError as e:
+ raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
+ if full_output:
+ try:
+ return tck, fp, ier, _iermess2[ierm][0]
+ except KeyError:
+ return tck, fp, ier, _iermess2['unknown'][0]
+ else:
+ return tck
+
+
def bisplev(x, y, tck, dx=0, dy=0):
    """
    Evaluate a bivariate B-spline and its derivatives.

    Return a rank-2 array of spline function values (or spline derivative
    values) at points given by the cross-product of the rank-1 arrays `x` and
    `y`. In special cases, return an array or just a float if either `x` or
    `y` or both are floats. Based on BISPEV and PARDER from FITPACK.

    Parameters
    ----------
    x, y : ndarray
        Rank-1 arrays specifying the domain over which to evaluate the
        spline or its derivative.
    tck : tuple
        A sequence of length 5 returned by `bisplrep` containing the knot
        locations, the coefficients, and the degree of the spline:
        [tx, ty, c, kx, ky].
    dx, dy : int, optional
        The orders of the partial derivatives in `x` and `y` respectively.

    Returns
    -------
    vals : ndarray
        The B-spline or its derivative evaluated over the set formed by
        the cross-product of `x` and `y`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    UnivariateSpline, BivariateSpline

    Notes
    -----
    See `bisplrep` to generate the `tck` representation.

    References
    ----------
    .. [1] Dierckx P. : An algorithm for surface fitting
       with spline functions
       Ima J. Numer. Anal. 1 (1981) 267-283.
    .. [2] Dierckx P. : An algorithm for surface fitting
       with spline functions
       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
    .. [3] Dierckx P. : Curve and surface fitting with splines,
       Monographs on Numerical Analysis, Oxford University Press, 1993.

    Examples
    --------
    Examples are given in the SciPy tutorial.

    """
    tx, ty, c, kx, ky = tck
    # FITPACK's PARDER requires the derivative order to be strictly less
    # than the spline degree along each axis.
    if not (0 <= dx < kx):
        raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
    if not (0 <= dy < ky):
        raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
    x = atleast_1d(x)
    y = atleast_1d(y)
    if x.ndim != 1 or y.ndim != 1:
        raise ValueError("First two entries should be rank-1 arrays.")

    msg = "Too many data points to interpolate."

    # Guard the size of the output grid before allocating it.
    _int_overflow(x.size * y.size, MemoryError, msg=msg)

    if dx == 0 and dy == 0:
        z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
    else:
        # PARDER additionally allocates workspace proportional to the
        # number of coefficients; check that too.
        _int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
                      MemoryError, msg=msg)
        z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)

    if ier == 10:
        raise ValueError("Invalid input data")
    if ier:
        raise TypeError("An error occurred")

    # Reshape to the evaluation grid, then squeeze singleton axes so that
    # scalar inputs produce a 1-D array or a plain float.
    z.shape = len(x), len(y)
    if z.shape[0] > 1:
        return z
    row = z[0]
    if len(row) > 1:
        return row
    return row[0]
+
+
def dblint(xa, xb, ya, yb, tck):
    """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].

    Parameters
    ----------
    xa, xb : float
        The end-points of the x integration interval.
    ya, yb : float
        The end-points of the y integration interval.
    tck : list [tx, ty, c, kx, ky]
        A sequence of length 5 returned by bisplrep containing the knot
        locations tx, ty, the coefficients c, and the degrees kx, ky
        of the spline.

    Returns
    -------
    integ : float
        The value of the resulting integral.
    """
    # Thin wrapper: FITPACK's DBLINT performs the actual quadrature.
    tx, ty, c, kx, ky = tck
    integ = dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
    return integ
+
+
+def insert(x, tck, m=1, per=0):
+ # see the docstring of `_fitpack_py/insert`
+ t, c, k = tck
+ try:
+ c[0][0]
+ parametric = True
+ except Exception:
+ parametric = False
+ if parametric:
+ cc = []
+ for c_vals in c:
+ tt, cc_val, kk = insert(x, [t, c_vals, k], m)
+ cc.append(cc_val)
+ return (tt, cc, kk)
+ else:
+ tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
+ if ier == 10:
+ raise ValueError("Invalid input data")
+ if ier:
+ raise TypeError("An error occurred")
+ return (tt, cc, k)
+
+
def splder(tck, n=1):
    # see the docstring of `_fitpack_py/splder`
    if n < 0:
        # A negative derivative order is an antiderivative.
        return splantider(tck, -n)

    t, c, k = tck

    if n > k:
        raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
                         f"order of spline (k = {tck[2]!r})")

    with np.errstate(invalid='raise', divide='raise'):
        try:
            for _ in range(n):
                # Denominator of the B-spline derivative recurrence; see
                # e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5.
                denom = t[k + 1:-1] - t[1:-k - 1]
                # Append singleton axes to broadcast over any trailing
                # dimensions of the coefficient array.
                denom = denom.reshape(denom.shape + (1,) * (c.ndim - 1))
                # New coefficients of the differentiated spline.
                c = (c[1:-1 - k] - c[:-2 - k]) * k / denom
                # Zero-pad the coefficient array to the length of the knot
                # vector (FITPACK convention).
                c = np.concatenate([c, np.zeros((k,) + c.shape[1:])])
                # Drop one knot at each end and lower the degree.
                t = t[1:-1]
                k -= 1
        except FloatingPointError as e:
            # A zero knot span means repeated interior knots: the spline
            # is not differentiable that many times.
            raise ValueError(("The spline has internal repeated knots "
                              "and is not differentiable %d times") % n) from e

    return t, c, k
+
+
def splantider(tck, n=1):
    # see the docstring of `_fitpack_py/splantider`
    if n < 0:
        # A negative antiderivative order is a derivative.
        return splder(tck, -n)

    t, c, k = tck

    for _ in range(n):
        # Inverse of the operations performed by `splder`.

        # Knot-span lengths, broadcast over any trailing dims of `c`.
        dt = t[k + 1:] - t[:-k - 1]
        dt = dt.reshape(dt.shape + (1,) * (c.ndim - 1))
        # Antiderivative coefficients via a running sum.
        c = np.cumsum(c[:-k - 1] * dt, axis=0) / (k + 1)
        # Prepend a zero and repeat the last coefficient (FITPACK-style
        # padding) to match the extended knot vector below.
        head = np.zeros((1,) + c.shape[1:])
        tail = np.broadcast_to(c[-1], (k + 2,) + c.shape[1:])
        c = np.concatenate([head, c, tail])
        # Repeat the boundary knots once and raise the degree.
        t = np.concatenate([t[:1], t, t[-1:]])
        k += 1

    return t, c, k
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..91ee711fead98bacd3f15b4175520a4d387390df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_fitpack_py.py
@@ -0,0 +1,796 @@
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+ 'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+
import warnings

import numpy as np

# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint  # noqa: F401
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+ full_output=0, nest=None, per=0, quiet=1):
+ """
+ Find the B-spline representation of an N-D curve.
+
+ Given a list of N rank-1 arrays, `x`, which represent a curve in
+ N-dimensional space parametrized by `u`, find a smooth approximating
+ spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
+
+ Parameters
+ ----------
+ x : array_like
+ A list of sample vector arrays representing the curve.
+ w : array_like, optional
+ Strictly positive rank-1 array of weights the same length as `x[0]`.
+ The weights are used in computing the weighted least-squares spline
+ fit. If the errors in the `x` values have standard-deviation given by
+ the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
+ u : array_like, optional
+ An array of parameter values. If not given, these values are
+ calculated automatically as ``M = len(x[0])``, where
+
+ v[0] = 0
+
+ v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
+
+ u[i] = v[i] / v[M-1]
+
+ ub, ue : int, optional
+ The end-points of the parameters interval. Defaults to
+ u[0] and u[-1].
+ k : int, optional
+ Degree of the spline. Cubic splines are recommended.
+ Even values of `k` should be avoided especially with a small s-value.
+ ``1 <= k <= 5``, default is 3.
+ task : int, optional
+ If task==0 (default), find t and c for a given smoothing factor, s.
+ If task==1, find t and c for another value of the smoothing factor, s.
+ There must have been a previous call with task=0 or task=1
+ for the same set of data.
+ If task=-1 find the weighted least square spline for a given set of
+ knots, t.
+ s : float, optional
+ A smoothing condition. The amount of smoothness is determined by
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
+ where g(x) is the smoothed interpolation of (x,y). The user can
+ use `s` to control the trade-off between closeness and smoothness
+ of fit. Larger `s` means more smoothing while smaller values of `s`
+ indicate less smoothing. Recommended values of `s` depend on the
+ weights, w. If the weights represent the inverse of the
+ standard-deviation of y, then a good `s` value should be found in
+ the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
+ data points in x, y, and w.
+ t : array, optional
+ The knots needed for ``task=-1``.
+ There must be at least ``2*k+2`` knots.
+ full_output : int, optional
+ If non-zero, then return optional outputs.
+ nest : int, optional
+ An over-estimate of the total number of knots of the spline to
+ help in determining the storage space. By default nest=m/2.
+ Always large enough is nest=m+k+1.
+ per : int, optional
+ If non-zero, data points are considered periodic with period
+ ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+ returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
+ quiet : int, optional
+ Non-zero to suppress messages.
+
+ Returns
+ -------
+ tck : tuple
+ A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
+ coefficients, and the degree of the spline.
+ u : array
+ An array of the values of the parameter.
+ fp : float
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int
+ An integer flag about splrep success. Success is indicated
+ if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+ Otherwise an error is raised.
+ msg : str
+ A message corresponding to the integer flag, ier.
+
+ See Also
+ --------
+ splrep, splev, sproot, spalde, splint,
+ bisplrep, bisplev
+ UnivariateSpline, BivariateSpline
+ BSpline
+ make_interp_spline
+
+ Notes
+ -----
+ See `splev` for evaluation of the spline and its derivatives.
+ The number of dimensions N must be smaller than 11.
+
+ The number of coefficients in the `c` array is ``k+1`` less than the number
+ of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
+ the array of coefficients to have the same length as the array of knots.
+ These additional coefficients are ignored by evaluation routines, `splev`
+ and `BSpline`.
+
+ References
+ ----------
+ .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
+ parametric splines, Computer Graphics and Image Processing",
+ 20 (1982) 171-184.
+ .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
+ parametric splines", report tw55, Dept. Computer Science,
+ K.U.Leuven, 1981.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ Generate a discretization of a limacon curve in the polar coordinates:
+
+ >>> import numpy as np
+ >>> phi = np.linspace(0, 2.*np.pi, 40)
+ >>> r = 0.5 + np.cos(phi) # polar coords
+ >>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
+
+ And interpolate:
+
+ >>> from scipy.interpolate import splprep, splev
+ >>> tck, u = splprep([x, y], s=0)
+ >>> new_points = splev(u, tck)
+
+ Notice that (i) we force interpolation by using `s=0`,
+ (ii) the parameterization, ``u``, is generated automatically.
+ Now plot the result:
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig, ax = plt.subplots()
+ >>> ax.plot(x, y, 'ro')
+ >>> ax.plot(new_points[0], new_points[1], 'r-')
+ >>> plt.show()
+
+ """
+
+ res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
+ quiet)
+ return res
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+ full_output=0, per=0, quiet=1):
+ """
+ Find the B-spline representation of a 1-D curve.
+
+ Given the set of data points ``(x[i], y[i])`` determine a smooth spline
+ approximation of degree k on the interval ``xb <= x <= xe``.
+
+ Parameters
+ ----------
+ x, y : array_like
+ The data points defining a curve ``y = f(x)``.
+ w : array_like, optional
+ Strictly positive rank-1 array of weights the same length as `x` and `y`.
+ The weights are used in computing the weighted least-squares spline
+ fit. If the errors in the `y` values have standard-deviation given by the
+ vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
+ xb, xe : float, optional
+ The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
+ respectively.
+ k : int, optional
+ The degree of the spline fit. It is recommended to use cubic splines.
+ Even values of `k` should be avoided especially with small `s` values.
+ ``1 <= k <= 5``.
+ task : {1, 0, -1}, optional
+ If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
+
+ If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
+ `s`. There must have been a previous call with ``task=0`` or ``task=1`` for
+ the same set of data (``t`` will be stored an used internally)
+
+ If ``task=-1`` find the weighted least square spline for a given set of
+ knots, ``t``. These should be interior knots as knots on the ends will be
+ added automatically.
+ s : float, optional
+ A smoothing condition. The amount of smoothness is determined by
+ satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
+ is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
+ the tradeoff between closeness and smoothness of fit. Larger `s` means
+ more smoothing while smaller values of `s` indicate less smoothing.
+ Recommended values of `s` depend on the weights, `w`. If the weights
+ represent the inverse of the standard-deviation of `y`, then a good `s`
+ value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
+ the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
+ weights are supplied. ``s = 0.0`` (interpolating) if no weights are
+ supplied.
+ t : array_like, optional
+ The knots needed for ``task=-1``. If given then task is automatically set
+ to ``-1``.
+ full_output : bool, optional
+ If non-zero, then return optional outputs.
+ per : bool, optional
+ If non-zero, data points are considered periodic with period ``x[m-1]`` -
+ ``x[0]`` and a smooth periodic spline approximation is returned. Values of
+ ``y[m-1]`` and ``w[m-1]`` are not used.
+ The default is zero, corresponding to boundary condition 'not-a-knot'.
+ quiet : bool, optional
+ Non-zero to suppress messages.
+
+ Returns
+ -------
+ tck : tuple
+ A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
+ coefficients, and the degree of the spline.
+ fp : array, optional
+ The weighted sum of squared residuals of the spline approximation.
+ ier : int, optional
+ An integer flag about splrep success. Success is indicated if ``ier<=0``.
+ If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
+ error is raised.
+ msg : str, optional
+ A message corresponding to the integer flag, `ier`.
+
+ See Also
+ --------
+ UnivariateSpline, BivariateSpline
+ splprep, splev, sproot, spalde, splint
+ bisplrep, bisplev
+ BSpline
+ make_interp_spline
+
+ Notes
+ -----
+ See `splev` for evaluation of the spline and its derivatives. Uses the
+ FORTRAN routine ``curfit`` from FITPACK.
+
+ The user is responsible for assuring that the values of `x` are unique.
+ Otherwise, `splrep` will not return sensible results.
+
+ If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
+ i.e., there must be a subset of data points ``x[j]`` such that
+ ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+ This routine zero-pads the coefficients array ``c`` to have the same length
+ as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
+ by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
+ `splprep`, which does not zero-pad the coefficients.
+
+ The default boundary condition is 'not-a-knot', i.e. the first and second
+ segment at a curve end are the same polynomial. More boundary conditions are
+ available in `CubicSpline`.
+
+ References
+ ----------
+ Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+ .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+ integration of experimental data using spline functions",
+ J.Comp.Appl.Maths 1 (1975) 165-184.
+ .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+ grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+ 1286-1304.
+ .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+ functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
+ .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ You can interpolate 1-D points with a B-spline curve.
+ Further examples are given in
+ :ref:`in the tutorial `.
+
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.interpolate import splev, splrep
+ >>> x = np.linspace(0, 10, 10)
+ >>> y = np.sin(x)
+ >>> spl = splrep(x, y)
+ >>> x2 = np.linspace(0, 10, 200)
+ >>> y2 = splev(x2, spl)
+ >>> plt.plot(x, y, 'o', x2, y2)
+ >>> plt.show()
+
+ """
+ res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
+ return res
+
+
+def splev(x, tck, der=0, ext=0):
+ """
+ Evaluate a B-spline or its derivatives.
+
+ Given the knots and coefficients of a B-spline representation, evaluate
+ the value of the smoothing polynomial and its derivatives. This is a
+ wrapper around the FORTRAN routines splev and splder of FITPACK.
+
+ Parameters
+ ----------
+ x : array_like
+ An array of points at which to return the value of the smoothed
+ spline or its derivatives. If `tck` was returned from `splprep`,
+ then the parameter values, u should be given.
+ tck : 3-tuple or a BSpline object
+ If a tuple, then it should be a sequence of length 3 returned by
+ `splrep` or `splprep` containing the knots, coefficients, and degree
+ of the spline. (Also see Notes.)
+ der : int, optional
+ The order of derivative of the spline to compute (must be less than
+ or equal to k, the degree of the spline).
+ ext : int, optional
+ Controls the value returned for elements of ``x`` not in the
+ interval defined by the knot sequence.
+
+ * if ext=0, return the extrapolated value.
+ * if ext=1, return 0
+ * if ext=2, raise a ValueError
+ * if ext=3, return the boundary value.
+
+ The default value is 0.
+
+ Returns
+ -------
+ y : ndarray or list of ndarrays
+ An array of values representing the spline function evaluated at
+ the points in `x`. If `tck` was returned from `splprep`, then this
+ is a list of arrays representing the curve in an N-D space.
+
+ See Also
+ --------
+ splprep, splrep, sproot, spalde, splint
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using `BSpline` objects.
+
+ References
+ ----------
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+ Theory, 6, p.50-62, 1972.
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+ Applics, 10, p.134-149, 1972.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ Examples are given :ref:`in the tutorial `.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
+ "not allowed. Use BSpline.__call__(x) instead.")
+ raise ValueError(mesg)
+
+ # remap the out-of-bounds behavior
+ try:
+ extrapolate = {0: True, }[ext]
+ except KeyError as e:
+ raise ValueError("Extrapolation mode %s is not supported "
+ "by BSpline." % ext) from e
+
+ return tck(x, der, extrapolate=extrapolate)
+ else:
+ return _impl.splev(x, tck, der, ext)
+
+
+def splint(a, b, tck, full_output=0):
+ """
+ Evaluate the definite integral of a B-spline between two given points.
+
+ Parameters
+ ----------
+ a, b : float
+ The end-points of the integration interval.
+ tck : tuple or a BSpline instance
+ If a tuple, then it should be a sequence of length 3, containing the
+ vector of knots, the B-spline coefficients, and the degree of the
+ spline (see `splev`).
+ full_output : int, optional
+ Non-zero to return optional output.
+
+ Returns
+ -------
+ integral : float
+ The resulting integral.
+ wrk : ndarray
+ An array containing the integrals of the normalized B-splines
+ defined on the set of knots.
+ (Only returned if `full_output` is non-zero)
+
+ See Also
+ --------
+ splprep, splrep, sproot, spalde, splev
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ `splint` silently assumes that the spline function is zero outside the data
+ interval (`a`, `b`).
+
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using the `BSpline` objects.
+
+ References
+ ----------
+ .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
+ J. Inst. Maths Applics, 17, p.37-41, 1976.
+ .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+ Examples are given :ref:`in the tutorial `.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
+ "not allowed. Use BSpline.integrate() instead.")
+ raise ValueError(mesg)
+
+ if full_output != 0:
+ mesg = ("full_output = %s is not supported. Proceeding as if "
+ "full_output = 0" % full_output)
+
+ return tck.integrate(a, b, extrapolate=False)
+ else:
+ return _impl.splint(a, b, tck, full_output)
+
+
+def sproot(tck, mest=10):
+ """
+ Find the roots of a cubic B-spline.
+
+ Given the knots (>=8) and coefficients of a cubic B-spline return the
+ roots of the spline.
+
+ Parameters
+ ----------
+ tck : tuple or a BSpline object
+ If a tuple, then it should be a sequence of length 3, containing the
+ vector of knots, the B-spline coefficients, and the degree of the
+ spline.
+ The number of knots must be >= 8, and the degree must be 3.
+ The knots must be a montonically increasing sequence.
+ mest : int, optional
+ An estimate of the number of zeros (Default is 10).
+
+ Returns
+ -------
+ zeros : ndarray
+ An array giving the roots of the spline.
+
+ See Also
+ --------
+ splprep, splrep, splint, spalde, splev
+ bisplrep, bisplev
+ BSpline
+
+ Notes
+ -----
+ Manipulating the tck-tuples directly is not recommended. In new code,
+ prefer using the `BSpline` objects.
+
+ References
+ ----------
+ .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+ Theory, 6, p.50-62, 1972.
+ .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+ Applics, 10, p.134-149, 1972.
+ .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+ on Numerical Analysis, Oxford University Press, 1993.
+
+ Examples
+ --------
+
+ For some data, this method may miss a root. This happens when one of
+ the spline knots (which FITPACK places automatically) happens to
+ coincide with the true root. A workaround is to convert to `PPoly`,
+ which uses a different root-finding algorithm.
+
+ For example,
+
+ >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
+ >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
+ ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
+ ... 6.520430e-03, 8.170770e-03]
+ >>> from scipy.interpolate import splrep, sproot, PPoly
+ >>> tck = splrep(x, y, s=0)
+ >>> sproot(tck)
+ array([], dtype=float64)
+
+ Converting to a PPoly object does find the roots at `x=2`:
+
+ >>> ppoly = PPoly.from_spline(tck)
+ >>> ppoly.roots(extrapolate=False)
+ array([2.])
+
+
+ Further examples are given :ref:`in the tutorial
+ `.
+
+ """
+ if isinstance(tck, BSpline):
+ if tck.c.ndim > 1:
+ mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
+ "not allowed.")
+ raise ValueError(mesg)
+
+ t, c, k = tck.tck
+
+ # _impl.sproot expects the interpolation axis to be last, so roll it.
+ # NB: This transpose is a no-op if c is 1D.
+ sh = tuple(range(c.ndim))
+ c = c.transpose(sh[1:] + (0,))
+ return _impl.sproot((t, c, k), mest)
+ else:
+ return _impl.sproot(tck, mest)
+
+
+def spalde(x, tck):
+ """
+ Evaluate all derivatives of a B-spline.
+
+ Given the knots and coefficients of a cubic B-spline compute all
+ derivatives up to order k at a point (or set of points).
+
+ Parameters
+ ----------
+ x : array_like
+ A point or a set of points at which to evaluate the derivatives.
+ Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
+ tck : tuple
+ A tuple (t,c,k) containing the vector of knots,
+ the B-spline coefficients, and the degree of the spline.
+
+ Returns
+ -------
+ results : {ndarray, list of ndarrays}
+ An array (or a list of arrays) containing all derivatives
+ up to order k inclusive for each point `x`.
+
+ See Also
+ --------
+ splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
+ UnivariateSpline, BivariateSpline
+
+ References
+ ----------
+ .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
+ 6 (1972) 50-62.
+ .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
+ applics 10 (1972) 134-149.
+ .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
+ Numerical Analysis, Oxford University Press, 1993.
+
+ """
+ if isinstance(tck, BSpline):
+ raise TypeError("spalde does not accept BSpline instances.")
+ else:
+ return _impl.spalde(x, tck)
+
+
+def insert(x, tck, m=1, per=0):
+ """
+ Insert knots into a B-spline.
+
+ Given the knots and coefficients of a B-spline representation, create a
+ new B-spline with a knot inserted `m` times at point `x`.
+ This is a wrapper around the FORTRAN routine insert of FITPACK.
+
+ Parameters
+ ----------
+ x (u) : float
+ A knot value at which to insert a new knot. If `tck` was returned
+ from ``splprep``, then the parameter values, u should be given.
+ tck : a `BSpline` instance or a tuple
+ If tuple, then it is expected to be a tuple (t,c,k) containing
+ the vector of knots, the B-spline coefficients, and the degree of
+ the spline.
+ m : int, optional
+ The number of times to insert the given knot (its multiplicity).
+ Default is 1.
+ per : int, optional
+ If non-zero, the input spline is considered periodic.
+
+ Returns
+ -------
+ BSpline instance or a tuple
+ A new B-spline with knots t, coefficients c, and degree k.
+ ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
+ In case of a periodic spline (``per != 0``) there must be
+ either at least k interior knots t(j) satisfying ``t(k+1)>> from scipy.interpolate import splrep, insert
+ >>> import numpy as np
+ >>> x = np.linspace(0, 10, 5)
+ >>> y = np.sin(x)
+ >>> tck = splrep(x, y)
+ >>> tck[0]
+ array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
+
+ A knot is inserted:
+
+ >>> tck_inserted = insert(3, tck)
+ >>> tck_inserted[0]
+ array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
+
+ Some knots are inserted:
+
+ >>> tck_inserted2 = insert(8, tck, m=3)
+ >>> tck_inserted2[0]
+ array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
+
+ """
+ if isinstance(tck, BSpline):
+
+ t, c, k = tck.tck
+
+ # FITPACK expects the interpolation axis to be last, so roll it over
+ # NB: if c array is 1D, transposes are no-ops
+ sh = tuple(range(c.ndim))
+ c = c.transpose(sh[1:] + (0,))
+ t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
+
+ # and roll the last axis back
+ c_ = np.asarray(c_)
+ c_ = c_.transpose((sh[-1],) + sh[:-1])
+ return BSpline(t_, c_, k_)
+ else:
+ return _impl.insert(x, tck, m, per)
+
+
+def splder(tck, n=1):
+ """
+ Compute the spline representation of the derivative of a given spline
+
+ Parameters
+ ----------
+ tck : BSpline instance or a tuple of (t, c, k)
+ Spline whose derivative to compute
+ n : int, optional
+ Order of derivative to evaluate. Default: 1
+
+ Returns
+ -------
+ `BSpline` instance or tuple
+ Spline of order k2=k-n representing the derivative
+ of the input spline.
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
+ a BSpline object is constructed and returned.
+
+ See Also
+ --------
+ splantider, splev, spalde
+ BSpline
+
+ Notes
+ -----
+
+ .. versionadded:: 0.13.0
+
+ Examples
+ --------
+ This can be used for finding maxima of a curve:
+
+ >>> from scipy.interpolate import splrep, splder, sproot
+ >>> import numpy as np
+ >>> x = np.linspace(0, 10, 70)
+ >>> y = np.sin(x)
+ >>> spl = splrep(x, y, k=4)
+
+ Now, differentiate the spline and find the zeros of the
+ derivative. (NB: `sproot` only works for order 3 splines, so we
+ fit an order 4 spline):
+
+ >>> dspl = splder(spl)
+ >>> sproot(dspl) / np.pi
+ array([ 0.50000001, 1.5 , 2.49999998])
+
+ This agrees well with roots :math:`\\pi/2 + n\\pi` of
+ :math:`\\cos(x) = \\sin'(x)`.
+
+ """
+ if isinstance(tck, BSpline):
+ return tck.derivative(n)
+ else:
+ return _impl.splder(tck, n)
+
+
+def splantider(tck, n=1):
+ """
+ Compute the spline for the antiderivative (integral) of a given spline.
+
+ Parameters
+ ----------
+ tck : BSpline instance or a tuple of (t, c, k)
+ Spline whose antiderivative to compute
+ n : int, optional
+ Order of antiderivative to evaluate. Default: 1
+
+ Returns
+ -------
+ BSpline instance or a tuple of (t2, c2, k2)
+ Spline of order k2=k+n representing the antiderivative of the input
+ spline.
+ A tuple is returned iff the input argument `tck` is a tuple, otherwise
+ a BSpline object is constructed and returned.
+
+ See Also
+ --------
+ splder, splev, spalde
+ BSpline
+
+ Notes
+ -----
+ The `splder` function is the inverse operation of this function.
+ Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
+ rounding error.
+
+ .. versionadded:: 0.13.0
+
+ Examples
+ --------
+ >>> from scipy.interpolate import splrep, splder, splantider, splev
+ >>> import numpy as np
+ >>> x = np.linspace(0, np.pi/2, 70)
+ >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+ >>> spl = splrep(x, y)
+
+ The derivative is the inverse operation of the antiderivative,
+ although some floating point error accumulates:
+
+ >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
+ (array(2.1565429877197317), array(2.1565429877201865))
+
+ Antiderivative can be used to evaluate definite integrals:
+
+ >>> ispl = splantider(spl)
+ >>> splev(np.pi/2, ispl) - splev(0, ispl)
+ 2.2572053588768486
+
+ This is indeed an approximation to the complete elliptic integral
+ :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+ >>> from scipy.special import ellipk
+ >>> ellipk(0.8)
+ 2.2572053268208538
+
+ """
+ if isinstance(tck, BSpline):
+ return tck.antiderivative(n)
+ else:
+ return _impl.splantider(tck, n)
+
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_interpolate.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d9d460f52e7541721a61967750ca20896a5bfc8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_interpolate.py
@@ -0,0 +1,2473 @@
+__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly']
+
+from math import prod
+import warnings
+
+import numpy as np
+from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
+ ravel, poly1d, asarray, intp)
+
+import scipy.special as spec
+from scipy._lib._util import copy_if_needed
+from scipy.special import comb
+
+from . import _fitpack_py
+from . import dfitpack
+from ._polyint import _Interpolator1D
+from . import _ppoly
+from .interpnd import _ndim_coords_from_arrays
+from ._bsplines import make_interp_spline, BSpline
+
+
+def lagrange(x, w):
+ r"""
+ Return a Lagrange interpolating polynomial.
+
+ Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
+ polynomial through the points ``(x, w)``.
+
+ Warning: This implementation is numerically unstable. Do not expect to
+ be able to use more than about 20 points even if they are chosen optimally.
+
+ Parameters
+ ----------
+ x : array_like
+ `x` represents the x-coordinates of a set of datapoints.
+ w : array_like
+ `w` represents the y-coordinates of a set of datapoints, i.e., f(`x`).
+
+ Returns
+ -------
+ lagrange : `numpy.poly1d` instance
+ The Lagrange interpolating polynomial.
+
+ Examples
+ --------
+ Interpolate :math:`f(x) = x^3` by 3 points.
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import lagrange
+ >>> x = np.array([0, 1, 2])
+ >>> y = x**3
+ >>> poly = lagrange(x, y)
+
+ Since there are only 3 points, Lagrange polynomial has degree 2. Explicitly,
+ it is given by
+
+ .. math::
+
+ \begin{aligned}
+ L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
+ &= x (-2 + 3x)
+ \end{aligned}
+
+ >>> from numpy.polynomial.polynomial import Polynomial
+ >>> Polynomial(poly.coef[::-1]).coef
+ array([ 0., -2., 3.])
+
+ >>> import matplotlib.pyplot as plt
+ >>> x_new = np.arange(0, 2.1, 0.1)
+ >>> plt.scatter(x, y, label='data')
+ >>> plt.plot(x_new, Polynomial(poly.coef[::-1])(x_new), label='Polynomial')
+ >>> plt.plot(x_new, 3*x_new**2 - 2*x_new + 0*x_new,
+ ... label=r"$3 x^2 - 2 x$", linestyle='-.')
+ >>> plt.legend()
+ >>> plt.show()
+
+ """
+
+ M = len(x)
+ p = poly1d(0.0)
+ for j in range(M):
+ pt = poly1d(w[j])
+ for k in range(M):
+ if k == j:
+ continue
+ fac = x[j]-x[k]
+ pt *= poly1d([1.0, -x[k]])/fac
+ p += pt
+ return p
+
+
+# !! Need to find argument for keeping initialize. If it isn't
+# !! found, get rid of it!
+
+
+dep_mesg = """\
+`interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy 1.14.0.
+
+For legacy code, nearly bug-for-bug compatible replacements are
+`RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
+scattered 2D data.
+
+In new code, for regular grids use `RegularGridInterpolator` instead.
+For scattered data, prefer `LinearNDInterpolator` or
+`CloughTocher2DInterpolator`.
+
+For more details see
+`https://scipy.github.io/devdocs/notebooks/interp_transition_guide.html`
+"""
+
+class interp2d:
+ """
+ interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
+ fill_value=None)
+
+ .. deprecated:: 1.10.0
+
+ `interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy
+ 1.14.0.
+
+ For legacy code, nearly bug-for-bug compatible replacements are
+ `RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
+ scattered 2D data.
+
+ In new code, for regular grids use `RegularGridInterpolator` instead.
+ For scattered data, prefer `LinearNDInterpolator` or
+ `CloughTocher2DInterpolator`.
+
+ For more details see
+ `https://scipy.github.io/devdocs/notebooks/interp_transition_guide.html
+ `_
+
+
+ Interpolate over a 2-D grid.
+
+ `x`, `y` and `z` are arrays of values used to approximate some function
+ f: ``z = f(x, y)`` which returns a scalar value `z`. This class returns a
+ function whose call method uses spline interpolation to find the value
+ of new points.
+
+ If `x` and `y` represent a regular grid, consider using
+ `RectBivariateSpline`.
+
+ If `z` is a vector value, consider using `interpn`.
+
+ Note that calling `interp2d` with NaNs present in input values, or with
+ decreasing values in `x` an `y` results in undefined behaviour.
+
+ Methods
+ -------
+ __call__
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays defining the data point coordinates.
+ The data point coordinates need to be sorted by increasing order.
+
+ If the points lie on a regular grid, `x` can specify the column
+ coordinates and `y` the row coordinates, for example::
+
+ >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
+
+ Otherwise, `x` and `y` must specify the full coordinates for each
+ point, for example::
+
+ >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,4,2,5,3,6]
+
+ If `x` and `y` are multidimensional, they are flattened before use.
+ z : array_like
+ The values of the function to interpolate at the data points. If
+ `z` is a multidimensional array, it is flattened before use assuming
+ Fortran-ordering (order='F'). The length of a flattened `z` array
+ is either len(`x`)*len(`y`) if `x` and `y` specify the column and
+ row coordinates or ``len(z) == len(x) == len(y)`` if `x` and `y`
+ specify coordinates for each point.
+ kind : {'linear', 'cubic', 'quintic'}, optional
+ The kind of spline interpolation to use. Default is 'linear'.
+ copy : bool, optional
+ If True, the class makes internal copies of x, y and z.
+ If False, references may be used. The default is to copy.
+ bounds_error : bool, optional
+ If True, when interpolated values are requested outside of the
+ domain of the input data (x,y), a ValueError is raised.
+ If False, then `fill_value` is used.
+ fill_value : number, optional
+ If provided, the value to use for points outside of the
+ interpolation domain. If omitted (None), values outside
+ the domain are extrapolated via nearest-neighbor extrapolation.
+
+ See Also
+ --------
+ RectBivariateSpline :
+ Much faster 2-D interpolation if your input data is on a grid
+ bisplrep, bisplev :
+ Spline interpolation based on FITPACK
+ BivariateSpline : a more recent wrapper of the FITPACK routines
+ interp1d : 1-D version of this function
+ RegularGridInterpolator : interpolation on a regular or rectilinear grid
+ in arbitrary dimensions.
+ interpn : Multidimensional interpolation on regular grids (wraps
+ `RegularGridInterpolator` and `RectBivariateSpline`).
+
+ Notes
+ -----
+ The minimum number of data points required along the interpolation
+ axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
+ quintic interpolation.
+
+ The interpolator is constructed by `bisplrep`, with a smoothing factor
+ of 0. If more control over smoothing is needed, `bisplrep` should be
+ used directly.
+
+ The coordinates of the data points to interpolate `xnew` and `ynew`
+ have to be sorted by ascending order.
+ `interp2d` is legacy and is not
+ recommended for use in new code. New code should use
+ `RegularGridInterpolator` instead.
+
+ Examples
+ --------
+ Construct a 2-D grid and interpolate on it:
+
+ >>> import numpy as np
+ >>> from scipy import interpolate
+ >>> x = np.arange(-5.01, 5.01, 0.25)
+ >>> y = np.arange(-5.01, 5.01, 0.25)
+ >>> xx, yy = np.meshgrid(x, y)
+ >>> z = np.sin(xx**2+yy**2)
+ >>> f = interpolate.interp2d(x, y, z, kind='cubic')
+
+ Now use the obtained interpolation function and plot the result:
+
+ >>> import matplotlib.pyplot as plt
+ >>> xnew = np.arange(-5.01, 5.01, 1e-2)
+ >>> ynew = np.arange(-5.01, 5.01, 1e-2)
+ >>> znew = f(xnew, ynew)
+ >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
+ >>> plt.show()
+ """
+
+ def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
+ fill_value=None):
+ warnings.warn(dep_mesg, DeprecationWarning, stacklevel=2)
+
+ x = ravel(x)
+ y = ravel(y)
+ z = asarray(z)
+
+ rectangular_grid = (z.size == len(x) * len(y))
+ if rectangular_grid:
+ if z.ndim == 2:
+ if z.shape != (len(y), len(x)):
+ raise ValueError("When on a regular grid with x.size = m "
+ "and y.size = n, if z.ndim == 2, then z "
+ "must have shape (n, m)")
+ if not np.all(x[1:] >= x[:-1]):
+ j = np.argsort(x)
+ x = x[j]
+ z = z[:, j]
+ if not np.all(y[1:] >= y[:-1]):
+ j = np.argsort(y)
+ y = y[j]
+ z = z[j, :]
+ z = ravel(z.T)
+ else:
+ z = ravel(z)
+ if len(x) != len(y):
+ raise ValueError(
+ "x and y must have equal lengths for non rectangular grid")
+ if len(z) != len(x):
+ raise ValueError(
+ "Invalid length for input z for non rectangular grid")
+
+ interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
+ try:
+ kx = ky = interpolation_types[kind]
+ except KeyError as e:
+ raise ValueError(
+ f"Unsupported interpolation type {repr(kind)}, must be "
+ f"either of {', '.join(map(repr, interpolation_types))}."
+ ) from e
+
+ if not rectangular_grid:
+ # TODO: surfit is really not meant for interpolation!
+ self.tck = _fitpack_py.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
+ else:
+ nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
+ x, y, z, None, None, None, None,
+ kx=kx, ky=ky, s=0.0)
+ self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
+ kx, ky)
+
+ self.bounds_error = bounds_error
+ self.fill_value = fill_value
+ self.x, self.y, self.z = (array(a, copy=copy) for a in (x, y, z))
+
+ self.x_min, self.x_max = np.amin(x), np.amax(x)
+ self.y_min, self.y_max = np.amin(y), np.amax(y)
+
+ def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
+ """Interpolate the function.
+
+ Parameters
+ ----------
+ x : 1-D array
+ x-coordinates of the mesh on which to interpolate.
+ y : 1-D array
+ y-coordinates of the mesh on which to interpolate.
+ dx : int >= 0, < kx
+ Order of partial derivatives in x.
+ dy : int >= 0, < ky
+ Order of partial derivatives in y.
+ assume_sorted : bool, optional
+ If False, values of `x` and `y` can be in any order and they are
+ sorted first.
+ If True, `x` and `y` have to be arrays of monotonically
+ increasing values.
+
+ Returns
+ -------
+ z : 2-D array with shape (len(y), len(x))
+ The interpolated values.
+ """
+ warnings.warn(dep_mesg, DeprecationWarning, stacklevel=2)
+
+ x = atleast_1d(x)
+ y = atleast_1d(y)
+
+ if x.ndim != 1 or y.ndim != 1:
+ raise ValueError("x and y should both be 1-D arrays")
+
+ if not assume_sorted:
+ x = np.sort(x, kind="mergesort")
+ y = np.sort(y, kind="mergesort")
+
+ if self.bounds_error or self.fill_value is not None:
+ out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
+ out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
+
+ any_out_of_bounds_x = np.any(out_of_bounds_x)
+ any_out_of_bounds_y = np.any(out_of_bounds_y)
+
+ if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
+ raise ValueError(
+ f"Values out of range; x must be in {(self.x_min, self.x_max)!r}, "
+ f"y in {(self.y_min, self.y_max)!r}"
+ )
+
+ z = _fitpack_py.bisplev(x, y, self.tck, dx, dy)
+ z = atleast_2d(z)
+ z = transpose(z)
+
+ if self.fill_value is not None:
+ if any_out_of_bounds_x:
+ z[:, out_of_bounds_x] = self.fill_value
+ if any_out_of_bounds_y:
+ z[out_of_bounds_y, :] = self.fill_value
+
+ if len(z) == 1:
+ z = z[0]
+ return array(z)
+
+
+def _check_broadcast_up_to(arr_from, shape_to, name):
+ """Helper to check that arr_from broadcasts up to shape_to"""
+ shape_from = arr_from.shape
+ if len(shape_to) >= len(shape_from):
+ for t, f in zip(shape_to[::-1], shape_from[::-1]):
+ if f != 1 and f != t:
+ break
+ else: # all checks pass, do the upcasting that we need later
+ if arr_from.size != 1 and arr_from.shape != shape_to:
+ arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
+ return arr_from.ravel()
+ # at least one check failed
+ raise ValueError(f'{name} argument must be able to broadcast up '
+ f'to shape {shape_to} but had shape {shape_from}')
+
+
+def _do_extrapolate(fill_value):
+ """Helper to check if fill_value == "extrapolate" without warnings"""
+ return (isinstance(fill_value, str) and
+ fill_value == 'extrapolate')
+
+
+class interp1d(_Interpolator1D):
+ """
+ Interpolate a 1-D function.
+
+ .. legacy:: class
+
+ For a guide to the intended replacements for `interp1d` see
+ :ref:`tutorial-interpolate_1Dsection`.
+
+ `x` and `y` are arrays of values used to approximate some function f:
+ ``y = f(x)``. This class returns a function whose call method uses
+ interpolation to find the value of new points.
+
+ Parameters
+ ----------
+ x : (npoints, ) array_like
+ A 1-D array of real values.
+ y : (..., npoints, ...) array_like
+ A N-D array of real values. The length of `y` along the interpolation
+ axis must be equal to the length of `x`. Use the ``axis`` parameter
+ to select correct axis. Unlike other interpolators, the default
+ interpolation axis is the last axis of `y`.
+ kind : str or int, optional
+ Specifies the kind of interpolation as a string or as an integer
+ specifying the order of the spline interpolator to use.
+ The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
+ 'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
+ 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
+ zeroth, first, second or third order; 'previous' and 'next' simply
+ return the previous or next value of the point; 'nearest-up' and
+ 'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
+ in that 'nearest-up' rounds up and 'nearest' rounds down. Default
+ is 'linear'.
+ axis : int, optional
+ Axis in the ``y`` array corresponding to the x-coordinate values. Unlike
+ other interpolators, defaults to ``axis=-1``.
+ copy : bool, optional
+ If ``True``, the class makes internal copies of x and y. If ``False``,
+ references to ``x`` and ``y`` are used if possible. The default is to copy.
+ bounds_error : bool, optional
+ If True, a ValueError is raised any time interpolation is attempted on
+ a value outside of the range of x (where extrapolation is
+ necessary). If False, out of bounds values are assigned `fill_value`.
+ By default, an error is raised unless ``fill_value="extrapolate"``.
+ fill_value : array-like or (array-like, array_like) or "extrapolate", optional
+ - if a ndarray (or float), this value will be used to fill in for
+ requested points outside of the data range. If not provided, then
+ the default is NaN. The array-like must broadcast properly to the
+ dimensions of the non-interpolation axes.
+ - If a two-element tuple, then the first element is used as a
+ fill value for ``x_new < x[0]`` and the second element is used for
+ ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
+ list or ndarray, regardless of shape) is taken to be a single
+ array-like argument meant to be used for both bounds as
+ ``below, above = fill_value, fill_value``. Using a two-element tuple
+ or ndarray requires ``bounds_error=False``.
+
+ .. versionadded:: 0.17.0
+ - If "extrapolate", then points outside the data range will be
+ extrapolated.
+
+ .. versionadded:: 0.17.0
+ assume_sorted : bool, optional
+ If False, values of `x` can be in any order and they are sorted first.
+ If True, `x` has to be an array of monotonically increasing values.
+
+ Attributes
+ ----------
+ fill_value
+
+ Methods
+ -------
+ __call__
+
+ See Also
+ --------
+ splrep, splev
+ Spline interpolation/smoothing based on FITPACK.
+ UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
+ interp2d : 2-D interpolation
+
+ Notes
+ -----
+ Calling `interp1d` with NaNs present in input values results in
+ undefined behaviour.
+
+ Input values `x` and `y` must be convertible to `float` values like
+ `int` or `float`.
+
+ If the values in `x` are not unique, the resulting behavior is
+ undefined and specific to the choice of `kind`, i.e., changing
+ `kind` will change the behavior for duplicates.
+
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import interpolate
+ >>> x = np.arange(0, 10)
+ >>> y = np.exp(-x/3.0)
+ >>> f = interpolate.interp1d(x, y)
+
+ >>> xnew = np.arange(0, 9, 0.1)
+ >>> ynew = f(xnew) # use interpolation function returned by `interp1d`
+ >>> plt.plot(x, y, 'o', xnew, ynew, '-')
+ >>> plt.show()
+ """
+
+ def __init__(self, x, y, kind='linear', axis=-1,
+ copy=True, bounds_error=None, fill_value=np.nan,
+ assume_sorted=False):
+ """ Initialize a 1-D linear interpolation class."""
+ _Interpolator1D.__init__(self, x, y, axis=axis)
+
+ self.bounds_error = bounds_error # used by fill_value setter
+
+ # `copy` keyword semantics changed in NumPy 2.0, once that is
+ # the minimum version this can use `copy=None`.
+ self.copy = copy
+ if not copy:
+ self.copy = copy_if_needed
+
+ if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
+ order = {'zero': 0, 'slinear': 1,
+ 'quadratic': 2, 'cubic': 3}[kind]
+ kind = 'spline'
+ elif isinstance(kind, int):
+ order = kind
+ kind = 'spline'
+ elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
+ 'next'):
+ raise NotImplementedError("%s is unsupported: Use fitpack "
+ "routines for other types." % kind)
+ x = array(x, copy=self.copy)
+ y = array(y, copy=self.copy)
+
+ if not assume_sorted:
+ ind = np.argsort(x, kind="mergesort")
+ x = x[ind]
+ y = np.take(y, ind, axis=axis)
+
+ if x.ndim != 1:
+ raise ValueError("the x array must have exactly one dimension.")
+ if y.ndim == 0:
+ raise ValueError("the y array must have at least one dimension.")
+
+ # Force-cast y to a floating-point type, if it's not yet one
+ if not issubclass(y.dtype.type, np.inexact):
+ y = y.astype(np.float64)
+
+ # Backward compatibility
+ self.axis = axis % y.ndim
+
+ # Interpolation goes internally along the first axis
+ self.y = y
+ self._y = self._reshape_yi(self.y)
+ self.x = x
+ del y, x # clean up namespace to prevent misuse; use attributes
+ self._kind = kind
+
+ # Adjust to interpolation kind; store reference to *unbound*
+ # interpolation methods, in order to avoid circular references to self
+ # stored in the bound instance methods, and therefore delayed garbage
+ # collection. See: https://docs.python.org/reference/datamodel.html
+ if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
+ # Make a "view" of the y array that is rotated to the interpolation
+ # axis.
+ minval = 1
+ if kind == 'nearest':
+ # Do division before addition to prevent possible integer
+ # overflow
+ self._side = 'left'
+ self.x_bds = self.x / 2.0
+ self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
+
+ self._call = self.__class__._call_nearest
+ elif kind == 'nearest-up':
+ # Do division before addition to prevent possible integer
+ # overflow
+ self._side = 'right'
+ self.x_bds = self.x / 2.0
+ self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
+
+ self._call = self.__class__._call_nearest
+ elif kind == 'previous':
+ # Side for np.searchsorted and index for clipping
+ self._side = 'left'
+ self._ind = 0
+ # Move x by one floating point value to the left
+ self._x_shift = np.nextafter(self.x, -np.inf)
+ self._call = self.__class__._call_previousnext
+ if _do_extrapolate(fill_value):
+ self._check_and_update_bounds_error_for_extrapolation()
+ # assume y is sorted by x ascending order here.
+ fill_value = (np.nan, np.take(self.y, -1, axis))
+ elif kind == 'next':
+ self._side = 'right'
+ self._ind = 1
+ # Move x by one floating point value to the right
+ self._x_shift = np.nextafter(self.x, np.inf)
+ self._call = self.__class__._call_previousnext
+ if _do_extrapolate(fill_value):
+ self._check_and_update_bounds_error_for_extrapolation()
+ # assume y is sorted by x ascending order here.
+ fill_value = (np.take(self.y, 0, axis), np.nan)
+ else:
+ # Check if we can delegate to numpy.interp (2x-10x faster).
+ np_dtypes = (np.dtype(np.float64), np.dtype(int))
+ cond = self.x.dtype in np_dtypes and self.y.dtype in np_dtypes
+ cond = cond and self.y.ndim == 1
+ cond = cond and not _do_extrapolate(fill_value)
+
+ if cond:
+ self._call = self.__class__._call_linear_np
+ else:
+ self._call = self.__class__._call_linear
+ else:
+ minval = order + 1
+
+ rewrite_nan = False
+ xx, yy = self.x, self._y
+ if order > 1:
+ # Quadratic or cubic spline. If input contains even a single
+ # nan, then the output is all nans. We cannot just feed data
+ # with nans to make_interp_spline because it calls LAPACK.
+ # So, we make up a bogus x and y with no nans and use it
+ # to get the correct shape of the output, which we then fill
+ # with nans.
+ # For slinear or zero order spline, we just pass nans through.
+ mask = np.isnan(self.x)
+ if mask.any():
+ sx = self.x[~mask]
+ if sx.size == 0:
+ raise ValueError("`x` array is all-nan")
+ xx = np.linspace(np.nanmin(self.x),
+ np.nanmax(self.x),
+ len(self.x))
+ rewrite_nan = True
+ if np.isnan(self._y).any():
+ yy = np.ones_like(self._y)
+ rewrite_nan = True
+
+ self._spline = make_interp_spline(xx, yy, k=order,
+ check_finite=False)
+ if rewrite_nan:
+ self._call = self.__class__._call_nan_spline
+ else:
+ self._call = self.__class__._call_spline
+
+ if len(self.x) < minval:
+ raise ValueError("x and y arrays must have at "
+ "least %d entries" % minval)
+
+ self.fill_value = fill_value # calls the setter, can modify bounds_err
+
+ @property
+ def fill_value(self):
+ """The fill value."""
+ # backwards compat: mimic a public attribute
+ return self._fill_value_orig
+
+ @fill_value.setter
+ def fill_value(self, fill_value):
+ # extrapolation only works for nearest neighbor and linear methods
+ if _do_extrapolate(fill_value):
+ self._check_and_update_bounds_error_for_extrapolation()
+ self._extrapolate = True
+ else:
+ broadcast_shape = (self.y.shape[:self.axis] +
+ self.y.shape[self.axis + 1:])
+ if len(broadcast_shape) == 0:
+ broadcast_shape = (1,)
+ # it's either a pair (_below_range, _above_range) or a single value
+ # for both above and below range
+ if isinstance(fill_value, tuple) and len(fill_value) == 2:
+ below_above = [np.asarray(fill_value[0]),
+ np.asarray(fill_value[1])]
+ names = ('fill_value (below)', 'fill_value (above)')
+ for ii in range(2):
+ below_above[ii] = _check_broadcast_up_to(
+ below_above[ii], broadcast_shape, names[ii])
+ else:
+ fill_value = np.asarray(fill_value)
+ below_above = [_check_broadcast_up_to(
+ fill_value, broadcast_shape, 'fill_value')] * 2
+ self._fill_value_below, self._fill_value_above = below_above
+ self._extrapolate = False
+ if self.bounds_error is None:
+ self.bounds_error = True
+ # backwards compat: fill_value was a public attr; make it writeable
+ self._fill_value_orig = fill_value
+
+ def _check_and_update_bounds_error_for_extrapolation(self):
+ if self.bounds_error:
+ raise ValueError("Cannot extrapolate and raise "
+ "at the same time.")
+ self.bounds_error = False
+
+ def _call_linear_np(self, x_new):
+ # Note that out-of-bounds values are taken care of in self._evaluate
+ return np.interp(x_new, self.x, self.y)
+
+ def _call_linear(self, x_new):
+ # 2. Find where in the original data, the values to interpolate
+ # would be inserted.
+ # Note: If x_new[n] == x[m], then m is returned by searchsorted.
+ x_new_indices = searchsorted(self.x, x_new)
+
+ # 3. Clip x_new_indices so that they are within the range of
+ # self.x indices and at least 1. Removes mis-interpolation
+ # of x_new[n] = x[0]
+ x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
+
+ # 4. Calculate the slope of regions that each x_new value falls in.
+ lo = x_new_indices - 1
+ hi = x_new_indices
+
+ x_lo = self.x[lo]
+ x_hi = self.x[hi]
+ y_lo = self._y[lo]
+ y_hi = self._y[hi]
+
+ # Note that the following two expressions rely on the specifics of the
+ # broadcasting semantics.
+ slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
+
+ # 5. Calculate the actual value for each entry in x_new.
+ y_new = slope*(x_new - x_lo)[:, None] + y_lo
+
+ return y_new
+
+ def _call_nearest(self, x_new):
+ """ Find nearest neighbor interpolated y_new = f(x_new)."""
+
+ # 2. Find where in the averaged data the values to interpolate
+ # would be inserted.
+ # Note: use side='left' (right) to searchsorted() to define the
+ # halfway point to be nearest to the left (right) neighbor
+ x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
+
+ # 3. Clip x_new_indices so that they are within the range of x indices.
+ x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
+
+ # 4. Calculate the actual value for each entry in x_new.
+ y_new = self._y[x_new_indices]
+
+ return y_new
+
+ def _call_previousnext(self, x_new):
+ """Use previous/next neighbor of x_new, y_new = f(x_new)."""
+
+ # 1. Get index of left/right value
+ x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
+
+ # 2. Clip x_new_indices so that they are within the range of x indices.
+ x_new_indices = x_new_indices.clip(1-self._ind,
+ len(self.x)-self._ind).astype(intp)
+
+ # 3. Calculate the actual value for each entry in x_new.
+ y_new = self._y[x_new_indices+self._ind-1]
+
+ return y_new
+
+ def _call_spline(self, x_new):
+ return self._spline(x_new)
+
+ def _call_nan_spline(self, x_new):
+ out = self._spline(x_new)
+ out[...] = np.nan
+ return out
+
+ def _evaluate(self, x_new):
+ # 1. Handle values in x_new that are outside of x. Throw error,
+ # or return a list of mask array indicating the outofbounds values.
+ # The behavior is set by the bounds_error variable.
+ x_new = asarray(x_new)
+ y_new = self._call(self, x_new)
+ if not self._extrapolate:
+ below_bounds, above_bounds = self._check_bounds(x_new)
+ if len(y_new) > 0:
+ # Note fill_value must be broadcast up to the proper size
+ # and flattened to work here
+ y_new[below_bounds] = self._fill_value_below
+ y_new[above_bounds] = self._fill_value_above
+ return y_new
+
+ def _check_bounds(self, x_new):
+ """Check the inputs for being in the bounds of the interpolated data.
+
+ Parameters
+ ----------
+ x_new : array
+
+ Returns
+ -------
+ out_of_bounds : bool array
+ The mask on x_new of values that are out of the bounds.
+ """
+
+ # If self.bounds_error is True, we raise an error if any x_new values
+ # fall outside the range of x. Otherwise, we return an array indicating
+ # which values are outside the boundary region.
+ below_bounds = x_new < self.x[0]
+ above_bounds = x_new > self.x[-1]
+
+ if self.bounds_error and below_bounds.any():
+ below_bounds_value = x_new[np.argmax(below_bounds)]
+ raise ValueError(f"A value ({below_bounds_value}) in x_new is below "
+ f"the interpolation range's minimum value ({self.x[0]}).")
+ if self.bounds_error and above_bounds.any():
+ above_bounds_value = x_new[np.argmax(above_bounds)]
+ raise ValueError(f"A value ({above_bounds_value}) in x_new is above "
+ f"the interpolation range's maximum value ({self.x[-1]}).")
+
+ # !! Should we emit a warning if some values are out of bounds?
+ # !! matlab does not.
+ return below_bounds, above_bounds
+
+
+class _PPolyBase:
+ """Base class for piecewise polynomials."""
+ __slots__ = ('c', 'x', 'extrapolate', 'axis')
+
+ def __init__(self, c, x, extrapolate=None, axis=0):
+ self.c = np.asarray(c)
+ self.x = np.ascontiguousarray(x, dtype=np.float64)
+
+ if extrapolate is None:
+ extrapolate = True
+ elif extrapolate != 'periodic':
+ extrapolate = bool(extrapolate)
+ self.extrapolate = extrapolate
+
+ if self.c.ndim < 2:
+ raise ValueError("Coefficients array must be at least "
+ "2-dimensional.")
+
+ if not (0 <= axis < self.c.ndim - 1):
+ raise ValueError(f"axis={axis} must be between 0 and {self.c.ndim-1}")
+
+ self.axis = axis
+ if axis != 0:
+ # move the interpolation axis to be the first one in self.c
+ # More specifically, the target shape for self.c is (k, m, ...),
+ # and axis !=0 means that we have c.shape (..., k, m, ...)
+ # ^
+ # axis
+ # So we roll two of them.
+ self.c = np.moveaxis(self.c, axis+1, 0)
+ self.c = np.moveaxis(self.c, axis+1, 0)
+
+ if self.x.ndim != 1:
+ raise ValueError("x must be 1-dimensional")
+ if self.x.size < 2:
+ raise ValueError("at least 2 breakpoints are needed")
+ if self.c.ndim < 2:
+ raise ValueError("c must have at least 2 dimensions")
+ if self.c.shape[0] == 0:
+ raise ValueError("polynomial must be at least of order 0")
+ if self.c.shape[1] != self.x.size-1:
+ raise ValueError("number of coefficients != len(x)-1")
+ dx = np.diff(self.x)
+ if not (np.all(dx >= 0) or np.all(dx <= 0)):
+ raise ValueError("`x` must be strictly increasing or decreasing.")
+
+ dtype = self._get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dtype)
+
+ def _get_dtype(self, dtype):
+ if np.issubdtype(dtype, np.complexfloating) \
+ or np.issubdtype(self.c.dtype, np.complexfloating):
+ return np.complex128
+ else:
+ return np.float64
+
+ @classmethod
+ def construct_fast(cls, c, x, extrapolate=None, axis=0):
+ """
+ Construct the piecewise polynomial without making checks.
+
+ Takes the same parameters as the constructor. Input arguments
+ ``c`` and ``x`` must be arrays of the correct shape and type. The
+ ``c`` array can only be of dtypes float and complex, and ``x``
+ array must have dtype float.
+ """
+ self = object.__new__(cls)
+ self.c = c
+ self.x = x
+ self.axis = axis
+ if extrapolate is None:
+ extrapolate = True
+ self.extrapolate = extrapolate
+ return self
+
+ def _ensure_c_contiguous(self):
+ """
+ c and x may be modified by the user. The Cython code expects
+ that they are C contiguous.
+ """
+ if not self.x.flags.c_contiguous:
+ self.x = self.x.copy()
+ if not self.c.flags.c_contiguous:
+ self.c = self.c.copy()
+
+ def extend(self, c, x):
+ """
+ Add additional breakpoints and coefficients to the polynomial.
+
+ Parameters
+ ----------
+ c : ndarray, size (k, m, ...)
+ Additional coefficients for polynomials in intervals. Note that
+ the first additional interval will be formed using one of the
+ ``self.x`` end points.
+ x : ndarray, size (m,)
+ Additional breakpoints. Must be sorted in the same order as
+ ``self.x`` and either to the right or to the left of the current
+ breakpoints.
+ """
+
+ c = np.asarray(c)
+ x = np.asarray(x)
+
+ if c.ndim < 2:
+ raise ValueError("invalid dimensions for c")
+ if x.ndim != 1:
+ raise ValueError("invalid dimensions for x")
+ if x.shape[0] != c.shape[1]:
+ raise ValueError(f"Shapes of x {x.shape} and c {c.shape} are incompatible")
+ if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
+ raise ValueError("Shapes of c {} and self.c {} are incompatible"
+ .format(c.shape, self.c.shape))
+
+ if c.size == 0:
+ return
+
+ dx = np.diff(x)
+ if not (np.all(dx >= 0) or np.all(dx <= 0)):
+ raise ValueError("`x` is not sorted.")
+
+ if self.x[-1] >= self.x[0]:
+ if not x[-1] >= x[0]:
+ raise ValueError("`x` is in the different order "
+ "than `self.x`.")
+
+ if x[0] >= self.x[-1]:
+ action = 'append'
+ elif x[-1] <= self.x[0]:
+ action = 'prepend'
+ else:
+ raise ValueError("`x` is neither on the left or on the right "
+ "from `self.x`.")
+ else:
+ if not x[-1] <= x[0]:
+ raise ValueError("`x` is in the different order "
+ "than `self.x`.")
+
+ if x[0] <= self.x[-1]:
+ action = 'append'
+ elif x[-1] >= self.x[0]:
+ action = 'prepend'
+ else:
+ raise ValueError("`x` is neither on the left or on the right "
+ "from `self.x`.")
+
+ dtype = self._get_dtype(c.dtype)
+
+ k2 = max(c.shape[0], self.c.shape[0])
+ c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
+ dtype=dtype)
+
+ if action == 'append':
+ c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
+ c2[k2-c.shape[0]:, self.c.shape[1]:] = c
+ self.x = np.r_[self.x, x]
+ elif action == 'prepend':
+ c2[k2-self.c.shape[0]:, :c.shape[1]] = c
+ c2[k2-c.shape[0]:, c.shape[1]:] = self.c
+ self.x = np.r_[x, self.x]
+
+ self.c = c2
+
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative.

        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        # Remember the caller's shape/ndim; evaluation works on a flat,
        # C-contiguous float64 copy and the result is reshaped afterwards.
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float64)

        # With periodic extrapolation we map x to the segment
        # [self.x[0], self.x[-1]].
        if extrapolate == 'periodic':
            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
            # After the fold every point is in-range, so plain (non-)
            # extrapolating evaluation suffices.
            extrapolate = False

        # 2-D scratch buffer: one row per evaluation point, one column per
        # trailing-dimension polynomial; filled in-place by _evaluate.
        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
+
+
+class PPoly(_PPolyBase):
+ """
+ Piecewise polynomial in terms of coefficients and breakpoints
+
+ The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
+ local power basis::
+
+ S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
+
+ where ``k`` is the degree of the polynomial.
+
+ Parameters
+ ----------
+ c : ndarray, shape (k, m, ...)
+ Polynomial coefficients, order `k` and `m` intervals.
+ x : ndarray, shape (m+1,)
+ Polynomial breakpoints. Must be sorted in either increasing or
+ decreasing order.
+ extrapolate : bool or 'periodic', optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. Default is True.
+ axis : int, optional
+ Interpolation axis. Default is zero.
+
+ Attributes
+ ----------
+ x : ndarray
+ Breakpoints.
+ c : ndarray
+ Coefficients of the polynomials. They are reshaped
+ to a 3-D array with the last dimension representing
+ the trailing dimensions of the original coefficient array.
+ axis : int
+ Interpolation axis.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ integrate
+ solve
+ roots
+ extend
+ from_spline
+ from_bernstein_basis
+ construct_fast
+
+ See also
+ --------
+ BPoly : piecewise polynomials in the Bernstein basis
+
+ Notes
+ -----
+ High-order polynomials in the power basis can be numerically
+ unstable. Precision problems can start to appear for orders
+ larger than 20-30.
+ """
+
    def _evaluate(self, x, nu, extrapolate, out):
        # Evaluate the power-basis polynomial (or its nu-th derivative) at the
        # flat points ``x``, writing into the preallocated 2-D buffer ``out``.
        # Coefficients are viewed as (order, intervals, n_polys) for the
        # compiled scipy.interpolate._ppoly routine.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)
+
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e., compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - n representing the derivative
            of this polynomial.

        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            # Negative order: delegate to the inverse operation.
            return self.antiderivative(-nu)

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # Differentiating nu times kills the nu lowest-degree rows
            # (the highest-degree coefficients come first along axis 0).
            c2 = self.c[:-nu, :].copy()

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # multiply by the correct rising factorials
        # Each surviving coefficient picks up the product of the exponents
        # consumed by nu successive differentiations; spec.poch evaluates
        # that product per row.
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
+
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e., compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            # Non-positive order: delegate to the inverse operation
            # (nu == 0 returns a copy via derivative(0)).
            return self.derivative(-nu)

        # Prepend nu zero rows: integrating nu times raises the degree by nu.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c

        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]

        # fix continuity of added degrees of freedom
        # The compiled routine adjusts the per-interval integration constants
        # in-place so the result matches across breakpoints.
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        # construct a compatible polynomial
        return self.construct_fast(c, self.x, extrapolate, self.axis)
+
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Swap integration bounds if needed
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1

        # One accumulator slot per trailing-dimension polynomial.
        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()

        # Compute the integral.
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)

            if n_periods > 0:
                # Integral over one full period, scaled by the period count.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xe, False, out=range_int)
                range_int *= n_periods
            else:
                range_int.fill(0)

            # Map a to [xs, xe], b is always a + left.
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            remainder_int = np.empty_like(range_int)
            if b <= xe:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, b, False, out=remainder_int)
                range_int += remainder_int
            else:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, xe, False, out=remainder_int)
                range_int += remainder_int

                # Wrapped-around tail: the remaining length is
                # left - (xe - a), so it runs from xs to xs + left + a - xe.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xs + left + a - xe, False, out=remainder_int)
                range_int += remainder_int
        else:
            _ppoly.integrate(
                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                self.x, a, b, bool(extrapolate), out=range_int)

        # Return
        # Undo the bound swap, if any, and restore the trailing shape.
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])
+
    def solve(self, y=0., discontinuity=True, extrapolate=None):
        """
        Find real solutions of the equation ``pp(x) == y``.

        Parameters
        ----------
        y : float, optional
            Right-hand side. Default is zero.
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.

        Notes
        -----
        This routine works only on real-valued polynomials.

        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.

        Examples
        --------

        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.solve()
        array([-1., 1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        self._ensure_c_contiguous()

        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        y = float(y)
        # Compiled routine solves pp(x) == y; it returns one 1-D array of
        # roots per trailing-dimension polynomial.
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, y, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            # Single polynomial: unwrap the one-element result list.
            return r[0]
        else:
            # Multiple polynomials: pack the per-polynomial root arrays into
            # an object array shaped like the trailing dimensions.
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in NumPy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root

            return r2.reshape(self.c.shape[2:])
+
+ def roots(self, discontinuity=True, extrapolate=None):
+ """
+ Find real roots of the piecewise polynomial.
+
+ Parameters
+ ----------
+ discontinuity : bool, optional
+ Whether to report sign changes across discontinuities at
+ breakpoints as roots.
+ extrapolate : {bool, 'periodic', None}, optional
+ If bool, determines whether to return roots from the polynomial
+ extrapolated based on first and last intervals, 'periodic' works
+ the same as False. If None (default), use `self.extrapolate`.
+
+ Returns
+ -------
+ roots : ndarray
+ Roots of the polynomial(s).
+
+ If the PPoly object describes multiple polynomials, the
+ return value is an object array whose each element is an
+ ndarray containing the roots.
+
+ See Also
+ --------
+ PPoly.solve
+ """
+ return self.solve(0, discontinuity, extrapolate)
+
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline

        Parameters
        ----------
        tck
            A spline, as returned by `splrep` or a BSpline object.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.

        Examples
        --------
        Construct an interpolating spline and convert it to a `PPoly` instance

        >>> import numpy as np
        >>> from scipy.interpolate import splrep, PPoly
        >>> x = np.linspace(0, 1, 11)
        >>> y = np.sin(2*np.pi*x)
        >>> tck = splrep(x, y, s=0)
        >>> p = PPoly.from_spline(tck)
        >>> isinstance(p, PPoly)
        True

        Note that this function only supports 1D splines out of the box.

        If the ``tck`` object represents a parametric spline (e.g. constructed
        by `splprep` or a `BSpline` with ``c.ndim > 1``), you will need to loop
        over the dimensions manually.

        >>> from scipy.interpolate import splprep, splev
        >>> t = np.linspace(0, 1, 11)
        >>> x = np.sin(2*np.pi*t)
        >>> y = np.cos(2*np.pi*t)
        >>> (t, c, k), u = splprep([x, y], s=0)

        Note that ``c`` is a list of two arrays of length 11.

        >>> unew = np.arange(0, 1.01, 0.01)
        >>> out = splev(unew, (t, c, k))

        To convert this spline to the power basis, we convert each
        component of the list of b-spline coefficients, ``c``, into the
        corresponding cubic polynomial.

        >>> polys = [PPoly.from_spline((t, cj, k)) for cj in c]
        >>> polys[0].c.shape
        (4, 14)

        Note that the coefficients of the polynomials `polys` are in the
        power basis and their dimensions reflect just that: here 4 is the order
        (degree+1), and 14 is the number of intervals---which is nothing but
        the length of the knot array of the original `tck` minus one.

        Optionally, we can stack the components into a single `PPoly` along
        the third dimension:

        >>> cc = np.dstack([p.c for p in polys]) # has shape = (4, 14, 2)
        >>> poly = PPoly(cc, polys[0].x)
        >>> np.allclose(poly(unew).T, # note the transpose to match `splev`
        ... out, atol=1e-15)
        True

        """
        # Accept either a BSpline object or a raw (t, c, k) tuple.
        if isinstance(tck, BSpline):
            t, c, k = tck.tck
            if extrapolate is None:
                # Inherit the spline's own extrapolation setting.
                extrapolate = tck.extrapolate
        else:
            t, c, k = tck

        # Local power-basis coefficients are the Taylor coefficients at the
        # left edge of each knot interval: derivative value divided by m!
        # (spec.gamma(m+1) == m!).
        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
        for m in range(k, -1, -1):
            y = _fitpack_py.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y/spec.gamma(m+1)

        return cls.construct_fast(cvals, t, extrapolate)
+
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.

        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if not isinstance(bp, BPoly):
            raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
                            "Got %s instead." % type(bp))

        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1 # polynomial order

        # Broadcasting helper for the trailing (per-polynomial) dimensions.
        rest = (None,)*(bp.c.ndim-2)

        c = np.zeros_like(bp.c)
        for a in range(k+1):
            # Expand each Bernstein basis function
            # b_{a,k}(t) = comb(k,a) t**a (1-t)**(k-a) via the binomial
            # theorem into powers of t = (x - x[i]) / dx; the dx**s factor
            # rescales t**s into the local power basis in (x - x[i]).
            factor = (-1)**a * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                # coefficients are stored highest-degree-first, hence k-s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s

        if extrapolate is None:
            extrapolate = bp.extrapolate

        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
+
+
+class BPoly(_PPolyBase):
+ """Piecewise polynomial in terms of coefficients and breakpoints.
+
+ The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
+ Bernstein polynomial basis::
+
+ S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
+
+ where ``k`` is the degree of the polynomial, and::
+
+ b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
+
+ with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
+ coefficient.
+
+ Parameters
+ ----------
+ c : ndarray, shape (k, m, ...)
+ Polynomial coefficients, order `k` and `m` intervals
+ x : ndarray, shape (m+1,)
+ Polynomial breakpoints. Must be sorted in either increasing or
+ decreasing order.
+ extrapolate : bool, optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs. If 'periodic',
+ periodic extrapolation is used. Default is True.
+ axis : int, optional
+ Interpolation axis. Default is zero.
+
+ Attributes
+ ----------
+ x : ndarray
+ Breakpoints.
+ c : ndarray
+ Coefficients of the polynomials. They are reshaped
+ to a 3-D array with the last dimension representing
+ the trailing dimensions of the original coefficient array.
+ axis : int
+ Interpolation axis.
+
+ Methods
+ -------
+ __call__
+ extend
+ derivative
+ antiderivative
+ integrate
+ construct_fast
+ from_power_basis
+ from_derivatives
+
+ See also
+ --------
+ PPoly : piecewise polynomials in the power basis
+
+ Notes
+ -----
+ Properties of Bernstein polynomials are well documented in the literature,
+ see for example [1]_ [2]_ [3]_.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
+
+ .. [2] Kenneth I. Joy, Bernstein polynomials,
+ http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
+
+ .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
+ vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
+
+ Examples
+ --------
+ >>> from scipy.interpolate import BPoly
+ >>> x = [0, 1]
+ >>> c = [[1], [2], [3]]
+ >>> bp = BPoly(c, x)
+
+ This creates a 2nd order polynomial
+
+ .. math::
+
+ B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3
+ \\times b_{2, 2}(x) \\\\
+ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
+
+ """ # noqa: E501
+
+ def _evaluate(self, x, nu, extrapolate, out):
+ _ppoly.evaluate_bernstein(
+ self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+ self.x, x, nu, bool(extrapolate), out)
+
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e., compute the
            first derivative. If negative, the antiderivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k - nu representing the derivative of
            this polynomial.

        """
        if nu < 0:
            # Negative order: delegate to the inverse operation.
            return self.antiderivative(-nu)

        if nu > 1:
            # Higher orders are computed by repeated first derivatives.
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp

        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy

            # Broadcasting helper for the trailing (per-polynomial) dims.
            rest = (None,)*(self.c.ndim-2)

            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None))+rest]
            c2 = k * np.diff(self.c, axis=0) / dx

        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)

        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
+
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e., compute
            the first integral. If negative, the derivative is returned.

        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k + nu representing the
            antiderivative of this polynomial.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            # Non-positive order: delegate to the inverse operation.
            return self.derivative(-nu)

        if nu > 1:
            # Higher orders are computed by repeated first antiderivatives.
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp

        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)

        # On the unit interval, \int b_{a,k-1} contributes cumulatively to
        # the coefficients of degree k; scale by the interval width below.
        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]

        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the
        # breakpoint). Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]

        if self.extrapolate == 'periodic':
            # The antiderivative of a periodic function is not periodic;
            # disable periodic extrapolation on the result.
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(c2, x, extrapolate, axis=self.axis)
+
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. If 'periodic', periodic
            extrapolation is used. If None (default), use `self.extrapolate`.

        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]

        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        # The definite integral is evaluated as ib(b) - ib(a) from the
        # antiderivative, rather than via a dedicated compiled routine.
        ib = self.antiderivative()
        if extrapolate is None:
            extrapolate = self.extrapolate

        # ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            # Whole periods contribute n_periods copies of the full-period
            # integral.
            res = n_periods * (ib(xe) - ib(xs))

            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                # Wrapped-around tail of length left - (xe - a), i.e. from
                # xs up to xs + left + a - xe.
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)

            return sign * res
        else:
            return ib(b) - ib(a)
+
+ def extend(self, c, x):
+ k = max(self.c.shape[0], c.shape[0])
+ self.c = self._raise_degree(self.c, k - self.c.shape[0])
+ c = self._raise_degree(c, k - c.shape[0])
+ return _PPolyBase.extend(self, c, x)
+ extend.__doc__ = _PPolyBase.extend.__doc__
+
    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.

        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if not isinstance(pp, PPoly):
            raise TypeError(".from_power_basis only accepts PPoly instances. "
                            "Got %s instead." % type(pp))

        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1 # polynomial order

        # Broadcasting helper for the trailing (per-polynomial) dimensions.
        rest = (None,)*(pp.c.ndim-2)

        c = np.zeros_like(pp.c)
        for a in range(k+1):
            # pp.c[a] multiplies (x - x[i])**(k-a); in the local variable
            # t = (x - x[i]) / dx this is (dx**(k-a)) * t**(k-a). The inner
            # loop spreads t**m over Bernstein functions via the standard
            # identity t**m = sum_j comb(j, m)/comb(k, m) b_{j,k}(t),
            # with m = k - a.
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)

        if extrapolate is None:
            extrapolate = pp.extrapolate

        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
+
+ @classmethod
+ def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
+ """Construct a piecewise polynomial in the Bernstein basis,
+ compatible with the specified values and derivatives at breakpoints.
+
+ Parameters
+ ----------
+ xi : array_like
+ sorted 1-D array of x-coordinates
+ yi : array_like or list of array_likes
+ ``yi[i][j]`` is the ``j``\\ th derivative known at ``xi[i]``
+ orders : None or int or array_like of ints. Default: None.
+ Specifies the degree of local polynomials. If not None, some
+ derivatives are ignored.
+ extrapolate : bool or 'periodic', optional
+ If bool, determines whether to extrapolate to out-of-bounds points
+ based on first and last intervals, or to return NaNs.
+ If 'periodic', periodic extrapolation is used. Default is True.
+
+ Notes
+ -----
+ If ``k`` derivatives are specified at a breakpoint ``x``, the
+ constructed polynomial is exactly ``k`` times continuously
+ differentiable at ``x``, unless the ``order`` is provided explicitly.
+ In the latter case, the smoothness of the polynomial at
+ the breakpoint is controlled by the ``order``.
+
+ Deduces the number of derivatives to match at each end
+ from ``order`` and the number of derivatives available. If
+ possible it uses the same number of derivatives from
+ each end; if the number is odd it tries to take the
+ extra one from y2. In any case if not enough derivatives
+ are available at one end or another it draws enough to
+ make up the total from the other end.
+
+ If the order is too high and not enough derivatives are available,
+ an exception is raised.
+
+ Examples
+ --------
+
+ >>> from scipy.interpolate import BPoly
+ >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
+
+ Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
+ such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
+
+ >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
+
+ Creates a piecewise polynomial `f(x)`, such that
+ `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
+ Based on the number of derivatives provided, the order of the
+ local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
+ Notice that no restriction is imposed on the derivatives at
+ ``x = 1`` and ``x = 2``.
+
+ Indeed, the explicit form of the polynomial is::
+
+ f(x) = | x * (1 - x), 0 <= x < 1
+ | 2 * (x - 1), 1 <= x <= 2
+
+ So that f'(1-0) = -1 and f'(1+0) = 2
+
+ """
+ xi = np.asarray(xi)
+ if len(xi) != len(yi):
+ raise ValueError("xi and yi need to have the same length")
+ if np.any(xi[1:] - xi[:1] <= 0):
+ raise ValueError("x coordinates are not in increasing order")
+
+ # number of intervals
+ m = len(xi) - 1
+
+ # global poly order is k-1, local orders are <=k and can vary
+ try:
+ k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
+ except TypeError as e:
+ raise ValueError(
+ "Using a 1-D array for y? Please .reshape(-1, 1)."
+ ) from e
+
+ if orders is None:
+ orders = [None] * m
+ else:
+ if isinstance(orders, (int, np.integer)):
+ orders = [orders] * m
+ k = max(k, max(orders))
+
+ if any(o <= 0 for o in orders):
+ raise ValueError("Orders must be positive.")
+
+ c = []
+ for i in range(m):
+ y1, y2 = yi[i], yi[i+1]
+ if orders[i] is None:
+ n1, n2 = len(y1), len(y2)
+ else:
+ n = orders[i]+1
+ n1 = min(n//2, len(y1))
+ n2 = min(n - n1, len(y2))
+ n1 = min(n - n2, len(y2))
+ if n1+n2 != n:
+ mesg = ("Point %g has %d derivatives, point %g"
+ " has %d derivatives, but order %d requested" % (
+ xi[i], len(y1), xi[i+1], len(y2), orders[i]))
+ raise ValueError(mesg)
+
+ if not (n1 <= len(y1) and n2 <= len(y2)):
+ raise ValueError("`order` input incompatible with"
+ " length y1 or y2.")
+
+ b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
+ y1[:n1], y2[:n2])
+ if len(b) < k:
+ b = BPoly._raise_degree(b, k - len(b))
+ c.append(b)
+
+ c = np.asarray(c)
+ return cls(c.swapaxes(0, 1), xi, extrapolate)
+
    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        r"""Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.

        Return the coefficients of a polynomial in the Bernstein basis
        defined on ``[xa, xb]`` and having the values and derivatives at the
        endpoints `xa` and `xb` as specified by `ya` and `yb`.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of `ya` and `yb` are `na` and `nb`, the degree
        of the polynomial is ``na + nb - 1``.

        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at `xa`. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``\ th derivative.
        yb : array_like
            Derivatives at `xb`.

        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives

        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,

        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})

        If B(x) is a linear combination of the form

        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},

        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative

        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},

        with

        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}

        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.

        At ``x = xb`` it's the same with ``a = n - q``.

        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('Shapes of ya {} and yb {} are incompatible'
                             .format(ya.shape, yb.shape))

        # Promote to complex if either side of derivatives is complex.
        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating) or
                np.issubdtype(dtb, np.complexfloating)):
            dt = np.complex128
        else:
            dt = np.float64

        na, nb = len(ya), len(yb)
        n = na + nb

        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)

        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right
        # Each step solves the q-th derivative condition at xa for c[q],
        # using the Q_a recurrence from the Notes (see the formula above);
        # spec.poch(n - q, q) supplies the n!/(n-q)! factor.
        for q in range(0, na):
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
            for j in range(0, q):
                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]

        # now walk right-to-left
        # Mirror image of the loop above: conditions at xb determine the
        # coefficients from the end of the array inward; (-1)**q accounts
        # for differentiating in the decreasing-t direction.
        for q in range(0, nb):
            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
            for j in range(0, q):
                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]

        return c
+
    @staticmethod
    def _raise_degree(c, d):
        r"""Raise a degree of a polynomial in the Bernstein basis.

        Given the coefficients of a polynomial degree `k`, return (the
        coefficients of) the equivalent polynomial of degree `k+d`.

        Parameters
        ----------
        c : array_like
            coefficient array, 1-D
        d : integer

        Returns
        -------
        array
            coefficient array, 1-D array of length `c.shape[0] + d`

        Notes
        -----
        This uses the fact that a Bernstein polynomial `b_{a, k}` can be
        identically represented as a linear combination of polynomials of
        a higher degree `k+d`:

        .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
                             comb(d, j) / comb(k+d, a+j)

        """
        if d == 0:
            # Nothing to raise; return the input unchanged (no copy).
            return c

        k = c.shape[0] - 1
        out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)

        # Direct transcription of the identity in the Notes: each old
        # coefficient c[a] is spread over the d+1 new basis functions
        # b_{a+j, k+d} with the comb-ratio weights.
        for a in range(c.shape[0]):
            f = c[a] * comb(k, a)
            for j in range(d+1):
                out[a+j] += f * comb(d, j) / comb(k+d, a+j)
        return out
+
+
+class NdPPoly:
+ """
+ Piecewise tensor product polynomial
+
+ The value at point ``xp = (x', y', z', ...)`` is evaluated by first
+ computing the interval indices `i` such that::
+
+ x[0][i[0]] <= x' < x[0][i[0]+1]
+ x[1][i[1]] <= y' < x[1][i[1]+1]
+ ...
+
+ and then computing::
+
+ S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
+ * (xp[0] - x[0][i[0]])**m0
+ * ...
+ * (xp[n] - x[n][i[n]])**mn
+ for m0 in range(k[0]+1)
+ ...
+ for mn in range(k[n]+1))
+
+ where ``k[j]`` is the degree of the polynomial in dimension j. This
+ representation is the piecewise multivariate power basis.
+
+ Parameters
+ ----------
+ c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
+ Polynomial coefficients, with polynomial order `kj` and
+ `mj+1` intervals for each dimension `j`.
+ x : ndim-tuple of ndarrays, shapes (mj+1,)
+ Polynomial breakpoints for each dimension. These must be
+ sorted in increasing order.
+ extrapolate : bool, optional
+ Whether to extrapolate to out-of-bounds points based on first
+ and last intervals, or to return NaNs. Default: True.
+
+ Attributes
+ ----------
+ x : tuple of ndarrays
+ Breakpoints.
+ c : ndarray
+ Coefficients of the polynomials.
+
+ Methods
+ -------
+ __call__
+ derivative
+ antiderivative
+ integrate
+ integrate_1d
+ construct_fast
+
+ See also
+ --------
+ PPoly : piecewise polynomials in 1D
+
+ Notes
+ -----
+ High-order polynomials in the power basis can be numerically
+ unstable.
+
+ """
+
+ def __init__(self, c, x, extrapolate=None):
+ self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
+ self.c = np.asarray(c)
+ if extrapolate is None:
+ extrapolate = True
+ self.extrapolate = bool(extrapolate)
+
+ ndim = len(self.x)
+ if any(v.ndim != 1 for v in self.x):
+ raise ValueError("x arrays must all be 1-dimensional")
+ if any(v.size < 2 for v in self.x):
+ raise ValueError("x arrays must all contain at least 2 points")
+ if c.ndim < 2*ndim:
+ raise ValueError("c must have at least 2*len(x) dimensions")
+ if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
+ raise ValueError("x-coordinates are not in increasing order")
+ if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
+ raise ValueError("x and c do not agree on the number of intervals")
+
+ dtype = self._get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dtype)
+
+ @classmethod
+ def construct_fast(cls, c, x, extrapolate=None):
+ """
+ Construct the piecewise polynomial without making checks.
+
+ Takes the same parameters as the constructor. Input arguments
+ ``c`` and ``x`` must be arrays of the correct shape and type. The
+ ``c`` array can only be of dtypes float and complex, and ``x``
+ array must have dtype float.
+
+ """
+ self = object.__new__(cls)
+ self.c = c
+ self.x = x
+ if extrapolate is None:
+ extrapolate = True
+ self.extrapolate = extrapolate
+ return self
+
+ def _get_dtype(self, dtype):
+ if np.issubdtype(dtype, np.complexfloating) \
+ or np.issubdtype(self.c.dtype, np.complexfloating):
+ return np.complex128
+ else:
+ return np.float64
+
+ def _ensure_c_contiguous(self):
+ if not self.c.flags.c_contiguous:
+ self.c = self.c.copy()
+ if not isinstance(self.x, tuple):
+ self.x = tuple(self.x)
+
+ def __call__(self, x, nu=None, extrapolate=None):
+ """
+ Evaluate the piecewise polynomial or its derivative
+
+ Parameters
+ ----------
+ x : array-like
+ Points to evaluate the interpolant at.
+ nu : tuple, optional
+ Orders of derivatives to evaluate. Each must be non-negative.
+ extrapolate : bool, optional
+ Whether to extrapolate to out-of-bounds points based on first
+ and last intervals, or to return NaNs.
+
+ Returns
+ -------
+ y : array-like
+ Interpolated values. Shape is determined by replacing
+ the interpolation axis in the original array with the shape of x.
+
+ Notes
+ -----
+ Derivatives are evaluated piecewise for each polynomial
+ segment, even if the polynomial is not differentiable at the
+ breakpoints. The polynomial intervals are considered half-open,
+ ``[a, b)``, except for the last interval which is closed
+ ``[a, b]``.
+
+ """
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ else:
+ extrapolate = bool(extrapolate)
+
+ ndim = len(self.x)
+
+ x = _ndim_coords_from_arrays(x)
+ x_shape = x.shape
+ x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)
+
+ if nu is None:
+ nu = np.zeros((ndim,), dtype=np.intc)
+ else:
+ nu = np.asarray(nu, dtype=np.intc)
+ if nu.ndim != 1 or nu.shape[0] != ndim:
+ raise ValueError("invalid number of derivative orders nu")
+
+ dim1 = prod(self.c.shape[:ndim])
+ dim2 = prod(self.c.shape[ndim:2*ndim])
+ dim3 = prod(self.c.shape[2*ndim:])
+ ks = np.array(self.c.shape[:ndim], dtype=np.intc)
+
+ out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
+ self._ensure_c_contiguous()
+
+ _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
+ self.x,
+ ks,
+ x,
+ nu,
+ bool(extrapolate),
+ out)
+
+ return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
+
+ def _derivative_inplace(self, nu, axis):
+ """
+ Compute 1-D derivative along a selected dimension in-place
+ May result to non-contiguous c array.
+ """
+ if nu < 0:
+ return self._antiderivative_inplace(-nu, axis)
+
+ ndim = len(self.x)
+ axis = axis % ndim
+
+ # reduce order
+ if nu == 0:
+ # noop
+ return
+ else:
+ sl = [slice(None)]*ndim
+ sl[axis] = slice(None, -nu, None)
+ c2 = self.c[tuple(sl)]
+
+ if c2.shape[axis] == 0:
+ # derivative of order 0 is zero
+ shp = list(c2.shape)
+ shp[axis] = 1
+ c2 = np.zeros(shp, dtype=c2.dtype)
+
+ # multiply by the correct rising factorials
+ factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
+ sl = [None]*c2.ndim
+ sl[axis] = slice(None)
+ c2 *= factor[tuple(sl)]
+
+ self.c = c2
+
+ def _antiderivative_inplace(self, nu, axis):
+ """
+ Compute 1-D antiderivative along a selected dimension
+ May result to non-contiguous c array.
+ """
+ if nu <= 0:
+ return self._derivative_inplace(-nu, axis)
+
+ ndim = len(self.x)
+ axis = axis % ndim
+
+ perm = list(range(ndim))
+ perm[0], perm[axis] = perm[axis], perm[0]
+ perm = perm + list(range(ndim, self.c.ndim))
+
+ c = self.c.transpose(perm)
+
+ c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
+ dtype=c.dtype)
+ c2[:-nu] = c
+
+ # divide by the correct rising factorials
+ factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
+ c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
+
+ # fix continuity of added degrees of freedom
+ perm2 = list(range(c2.ndim))
+ perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
+
+ c2 = c2.transpose(perm2)
+ c2 = c2.copy()
+ _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
+ self.x[axis], nu-1)
+
+ c2 = c2.transpose(perm2)
+ c2 = c2.transpose(perm)
+
+ # Done
+ self.c = c2
+
+ def derivative(self, nu):
+ """
+ Construct a new piecewise polynomial representing the derivative.
+
+ Parameters
+ ----------
+ nu : ndim-tuple of int
+ Order of derivatives to evaluate for each dimension.
+ If negative, the antiderivative is returned.
+
+ Returns
+ -------
+ pp : NdPPoly
+ Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
+ representing the derivative of this polynomial.
+
+ Notes
+ -----
+ Derivatives are evaluated piecewise for each polynomial
+ segment, even if the polynomial is not differentiable at the
+ breakpoints. The polynomial intervals in each dimension are
+ considered half-open, ``[a, b)``, except for the last interval
+ which is closed ``[a, b]``.
+
+ """
+ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
+
+ for axis, n in enumerate(nu):
+ p._derivative_inplace(n, axis)
+
+ p._ensure_c_contiguous()
+ return p
+
+ def antiderivative(self, nu):
+ """
+ Construct a new piecewise polynomial representing the antiderivative.
+
+ Antiderivative is also the indefinite integral of the function,
+ and derivative is its inverse operation.
+
+ Parameters
+ ----------
+ nu : ndim-tuple of int
+ Order of derivatives to evaluate for each dimension.
+ If negative, the derivative is returned.
+
+ Returns
+ -------
+ pp : PPoly
+ Piecewise polynomial of order k2 = k + n representing
+ the antiderivative of this polynomial.
+
+ Notes
+ -----
+ The antiderivative returned by this function is continuous and
+ continuously differentiable to order n-1, up to floating point
+ rounding error.
+
+ """
+ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
+
+ for axis, n in enumerate(nu):
+ p._antiderivative_inplace(n, axis)
+
+ p._ensure_c_contiguous()
+ return p
+
+ def integrate_1d(self, a, b, axis, extrapolate=None):
+ r"""
+ Compute NdPPoly representation for one dimensional definite integral
+
+ The result is a piecewise polynomial representing the integral:
+
+ .. math::
+
+ p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
+
+ where the dimension integrated over is specified with the
+ `axis` parameter.
+
+ Parameters
+ ----------
+ a, b : float
+ Lower and upper bound for integration.
+ axis : int
+ Dimension over which to compute the 1-D integrals
+ extrapolate : bool, optional
+ Whether to extrapolate to out-of-bounds points based on first
+ and last intervals, or to return NaNs.
+
+ Returns
+ -------
+ ig : NdPPoly or array-like
+ Definite integral of the piecewise polynomial over [a, b].
+ If the polynomial was 1D, an array is returned,
+ otherwise, an NdPPoly object.
+
+ """
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ else:
+ extrapolate = bool(extrapolate)
+
+ ndim = len(self.x)
+ axis = int(axis) % ndim
+
+ # reuse 1-D integration routines
+ c = self.c
+ swap = list(range(c.ndim))
+ swap.insert(0, swap[axis])
+ del swap[axis + 1]
+ swap.insert(1, swap[ndim + axis])
+ del swap[ndim + axis + 1]
+
+ c = c.transpose(swap)
+ p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
+ self.x[axis],
+ extrapolate=extrapolate)
+ out = p.integrate(a, b, extrapolate=extrapolate)
+
+ # Construct result
+ if ndim == 1:
+ return out.reshape(c.shape[2:])
+ else:
+ c = out.reshape(c.shape[2:])
+ x = self.x[:axis] + self.x[axis+1:]
+ return self.construct_fast(c, x, extrapolate=extrapolate)
+
+ def integrate(self, ranges, extrapolate=None):
+ """
+ Compute a definite integral over a piecewise polynomial.
+
+ Parameters
+ ----------
+ ranges : ndim-tuple of 2-tuples float
+ Sequence of lower and upper bounds for each dimension,
+ ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
+ extrapolate : bool, optional
+ Whether to extrapolate to out-of-bounds points based on first
+ and last intervals, or to return NaNs.
+
+ Returns
+ -------
+ ig : array_like
+ Definite integral of the piecewise polynomial over
+ [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
+
+ """
+
+ ndim = len(self.x)
+
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ else:
+ extrapolate = bool(extrapolate)
+
+ if not hasattr(ranges, '__len__') or len(ranges) != ndim:
+ raise ValueError("Range not a sequence of correct length")
+
+ self._ensure_c_contiguous()
+
+ # Reuse 1D integration routine
+ c = self.c
+ for n, (a, b) in enumerate(ranges):
+ swap = list(range(c.ndim))
+ swap.insert(1, swap[ndim - n])
+ del swap[ndim - n + 1]
+
+ c = c.transpose(swap)
+
+ p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
+ out = p.integrate(a, b, extrapolate=extrapolate)
+ c = out.reshape(c.shape[2:])
+
+ return c
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py
new file mode 100644
index 0000000000000000000000000000000000000000..826dddb311d78bf8d5381b18e46ca1ba86c04b6d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_ndbspline.py
@@ -0,0 +1,358 @@
+import itertools
+import functools
+import operator
+import numpy as np
+
+from math import prod
+
+from . import _bspl # type: ignore
+
+import scipy.sparse.linalg as ssl
+from scipy.sparse import csr_array
+
+from ._bsplines import _not_a_knot
+
+__all__ = ["NdBSpline"]
+
+
+def _get_dtype(dtype):
+ """Return np.complex128 for complex dtypes, np.float64 otherwise."""
+ if np.issubdtype(dtype, np.complexfloating):
+ return np.complex128
+ else:
+ return np.float64
+
+
+class NdBSpline:
+ """Tensor product spline object.
+
+ The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
+ combination of products of one-dimensional b-splines in each of the ``N``
+ dimensions::
+
+ c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
+
+
+ Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
+ ``t`` evaluated at ``x``.
+
+ Parameters
+ ----------
+ t : tuple of 1D ndarrays
+ knot vectors in directions 1, 2, ... N,
+ ``len(t[i]) == n[i] + k + 1``
+ c : ndarray, shape (n1, n2, ..., nN, ...)
+ b-spline coefficients
+ k : int or length-d tuple of integers
+ spline degrees.
+ A single integer is interpreted as having this degree for
+ all dimensions.
+ extrapolate : bool, optional
+ Whether to extrapolate out-of-bounds inputs, or return `nan`.
+ Default is to extrapolate.
+
+ Attributes
+ ----------
+ t : tuple of ndarrays
+ Knots vectors.
+ c : ndarray
+ Coefficients of the tensor-produce spline.
+ k : tuple of integers
+ Degrees for each dimension.
+ extrapolate : bool, optional
+ Whether to extrapolate or return nans for out-of-bounds inputs.
+ Defaults to true.
+
+ Methods
+ -------
+ __call__
+ design_matrix
+
+ See Also
+ --------
+ BSpline : a one-dimensional B-spline object
+ NdPPoly : an N-dimensional piecewise tensor product polynomial
+
+ """
+ def __init__(self, t, c, k, *, extrapolate=None):
+ ndim = len(t)
+
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ if len(k) != ndim:
+ raise ValueError(f"{len(t) = } != {len(k) = }.")
+
+ self.k = tuple(operator.index(ki) for ki in k)
+ self.t = tuple(np.ascontiguousarray(ti, dtype=float) for ti in t)
+ self.c = np.asarray(c)
+
+ if extrapolate is None:
+ extrapolate = True
+ self.extrapolate = bool(extrapolate)
+
+ self.c = np.asarray(c)
+
+ for d in range(ndim):
+ td = self.t[d]
+ kd = self.k[d]
+ n = td.shape[0] - kd - 1
+ if kd < 0:
+ raise ValueError(f"Spline degree in dimension {d} cannot be"
+ f" negative.")
+ if td.ndim != 1:
+ raise ValueError(f"Knot vector in dimension {d} must be"
+ f" one-dimensional.")
+ if n < kd + 1:
+ raise ValueError(f"Need at least {2*kd + 2} knots for degree"
+ f" {kd} in dimension {d}.")
+ if (np.diff(td) < 0).any():
+ raise ValueError(f"Knots in dimension {d} must be in a"
+ f" non-decreasing order.")
+ if len(np.unique(td[kd:n + 1])) < 2:
+ raise ValueError(f"Need at least two internal knots in"
+ f" dimension {d}.")
+ if not np.isfinite(td).all():
+ raise ValueError(f"Knots in dimension {d} should not have"
+ f" nans or infs.")
+ if self.c.ndim < ndim:
+ raise ValueError(f"Coefficients must be at least"
+ f" {d}-dimensional.")
+ if self.c.shape[d] != n:
+ raise ValueError(f"Knots, coefficients and degree in dimension"
+ f" {d} are inconsistent:"
+ f" got {self.c.shape[d]} coefficients for"
+ f" {len(td)} knots, need at least {n} for"
+ f" k={k}.")
+
+ dt = _get_dtype(self.c.dtype)
+ self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+ def __call__(self, xi, *, nu=None, extrapolate=None):
+ """Evaluate the tensor product b-spline at ``xi``.
+
+ Parameters
+ ----------
+ xi : array_like, shape(..., ndim)
+ The coordinates to evaluate the interpolator at.
+ This can be a list or tuple of ndim-dimensional points
+ or an array with the shape (num_points, ndim).
+ nu : array_like, optional, shape (ndim,)
+ Orders of derivatives to evaluate. Each must be non-negative.
+ Defaults to the zeroth derivivative.
+ extrapolate : bool, optional
+ Whether to exrapolate based on first and last intervals in each
+ dimension, or return `nan`. Default is to ``self.extrapolate``.
+
+ Returns
+ -------
+ values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
+ Interpolated values at ``xi``
+ """
+ ndim = len(self.t)
+
+ if extrapolate is None:
+ extrapolate = self.extrapolate
+ extrapolate = bool(extrapolate)
+
+ if nu is None:
+ nu = np.zeros((ndim,), dtype=np.intc)
+ else:
+ nu = np.asarray(nu, dtype=np.intc)
+ if nu.ndim != 1 or nu.shape[0] != ndim:
+ raise ValueError(
+ f"invalid number of derivative orders {nu = } for "
+ f"ndim = {len(self.t)}.")
+ if any(nu < 0):
+ raise ValueError(f"derivatives must be positive, got {nu = }")
+
+ # prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
+ xi = np.asarray(xi, dtype=float)
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi_shape[-1])
+ xi = np.ascontiguousarray(xi)
+
+ if xi_shape[-1] != ndim:
+ raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
+
+ # prepare k & t
+ _k = np.asarray(self.k, dtype=np.dtype("long"))
+
+ # pack the knots into a single array
+ len_t = [len(ti) for ti in self.t]
+ _t = np.empty((ndim, max(len_t)), dtype=float)
+ _t.fill(np.nan)
+ for d in range(ndim):
+ _t[d, :len(self.t[d])] = self.t[d]
+ len_t = np.asarray(len_t, dtype=np.dtype("long"))
+
+ # tabulate the flat indices for iterating over the (k+1)**ndim subarray
+ shape = tuple(kd + 1 for kd in self.k)
+ indices = np.unravel_index(np.arange(prod(shape)), shape)
+ _indices_k1d = np.asarray(indices, dtype=np.intp).T
+
+ # prepare the coefficients: flatten the trailing dimensions
+ c1 = self.c.reshape(self.c.shape[:ndim] + (-1,))
+ c1r = c1.ravel()
+
+ # replacement for np.ravel_multi_index for indexing of `c1`:
+ _strides_c1 = np.asarray([s // c1.dtype.itemsize
+ for s in c1.strides], dtype=np.intp)
+
+ num_c_tr = c1.shape[-1] # # of trailing coefficients
+ out = np.empty(xi.shape[:-1] + (num_c_tr,), dtype=c1.dtype)
+
+ _bspl.evaluate_ndbspline(xi,
+ _t,
+ len_t,
+ _k,
+ nu,
+ extrapolate,
+ c1r,
+ num_c_tr,
+ _strides_c1,
+ _indices_k1d,
+ out,)
+
+ return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
+
+ @classmethod
+ def design_matrix(cls, xvals, t, k, extrapolate=True):
+ """Construct the design matrix as a CSR format sparse array.
+
+ Parameters
+ ----------
+ xvals : ndarray, shape(npts, ndim)
+ Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
+ ``ndim``-dimensional array.
+ t : tuple of 1D ndarrays, length-ndim
+ Knot vectors in directions 1, 2, ... ndim,
+ k : int
+ B-spline degree.
+ extrapolate : bool, optional
+ Whether to extrapolate out-of-bounds values of raise a `ValueError`
+
+ Returns
+ -------
+ design_matrix : a CSR array
+ Each row of the design matrix corresponds to a value in `xvals` and
+ contains values of b-spline basis elements which are non-zero
+ at this value.
+
+ """
+ xvals = np.asarray(xvals, dtype=float)
+ ndim = xvals.shape[-1]
+ if len(t) != ndim:
+ raise ValueError(
+ f"Data and knots are inconsistent: len(t) = {len(t)} for "
+ f" {ndim = }."
+ )
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ kk = np.asarray(k, dtype=np.int32)
+ data, indices, indptr = _bspl._colloc_nd(xvals, t, kk)
+ return csr_array((data, indices, indptr))
+
+
+def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
+ # work around iterative solvers not accepting multiple r.h.s.
+
+ # also work around a.dtype == float64 and b.dtype == complex128
+ # cf https://github.com/scipy/scipy/issues/19644
+ if np.issubdtype(b.dtype, np.complexfloating):
+ real = _iter_solve(a, b.real, solver, **solver_args)
+ imag = _iter_solve(a, b.imag, solver, **solver_args)
+ return real + 1j*imag
+
+ if b.ndim == 2 and b.shape[1] !=1:
+ res = np.empty_like(b)
+ for j in range(b.shape[1]):
+ res[:, j], info = solver(a, b[:, j], **solver_args)
+ if info != 0:
+ raise ValueError(f"{solver = } returns {info =} for column {j}.")
+ return res
+ else:
+ res, info = solver(a, b, **solver_args)
+ if info != 0:
+ raise ValueError(f"{solver = } returns {info = }.")
+ return res
+
+
+def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
+ """Construct an interpolating NdBspline.
+
+ Parameters
+ ----------
+ points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
+ The points defining the regular grid in N dimensions. The points in
+ each dimension (i.e. every element of the `points` tuple) must be
+ strictly ascending or descending.
+ values : ndarray of float, shape (m1, ..., mN, ...)
+ The data on the regular grid in n dimensions.
+ k : int, optional
+ The spline degree. Must be odd. Default is cubic, k=3
+ solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
+ An iterative solver from `scipy.sparse.linalg` or a direct one,
+ `sparse.sparse.linalg.spsolve`.
+ Used to solve the sparse linear system
+ ``design_matrix @ coefficients = rhs`` for the coefficients.
+ Default is `scipy.sparse.linalg.gcrotmk`
+ solver_args : dict, optional
+ Additional arguments for the solver. The call signature is
+ ``solver(csr_array, rhs_vector, **solver_args)``
+
+ Returns
+ -------
+ spl : NdBSpline object
+
+ Notes
+ -----
+ Boundary conditions are not-a-knot in all dimensions.
+ """
+ ndim = len(points)
+ xi_shape = tuple(len(x) for x in points)
+
+ try:
+ len(k)
+ except TypeError:
+ # make k a tuple
+ k = (k,)*ndim
+
+ for d, point in enumerate(points):
+ numpts = len(np.atleast_1d(point))
+ if numpts <= k[d]:
+ raise ValueError(f"There are {numpts} points in dimension {d},"
+ f" but order {k[d]} requires at least "
+ f" {k[d]+1} points per dimension.")
+
+ t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
+ for d in range(ndim))
+ xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
+
+ # construct the colocation matrix
+ matr = NdBSpline.design_matrix(xvals, t, k)
+
+ # Solve for the coefficients given `values`.
+ # Trailing dimensions: first ndim dimensions are data, the rest are batch
+ # dimensions, so stack `values` into a 2D array for `spsolve` to undestand.
+ v_shape = values.shape
+ vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
+ vals = values.reshape(vals_shape)
+
+ if solver != ssl.spsolve:
+ solver = functools.partial(_iter_solve, solver=solver)
+ if "atol" not in solver_args:
+ # avoid a DeprecationWarning, grumble grumble
+ solver_args["atol"] = 1e-6
+
+ coef = solver(matr, vals, **solver_args)
+ coef = coef.reshape(xi_shape + v_shape[ndim:])
+ return NdBSpline(t, coef, k)
+
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..2724d78f61416256357c4b2789c860a604845548
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_ndgriddata.py
@@ -0,0 +1,332 @@
+"""
+Convenience interface to N-D interpolation
+
+.. versionadded:: 0.9
+
+"""
+import numpy as np
+from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
+ CloughTocher2DInterpolator, _ndim_coords_from_arrays
+from scipy.spatial import cKDTree
+
+__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
+ 'CloughTocher2DInterpolator']
+
+#------------------------------------------------------------------------------
+# Nearest-neighbor interpolation
+#------------------------------------------------------------------------------
+
+
+class NearestNDInterpolator(NDInterpolatorBase):
+ """NearestNDInterpolator(x, y).
+
+ Nearest-neighbor interpolator in N > 1 dimensions.
+
+ .. versionadded:: 0.9
+
+ Methods
+ -------
+ __call__
+
+ Parameters
+ ----------
+ x : (npoints, ndims) 2-D ndarray of floats
+ Data point coordinates.
+ y : (npoints, ) 1-D ndarray of float or complex
+ Data values.
+ rescale : boolean, optional
+ Rescale points to unit cube before performing interpolation.
+ This is useful if some of the input dimensions have
+ incommensurable units and differ by many orders of magnitude.
+
+ .. versionadded:: 0.14.0
+ tree_options : dict, optional
+ Options passed to the underlying ``cKDTree``.
+
+ .. versionadded:: 0.17.0
+
+ See Also
+ --------
+ griddata :
+ Interpolate unstructured D-D data.
+ LinearNDInterpolator :
+ Piecewise linear interpolator in N dimensions.
+ CloughTocher2DInterpolator :
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
+ interpn : Interpolation on a regular grid or rectilinear grid.
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+
+ Notes
+ -----
+ Uses ``scipy.spatial.cKDTree``
+
+ .. note:: For data on a regular grid use `interpn` instead.
+
+ Examples
+ --------
+ We can interpolate values on a 2D plane:
+
+ >>> from scipy.interpolate import NearestNDInterpolator
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> rng = np.random.default_rng()
+ >>> x = rng.random(10) - 0.5
+ >>> y = rng.random(10) - 0.5
+ >>> z = np.hypot(x, y)
+ >>> X = np.linspace(min(x), max(x))
+ >>> Y = np.linspace(min(y), max(y))
+ >>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
+ >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
+ >>> Z = interp(X, Y)
+ >>> plt.pcolormesh(X, Y, Z, shading='auto')
+ >>> plt.plot(x, y, "ok", label="input point")
+ >>> plt.legend()
+ >>> plt.colorbar()
+ >>> plt.axis("equal")
+ >>> plt.show()
+
+ """
+
+ def __init__(self, x, y, rescale=False, tree_options=None):
+ NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
+ need_contiguous=False,
+ need_values=False)
+ if tree_options is None:
+ tree_options = dict()
+ self.tree = cKDTree(self.points, **tree_options)
+ self.values = np.asarray(y)
+
+ def __call__(self, *args, **query_options):
+ """
+ Evaluate interpolator at given points.
+
+ Parameters
+ ----------
+ x1, x2, ... xn : array-like of float
+ Points where to interpolate data at.
+ x1, x2, ... xn can be array-like of float with broadcastable shape.
+ or x1 can be array-like of float with shape ``(..., ndim)``
+ **query_options
+ This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
+ being passed to the cKDTree's query function to be explicitly set.
+ See `scipy.spatial.cKDTree.query` for an overview of the different options.
+
+ .. versionadded:: 1.12.0
+
+ """
+ # For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
+ # some operations which are not required by NearestNDInterpolator.__call__,
+ # hence here we operate on xi directly, without calling a parent class function.
+ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
+ xi = self._check_call_shape(xi)
+ xi = self._scale_x(xi)
+
+ # We need to handle two important cases:
+ # (1) the case where xi has trailing dimensions (..., ndim), and
+ # (2) the case where y has trailing dimensions
+ # We will first flatten xi to deal with case (1),
+ # do the computation in flattened array while retaining y's dimensionality,
+ # and then reshape the interpolated values back to match xi's shape.
+
+ # Flatten xi for the query
+ xi_flat = xi.reshape(-1, xi.shape[-1])
+ original_shape = xi.shape
+ flattened_shape = xi_flat.shape
+
+ # if distance_upper_bound is set to not be infinite,
+ # then we need to consider the case where cKDtree
+ # does not find any points within distance_upper_bound to return.
+ # It marks those points as having infinte distance, which is what will be used
+ # below to mask the array and return only the points that were deemed
+ # to have a close enough neighbor to return something useful.
+ dist, i = self.tree.query(xi_flat, **query_options)
+ valid_mask = np.isfinite(dist)
+
+ # create a holder interp_values array and fill with nans.
+ if self.values.ndim > 1:
+ interp_shape = flattened_shape[:-1] + self.values.shape[1:]
+ else:
+ interp_shape = flattened_shape[:-1]
+
+ if np.issubdtype(self.values.dtype, np.complexfloating):
+ interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
+ else:
+ interp_values = np.full(interp_shape, np.nan)
+
+ interp_values[valid_mask] = self.values[i[valid_mask], ...]
+
+ if self.values.ndim > 1:
+ new_shape = original_shape[:-1] + self.values.shape[1:]
+ else:
+ new_shape = original_shape[:-1]
+ interp_values = interp_values.reshape(new_shape)
+
+ return interp_values
+
+
+#------------------------------------------------------------------------------
+# Convenience interface function
+#------------------------------------------------------------------------------
+
+
+def griddata(points, values, xi, method='linear', fill_value=np.nan,
+ rescale=False):
+ """
+ Interpolate unstructured D-D data.
+
+ Parameters
+ ----------
+ points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
+ Data point coordinates.
+ values : ndarray of float or complex, shape (n,)
+ Data values.
+ xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
+ Points at which to interpolate data.
+ method : {'linear', 'nearest', 'cubic'}, optional
+ Method of interpolation. One of
+
+ ``nearest``
+ return the value at the data point closest to
+ the point of interpolation. See `NearestNDInterpolator` for
+ more details.
+
+ ``linear``
+ tessellate the input point set to N-D
+ simplices, and interpolate linearly on each simplex. See
+ `LinearNDInterpolator` for more details.
+
+ ``cubic`` (1-D)
+ return the value determined from a cubic
+ spline.
+
+ ``cubic`` (2-D)
+ return the value determined from a
+ piecewise cubic, continuously differentiable (C1), and
+ approximately curvature-minimizing polynomial surface. See
+ `CloughTocher2DInterpolator` for more details.
+ fill_value : float, optional
+ Value used to fill in for requested points outside of the
+ convex hull of the input points. If not provided, then the
+ default is ``nan``. This option has no effect for the
+ 'nearest' method.
+ rescale : bool, optional
+ Rescale points to unit cube before performing interpolation.
+ This is useful if some of the input dimensions have
+ incommensurable units and differ by many orders of magnitude.
+
+ .. versionadded:: 0.14.0
+
+ Returns
+ -------
+ ndarray
+ Array of interpolated values.
+
+ See Also
+ --------
+ LinearNDInterpolator :
+ Piecewise linear interpolator in N dimensions.
+ NearestNDInterpolator :
+ Nearest-neighbor interpolator in N dimensions.
+ CloughTocher2DInterpolator :
+ Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
+ interpn : Interpolation on a regular grid or rectilinear grid.
+ RegularGridInterpolator : Interpolator on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+
+ Notes
+ -----
+
+ .. versionadded:: 0.9
+
+ .. note:: For data on a regular grid use `interpn` instead.
+
+ Examples
+ --------
+
+ Suppose we want to interpolate the 2-D function
+
+ >>> import numpy as np
+ >>> def func(x, y):
+ ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
+
+ on a grid in [0, 1]x[0, 1]
+
+ >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
+
+ but we only know its values at 1000 data points:
+
+ >>> rng = np.random.default_rng()
+ >>> points = rng.random((1000, 2))
+ >>> values = func(points[:,0], points[:,1])
+
+ This can be done with `griddata` -- below we try out all of the
+ interpolation methods:
+
+ >>> from scipy.interpolate import griddata
+ >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
+ >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
+ >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
+
+ One can see that the exact result is reproduced by all of the
+ methods to some degree, but for this smooth function the piecewise
+ cubic interpolant gives the best results:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.subplot(221)
+ >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
+ >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
+ >>> plt.title('Original')
+ >>> plt.subplot(222)
+ >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Nearest')
+ >>> plt.subplot(223)
+ >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Linear')
+ >>> plt.subplot(224)
+ >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
+ >>> plt.title('Cubic')
+ >>> plt.gcf().set_size_inches(6, 6)
+ >>> plt.show()
+
+ """ # numpy/numpydoc#87 # noqa: E501
+
+ points = _ndim_coords_from_arrays(points)
+
+ if points.ndim < 2:
+ ndim = points.ndim
+ else:
+ ndim = points.shape[-1]
+
+ if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
+ from ._interpolate import interp1d
+ points = points.ravel()
+ if isinstance(xi, tuple):
+ if len(xi) != 1:
+ raise ValueError("invalid number of dimensions in xi")
+ xi, = xi
+ # Sort points/values together, necessary as input for interp1d
+ idx = np.argsort(points)
+ points = points[idx]
+ values = values[idx]
+ if method == 'nearest':
+ fill_value = 'extrapolate'
+ ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
+ fill_value=fill_value)
+ return ip(xi)
+ elif method == 'nearest':
+ ip = NearestNDInterpolator(points, values, rescale=rescale)
+ return ip(xi)
+ elif method == 'linear':
+ ip = LinearNDInterpolator(points, values, fill_value=fill_value,
+ rescale=rescale)
+ return ip(xi)
+ elif method == 'cubic' and ndim == 2:
+ ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
+ rescale=rescale)
+ return ip(xi)
+ else:
+ raise ValueError("Unknown interpolation method %r for "
+ "%d dimensional data" % (method, ndim))
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_pade.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_pade.py
new file mode 100644
index 0000000000000000000000000000000000000000..387ef11dde5d3ace8a15324058c10fa31899c92c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_pade.py
@@ -0,0 +1,67 @@
+from numpy import zeros, asarray, eye, poly1d, hstack, r_
+from scipy import linalg
+
# Public API of this module; everything else is an implementation detail.
__all__ = ["pade"]
+
def pade(an, m, n=None):
    """
    Return Pade approximation to a polynomial as the ratio of two polynomials.

    Parameters
    ----------
    an : (N,) array_like
        Taylor series coefficients.
    m : int
        The order of the returned approximating polynomial `q`.
    n : int, optional
        The order of the returned approximating polynomial `p`. By default,
        the order is ``len(an)-1-m``.

    Returns
    -------
    p, q : Polynomial class
        The Pade approximation of the polynomial defined by `an` is
        ``p(x)/q(x)``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.interpolate import pade
    >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
    >>> p, q = pade(e_exp, 2)

    >>> e_exp.reverse()
    >>> e_poly = np.poly1d(e_exp)

    Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``

    >>> e_poly(1)
    2.7166666666666668

    >>> p(1)/q(1)
    2.7179487179487181

    """
    an = asarray(an)
    if n is None:
        n = len(an) - 1 - m
        if n < 0:
            raise ValueError("Order of q must be smaller than len(an)-1.")
    if n < 0:
        raise ValueError("Order of p must be greater than 0.")
    N = m + n
    if N > len(an) - 1:
        raise ValueError("Order of q+p must be smaller than len(an).")
    an = an[:N+1]

    # Build the linear system matching Taylor coefficients of p(x) - q(x)*T(x).
    # Left columns: identity picking out the numerator coefficients of p.
    p_cols = eye(N+1, n+1, dtype=an.dtype)
    # Right columns: negated, reversed runs of Taylor coefficients for q.
    # Row k uses coefficients an[max(k-m, 0):k]; for k <= m the run is
    # shorter than m, which reproduces the original two-loop fill.
    q_cols = zeros((N+1, m), dtype=an.dtype)
    for k in range(1, N+1):
        lo = max(k - m, 0)
        q_cols[k, :k-lo] = -(an[lo:k])[::-1]

    system = hstack((p_cols, q_cols))
    pq = linalg.solve(system, an)

    # Split the solution vector; q is normalized with constant term 1.
    p = pq[:n+1]
    q = r_[1.0, pq[n+1:]]
    # poly1d expects highest-degree coefficient first.
    return poly1d(p[::-1]), poly1d(q[::-1])
+
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_polyint.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_polyint.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ed06d8abdba9597397295a0ca3c4a2bc3b25659
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_polyint.py
@@ -0,0 +1,938 @@
+import warnings
+
+import numpy as np
+from scipy.special import factorial
+from scipy._lib._util import _asarray_validated, float_factorial, check_random_state
+
+
# Public API of this module; leading-underscore classes are shared internals.
__all__ = ["KroghInterpolator", "krogh_interpolate",
           "BarycentricInterpolator", "barycentric_interpolate",
           "approximate_taylor_polynomial"]
+
+
+def _isscalar(x):
+ """Check whether x is if a scalar type, or 0-dim"""
+ return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
+
+
class _Interpolator1D:
    """
    Common features in univariate interpolation

    Deal with input data type and interpolation axis rolling. The
    actual interpolator can assume the y-data is of shape (n, r) where
    `n` is the number of x-points, and `r` the number of variables,
    and use self.dtype as the y-data type.

    Attributes
    ----------
    _y_axis
        Axis along which the interpolation goes in the original array
    _y_extra_shape
        Additional trailing shape of the input arrays, excluding
        the interpolation axis.
    dtype
        Dtype of the y-data arrays. Can be set via _set_dtype, which
        forces it to be float or complex.

    Methods
    -------
    __call__
    _prepare_x
    _finish_y
    _reshape_yi
    _set_yi
    _set_dtype
    _evaluate

    """

    # __slots__ keeps these lightweight objects small and catches typo'd
    # attribute assignments; subclasses declare their own attributes.
    __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')

    def __init__(self, xi=None, yi=None, axis=None):
        self._y_axis = axis
        self._y_extra_shape = None
        self.dtype = None
        if yi is not None:
            # Validates yi against xi and records axis/shape/dtype metadata.
            self._set_yi(yi, xi=xi, axis=axis)

    def __call__(self, x):
        """
        Evaluate the interpolant

        Parameters
        ----------
        x : array_like
            Point or points at which to evaluate the interpolant.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of `x`.

        Notes
        -----
        Input values `x` must be convertible to `float` values like `int`
        or `float`.

        """
        # Flatten x for the core evaluator, then restore the caller's shape.
        x, x_shape = self._prepare_x(x)
        y = self._evaluate(x)
        return self._finish_y(y, x_shape)

    def _evaluate(self, x):
        """
        Actually evaluate the value of the interpolator.
        """
        # Subclasses implement the actual math on a 1-D x and (n, r) y-data.
        raise NotImplementedError()

    def _prepare_x(self, x):
        """Reshape input x array to 1-D"""
        # as_inexact=True promotes integer input to a floating dtype.
        x = _asarray_validated(x, check_finite=False, as_inexact=True)
        x_shape = x.shape
        return x.ravel(), x_shape

    def _finish_y(self, y, x_shape):
        """Reshape interpolated y back to an N-D array similar to initial y"""
        y = y.reshape(x_shape + self._y_extra_shape)
        if self._y_axis != 0 and x_shape != ():
            # Roll the interpolation axis (currently leading) back to the
            # position it occupied in the original y array.
            nx = len(x_shape)
            ny = len(self._y_extra_shape)
            s = (list(range(nx, nx + self._y_axis))
                 + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
            y = y.transpose(s)
        return y

    def _reshape_yi(self, yi, check=False):
        # Move the interpolation axis to the front and collapse all the
        # remaining (trailing) dimensions into a single "r" dimension.
        yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
        if check and yi.shape[1:] != self._y_extra_shape:
            ok_shape = "{!r} + (N,) + {!r}".format(self._y_extra_shape[-self._y_axis:],
                                                   self._y_extra_shape[:-self._y_axis])
            raise ValueError("Data must be of shape %s" % ok_shape)
        return yi.reshape((yi.shape[0], -1))

    def _set_yi(self, yi, xi=None, axis=None):
        # Record interpolation-axis metadata and validate yi against xi.
        if axis is None:
            axis = self._y_axis
        if axis is None:
            raise ValueError("no interpolation axis specified")

        yi = np.asarray(yi)

        shape = yi.shape
        if shape == ():
            # Treat a scalar yi as a length-1 array for the length check.
            shape = (1,)
        if xi is not None and shape[axis] != len(xi):
            raise ValueError("x and y arrays must be equal in length along "
                             "interpolation axis.")

        # Normalize negative axis values and remember the non-interpolation
        # part of yi's shape for reconstructing outputs later.
        self._y_axis = (axis % yi.ndim)
        self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
        self.dtype = None
        self._set_dtype(yi.dtype)

    def _set_dtype(self, dtype, union=False):
        # Force the working dtype to float64 or complex128; complex is
        # "contagious" (once complex, always complex when union=True).
        if np.issubdtype(dtype, np.complexfloating) \
                or np.issubdtype(self.dtype, np.complexfloating):
            self.dtype = np.complex128
        else:
            if not union or self.dtype != np.complex128:
                self.dtype = np.float64
+
+
class _Interpolator1DWithDerivatives(_Interpolator1D):
    # Mixin layer: adds derivative evaluation on top of _Interpolator1D's
    # shape handling. Subclasses implement _evaluate_derivatives.

    def derivatives(self, x, der=None):
        """
        Evaluate several derivatives of the polynomial at the point `x`

        Produce an array of derivatives evaluated at the point `x`.

        Parameters
        ----------
        x : array_like
            Point or points at which to evaluate the derivatives
        der : int or list or None, optional
            How many derivatives to evaluate, or None for all potentially
            nonzero derivatives (that is, a number equal to the number
            of points), or a list of derivatives to evaluate. This number
            includes the function value as the '0th' derivative.

        Returns
        -------
        d : ndarray
            Array with derivatives; ``d[j]`` contains the jth derivative.
            Shape of ``d[j]`` is determined by replacing the interpolation
            axis in the original array with the shape of `x`.

        Examples
        --------
        >>> from scipy.interpolate import KroghInterpolator
        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
        array([1.0,2.0,3.0])
        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
        array([[1.0,1.0],
               [2.0,2.0],
               [3.0,3.0]])

        """
        x, x_shape = self._prepare_x(x)
        y = self._evaluate_derivatives(x, der)

        # y has the derivative order as its leading axis; restore the
        # caller's x-shape and roll the interpolation axis back while
        # keeping the derivative axis (index 0) in front.
        y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
        if self._y_axis != 0 and x_shape != ():
            nx = len(x_shape)
            ny = len(self._y_extra_shape)
            s = ([0] + list(range(nx+1, nx + self._y_axis+1))
                 + list(range(1, nx+1)) +
                 list(range(nx+1+self._y_axis, nx+ny+1)))
            y = y.transpose(s)
        return y

    def derivative(self, x, der=1):
        """
        Evaluate a single derivative of the polynomial at the point `x`.

        Parameters
        ----------
        x : array_like
            Point or points at which to evaluate the derivatives

        der : integer, optional
            Which derivative to evaluate (default: first derivative).
            This number includes the function value as 0th derivative.

        Returns
        -------
        d : ndarray
            Derivative interpolated at the x-points. Shape of `d` is
            determined by replacing the interpolation axis in the
            original array with the shape of `x`.

        Notes
        -----
        This may be computed by evaluating all derivatives up to the desired
        one (using self.derivatives()) and then discarding the rest.

        """
        x, x_shape = self._prepare_x(x)
        # Compute orders 0..der and keep only the requested one.
        y = self._evaluate_derivatives(x, der+1)
        return self._finish_y(y[der], x_shape)

    def _evaluate_derivatives(self, x, der=None):
        """
        Actually evaluate the derivatives.

        Parameters
        ----------
        x : array_like
            1D array of points at which to evaluate the derivatives
        der : integer, optional
            The number of derivatives to evaluate, from 'order 0' (der=1)
            to order der-1.  If omitted, return all possibly-non-zero
            derivatives, ie 0 to order n-1.

        Returns
        -------
        d : ndarray
            Array of shape ``(der, x.size, self.yi.shape[1])`` containing
            the derivatives from 0 to der-1
        """
        raise NotImplementedError()
+
+
class KroghInterpolator(_Interpolator1DWithDerivatives):
    """
    Interpolating polynomial for a set of points.

    The polynomial passes through all the pairs ``(xi, yi)``. One may
    additionally specify a number of derivatives at each point `xi`;
    this is done by repeating the value `xi` and specifying the
    derivatives as successive `yi` values.

    Allows evaluation of the polynomial and all its derivatives.
    For reasons of numerical stability, this function does not compute
    the coefficients of the polynomial, although they can be obtained
    by evaluating all the derivatives.

    Parameters
    ----------
    xi : array_like, shape (npoints, )
        Known x-coordinates. Must be sorted in increasing order.
    yi : array_like, shape (..., npoints, ...)
        Known y-coordinates. When an xi occurs two or more times in
        a row, the corresponding yi's represent derivative values. The length of `yi`
        along the interpolation axis must be equal to the length of `xi`. Use the
        `axis` parameter to select the correct axis.
    axis : int, optional
        Axis in the `yi` array corresponding to the x-coordinate values. Defaults to
        ``axis=0``.

    Notes
    -----
    Be aware that the algorithms implemented here are not necessarily
    the most numerically stable known. Moreover, even in a world of
    exact computation, unless the x coordinates are chosen very
    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
    polynomial interpolation itself is a very ill-conditioned process
    due to the Runge phenomenon. In general, even with well-chosen
    x values, degrees higher than about thirty cause problems with
    numerical instability in this code.

    Based on [1]_.

    References
    ----------
    .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
        and Numerical Differentiation", 1970.

    Examples
    --------
    To produce a polynomial that is zero at 0 and 1 and has
    derivative 2 at 0, call

    >>> from scipy.interpolate import KroghInterpolator
    >>> KroghInterpolator([0,0,1],[0,2,0])

    This constructs the quadratic :math:`2x^2-2x`. The derivative condition
    is indicated by the repeated zero in the `xi` array; the corresponding
    yi values are 0, the function value, and 2, the derivative value.

    For another example, given `xi`, `yi`, and a derivative `ypi` for each
    point, appropriate arrays can be constructed as:

    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> xi = np.linspace(0, 1, 5)
    >>> yi, ypi = rng.random((2, 5))
    >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
    >>> KroghInterpolator(xi_k, yi_k)

    To produce a vector-valued polynomial, supply a higher-dimensional
    array for `yi`:

    >>> KroghInterpolator([0,1],[[2,3],[4,5]])

    This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.

    """

    def __init__(self, xi, yi, axis=0):
        super().__init__(xi, yi, axis)

        self.xi = np.asarray(xi)
        self.yi = self._reshape_yi(yi)
        # n: number of data points; r: number of interleaved value columns.
        self.n, self.r = self.yi.shape

        if (deg := self.xi.size) > 30:
            warnings.warn(f"{deg} degrees provided, degrees higher than about"
                          " thirty cause problems with numerical instability "
                          "with 'KroghInterpolator'", stacklevel=2)

        # Precompute Newton-form coefficients c[k] by a divided-difference
        # scheme that also handles repeated nodes (derivative data).
        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
        c[0] = self.yi[0]
        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
        for k in range(1, self.n):
            # s = multiplicity of xi[k] among the immediately preceding
            # nodes; repeated nodes mean yi[k] is a derivative value.
            s = 0
            while s <= k and xi[k-s] == xi[k]:
                s += 1
            s -= 1
            # Scale derivative data by 1/s! as required by Hermite-style
            # divided differences.
            Vk[0] = self.yi[k]/float_factorial(s)
            for i in range(k-s):
                if xi[i] == xi[k]:
                    # Equal nodes are only allowed when consecutive.
                    raise ValueError("Elements of `xi` can't be equal.")
                if s == 0:
                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
                else:
                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
            c[k] = Vk[k-s]
        self.c = c

    def _evaluate(self, x):
        # Horner-like accumulation of the Newton form:
        # p(x) = sum_k c[k] * prod_{j<k} (x - xi[j]).
        pi = 1
        p = np.zeros((len(x), self.r), dtype=self.dtype)
        p += self.c[0,np.newaxis,:]
        for k in range(1, self.n):
            w = x - self.xi[k-1]
            pi = w*pi
            p += pi[:,np.newaxis] * self.c[k]
        return p

    def _evaluate_derivatives(self, x, der=None):
        n = self.n
        r = self.r

        if der is None:
            # All potentially nonzero derivatives (orders 0..n-1).
            der = self.n

        # pi[k] = product of the first k Newton factors at each x;
        # w[k] = the individual factors (x - xi[k]).
        pi = np.zeros((n, len(x)))
        w = np.zeros((n, len(x)))
        pi[0] = 1
        p = np.zeros((len(x), self.r), dtype=self.dtype)
        p += self.c[0, np.newaxis, :]

        for k in range(1, n):
            w[k-1] = x - self.xi[k-1]
            pi[k] = w[k-1] * pi[k-1]
            p += pi[k, :, np.newaxis] * self.c[k]

        # Backward recurrence accumulating derivative values into cn;
        # statement order inside the loops is significant (pi is updated
        # in place and immediately reused).
        cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
        cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
        cn[0] = p
        for k in range(1, n):
            for i in range(1, n-k+1):
                pi[i] = w[k+i-1]*pi[i-1] + pi[i]
                cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
            # Convert divided-difference data into a true k-th derivative.
            cn[k] *= float_factorial(k)

        # Order n and above are identically zero for a degree n-1 polynomial.
        cn[n, :, :] = 0
        return cn[:der]
+
+
def krogh_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for polynomial interpolation.

    Builds a `KroghInterpolator` from ``(xi, yi)`` and immediately
    evaluates it (or its derivatives) at `x`.

    Parameters
    ----------
    xi : array_like
        Interpolation points (known x-coordinates).
    yi : array_like
        Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
        vectors of length R, or scalars if R=1.
    x : array_like
        Point or points at which to evaluate the derivatives.
    der : int or list or None, optional
        How many derivatives to evaluate, or None for all potentially
        nonzero derivatives (that is, a number equal to the number
        of points), or a list of derivatives to evaluate. This number
        includes the function value as the '0th' derivative.
    axis : int, optional
        Axis in the `yi` array corresponding to the x-coordinate values.

    Returns
    -------
    d : ndarray
        If the interpolator's values are R-D then the
        returned array will be the number of derivatives by N by R.
        If `x` is a scalar, the middle dimension will be dropped; if
        the `yi` are scalars then the last dimension will be dropped.

    See Also
    --------
    KroghInterpolator : Krogh interpolator

    Notes
    -----
    Construction of the interpolating polynomial is a relatively expensive
    process. If you want to evaluate it repeatedly consider using the class
    `KroghInterpolator` (which is what this function uses).

    Examples
    --------
    We can interpolate 2D observed data using Krogh interpolation:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import krogh_interpolate
    >>> x_observed = np.linspace(0.0, 10.0, 11)
    >>> y_observed = np.sin(x_observed)
    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
    >>> y = krogh_interpolate(x_observed, y_observed, x)
    >>> plt.plot(x_observed, y_observed, "o", label="observation")
    >>> plt.plot(x, y, label="krogh interpolation")
    >>> plt.legend()
    >>> plt.show()
    """
    interpolator = KroghInterpolator(xi, yi, axis=axis)
    if der == 0:
        # Plain evaluation of the polynomial itself.
        return interpolator(x)
    if _isscalar(der):
        # A single derivative order was requested.
        return interpolator.derivative(x, der=der)
    # A list of orders: compute all up to the highest, then select.
    return interpolator.derivatives(x, der=np.amax(der)+1)[der]
+
+
def approximate_taylor_polynomial(f, x, degree, scale, order=None):
    """
    Estimate the Taylor polynomial of f at x by polynomial fitting.

    Parameters
    ----------
    f : callable
        The function whose Taylor polynomial is sought. Should accept
        a vector of `x` values.
    x : scalar
        The point at which the polynomial is to be evaluated.
    degree : int
        The degree of the Taylor polynomial
    scale : scalar
        The width of the interval to use to evaluate the Taylor polynomial.
        Function values spread over a range this wide are used to fit the
        polynomial. Must be chosen carefully.
    order : int or None, optional
        The order of the polynomial to be used in the fitting; `f` will be
        evaluated ``order+1`` times. If None, use `degree`.

    Returns
    -------
    p : poly1d instance
        The Taylor polynomial (translated to the origin, so that
        for example p(0)=f(x)).

    Notes
    -----
    The appropriate choice of "scale" is a trade-off; too large and the
    function differs from its Taylor polynomial too much to get a good
    answer, too small and round-off errors overwhelm the higher-order terms.
    The algorithm used becomes numerically unstable around order 30 even
    under ideal circumstances.

    Choosing order somewhat larger than degree may improve the higher-order
    terms.

    Examples
    --------
    We can calculate Taylor approximation polynomials of sin function with
    various degrees:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import approximate_taylor_polynomial
    >>> x = np.linspace(-10.0, 10.0, num=100)
    >>> plt.plot(x, np.sin(x), label="sin curve")
    >>> for degree in np.arange(1, 15, step=2):
    ...     sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
    ...                                                order=degree + 2)
    ...     plt.plot(x, sin_taylor(x), label=f"degree={degree}")
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
    ...            borderaxespad=0.0, shadow=True)
    >>> plt.tight_layout()
    >>> plt.axis([-10, 10, -10, 10])
    >>> plt.show()

    """
    if order is None:
        order = degree

    n = order+1
    # Choose n points that cluster near the endpoints of the interval in
    # a way that avoids the Runge phenomenon. Ensure, by including the
    # endpoint or not as appropriate, that one point always falls at x
    # exactly: with endpoint=True, linspace hits pi/2 (cos = 0) iff n is
    # odd; with endpoint=False iff n is even. Hence endpoint must be
    # n % 2. (The previous ``endpoint=n % 1`` was always 0/False, so for
    # odd n no node fell at x, contradicting the stated intent.)
    xs = scale*np.cos(np.linspace(0, np.pi, n, endpoint=bool(n % 2))) + x

    P = KroghInterpolator(xs, f(xs))
    # Derivatives 0..degree at x give the Taylor coefficients after
    # dividing by k!.
    d = P.derivatives(x, der=degree+1)

    return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
+
+
class BarycentricInterpolator(_Interpolator1DWithDerivatives):
    r"""Interpolating polynomial for a set of points.

    Constructs a polynomial that passes through a given set of points.
    Allows evaluation of the polynomial and all its derivatives,
    efficient changing of the y-values to be interpolated,
    and updating by adding more x- and y-values.

    For reasons of numerical stability, this function does not compute
    the coefficients of the polynomial.

    The values `yi` need to be provided before the function is
    evaluated, but none of the preprocessing depends on them, so rapid
    updates are possible.

    Parameters
    ----------
    xi : array_like, shape (npoints, )
        1-D array of x coordinates of the points the polynomial
        should pass through
    yi : array_like, shape (..., npoints, ...), optional
        N-D array of y coordinates of the points the polynomial should pass through.
        If None, the y values will be supplied later via the `set_y` method.
        The length of `yi` along the interpolation axis must be equal to the length
        of `xi`. Use the ``axis`` parameter to select correct axis.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values. Defaults
        to ``axis=0``.
    wi : array_like, optional
        The barycentric weights for the chosen interpolation points `xi`.
        If absent or None, the weights will be computed from `xi` (default).
        This allows for the reuse of the weights `wi` if several interpolants
        are being calculated using the same nodes `xi`, without re-computation.
    random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Notes
    -----
    This class uses a "barycentric interpolation" method that treats
    the problem as a special case of rational function interpolation.
    This algorithm is quite stable, numerically, but even in a world of
    exact computation, unless the x coordinates are chosen very
    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
    polynomial interpolation itself is a very ill-conditioned process
    due to the Runge phenomenon.

    Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".

    Examples
    --------
    To produce a quintic barycentric interpolant approximating the function
    :math:`\sin x`, and its first four derivatives, using six randomly-spaced
    nodes in :math:`(0, \frac{\pi}{2})`:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import BarycentricInterpolator
    >>> rng = np.random.default_rng()
    >>> xi = rng.random(6) * np.pi/2
    >>> f, f_d1, f_d2, f_d3, f_d4 = np.sin, np.cos, lambda x: -np.sin(x), lambda x: -np.cos(x), np.sin
    >>> P = BarycentricInterpolator(xi, f(xi), random_state=rng)
    >>> fig, axs = plt.subplots(5, 1, sharex=True, layout='constrained', figsize=(7,10))
    >>> x = np.linspace(0, np.pi, 100)
    >>> axs[0].plot(x, P(x), 'r:', x, f(x), 'k--', xi, f(xi), 'xk')
    >>> axs[1].plot(x, P.derivative(x), 'r:', x, f_d1(x), 'k--', xi, f_d1(xi), 'xk')
    >>> axs[2].plot(x, P.derivative(x, 2), 'r:', x, f_d2(x), 'k--', xi, f_d2(xi), 'xk')
    >>> axs[3].plot(x, P.derivative(x, 3), 'r:', x, f_d3(x), 'k--', xi, f_d3(xi), 'xk')
    >>> axs[4].plot(x, P.derivative(x, 4), 'r:', x, f_d4(x), 'k--', xi, f_d4(xi), 'xk')
    >>> axs[0].set_xlim(0, np.pi)
    >>> axs[4].set_xlabel(r"$x$")
    >>> axs[4].set_xticks([i * np.pi / 4 for i in range(5)],
    ...                   ["0", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"])
    >>> axs[0].set_ylabel("$f(x)$")
    >>> axs[1].set_ylabel("$f'(x)$")
    >>> axs[2].set_ylabel("$f''(x)$")
    >>> axs[3].set_ylabel("$f^{(3)}(x)$")
    >>> axs[4].set_ylabel("$f^{(4)}(x)$")
    >>> labels = ['Interpolation nodes', 'True function $f$', 'Barycentric interpolation']
    >>> axs[0].legend(axs[0].get_lines()[::-1], labels, bbox_to_anchor=(0., 1.02, 1., .102),
    ...               loc='lower left', ncols=3, mode="expand", borderaxespad=0., frameon=False)
    >>> plt.show()
    """  # numpy/numpydoc#87  # noqa: E501

    def __init__(self, xi, yi=None, axis=0, *, wi=None, random_state=None):
        super().__init__(xi, yi, axis)

        random_state = check_random_state(random_state)

        self.xi = np.asarray(xi, dtype=np.float64)
        self.set_yi(yi)
        self.n = len(self.xi)

        # cache derivative object to avoid re-computing the weights with every call.
        self._diff_cij = None

        if wi is not None:
            # Caller supplied precomputed barycentric weights; trust them.
            self.wi = wi
        else:
            # See page 510 of Berrut and Trefethen 2004 for an explanation of the
            # capacity scaling and the suggestion of using a random permutation of
            # the input factors.
            # At the moment, the permutation is not performed for xi that are
            # appended later through the add_xi interface. It's not clear to me how
            # to implement that and it seems that most situations that require
            # these numerical stability improvements will be able to provide all
            # the points to the constructor.
            self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
            permute = random_state.permutation(self.n, )
            inv_permute = np.zeros(self.n, dtype=np.int32)
            inv_permute[permute] = np.arange(self.n)
            self.wi = np.zeros(self.n)

            for i in range(self.n):
                # Product over (xi[i] - xi[j]) for j != i, in a randomized
                # order and scaled by the inverse capacity for stability.
                dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
                dist[inv_permute[i]] = 1.0
                prod = np.prod(dist)
                if prod == 0.0:
                    raise ValueError("Interpolation points xi must be"
                                     " distinct.")
                self.wi[i] = 1.0 / prod

    def set_yi(self, yi, axis=None):
        """
        Update the y values to be interpolated

        The barycentric interpolation algorithm requires the calculation
        of weights, but these depend only on the `xi`. The `yi` can be changed
        at any time.

        Parameters
        ----------
        yi : array_like
            The y-coordinates of the points the polynomial will pass through.
            If None, the y values must be supplied later.
        axis : int, optional
            Axis in the `yi` array corresponding to the x-coordinate values.

        """
        if yi is None:
            self.yi = None
            return
        self._set_yi(yi, xi=self.xi, axis=axis)
        self.yi = self._reshape_yi(yi)
        self.n, self.r = self.yi.shape
        # yi changed, so any cached derivative interpolator is stale.
        self._diff_baryint = None

    def add_xi(self, xi, yi=None):
        """
        Add more x values to the set to be interpolated

        The barycentric interpolation algorithm allows easy updating by
        adding more points for the polynomial to pass through.

        Parameters
        ----------
        xi : array_like
            The x coordinates of the points that the polynomial should pass
            through.
        yi : array_like, optional
            The y coordinates of the points the polynomial should pass through.
            Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
            vector-valued.
            If `yi` is not given, the y values will be supplied later. `yi`
            should be given if and only if the interpolator has y values
            specified.

        Notes
        -----
        The new points added by `add_xi` are not randomly permuted
        so there is potential for numerical instability,
        especially for a large number of points. If this
        happens, please reconstruct interpolation from scratch instead.
        """
        if yi is not None:
            if self.yi is None:
                raise ValueError("No previous yi value to update!")
            yi = self._reshape_yi(yi, check=True)
            self.yi = np.vstack((self.yi,yi))
        else:
            if self.yi is not None:
                raise ValueError("No update to yi provided!")
        old_n = self.n
        self.xi = np.concatenate((self.xi,xi))
        self.n = len(self.xi)
        # Work with the reciprocals of the weights (the raw products) so
        # the incremental update below is a simple multiplication.
        self.wi **= -1
        old_wi = self.wi
        self.wi = np.zeros(self.n)
        self.wi[:old_n] = old_wi
        for j in range(old_n, self.n):
            # Fold the new node j into every existing product, then form
            # the product for node j itself.
            self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
            self.wi[j] = np.multiply.reduce(
                self._inv_capacity * (self.xi[:j]-self.xi[j])
            )
        self.wi **= -1
        # Node set changed: invalidate cached differentiation data.
        self._diff_cij = None
        self._diff_baryint = None

    def __call__(self, x):
        """Evaluate the interpolating polynomial at the points x

        Parameters
        ----------
        x : array_like
            Point or points at which to evaluate the interpolant.

        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of `x`.

        Notes
        -----
        Currently the code computes an outer product between `x` and the
        weights, that is, it constructs an intermediate array of size
        ``(N, len(x))``, where N is the degree of the polynomial.
        """
        return _Interpolator1D.__call__(self, x)

    def _evaluate(self, x):
        if x.size == 0:
            p = np.zeros((0, self.r), dtype=self.dtype)
        else:
            # Barycentric formula: p(x) = sum_j (w_j/(x-x_j)) y_j
            #                             / sum_j (w_j/(x-x_j)).
            c = x[..., np.newaxis] - self.xi
            z = c == 0
            # Temporarily replace exact hits to avoid division by zero;
            # they are patched with the exact yi values below.
            c[z] = 1
            c = self.wi / c
            with np.errstate(divide='ignore'):
                p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
            # Now fix where x==some xi
            r = np.nonzero(z)
            if len(r) == 1:  # evaluation at a scalar
                if len(r[0]) > 0:  # equals one of the points
                    p = self.yi[r[0][0]]
            else:
                p[r[:-1]] = self.yi[r[-1]]
        return p

    def derivative(self, x, der=1):
        """
        Evaluate a single derivative of the polynomial at the point x.

        Parameters
        ----------
        x : array_like
            Point or points at which to evaluate the derivatives
        der : integer, optional
            Which derivative to evaluate (default: first derivative).
            This number includes the function value as 0th derivative.

        Returns
        -------
        d : ndarray
            Derivative interpolated at the x-points. Shape of `d` is
            determined by replacing the interpolation axis in the
            original array with the shape of `x`.
        """
        x, x_shape = self._prepare_x(x)
        # all_lower=False: compute only the requested order, not 0..der.
        y = self._evaluate_derivatives(x, der+1, all_lower=False)
        return self._finish_y(y, x_shape)

    def _evaluate_derivatives(self, x, der=None, all_lower=True):
        # NB: der here is not the order of the highest derivative;
        # instead, it is the size of the derivatives matrix that
        # would be returned with all_lower=True, including the
        # '0th' derivative (the undifferentiated function).
        # E.g. to evaluate the 5th derivative alone, call
        # _evaluate_derivatives(x, der=6, all_lower=False).

        if (not all_lower) and (x.size == 0 or self.r == 0):
            return np.zeros((0, self.r), dtype=self.dtype)

        if (not all_lower) and der == 1:
            # Order 0 is just the interpolant itself.
            return self._evaluate(x)

        if (not all_lower) and (der > self.n):
            # Derivatives beyond the polynomial degree vanish identically.
            return np.zeros((len(x), self.r), dtype=self.dtype)

        if der is None:
            der = self.n

        if all_lower and (x.size == 0 or self.r == 0):
            return np.zeros((der, len(x), self.r), dtype=self.dtype)

        if self._diff_cij is None:
            # Build the differentiation matrix (Berrut & Trefethen 2004,
            # section 9) once and cache it; it depends only on xi and wi.
            # c[i,j] = xi[i] - xi[j]
            c = self.xi[:, np.newaxis] - self.xi

            # avoid division by 0 (diagonal entries are so far zero by construction)
            np.fill_diagonal(c, 1)

            # c[i,j] = (w[j] / w[i]) / (xi[i] - xi[j]) (equation 9.4)
            c = self.wi / (c * self.wi[..., np.newaxis])

            # fill in correct diagonal entries: each column sums to 0
            np.fill_diagonal(c, 0)

            # calculate diagonal
            # c[j,j] = -sum_{i != j} c[i,j] (equation 9.5)
            d = -c.sum(axis=1)
            # c[i,j] = l_j(x_i)
            np.fill_diagonal(c, d)

            self._diff_cij = c

        if self._diff_baryint is None:
            # initialise and cache derivative interpolator and cijs;
            # reuse weights wi (which depend only on interpolation points xi),
            # to avoid unnecessary re-computation
            self._diff_baryint = BarycentricInterpolator(xi=self.xi,
                                                         yi=self._diff_cij @ self.yi,
                                                         wi=self.wi)
            self._diff_baryint._diff_cij = self._diff_cij

        if all_lower:
            # assemble matrix of derivatives from order 0 to order der-1,
            # in the format required by _Interpolator1DWithDerivatives.
            cn = np.zeros((der, len(x), self.r), dtype=self.dtype)
            for d in range(der):
                cn[d, :, :] = self._evaluate_derivatives(x, d+1, all_lower=False)
            return cn

        # recursively evaluate only the derivative requested
        return self._diff_baryint._evaluate_derivatives(x, der-1, all_lower=False)
+
+
def barycentric_interpolate(xi, yi, x, axis=0, *, der=0):
    """
    Convenience function for polynomial interpolation.

    Builds a `BarycentricInterpolator` through ``(xi, yi)`` and evaluates
    it (or its derivatives) at `x`. For reasons of numerical stability,
    this function does not compute the coefficients of the polynomial.

    This function uses a "barycentric interpolation" method that treats
    the problem as a special case of rational function interpolation.
    This algorithm is quite stable, numerically, but even in a world of
    exact computation, unless the `x` coordinates are chosen very
    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
    polynomial interpolation itself is a very ill-conditioned process
    due to the Runge phenomenon.

    Parameters
    ----------
    xi : array_like
        1-D array of x coordinates of the points the polynomial should
        pass through
    yi : array_like
        The y coordinates of the points the polynomial should pass through.
    x : scalar or array_like
        Point or points at which to evaluate the interpolant.
    der : int or list or None, optional
        How many derivatives to evaluate, or None for all potentially
        nonzero derivatives (that is, a number equal to the number
        of points), or a list of derivatives to evaluate. This number
        includes the function value as the '0th' derivative.
    axis : int, optional
        Axis in the `yi` array corresponding to the x-coordinate values.

    Returns
    -------
    y : scalar or array_like
        Interpolated values. Shape is determined by replacing
        the interpolation axis in the original array with the shape of `x`.

    See Also
    --------
    BarycentricInterpolator : Barycentric interpolator

    Notes
    -----
    Construction of the interpolation weights is a relatively slow process.
    If you want to call this many times with the same xi (but possibly
    varying yi or x) you should use the class `BarycentricInterpolator`.
    This is what this function uses internally.

    Examples
    --------
    We can interpolate 2D observed data using barycentric interpolation:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import barycentric_interpolate
    >>> x_observed = np.linspace(0.0, 10.0, 11)
    >>> y_observed = np.sin(x_observed)
    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
    >>> y = barycentric_interpolate(x_observed, y_observed, x)
    >>> plt.plot(x_observed, y_observed, "o", label="observation")
    >>> plt.plot(x, y, label="barycentric interpolation")
    >>> plt.legend()
    >>> plt.show()

    """
    interpolator = BarycentricInterpolator(xi, yi, axis=axis)
    if der == 0:
        # Plain evaluation of the interpolant.
        return interpolator(x)
    if _isscalar(der):
        # A single derivative order was requested.
        return interpolator.derivative(x, der=der)
    # A list of orders: compute all up to the highest, then select.
    return interpolator.derivatives(x, der=np.amax(der)+1)[der]
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d7e6dc6fe7e7a2a5b8b47df5ea2137fcbe52399f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_rbf.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed52230dd1cce678e56ca4427e10bafd07e501c0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbf.py
@@ -0,0 +1,290 @@
+"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
+
+Written by John Travers , February 2007
+Based closely on Matlab code by Alex Chirokov
+Additional, large, improvements by Robert Hetland
+Some additional alterations by Travis Oliphant
+Interpolation with multi-dimensional target domain by Josua Sassen
+
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+Copyright (c) 2006-2007, Robert Hetland
+Copyright (c) 2007, John Travers
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of Robert Hetland nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import numpy as np
+
+from scipy import linalg
+from scipy.special import xlogy
+from scipy.spatial.distance import cdist, pdist, squareform
+
+__all__ = ['Rbf']
+
+
class Rbf:
    """
    Rbf(*args, **kwargs)

    A class for radial basis function interpolation of functions from
    N-D scattered data to an M-D domain.

    .. legacy:: class

        `Rbf` is legacy code, for new usage please use `RBFInterpolator`
        instead.

    Parameters
    ----------
    *args : arrays
        x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
        and d is the array of values at the nodes
    function : str or callable, optional
        The radial basis function, based on the radius, r, given by the norm
        (default is Euclidean distance); the default is 'multiquadric'::

            'multiquadric': sqrt((r/self.epsilon)**2 + 1)
            'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
            'gaussian': exp(-(r/self.epsilon)**2)
            'linear': r
            'cubic': r**3
            'quintic': r**5
            'thin_plate': r**2 * log(r)

        If callable, then it must take 2 arguments (self, r). The epsilon
        parameter will be available as self.epsilon. Other keyword
        arguments passed in will be available as well.

    epsilon : float, optional
        Adjustable constant for gaussian or multiquadrics functions
        - defaults to approximate average distance between nodes (which is
        a good start).
    smooth : float, optional
        Values greater than zero increase the smoothness of the
        approximation. 0 is for interpolation (default), the function will
        always go through the nodal points in this case.
    norm : str, callable, optional
        A function that returns the 'distance' between two points, with
        inputs as arrays of positions (x, y, z, ...), and an output as an
        array of distance. E.g., the default: 'euclidean', such that the result
        is a matrix of the distances from each point in ``x1`` to each point in
        ``x2``. For more options, see documentation of
        `scipy.spatial.distance.cdist`.
    mode : str, optional
        Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
        '1-D' the data `d` will be considered as 1-D and flattened
        internally. When it is 'N-D' the data `d` is assumed to be an array of
        shape (n_samples, m), where m is the dimension of the target domain.


    Attributes
    ----------
    N : int
        The number of data points (as determined by the input arrays).
    di : ndarray
        The 1-D array of data values at each of the data coordinates `xi`.
    xi : ndarray
        The 2-D array of data coordinates.
    function : str or callable
        The radial basis function. See description under Parameters.
    epsilon : float
        Parameter used by gaussian or multiquadrics functions. See Parameters.
    smooth : float
        Smoothing parameter. See description under Parameters.
    norm : str or callable
        The distance function. See description under Parameters.
    mode : str
        Mode of the interpolation. See description under Parameters.
    nodes : ndarray
        A 1-D array of node values for the interpolation.
    A : internal property, do not use

    See Also
    --------
    RBFInterpolator

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.interpolate import Rbf
    >>> rng = np.random.default_rng()
    >>> x, y, z, d = rng.random((4, 50))
    >>> rbfi = Rbf(x, y, z, d)  # radial basis function interpolator instance
    >>> xi = yi = zi = np.linspace(0, 1, 20)
    >>> di = rbfi(xi, yi, zi)   # interpolated values
    >>> di.shape
    (20,)

    """
    # Available radial basis functions that can be selected as strings;
    # they all start with _h_ (self._init_function relies on that)
    def _h_multiquadric(self, r):
        return np.sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_inverse_multiquadric(self, r):
        return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_gaussian(self, r):
        return np.exp(-(1.0/self.epsilon*r)**2)

    def _h_linear(self, r):
        return r

    def _h_cubic(self, r):
        return r**3

    def _h_quintic(self, r):
        return r**5

    def _h_thin_plate(self, r):
        # xlogy handles r == 0 gracefully (returns 0 instead of nan).
        return xlogy(r**2, r)

    # Setup self._function and do smoke test on initial r
    def _init_function(self, r):
        if isinstance(self.function, str):
            self.function = self.function.lower()
            # Resolve user-facing aliases to the canonical _h_* names.
            _mapped = {'inverse': 'inverse_multiquadric',
                       'inverse multiquadric': 'inverse_multiquadric',
                       'thin-plate': 'thin_plate'}
            if self.function in _mapped:
                self.function = _mapped[self.function]

            func_name = "_h_" + self.function
            if hasattr(self, func_name):
                self._function = getattr(self, func_name)
            else:
                functionlist = [x[3:] for x in dir(self)
                                if x.startswith('_h_')]
                raise ValueError("function must be a callable or one of " +
                                 ", ".join(functionlist))
            # NOTE: a redundant second ``getattr`` assignment used to live
            # here; ``self._function`` is already set above.
        elif callable(self.function):
            allow_one = False
            if hasattr(self.function, 'func_code') or \
               hasattr(self.function, '__code__'):
                val = self.function
                allow_one = True
            elif hasattr(self.function, "__call__"):
                val = self.function.__call__.__func__
            else:
                raise ValueError("Cannot determine number of arguments to "
                                 "function")

            argcount = val.__code__.co_argcount
            if allow_one and argcount == 1:
                self._function = self.function
            elif argcount == 2:
                # Bind a two-argument callable as a method so it receives
                # this instance as its first argument.
                self._function = self.function.__get__(self, Rbf)
            else:
                raise ValueError("Function argument must take 1 or 2 "
                                 "arguments.")

        a0 = self._function(r)
        if a0.shape != r.shape:
            raise ValueError("Callable must take array and return array of "
                             "the same shape")
        return a0

    def __init__(self, *args, **kwargs):
        # `args` can be a variable number of arrays; we flatten them and store
        # them as a single 2-D array `xi` of shape (n_args-1, array_size),
        # plus a 1-D array `di` for the values.
        # All arrays must have the same number of elements
        self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
                              for a in args[:-1]])
        self.N = self.xi.shape[-1]

        self.mode = kwargs.pop('mode', '1-D')

        if self.mode == '1-D':
            self.di = np.asarray(args[-1]).flatten()
            self._target_dim = 1
        elif self.mode == 'N-D':
            self.di = np.asarray(args[-1])
            self._target_dim = self.di.shape[-1]
        else:
            raise ValueError("Mode has to be 1-D or N-D.")

        if not all(x.size == self.di.shape[0] for x in self.xi):
            raise ValueError("All arrays must be equal length.")

        self.norm = kwargs.pop('norm', 'euclidean')
        self.epsilon = kwargs.pop('epsilon', None)
        if self.epsilon is None:
            # default epsilon is the "the average distance between nodes" based
            # on a bounding hypercube
            ximax = np.amax(self.xi, axis=1)
            ximin = np.amin(self.xi, axis=1)
            edges = ximax - ximin
            edges = edges[np.nonzero(edges)]
            self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)

        self.smooth = kwargs.pop('smooth', 0.0)
        self.function = kwargs.pop('function', 'multiquadric')

        # attach anything left in kwargs to self for use by any user-callable
        # function or to save on the object returned.
        for item, value in kwargs.items():
            setattr(self, item, value)

        # Compute weights
        if self._target_dim > 1:  # If we have more than one target dimension,
            # we first factorize the matrix
            self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
            lu, piv = linalg.lu_factor(self.A)
            for i in range(self._target_dim):
                self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
        else:
            self.nodes = linalg.solve(self.A, self.di)

    @property
    def A(self):
        # this only exists for backwards compatibility: self.A was available
        # and, at least technically, public.
        r = squareform(pdist(self.xi.T, self.norm))  # Pairwise norm
        return self._init_function(r) - np.eye(self.N)*self.smooth

    def _call_norm(self, x1, x2):
        # Cross-distance matrix between two sets of column-stacked points.
        return cdist(x1.T, x2.T, self.norm)

    def __call__(self, *args):
        args = [np.asarray(x) for x in args]
        # All coordinate arrays must share one shape; comparing each against
        # the first is equivalent to (and cheaper than) all-pairs comparison.
        if not all(x.shape == args[0].shape for x in args):
            raise ValueError("Array lengths must be equal")
        if self._target_dim > 1:
            shp = args[0].shape + (self._target_dim,)
        else:
            shp = args[0].shape
        xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
        r = self._call_norm(xa, self.xi)
        return np.dot(self._function(r), self.nodes).reshape(shp)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py
new file mode 100644
index 0000000000000000000000000000000000000000..6690e6ccf7d5499db10efffb0ef1c0139a90d2ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp.py
@@ -0,0 +1,550 @@
+"""Module for RBF interpolation."""
+import warnings
+from itertools import combinations_with_replacement
+
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy.spatial import KDTree
+from scipy.special import comb
+from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
+
+from ._rbfinterp_pythran import (_build_system,
+ _build_evaluation_coefficients,
+ _polynomial_matrix)
+
+
+__all__ = ["RBFInterpolator"]
+
+
# These RBFs are implemented.
_AVAILABLE = {
    "linear",
    "thin_plate_spline",
    "cubic",
    "quintic",
    "multiquadric",
    "inverse_multiquadric",
    "inverse_quadratic",
    "gaussian"
    }


# The shape parameter does not need to be specified when using these RBFs.
# (For these kernels, scaling the inputs has the same effect as scaling the
# smoothing parameter, so `epsilon` can default to 1.)
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}


# For RBFs that are conditionally positive definite of order m, the interpolant
# should include polynomial terms with degree >= m - 1. Define the minimum
# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
# are positive definite and do not need polynomial terms.
_NAME_TO_MIN_DEGREE = {
    "multiquadric": 0,
    "linear": 0,
    "thin_plate_spline": 1,
    "cubic": 1,
    "quintic": 2
    }
+
+
+def _monomial_powers(ndim, degree):
+ """Return the powers for each monomial in a polynomial.
+
+ Parameters
+ ----------
+ ndim : int
+ Number of variables in the polynomial.
+ degree : int
+ Degree of the polynomial.
+
+ Returns
+ -------
+ (nmonos, ndim) int ndarray
+ Array where each row contains the powers for each variable in a
+ monomial.
+
+ """
+ nmonos = comb(degree + ndim, ndim, exact=True)
+ out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
+ count = 0
+ for deg in range(degree + 1):
+ for mono in combinations_with_replacement(range(ndim), deg):
+ # `mono` is a tuple of variables in the current monomial with
+ # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
+ for var in mono:
+ out[count, var] += 1
+
+ count += 1
+
+ return out
+
+
def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
    """Build and solve the RBF interpolation system of equations.

    Parameters
    ----------
    y : (P, N) float ndarray
        Data point coordinates.
    d : (P, S) float ndarray
        Data values at `y`.
    smoothing : (P,) float ndarray
        Smoothing parameter for each data point.
    kernel : str
        Name of the RBF.
    epsilon : float
        Shape parameter.
    powers : (R, N) int ndarray
        The exponents for each monomial in the polynomial.

    Returns
    -------
    shift : (N,) float ndarray
        Domain shift used to create the polynomial matrix.
    scale : (N,) float ndarray
        Domain scaling used to create the polynomial matrix.
    coeffs : (P + R, S) float ndarray
        Coefficients for each RBF and monomial.

    """
    lhs, rhs, shift, scale = _build_system(
        y, d, smoothing, kernel, epsilon, powers
    )
    # Solve in place with LAPACK; `info` encodes success/failure.
    _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
    if info == 0:
        return shift, scale, coeffs

    if info < 0:
        raise ValueError(f"The {-info}-th argument had an illegal value.")

    # info > 0: the system is singular. Try to produce a more specific
    # message by checking whether the polynomial matrix is rank deficient.
    msg = "Singular matrix."
    nmonos = powers.shape[0]
    if nmonos > 0:
        pmat = _polynomial_matrix((y - shift)/scale, powers)
        rank = np.linalg.matrix_rank(pmat)
        if rank < nmonos:
            msg = (
                "Singular matrix. The matrix of monomials evaluated at "
                "the data point coordinates does not have full column "
                f"rank ({rank}/{nmonos})."
                )

    raise LinAlgError(msg)
+
+
class RBFInterpolator:
    """Radial basis function (RBF) interpolation in N dimensions.

    Parameters
    ----------
    y : (npoints, ndims) array_like
        2-D array of data point coordinates.
    d : (npoints, ...) array_like
        N-D array of data values at `y`. The length of `d` along the first
        axis must be equal to the length of `y`. Unlike some interpolators, the
        interpolation axis cannot be changed.
    neighbors : int, optional
        If specified, the value of the interpolant at each evaluation point
        will be computed using only this many nearest data points. All the data
        points are used by default.
    smoothing : float or (npoints, ) array_like, optional
        Smoothing parameter. The interpolant perfectly fits the data when this
        is set to 0. For large values, the interpolant approaches a least
        squares fit of a polynomial with the specified degree. Default is 0.
    kernel : str, optional
        Type of RBF. This should be one of

            - 'linear'               : ``-r``
            - 'thin_plate_spline'    : ``r**2 * log(r)``
            - 'cubic'                : ``r**3``
            - 'quintic'              : ``-r**5``
            - 'multiquadric'         : ``-sqrt(1 + r**2)``
            - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
            - 'inverse_quadratic'    : ``1/(1 + r**2)``
            - 'gaussian'             : ``exp(-r**2)``

        Default is 'thin_plate_spline'.
    epsilon : float, optional
        Shape parameter that scales the input to the RBF. If `kernel` is
        'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
        1 and can be ignored because it has the same effect as scaling the
        smoothing parameter. Otherwise, this must be specified.
    degree : int, optional
        Degree of the added polynomial. For some RBFs the interpolant may not
        be well-posed if the polynomial degree is too small. Those RBFs and
        their corresponding minimum degrees are

            - 'multiquadric'      : 0
            - 'linear'            : 0
            - 'thin_plate_spline' : 1
            - 'cubic'             : 1
            - 'quintic'           : 2

        The default value is the minimum degree for `kernel` or 0 if there is
        no minimum degree. Set this to -1 for no added polynomial.

    Notes
    -----
    An RBF is a scalar valued function in N-dimensional space whose value at
    :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
    is the center of the RBF.

    An RBF interpolant for the vector of data values :math:`d`, which are from
    locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
    plus a polynomial with a specified degree. The RBF interpolant is written
    as

    .. math::
        f(x) = K(x, y) a + P(x) b,

    where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
    evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
    monomials, which span polynomials with the specified degree, evaluated at
    :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
    linear equations

    .. math::
        (K(y, y) + \\lambda I) a + P(y) b = d

    and

    .. math::
        P(y)^T a = 0,

    where :math:`\\lambda` is a non-negative smoothing parameter that controls
    how well we want to fit the data. The data are fit exactly when the
    smoothing parameter is 0.

    The above system is uniquely solvable if the following requirements are
    met:

        - :math:`P(y)` must have full column rank. :math:`P(y)` always has full
          column rank when `degree` is -1 or 0. When `degree` is 1,
          :math:`P(y)` has full column rank if the data point locations are not
          all collinear (N=2), coplanar (N=3), etc.
        - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
          'cubic', or 'quintic', then `degree` must not be lower than the
          minimum value listed above.
        - If `smoothing` is 0, then each data point location must be distinct.

    When using an RBF that is not scale invariant ('multiquadric',
    'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
    shape parameter must be chosen (e.g., through cross validation). Smaller
    values for the shape parameter correspond to wider RBFs. The problem can
    become ill-conditioned or singular when the shape parameter is too small.

    The memory required to solve for the RBF interpolation coefficients
    increases quadratically with the number of data points, which can become
    impractical when interpolating more than about a thousand data points.
    To overcome memory limitations for large interpolation problems, the
    `neighbors` argument can be specified to compute an RBF interpolant for
    each evaluation point using only the nearest data points.

    .. versionadded:: 1.7.0

    See Also
    --------
    NearestNDInterpolator
    LinearNDInterpolator
    CloughTocher2DInterpolator

    References
    ----------
    .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
        World Scientific Publishing Co.

    .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf

    .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.

    .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf

    Examples
    --------
    Demonstrate interpolating scattered data to a grid in 2-D.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import RBFInterpolator
    >>> from scipy.stats.qmc import Halton

    >>> rng = np.random.default_rng()
    >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
    >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))

    >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
    >>> xflat = xgrid.reshape(2, -1).T
    >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
    >>> ygrid = yflat.reshape(50, 50)

    >>> fig, ax = plt.subplots()
    >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
    >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
    >>> fig.colorbar(p)
    >>> plt.show()

    """

    def __init__(self, y, d,
                 neighbors=None,
                 smoothing=0.0,
                 kernel="thin_plate_spline",
                 epsilon=None,
                 degree=None):
        y = np.asarray(y, dtype=float, order="C")
        if y.ndim != 2:
            raise ValueError("`y` must be a 2-dimensional array.")

        ny, ndim = y.shape

        d_dtype = complex if np.iscomplexobj(d) else float
        d = np.asarray(d, dtype=d_dtype, order="C")
        if d.shape[0] != ny:
            raise ValueError(
                f"Expected the first axis of `d` to have length {ny}."
                )

        d_shape = d.shape[1:]
        d = d.reshape((ny, -1))
        # If `d` is complex, convert it to a float array with twice as many
        # columns. Otherwise, the LHS matrix would need to be converted to
        # complex and take up 2x more memory than necessary.
        d = d.view(float)

        if np.isscalar(smoothing):
            smoothing = np.full(ny, smoothing, dtype=float)
        else:
            smoothing = np.asarray(smoothing, dtype=float, order="C")
            if smoothing.shape != (ny,):
                raise ValueError(
                    "Expected `smoothing` to be a scalar or have shape "
                    f"({ny},)."
                    )

        kernel = kernel.lower()
        if kernel not in _AVAILABLE:
            raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")

        if epsilon is None:
            if kernel in _SCALE_INVARIANT:
                epsilon = 1.0
            else:
                raise ValueError(
                    "`epsilon` must be specified if `kernel` is not one of "
                    f"{_SCALE_INVARIANT}."
                    )
        else:
            epsilon = float(epsilon)

        min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
        if degree is None:
            degree = max(min_degree, 0)
        else:
            degree = int(degree)
            if degree < -1:
                raise ValueError("`degree` must be at least -1.")
            elif -1 < degree < min_degree:
                # NOTE: fixed a missing space between the two sentences of
                # this warning message.
                warnings.warn(
                    f"`degree` should not be below {min_degree} except -1 "
                    f"when `kernel` is '{kernel}'. The interpolant may not "
                    "be uniquely solvable, and the smoothing parameter may "
                    "have an unintuitive effect.",
                    UserWarning, stacklevel=2
                    )

        if neighbors is None:
            nobs = ny
        else:
            # Make sure the number of nearest neighbors used for interpolation
            # does not exceed the number of observations.
            neighbors = int(min(neighbors, ny))
            nobs = neighbors

        powers = _monomial_powers(ndim, degree)
        # The polynomial matrix must have full column rank in order for the
        # interpolant to be well-posed, which is not possible if there are
        # fewer observations than monomials.
        if powers.shape[0] > nobs:
            raise ValueError(
                f"At least {powers.shape[0]} data points are required when "
                f"`degree` is {degree} and the number of dimensions is {ndim}."
                )

        if neighbors is None:
            shift, scale, coeffs = _build_and_solve_system(
                y, d, smoothing, kernel, epsilon, powers
                )

            # Make these attributes private since they do not always exist.
            self._shift = shift
            self._scale = scale
            self._coeffs = coeffs

        else:
            # With `neighbors`, the system is built lazily per evaluation
            # neighborhood; only the KD-tree is constructed up front.
            self._tree = KDTree(y)

        self.y = y
        self.d = d
        self.d_shape = d_shape
        self.d_dtype = d_dtype
        self.neighbors = neighbors
        self.smoothing = smoothing
        self.kernel = kernel
        self.epsilon = epsilon
        self.powers = powers

    def _chunk_evaluator(
            self,
            x,
            y,
            shift,
            scale,
            coeffs,
            memory_budget=1000000
    ):
        """
        Evaluate the interpolation while controlling memory consumption.
        We chunk the input if we need more memory than specified.

        Parameters
        ----------
        x : (Q, N) float ndarray
            array of points on which to evaluate
        y: (P, N) float ndarray
            array of points on which we know function values
        shift: (N, ) ndarray
            Domain shift used to create the polynomial matrix.
        scale : (N,) float ndarray
            Domain scaling used to create the polynomial matrix.
        coeffs: (P+R, S) float ndarray
            Coefficients in front of basis functions
        memory_budget: int
            Total amount of memory (in units of sizeof(float)) we wish
            to devote for storing the array of coefficients for
            interpolated points. If we need more memory than that, we
            chunk the input.

        Returns
        -------
        (Q, S) float ndarray
        Interpolated array
        """
        nx, ndim = x.shape
        if self.neighbors is None:
            nnei = len(y)
        else:
            nnei = self.neighbors
        # in each chunk we consume the same space we already occupy
        chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
        if chunksize <= nx:
            out = np.empty((nx, self.d.shape[1]), dtype=float)
            for i in range(0, nx, chunksize):
                vec = _build_evaluation_coefficients(
                    x[i:i + chunksize, :],
                    y,
                    self.kernel,
                    self.epsilon,
                    self.powers,
                    shift,
                    scale)
                out[i:i + chunksize, :] = np.dot(vec, coeffs)
        else:
            vec = _build_evaluation_coefficients(
                x,
                y,
                self.kernel,
                self.epsilon,
                self.powers,
                shift,
                scale)
            out = np.dot(vec, coeffs)
        return out

    def __call__(self, x):
        """Evaluate the interpolant at `x`.

        Parameters
        ----------
        x : (Q, N) array_like
            Evaluation point coordinates.

        Returns
        -------
        (Q, ...) ndarray
            Values of the interpolant at `x`.

        """
        x = np.asarray(x, dtype=float, order="C")
        if x.ndim != 2:
            raise ValueError("`x` must be a 2-dimensional array.")

        nx, ndim = x.shape
        if ndim != self.y.shape[1]:
            raise ValueError("Expected the second axis of `x` to have length "
                             f"{self.y.shape[1]}.")

        # Our memory budget for storing RBF coefficients is
        # based on how many floats in memory we already occupy
        # If this number is below 1e6 we just use 1e6
        # This memory budget is used to decide how we chunk
        # the inputs
        memory_budget = max(x.size + self.y.size + self.d.size, 1000000)

        if self.neighbors is None:
            out = self._chunk_evaluator(
                x,
                self.y,
                self._shift,
                self._scale,
                self._coeffs,
                memory_budget=memory_budget)
        else:
            # Get the indices of the k nearest observation points to each
            # evaluation point.
            _, yindices = self._tree.query(x, self.neighbors)
            if self.neighbors == 1:
                # `KDTree` squeezes the output when neighbors=1.
                yindices = yindices[:, None]

            # Multiple evaluation points may have the same neighborhood of
            # observation points. Make the neighborhoods unique so that we only
            # compute the interpolation coefficients once for each
            # neighborhood.
            yindices = np.sort(yindices, axis=1)
            yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
            inv = np.reshape(inv, (-1,))  # flatten, we need 1-D indices
            # `inv` tells us which neighborhood will be used by each evaluation
            # point. Now we find which evaluation points will be using each
            # neighborhood.
            xindices = [[] for _ in range(len(yindices))]
            for i, j in enumerate(inv):
                xindices[j].append(i)

            out = np.empty((nx, self.d.shape[1]), dtype=float)
            for xidx, yidx in zip(xindices, yindices):
                # `yidx` are the indices of the observations in this
                # neighborhood. `xidx` are the indices of the evaluation points
                # that are using this neighborhood.
                xnbr = x[xidx]
                ynbr = self.y[yidx]
                dnbr = self.d[yidx]
                snbr = self.smoothing[yidx]
                shift, scale, coeffs = _build_and_solve_system(
                    ynbr,
                    dnbr,
                    snbr,
                    self.kernel,
                    self.epsilon,
                    self.powers,
                    )
                out[xidx] = self._chunk_evaluator(
                    xnbr,
                    ynbr,
                    shift,
                    scale,
                    coeffs,
                    memory_budget=memory_budget)

        # Restore the caller's dtype (complex data was stored as paired float
        # columns) and trailing shape.
        out = out.view(self.d_dtype)
        out = out.reshape((nx, ) + self.d_shape)
        return out
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f263c29ee3af68162a16a451be60848110a31552
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi.py b/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb17bf9c8b57e8716be4fcfc6296c1ee21ebb985
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi.py
@@ -0,0 +1,766 @@
+__all__ = ['RegularGridInterpolator', 'interpn']
+
+import itertools
+import warnings
+
+import numpy as np
+
+import scipy.sparse.linalg as ssl
+
+from .interpnd import _ndim_coords_from_arrays
+from ._cubic import PchipInterpolator
+from ._rgi_cython import evaluate_linear_2d, find_indices
+from ._bsplines import make_interp_spline
+from ._fitpack2 import RectBivariateSpline
+from ._ndbspline import make_ndbspl
+
+
+def _check_points(points):
+ descending_dimensions = []
+ grid = []
+ for i, p in enumerate(points):
+ # early make points float
+ # see https://github.com/scipy/scipy/pull/17230
+ p = np.asarray(p, dtype=float)
+ if not np.all(p[1:] > p[:-1]):
+ if np.all(p[1:] < p[:-1]):
+ # input is descending, so make it ascending
+ descending_dimensions.append(i)
+ p = np.flip(p)
+ else:
+ raise ValueError(
+ "The points in dimension %d must be strictly "
+ "ascending or descending" % i)
+ # see https://github.com/scipy/scipy/issues/17716
+ p = np.ascontiguousarray(p)
+ grid.append(p)
+ return tuple(grid), tuple(descending_dimensions)
+
+
+def _check_dimensionality(points, values):
+ if len(points) > values.ndim:
+ raise ValueError("There are %d point arrays, but values has %d "
+ "dimensions" % (len(points), values.ndim))
+ for i, p in enumerate(points):
+ if not np.asarray(p).ndim == 1:
+ raise ValueError("The points in dimension %d must be "
+ "1-dimensional" % i)
+ if not values.shape[i] == len(p):
+ raise ValueError("There are %d points and %d values in "
+ "dimension %d" % (len(p), values.shape[i], i))
+
+
+class RegularGridInterpolator:
+ """
+ Interpolator on a regular or rectilinear grid in arbitrary dimensions.
+
+ The data must be defined on a rectilinear grid; that is, a rectangular
+ grid with even or uneven spacing. Linear, nearest-neighbor, spline
+ interpolations are supported. After setting up the interpolator object,
+ the interpolation method may be chosen at each evaluation.
+
+ Parameters
+ ----------
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+ The points defining the regular grid in n dimensions. The points in
+ each dimension (i.e. every elements of the points tuple) must be
+ strictly ascending or descending.
+
+ values : array_like, shape (m1, ..., mn, ...)
+ The data on the regular grid in n dimensions. Complex data is
+ accepted.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated with ``method="pchip"`` and will raise an
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
+ works with real values. If you are trying to use the real components of
+ the passed array, use ``np.real`` on ``values``.
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic" and "pchip". This
+ parameter will become the default for the object's ``__call__``
+ method. Default is "linear".
+
+ bounds_error : bool, optional
+ If True, when interpolated values are requested outside of the
+ domain of the input data, a ValueError is raised.
+ If False, then `fill_value` is used.
+ Default is True.
+
+ fill_value : float or None, optional
+ The value to use for points outside of the interpolation domain.
+ If None, values outside the domain are extrapolated.
+ Default is ``np.nan``.
+
+ solver : callable, optional
+ Only used for methods "slinear", "cubic" and "quintic".
+ Sparse linear algebra solver for construction of the NdBSpline instance.
+ Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
+
+ .. versionadded:: 1.13
+
+ solver_args: dict, optional
+ Additional arguments to pass to `solver`, if any.
+
+ .. versionadded:: 1.13
+
+ Methods
+ -------
+ __call__
+
+ Attributes
+ ----------
+ grid : tuple of ndarrays
+ The points defining the regular grid in n dimensions.
+ This tuple defines the full grid via
+ ``np.meshgrid(*grid, indexing='ij')``
+ values : ndarray
+ Data values at the grid.
+ method : str
+ Interpolation method.
+ fill_value : float or ``None``
+ Use this value for out-of-bounds arguments to `__call__`.
+ bounds_error : bool
+ If ``True``, out-of-bounds argument raise a ``ValueError``.
+
+ Notes
+ -----
+ Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
+ avoids expensive triangulation of the input data by taking advantage of the
+ regular grid structure.
+
+ In other words, this class assumes that the data is defined on a
+ *rectilinear* grid.
+
+ .. versionadded:: 0.14
+
+ The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
+ tensor-product spline interpolators, where `k` is the spline degree,
+ If any dimension has fewer points than `k` + 1, an error will be raised.
+
+ .. versionadded:: 1.9
+
+ If the input data is such that dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolating.
+
+ **Choosing a solver for spline methods**
+
+ Spline methods, "slinear", "cubic" and "quintic" involve solving a
+ large sparse linear system at instantiation time. Depending on data,
+ the default solver may or may not be adequate. When it is not, you may
+ need to experiment with an optional `solver` argument, where you may
+ choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
+ iterative solvers from `scipy.sparse.linalg`. You may need to supply
+ additional parameters via the optional `solver_args` parameter (for instance,
+ you may supply the starting value or target tolerance). See the
+ `scipy.sparse.linalg` documentation for the full list of available options.
+
+ Alternatively, you may instead use the legacy methods, "slinear_legacy",
+ "cubic_legacy" and "quintic_legacy". These methods allow faster construction
+ but evaluations will be much slower.
+
+ Examples
+ --------
+ **Evaluate a function on the points of a 3-D grid**
+
+ As a first example, we evaluate a simple example function on the points of
+ a 3-D grid:
+
+ >>> from scipy.interpolate import RegularGridInterpolator
+ >>> import numpy as np
+ >>> def f(x, y, z):
+ ... return 2 * x**3 + 3 * y**2 - z
+ >>> x = np.linspace(1, 4, 11)
+ >>> y = np.linspace(4, 7, 22)
+ >>> z = np.linspace(7, 9, 33)
+ >>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
+ >>> data = f(xg, yg, zg)
+
+ ``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
+ Next, define an interpolating function from this data:
+
+ >>> interp = RegularGridInterpolator((x, y, z), data)
+
+ Evaluate the interpolating function at the two points
+ ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
+
+ >>> pts = np.array([[2.1, 6.2, 8.3],
+ ... [3.3, 5.2, 7.1]])
+ >>> interp(pts)
+ array([ 125.80469388, 146.30069388])
+
+ which is indeed a close approximation to
+
+ >>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
+ (125.54200000000002, 145.894)
+
+ **Interpolate and extrapolate a 2D dataset**
+
+ As a second example, we interpolate and extrapolate a 2D data set:
+
+ >>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
+ >>> def ff(x, y):
+ ... return x**2 + y**2
+
+ >>> xg, yg = np.meshgrid(x, y, indexing='ij')
+ >>> data = ff(xg, yg)
+ >>> interp = RegularGridInterpolator((x, y), data,
+ ... bounds_error=False, fill_value=None)
+
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> ax = fig.add_subplot(projection='3d')
+ >>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
+ ... s=60, c='k', label='data')
+
+ Evaluate and plot the interpolator on a finer grid
+
+ >>> xx = np.linspace(-4, 9, 31)
+ >>> yy = np.linspace(-4, 9, 31)
+ >>> X, Y = np.meshgrid(xx, yy, indexing='ij')
+
+ >>> # interpolator
+ >>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
+ ... alpha=0.4, color='m', label='linear interp')
+
+ >>> # ground truth
+ >>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
+ ... alpha=0.4, label='ground truth')
+ >>> plt.legend()
+ >>> plt.show()
+
+ Other examples are given
+ :ref:`in the tutorial `.
+
+ See Also
+ --------
+ NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
+ data in N dimensions
+
+ LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
+ in N dimensions
+
+ interpn : a convenience function which wraps `RegularGridInterpolator`
+
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+ (suitable for e.g., N-D image resampling)
+
+ References
+ ----------
+ .. [1] Python package *regulargrid* by Johannes Buchner, see
+ https://pypi.python.org/pypi/regulargrid/
+ .. [2] Wikipedia, "Trilinear interpolation",
+ https://en.wikipedia.org/wiki/Trilinear_interpolation
+ .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
+ and multilinear table interpolation in many dimensions." MATH.
+ COMPUT. 50.181 (1988): 189-196.
+ https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
+ :doi:`10.1090/S0025-5718-1988-0917826-0`
+
+ """
+ # this class is based on code originally programmed by Johannes Buchner,
+ # see https://github.com/JohannesBuchner/regulargrid
+
+ _SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3,
+ "slinear_legacy": 1, "cubic_legacy": 3, "quintic_legacy": 5,}
+ _SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
+ "quintic_legacy", "pchip"}
+ _SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
+ _SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
+ _ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
+
+ def __init__(self, points, values, method="linear", bounds_error=True,
+ fill_value=np.nan, *, solver=None, solver_args=None):
+ if method not in self._ALL_METHODS:
+ raise ValueError("Method '%s' is not defined" % method)
+ elif method in self._SPLINE_METHODS:
+ self._validate_grid_dimensions(points, method)
+ self.method = method
+ self.bounds_error = bounds_error
+ self.grid, self._descending_dimensions = _check_points(points)
+ self.values = self._check_values(values)
+ self._check_dimensionality(self.grid, self.values)
+ self.fill_value = self._check_fill_value(self.values, fill_value)
+ if self._descending_dimensions:
+ self.values = np.flip(values, axis=self._descending_dimensions)
+ if self.method == "pchip" and np.iscomplexobj(self.values):
+ msg = ("`PchipInterpolator` only works with real values. Passing "
+ "complex-dtyped `values` with `method='pchip'` is deprecated "
+ "and will raise an error in SciPy 1.15.0. If you are trying to "
+ "use the real components of the passed array, use `np.real` on "
+ "the array before passing to `RegularGridInterpolator`.")
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ if method in self._SPLINE_METHODS_ndbspl:
+ if solver_args is None:
+ solver_args = {}
+ self._spline = self._construct_spline(method, solver, **solver_args)
+ else:
+ if solver is not None or solver_args:
+ raise ValueError(
+ f"{method =} does not accept the 'solver' argument. Got "
+ f" {solver = } and with arguments {solver_args}."
+ )
+
+ def _construct_spline(self, method, solver=None, **solver_args):
+ if solver is None:
+ solver = ssl.gcrotmk
+ spl = make_ndbspl(
+ self.grid, self.values, self._SPLINE_DEGREE_MAP[method],
+ solver=solver, **solver_args
+ )
+ return spl
+
+ def _check_dimensionality(self, grid, values):
+ _check_dimensionality(grid, values)
+
+ def _check_points(self, points):
+ return _check_points(points)
+
+ def _check_values(self, values):
+ if not hasattr(values, 'ndim'):
+ # allow reasonable duck-typed values
+ values = np.asarray(values)
+
+ if hasattr(values, 'dtype') and hasattr(values, 'astype'):
+ if not np.issubdtype(values.dtype, np.inexact):
+ values = values.astype(float)
+
+ return values
+
+ def _check_fill_value(self, values, fill_value):
+ if fill_value is not None:
+ fill_value_dtype = np.asarray(fill_value).dtype
+ if (hasattr(values, 'dtype') and not
+ np.can_cast(fill_value_dtype, values.dtype,
+ casting='same_kind')):
+ raise ValueError("fill_value must be either 'None' or "
+ "of a type compatible with values")
+ return fill_value
+
+ def __call__(self, xi, method=None, *, nu=None):
+ """
+ Interpolation at coordinates.
+
+ Parameters
+ ----------
+ xi : ndarray of shape (..., ndim)
+ The coordinates to evaluate the interpolator at.
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic" and "pchip". Default is
+ the method chosen when the interpolator was created.
+
+ nu : sequence of ints, length ndim, optional
+ If not None, the orders of the derivatives to evaluate.
+ Each entry must be non-negative.
+ Only allowed for methods "slinear", "cubic" and "quintic".
+
+ .. versionadded:: 1.13
+
+ Returns
+ -------
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+ Interpolated values at `xi`. See notes for behaviour when
+ ``xi.ndim == 1``.
+
+ Notes
+ -----
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
+ the 0 position of the returned array, values_x, so its shape is
+ instead ``(1,) + values.shape[ndim:]``.
+
+ Examples
+ --------
+ Here we define a nearest-neighbor interpolator of a simple function
+
+ >>> import numpy as np
+ >>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
+ >>> def f(x, y):
+ ... return x**2 + y**2
+ >>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
+ >>> from scipy.interpolate import RegularGridInterpolator
+ >>> interp = RegularGridInterpolator((x, y), data, method='nearest')
+
+ By construction, the interpolator uses the nearest-neighbor
+ interpolation
+
+ >>> interp([[1.5, 1.3], [0.3, 4.5]])
+ array([2., 9.])
+
+ We can however evaluate the linear interpolant by overriding the
+ `method` parameter
+
+ >>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
+ array([ 4.7, 24.3])
+ """
+ method = self.method if method is None else method
+ is_method_changed = self.method != method
+ if method not in self._ALL_METHODS:
+ raise ValueError("Method '%s' is not defined" % method)
+ if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
+ self._spline = self._construct_spline(method)
+
+ if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
+ raise ValueError(
+ f"Can only compute derivatives for methods "
+ f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
+ )
+
+ xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
+
+ if method == "linear":
+ indices, norm_distances = self._find_indices(xi.T)
+ if (ndim == 2 and hasattr(self.values, 'dtype') and
+ self.values.ndim == 2 and self.values.flags.writeable and
+ self.values.dtype in (np.float64, np.complex128) and
+ self.values.dtype.byteorder == '='):
+ # until cython supports const fused types, the fast path
+ # cannot support non-writeable values
+ # a fast path
+ out = np.empty(indices.shape[1], dtype=self.values.dtype)
+ result = evaluate_linear_2d(self.values,
+ indices,
+ norm_distances,
+ self.grid,
+ out)
+ else:
+ result = self._evaluate_linear(indices, norm_distances)
+ elif method == "nearest":
+ indices, norm_distances = self._find_indices(xi.T)
+ result = self._evaluate_nearest(indices, norm_distances)
+ elif method in self._SPLINE_METHODS:
+ if is_method_changed:
+ self._validate_grid_dimensions(self.grid, method)
+ if method in self._SPLINE_METHODS_recursive:
+ result = self._evaluate_spline(xi, method)
+ else:
+ result = self._spline(xi, nu=nu)
+
+ if not self.bounds_error and self.fill_value is not None:
+ result[out_of_bounds] = self.fill_value
+
+ # f(nan) = nan, if any
+ if np.any(nans):
+ result[nans] = np.nan
+ return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
+
+ def _prepare_xi(self, xi):
+ ndim = len(self.grid)
+ xi = _ndim_coords_from_arrays(xi, ndim=ndim)
+ if xi.shape[-1] != len(self.grid):
+ raise ValueError("The requested sample points xi have dimension "
+ f"{xi.shape[-1]} but this "
+ f"RegularGridInterpolator has dimension {ndim}")
+
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi_shape[-1])
+ xi = np.asarray(xi, dtype=float)
+
+ # find nans in input
+ nans = np.any(np.isnan(xi), axis=-1)
+
+ if self.bounds_error:
+ for i, p in enumerate(xi.T):
+ if not np.logical_and(np.all(self.grid[i][0] <= p),
+ np.all(p <= self.grid[i][-1])):
+ raise ValueError("One of the requested xi is out of bounds "
+ "in dimension %d" % i)
+ out_of_bounds = None
+ else:
+ out_of_bounds = self._find_out_of_bounds(xi.T)
+
+ return xi, xi_shape, ndim, nans, out_of_bounds
+
+ def _evaluate_linear(self, indices, norm_distances):
+ # slice for broadcasting over trailing dimensions in self.values
+ vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
+
+ # Compute shifting up front before zipping everything together
+ shift_norm_distances = [1 - yi for yi in norm_distances]
+ shift_indices = [i + 1 for i in indices]
+
+ # The formula for linear interpolation in 2d takes the form:
+ # values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
+ # self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
+ # self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
+ # self.values[(i0 + 1, i1 + 1)] * y0 * y1
+ # We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
+ zipped1 = zip(indices, shift_norm_distances)
+ zipped2 = zip(shift_indices, norm_distances)
+
+ # Take all products of zipped1 and zipped2 and iterate over them
+ # to get the terms in the above formula. This corresponds to iterating
+ # over the vertices of a hypercube.
+ hypercube = itertools.product(*zip(zipped1, zipped2))
+ value = np.array([0.])
+ for h in hypercube:
+ edge_indices, weights = zip(*h)
+ weight = np.array([1.])
+ for w in weights:
+ weight = weight * w
+ term = np.asarray(self.values[edge_indices]) * weight[vslice]
+ value = value + term # cannot use += because broadcasting
+ return value
+
+ def _evaluate_nearest(self, indices, norm_distances):
+ idx_res = [np.where(yi <= .5, i, i + 1)
+ for i, yi in zip(indices, norm_distances)]
+ return self.values[tuple(idx_res)]
+
+ def _validate_grid_dimensions(self, points, method):
+ k = self._SPLINE_DEGREE_MAP[method]
+ for i, point in enumerate(points):
+ ndim = len(np.atleast_1d(point))
+ if ndim <= k:
+ raise ValueError(f"There are {ndim} points in dimension {i},"
+ f" but method {method} requires at least "
+ f" {k+1} points per dimension.")
+
+ def _evaluate_spline(self, xi, method):
+ # ensure xi is 2D list of points to evaluate (`m` is the number of
+ # points and `n` is the number of interpolation dimensions,
+ # ``n == len(self.grid)``.)
+ if xi.ndim == 1:
+ xi = xi.reshape((1, xi.size))
+ m, n = xi.shape
+
+ # Reorder the axes: n-dimensional process iterates over the
+ # interpolation axes from the last axis downwards: E.g. for a 4D grid
+ # the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
+ # the 0th axis of its argument array (for 1D routine it's its ``y``
+ # array). Thus permute the interpolation axes of `values` *and keep
+ # trailing dimensions trailing*.
+ axes = tuple(range(self.values.ndim))
+ axx = axes[:n][::-1] + axes[n:]
+ values = self.values.transpose(axx)
+
+ if method == 'pchip':
+ _eval_func = self._do_pchip
+ else:
+ _eval_func = self._do_spline_fit
+ k = self._SPLINE_DEGREE_MAP[method]
+
+ # Non-stationary procedure: difficult to vectorize this part entirely
+ # into numpy-level operations. Unfortunately this requires explicit
+ # looping over each point in xi.
+
+ # can at least vectorize the first pass across all points in the
+ # last variable of xi.
+ last_dim = n - 1
+ first_values = _eval_func(self.grid[last_dim],
+ values,
+ xi[:, last_dim],
+ k)
+
+ # the rest of the dimensions have to be on a per point-in-xi basis
+ shape = (m, *self.values.shape[n:])
+ result = np.empty(shape, dtype=self.values.dtype)
+ for j in range(m):
+ # Main process: Apply 1D interpolate in each dimension
+ # sequentially, starting with the last dimension.
+ # These are then "folded" into the next dimension in-place.
+ folded_values = first_values[j, ...]
+ for i in range(last_dim-1, -1, -1):
+ # Interpolate for each 1D from the last dimensions.
+ # This collapses each 1D sequence into a scalar.
+ folded_values = _eval_func(self.grid[i],
+ folded_values,
+ xi[j, i],
+ k)
+ result[j, ...] = folded_values
+
+ return result
+
+ @staticmethod
+ def _do_spline_fit(x, y, pt, k):
+ local_interp = make_interp_spline(x, y, k=k, axis=0)
+ values = local_interp(pt)
+ return values
+
+ @staticmethod
+ def _do_pchip(x, y, pt, k):
+ local_interp = PchipInterpolator(x, y, axis=0)
+ values = local_interp(pt)
+ return values
+
+ def _find_indices(self, xi):
+ return find_indices(self.grid, xi)
+
+ def _find_out_of_bounds(self, xi):
+ # check for out of bounds xi
+ out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
+ # iterate through dimensions
+ for x, grid in zip(xi, self.grid):
+ out_of_bounds += x < grid[0]
+ out_of_bounds += x > grid[-1]
+ return out_of_bounds
+
+
+def interpn(points, values, xi, method="linear", bounds_error=True,
+ fill_value=np.nan):
+ """
+ Multidimensional interpolation on regular or rectilinear grids.
+
+ Strictly speaking, not all regular grids are supported - this function
+ works on *rectilinear* grids, that is, a rectangular grid with even or
+ uneven spacing.
+
+ Parameters
+ ----------
+ points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+ The points defining the regular grid in n dimensions. The points in
+ each dimension (i.e. every elements of the points tuple) must be
+ strictly ascending or descending.
+
+ values : array_like, shape (m1, ..., mn, ...)
+ The data on the regular grid in n dimensions. Complex data is
+ accepted.
+
+ .. deprecated:: 1.13.0
+ Complex data is deprecated with ``method="pchip"`` and will raise an
+ error in SciPy 1.15.0. This is because ``PchipInterpolator`` only
+ works with real values. If you are trying to use the real components of
+ the passed array, use ``np.real`` on ``values``.
+
+ xi : ndarray of shape (..., ndim)
+ The coordinates to sample the gridded data at
+
+ method : str, optional
+ The method of interpolation to perform. Supported are "linear",
+ "nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
+ "splinef2d" is only supported for 2-dimensional data.
+
+ bounds_error : bool, optional
+ If True, when interpolated values are requested outside of the
+ domain of the input data, a ValueError is raised.
+ If False, then `fill_value` is used.
+
+ fill_value : number, optional
+ If provided, the value to use for points outside of the
+ interpolation domain. If None, values outside
+ the domain are extrapolated. Extrapolation is not supported by method
+ "splinef2d".
+
+ Returns
+ -------
+ values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+ Interpolated values at `xi`. See notes for behaviour when
+ ``xi.ndim == 1``.
+
+ See Also
+ --------
+ NearestNDInterpolator : Nearest neighbor interpolation on unstructured
+ data in N dimensions
+ LinearNDInterpolator : Piecewise linear interpolant on unstructured data
+ in N dimensions
+ RegularGridInterpolator : interpolation on a regular or rectilinear grid
+ in arbitrary dimensions (`interpn` wraps this
+ class).
+ RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
+ scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+ (suitable for e.g., N-D image resampling)
+
+ Notes
+ -----
+
+ .. versionadded:: 0.14
+
+ In the case that ``xi.ndim == 1`` a new axis is inserted into
+ the 0 position of the returned array, values_x, so its shape is
+ instead ``(1,) + values.shape[ndim:]``.
+
+ If the input data is such that input dimensions have incommensurate
+ units and differ by many orders of magnitude, the interpolant may have
+ numerical artifacts. Consider rescaling the data before interpolation.
+
+ Examples
+ --------
+ Evaluate a simple example function on the points of a regular 3-D grid:
+
+ >>> import numpy as np
+ >>> from scipy.interpolate import interpn
+ >>> def value_func_3d(x, y, z):
+ ... return 2 * x + 3 * y - z
+ >>> x = np.linspace(0, 4, 5)
+ >>> y = np.linspace(0, 5, 6)
+ >>> z = np.linspace(0, 6, 7)
+ >>> points = (x, y, z)
+ >>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
+
+ Evaluate the interpolating function at a point
+
+ >>> point = np.array([2.21, 3.12, 1.15])
+ >>> print(interpn(points, values, point))
+ [12.63]
+
+ """
+ # sanity check 'method' kwarg
+ if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
+ "splinef2d", "slinear",
+ "slinear_legacy", "cubic_legacy", "quintic_legacy"]:
+ raise ValueError("interpn only understands the methods 'linear', "
+ "'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
+ f"and 'splinef2d'. You provided {method}.")
+
+ if not hasattr(values, 'ndim'):
+ values = np.asarray(values)
+
+ ndim = values.ndim
+ if ndim > 2 and method == "splinef2d":
+ raise ValueError("The method splinef2d can only be used for "
+ "2-dimensional input data")
+ if not bounds_error and fill_value is None and method == "splinef2d":
+ raise ValueError("The method splinef2d does not support extrapolation.")
+
+ # sanity check consistency of input dimensions
+ if len(points) > ndim:
+ raise ValueError("There are %d point arrays, but values has %d "
+ "dimensions" % (len(points), ndim))
+ if len(points) != ndim and method == 'splinef2d':
+ raise ValueError("The method splinef2d can only be used for "
+ "scalar data with one point per coordinate")
+
+ grid, descending_dimensions = _check_points(points)
+ _check_dimensionality(grid, values)
+
+ # sanity check requested xi
+ xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
+ if xi.shape[-1] != len(grid):
+ raise ValueError("The requested sample points xi have dimension "
+ "%d, but this RegularGridInterpolator has "
+ "dimension %d" % (xi.shape[-1], len(grid)))
+
+ if bounds_error:
+ for i, p in enumerate(xi.T):
+ if not np.logical_and(np.all(grid[i][0] <= p),
+ np.all(p <= grid[i][-1])):
+ raise ValueError("One of the requested xi is out of bounds "
+ "in dimension %d" % i)
+
+ # perform interpolation
+ if method in RegularGridInterpolator._ALL_METHODS:
+ interp = RegularGridInterpolator(points, values, method=method,
+ bounds_error=bounds_error,
+ fill_value=fill_value)
+ return interp(xi)
+ elif method == "splinef2d":
+ xi_shape = xi.shape
+ xi = xi.reshape(-1, xi.shape[-1])
+
+ # RectBivariateSpline doesn't support fill_value; we need to wrap here
+ idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
+ grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
+ axis=0)
+ result = np.empty_like(xi[:, 0])
+
+ # make a copy of values for RectBivariateSpline
+ interp = RectBivariateSpline(points[0], points[1], values[:])
+ result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
+ result[np.logical_not(idx_valid)] = fill_value
+
+ return result.reshape(xi_shape[:-1])
+ else:
+ raise ValueError(f"unknown {method = }")
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0afeb4d77b43683326c00eeaf689e7f65f4a870d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..df9e100e29dad7228ffb5e5c1414d1797a9eb367
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/dfitpack.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack.py b/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..68a6a240961018cac8e59419245ee6791cba7a67
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'BSpline',
+ 'bisplev',
+ 'bisplrep',
+ 'dblint',
+ 'insert',
+ 'spalde',
+ 'splantider',
+ 'splder',
+ 'splev',
+ 'splint',
+ 'splprep',
+ 'splrep',
+ 'sproot',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="fitpack",
+ private_modules=["_fitpack_py"], all=__all__,
+ attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack2.py b/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack2.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4848053e4d9791144483a213fe6040ef214c1e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/fitpack2.py
@@ -0,0 +1,38 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'BivariateSpline',
+ 'InterpolatedUnivariateSpline',
+ 'LSQBivariateSpline',
+ 'LSQSphereBivariateSpline',
+ 'LSQUnivariateSpline',
+ 'RectBivariateSpline',
+ 'RectSphereBivariateSpline',
+ 'SmoothBivariateSpline',
+ 'SmoothSphereBivariateSpline',
+ 'SphereBivariateSpline',
+ 'UnivariateSpline',
+ 'array',
+ 'concatenate',
+ 'dfitpack',
+ 'dfitpack_int',
+ 'diff',
+ 'ones',
+ 'ravel',
+ 'zeros',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="fitpack2",
+ private_modules=["_fitpack2"], all=__all__,
+ attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/interpnd.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/interpolate/interpnd.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1b8adc9819fc31d9adc2b8c814382d562f50e7ab
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/interpnd.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/interpolate.py b/venv/lib/python3.10/site-packages/scipy/interpolate/interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c6ae3398fa036a923ab7ed0ddaf63c2d73fe559
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/interpolate.py
@@ -0,0 +1,44 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'BPoly',
+ 'BSpline',
+ 'NdPPoly',
+ 'PPoly',
+ 'RectBivariateSpline',
+ 'RegularGridInterpolator',
+ 'array',
+ 'asarray',
+ 'atleast_1d',
+ 'atleast_2d',
+ 'comb',
+ 'dfitpack',
+ 'interp1d',
+ 'interp2d',
+ 'interpn',
+ 'intp',
+ 'itertools',
+ 'lagrange',
+ 'make_interp_spline',
+ 'poly1d',
+ 'prod',
+ 'ravel',
+ 'searchsorted',
+ 'spec',
+ 'transpose',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="interpolate",
+ private_modules=["_interpolate", "fitpack2", "_rgi"],
+ all=__all__, attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py b/venv/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2b1694d244d00b6aea9784fc0ad384a793d57d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/ndgriddata.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'CloughTocher2DInterpolator',
+ 'LinearNDInterpolator',
+ 'NDInterpolatorBase',
+ 'NearestNDInterpolator',
+ 'cKDTree',
+ 'griddata',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="ndgriddata",
+ private_modules=["_ndgriddata"], all=__all__,
+ attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/polyint.py b/venv/lib/python3.10/site-packages/scipy/interpolate/polyint.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbdae2e90934e5b2c351a7add76d132413e47ee2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/polyint.py
@@ -0,0 +1,26 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'BarycentricInterpolator',
+ 'KroghInterpolator',
+ 'approximate_taylor_polynomial',
+ 'barycentric_interpolate',
+ 'factorial',
+ 'float_factorial',
+ 'krogh_interpolate',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="polyint",
+ private_modules=["_polyint"], all=__all__,
+ attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/rbf.py b/venv/lib/python3.10/site-packages/scipy/interpolate/rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ae1facd687108fcba124e43f71eda01f45a48e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/rbf.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'Rbf',
+ 'cdist',
+ 'linalg',
+ 'pdist',
+ 'squareform',
+ 'xlogy',
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="interpolate", module="rbf",
+ private_modules=["_rbf"], all=__all__,
+ attribute=name)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6653cd8f5dca4a1f1361a3f098e343a85bd3b5d3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9ba0575fb90896af1671c33968dc1cb364e1147
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf8ab97ea0774e995ccdac9dee59262fe56a992e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f28dc92b710735d1ad7b28ae27bb2c2ad86a9d6e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a75f4d30743aee33676ee11b7ff9da0628fe340
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_bsplines.py
@@ -0,0 +1,2621 @@
+import os
+import operator
+import itertools
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (
+ BSpline, BPoly, PPoly, make_interp_spline, make_lsq_spline, _bspl,
+ splev, splrep, splprep, splder, splantider, sproot, splint, insert,
+ CubicSpline, NdBSpline, make_smoothing_spline, RegularGridInterpolator,
+)
+import scipy.linalg as sl
+import scipy.sparse.linalg as ssl
+
+from scipy.interpolate._bsplines import (_not_a_knot, _augknt,
+ _woodbury_algorithm, _periodic_knots,
+ _make_interp_per_full_matr)
+import scipy.interpolate._fitpack_impl as _impl
+from scipy._lib._util import AxisError
+
+# XXX: move to the interpolate namespace
+from scipy.interpolate._ndbspline import make_ndbspl
+
+from scipy.interpolate import dfitpack
+from scipy.interpolate import _bsplines as _b
+
+
+class TestBSpline:
+
+ def test_ctor(self):
+ # knots should be an ordered 1-D array of finite real numbers
+ assert_raises((TypeError, ValueError), BSpline,
+ **dict(t=[1, 1.j], c=[1.], k=0))
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
+ assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
+
+ # for n+k+1 knots and degree k need at least n coefficients
+ assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
+ assert_raises(ValueError, BSpline,
+ **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
+
+ # non-integer orders
+ assert_raises(TypeError, BSpline,
+ **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
+ assert_raises(TypeError, BSpline,
+ **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
+
+ # basic interval cannot have measure zero (here: [1..1])
+ assert_raises(ValueError, BSpline,
+ **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
+
+ # tck vs self.tck
+ n, k = 11, 3
+ t = np.arange(n+k+1)
+ c = np.random.random(n)
+ b = BSpline(t, c, k)
+
+ assert_allclose(t, b.t)
+ assert_allclose(c, b.c)
+ assert_equal(k, b.k)
+
+ def test_tck(self):
+ b = _make_random_spline()
+ tck = b.tck
+
+ assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
+ assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
+ assert_equal(b.k, tck[2])
+
+ # b.tck is read-only
+ with pytest.raises(AttributeError):
+ b.tck = 'foo'
+
+ def test_degree_0(self):
+ xx = np.linspace(0, 1, 10)
+
+ b = BSpline(t=[0, 1], c=[3.], k=0)
+ assert_allclose(b(xx), 3)
+
+ b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
+ assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
+
+ def test_degree_1(self):
+ t = [0, 1, 2, 3, 4]
+ c = [1, 2, 3]
+ k = 1
+ b = BSpline(t, c, k)
+
+ x = np.linspace(1, 3, 50)
+ assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
+ b(x), atol=1e-14)
+ assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
+
+ def test_bernstein(self):
+ # a special knot vector: Bernstein polynomials
+ k = 3
+ t = np.asarray([0]*(k+1) + [1]*(k+1))
+ c = np.asarray([1., 2., 3., 4.])
+ bp = BPoly(c.reshape(-1, 1), [0, 1])
+ bspl = BSpline(t, c, k)
+
+ xx = np.linspace(-1., 2., 10)
+ assert_allclose(bp(xx, extrapolate=True),
+ bspl(xx, extrapolate=True), atol=1e-14)
+ assert_allclose(splev(xx, (t, c, k)),
+ bspl(xx), atol=1e-14)
+
+ def test_rndm_naive_eval(self):
+ # test random coefficient spline *on the base interval*,
+ # t[k] <= x < t[-k-1]
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 50)
+ y_b = b(xx)
+
+ y_n = [_naive_eval(x, t, c, k) for x in xx]
+ assert_allclose(y_b, y_n, atol=1e-14)
+
+ y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
+ assert_allclose(y_b, y_n2, atol=1e-14)
+
+ def test_rndm_splev(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 50)
+ assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+
+ def test_rndm_splrep(self):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20)
+
+ tck = splrep(x, y)
+ b = BSpline(*tck)
+
+ t, k = b.t, b.k
+ xx = np.linspace(t[k], t[-k-1], 80)
+ assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
+
+ def test_rndm_unity(self):
+ b = _make_random_spline()
+ b.c = np.ones_like(b.c)
+ xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
+ assert_allclose(b(xx), 1.)
+
+ def test_vectorization(self):
+ n, k = 22, 3
+ t = np.sort(np.random.random(n))
+ c = np.random.random(size=(n, 6, 7))
+ b = BSpline(t, c, k)
+ tm, tp = t[k], t[-k-1]
+ xx = tm + (tp - tm) * np.random.random((3, 4, 5))
+ assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
+
+ def test_len_c(self):
+ # for n+k+1 knots, only first n coefs are used.
+ # and BTW this is consistent with FITPACK
+ n, k = 33, 3
+ t = np.sort(np.random.random(n+k+1))
+ c = np.random.random(n)
+
+ # pad coefficients with random garbage
+ c_pad = np.r_[c, np.random.random(k+1)]
+
+ b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
+
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
+ assert_allclose(b(xx), b_pad(xx), atol=1e-14)
+ assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+ assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
+
+ def test_endpoints(self):
+ # base interval is closed
+ b = _make_random_spline()
+ t, _, k = b.tck
+ tm, tp = t[k], t[-k-1]
+ for extrap in (True, False):
+ assert_allclose(b([tm, tp], extrap),
+ b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
+
+ def test_continuity(self):
+ # assert continuity at internal knots
+ b = _make_random_spline()
+ t, _, k = b.tck
+ assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
+ atol=1e-9)
+
+ def test_extrap(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
+ mask = (t[k] < xx) & (xx < t[-k-1])
+
+ # extrap has no effect within the base interval
+ assert_allclose(b(xx[mask], extrapolate=True),
+ b(xx[mask], extrapolate=False))
+
+ # extrapolated values agree with FITPACK
+ assert_allclose(b(xx, extrapolate=True),
+ splev(xx, (t, c, k), ext=0))
+
+ def test_default_extrap(self):
+ # BSpline defaults to extrapolate=True
+ b = _make_random_spline()
+ t, _, k = b.tck
+ xx = [t[0] - 1, t[-1] + 1]
+ yy = b(xx)
+ assert_(not np.all(np.isnan(yy)))
+
+ def test_periodic_extrap(self):
+ np.random.seed(1234)
+ t = np.sort(np.random.random(8))
+ c = np.random.random(4)
+ k = 3
+ b = BSpline(t, c, k, extrapolate='periodic')
+ n = t.size - (k + 1)
+
+ dt = t[-1] - t[0]
+ xx = np.linspace(t[k] - dt, t[n] + dt, 50)
+ xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+ assert_allclose(b(xx), splev(xy, (t, c, k)))
+
+ # Direct check
+ xx = [-1, 0, 0.5, 1]
+ xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+ assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))
+
+ def test_ppoly(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ pp = PPoly.from_spline((t, c, k))
+
+ xx = np.linspace(t[k], t[-k], 100)
+ assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
+
+ def test_derivative_rndm(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[0], t[-1], 50)
+ xx = np.r_[xx, t]
+
+ for der in range(1, k+1):
+ yd = splev(xx, (t, c, k), der=der)
+ assert_allclose(yd, b(xx, nu=der), atol=1e-14)
+
+ # higher derivatives all vanish
+ assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
+
+ def test_derivative_jumps(self):
+ # example from de Boor, Chap IX, example (24)
+ # NB: knots augmented & corresp coefs are zeroed out
+ # in agreement with the convention (29)
+ k = 2
+ t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
+ np.random.seed(1234)
+ c = np.r_[0, 0, np.random.random(5), 0, 0]
+ b = BSpline(t, c, k)
+
+ # b is continuous at x != 6 (triple knot)
+ x = np.asarray([1, 3, 4, 6])
+ assert_allclose(b(x[x != 6] - 1e-10),
+ b(x[x != 6] + 1e-10))
+ assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
+
+ # 1st derivative jumps at double knots, 1 & 6:
+ x0 = np.asarray([3, 4])
+ assert_allclose(b(x0 - 1e-10, nu=1),
+ b(x0 + 1e-10, nu=1))
+ x1 = np.asarray([1, 6])
+ assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
+ b(x1 + 1e-10, nu=1))))
+
+ # 2nd derivative is not guaranteed to be continuous either
+ assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
+ b(x + 1e-10, nu=2))))
+
+ def test_basis_element_quadratic(self):
+ xx = np.linspace(-1, 4, 20)
+ b = BSpline.basis_element(t=[0, 1, 2, 3])
+ assert_allclose(b(xx),
+ splev(xx, (b.t, b.c, b.k)), atol=1e-14)
+ assert_allclose(b(xx),
+ B_0123(xx), atol=1e-14)
+
+ b = BSpline.basis_element(t=[0, 1, 1, 2])
+ xx = np.linspace(0, 2, 10)
+ assert_allclose(b(xx),
+ np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
+
+ def test_basis_element_rndm(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
+
+ def test_cmplx(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ cc = c * (1. + 3.j)
+
+ b = BSpline(t, cc, k)
+ b_re = BSpline(t, b.c.real, k)
+ b_im = BSpline(t, b.c.imag, k)
+
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
+ assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
+
+ def test_nan(self):
+ # nan in, nan out.
+ b = BSpline.basis_element([0, 1, 1, 2])
+ assert_(np.isnan(b(np.nan)))
+
+ def test_derivative_method(self):
+ b = _make_random_spline(k=5)
+ t, c, k = b.tck
+ b0 = BSpline(t, c, k)
+ xx = np.linspace(t[k], t[-k-1], 20)
+ for j in range(1, k):
+ b = b.derivative()
+ assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
+
+ def test_antiderivative_method(self):
+ b = _make_random_spline()
+ t, c, k = b.tck
+ xx = np.linspace(t[k], t[-k-1], 20)
+ assert_allclose(b.antiderivative().derivative()(xx),
+ b(xx), atol=1e-14, rtol=1e-14)
+
+ # repeat with N-D array for c
+ c = np.c_[c, c, c]
+ c = np.dstack((c, c))
+ b = BSpline(t, c, k)
+ assert_allclose(b.antiderivative().derivative()(xx),
+ b(xx), atol=1e-14, rtol=1e-14)
+
+ def test_integral(self):
+ b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x
+ assert_allclose(b.integrate(0, 1), 0.5)
+ assert_allclose(b.integrate(1, 0), -1 * 0.5)
+ assert_allclose(b.integrate(1, 0), -0.5)
+
+ # extrapolate or zeros outside of [0, 2]; default is yes
+ assert_allclose(b.integrate(-1, 1), 0)
+ assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
+ assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
+ assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)
+
+ # Test ``_fitpack._splint()``
+ assert_allclose(b.integrate(1, -1, extrapolate=False),
+ _impl.splint(1, -1, b.tck))
+
+ # Test ``extrapolate='periodic'``.
+ b.extrapolate = 'periodic'
+ i = b.antiderivative()
+ period_int = i(2) - i(0)
+
+ assert_allclose(b.integrate(0, 2), period_int)
+ assert_allclose(b.integrate(2, 0), -1 * period_int)
+ assert_allclose(b.integrate(-9, -7), period_int)
+ assert_allclose(b.integrate(-8, -4), 2 * period_int)
+
+ assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
+ assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
+ assert_allclose(b.integrate(1.5 + 12, 3 + 12),
+ i(1) - i(0) + i(2) - i(1.5))
+ assert_allclose(b.integrate(1.5, 3 + 12),
+ i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)
+
+ assert_allclose(b.integrate(0, -1), i(0) - i(1))
+ assert_allclose(b.integrate(-9, -10), i(0) - i(1))
+ assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)
+
+ def test_integrate_ppoly(self):
+ # test .integrate method to be consistent with PPoly.integrate
+ x = [0, 1, 2, 3, 4]
+ b = make_interp_spline(x, x)
+ b.extrapolate = 'periodic'
+ p = PPoly.from_spline(b)
+
+ for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
+ assert_allclose(b.integrate(x0, x1),
+ p.integrate(x0, x1))
+
+ def test_subclassing(self):
+ # classmethods should not decay to the base class
+ class B(BSpline):
+ pass
+
+ b = B.basis_element([0, 1, 2, 2])
+ assert_equal(b.__class__, B)
+ assert_equal(b.derivative().__class__, B)
+ assert_equal(b.antiderivative().__class__, B)
+
+ @pytest.mark.parametrize('axis', range(-4, 4))
+ def test_axis(self, axis):
+ n, k = 22, 3
+ t = np.linspace(0, 1, n + k + 1)
+ sh = [6, 7, 8]
+ # We need the positive axis for some of the indexing and slices used
+ # in this test.
+ pos_axis = axis % 4
+ sh.insert(pos_axis, n) # [22, 6, 7, 8] etc
+ c = np.random.random(size=sh)
+ b = BSpline(t, c, k, axis=axis)
+ assert_equal(b.c.shape,
+ [sh[pos_axis],] + sh[:pos_axis] + sh[pos_axis+1:])
+
+ xp = np.random.random((3, 4, 5))
+ assert_equal(b(xp).shape,
+ sh[:pos_axis] + list(xp.shape) + sh[pos_axis+1:])
+
+ # -c.ndim <= axis < c.ndim
+ for ax in [-c.ndim - 1, c.ndim]:
+ assert_raises(AxisError, BSpline,
+ **dict(t=t, c=c, k=k, axis=ax))
+
+ # derivative, antiderivative keeps the axis
+ for b1 in [BSpline(t, c, k, axis=axis).derivative(),
+ BSpline(t, c, k, axis=axis).derivative(2),
+ BSpline(t, c, k, axis=axis).antiderivative(),
+ BSpline(t, c, k, axis=axis).antiderivative(2)]:
+ assert_equal(b1.axis, b.axis)
+
+ def test_neg_axis(self):
+ k = 2
+ t = [0, 1, 2, 3, 4, 5, 6]
+ c = np.array([[-1, 2, 0, -1], [2, 0, -3, 1]])
+
+ spl = BSpline(t, c, k, axis=-1)
+ spl0 = BSpline(t, c[0], k)
+ spl1 = BSpline(t, c[1], k)
+ assert_equal(spl(2.5), [spl0(2.5), spl1(2.5)])
+
+ def test_design_matrix_bc_types(self):
+ '''
+ Splines with different boundary conditions are built on different
+ types of vectors of knots. As far as design matrix depends only on
+ vector of knots, `k` and `x` it is useful to make tests for different
+ boundary conditions (and as following different vectors of knots).
+ '''
+ def run_design_matrix_tests(n, k, bc_type):
+ '''
+ To avoid repetition of code the following function is provided.
+ '''
+ np.random.seed(1234)
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+ if bc_type == "periodic":
+ y[0] = y[-1]
+
+ bspl = make_interp_spline(x, y, k=k, bc_type=bc_type)
+
+ c = np.eye(len(bspl.t) - k - 1)
+ des_matr_def = BSpline(bspl.t, c, k)(x)
+ des_matr_csr = BSpline.design_matrix(x,
+ bspl.t,
+ k).toarray()
+ assert_allclose(des_matr_csr @ bspl.c, y, atol=1e-14)
+ assert_allclose(des_matr_def, des_matr_csr, atol=1e-14)
+
+ # "clamped" and "natural" work only with `k = 3`
+ n = 11
+ k = 3
+ for bc in ["clamped", "natural"]:
+ run_design_matrix_tests(n, k, bc)
+
+ # "not-a-knot" works with odd `k`
+ for k in range(3, 8, 2):
+ run_design_matrix_tests(n, k, "not-a-knot")
+
+ # "periodic" works with any `k` (even more than `n`)
+ n = 5 # smaller `n` to test `k > n` case
+ for k in range(2, 7):
+ run_design_matrix_tests(n, k, "periodic")
+
+ @pytest.mark.parametrize('extrapolate', [False, True, 'periodic'])
+ @pytest.mark.parametrize('degree', range(5))
+ def test_design_matrix_same_as_BSpline_call(self, extrapolate, degree):
+ """Test that design_matrix(x) is equivalent to BSpline(..)(x)."""
+ np.random.seed(1234)
+ x = np.random.random_sample(10 * (degree + 1))
+ xmin, xmax = np.amin(x), np.amax(x)
+ k = degree
+ t = np.r_[np.linspace(xmin - 2, xmin - 1, degree),
+ np.linspace(xmin, xmax, 2 * (degree + 1)),
+ np.linspace(xmax + 1, xmax + 2, degree)]
+ c = np.eye(len(t) - k - 1)
+ bspline = BSpline(t, c, k, extrapolate)
+ assert_allclose(
+ bspline(x), BSpline.design_matrix(x, t, k, extrapolate).toarray()
+ )
+
+ # extrapolation regime
+ x = np.array([xmin - 10, xmin - 1, xmax + 1.5, xmax + 10])
+ if not extrapolate:
+ with pytest.raises(ValueError):
+ BSpline.design_matrix(x, t, k, extrapolate)
+ else:
+ assert_allclose(
+ bspline(x),
+ BSpline.design_matrix(x, t, k, extrapolate).toarray()
+ )
+
+ def test_design_matrix_x_shapes(self):
+ # test for different `x` shapes
+ np.random.seed(1234)
+ n = 10
+ k = 3
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+
+ bspl = make_interp_spline(x, y, k=k)
+ for i in range(1, 4):
+ xc = x[:i]
+ yc = y[:i]
+ des_matr_csr = BSpline.design_matrix(xc,
+ bspl.t,
+ k).toarray()
+ assert_allclose(des_matr_csr @ bspl.c, yc, atol=1e-14)
+
+ def test_design_matrix_t_shapes(self):
+ # test for minimal possible `t` shape
+ t = [1., 1., 1., 2., 3., 4., 4., 4.]
+ des_matr = BSpline.design_matrix(2., t, 3).toarray()
+ assert_allclose(des_matr,
+ [[0.25, 0.58333333, 0.16666667, 0.]],
+ atol=1e-14)
+
+ def test_design_matrix_asserts(self):
+ np.random.seed(1234)
+ n = 10
+ k = 3
+ x = np.sort(np.random.random_sample(n) * 40 - 20)
+ y = np.random.random_sample(n) * 40 - 20
+ bspl = make_interp_spline(x, y, k=k)
+ # invalid vector of knots (should be a 1D non-descending array)
+ # here the actual vector of knots is reversed, so it is invalid
+ with assert_raises(ValueError):
+ BSpline.design_matrix(x, bspl.t[::-1], k)
+ k = 2
+ t = [0., 1., 2., 3., 4., 5.]
+ x = [1., 2., 3., 4.]
+ # out of bounds
+ with assert_raises(ValueError):
+ BSpline.design_matrix(x, t, k)
+
+ @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+ 'periodic', 'not-a-knot'])
+ def test_from_power_basis(self, bc_type):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20)
+ if bc_type == 'periodic':
+ y[-1] = y[0]
+ cb = CubicSpline(x, y, bc_type=bc_type)
+ bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+ xx = np.linspace(0, 1, 20)
+ assert_allclose(cb(xx), bspl(xx), atol=1e-15)
+ bspl_new = make_interp_spline(x, y, bc_type=bc_type)
+ assert_allclose(bspl.c, bspl_new.c, atol=1e-15)
+
+ @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+ 'periodic', 'not-a-knot'])
+ def test_from_power_basis_complex(self, bc_type):
+ np.random.seed(1234)
+ x = np.sort(np.random.random(20))
+ y = np.random.random(20) + np.random.random(20) * 1j
+ if bc_type == 'periodic':
+ y[-1] = y[0]
+ cb = CubicSpline(x, y, bc_type=bc_type)
+ bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+ bspl_new_real = make_interp_spline(x, y.real, bc_type=bc_type)
+ bspl_new_imag = make_interp_spline(x, y.imag, bc_type=bc_type)
+ assert_equal(bspl.c.dtype, (bspl_new_real.c
+ + 1j * bspl_new_imag.c).dtype)
+ assert_allclose(bspl.c, bspl_new_real.c
+ + 1j * bspl_new_imag.c, atol=1e-15)
+
+ def test_from_power_basis_exmp(self):
+ '''
+ For x = [0, 1, 2, 3, 4] and y = [1, 1, 1, 1, 1]
+ the coefficients of Cubic Spline in the power basis:
+
+ $[[0, 0, 0, 0, 0],\\$
+ $[0, 0, 0, 0, 0],\\$
+ $[0, 0, 0, 0, 0],\\$
+ $[1, 1, 1, 1, 1]]$
+
+ It could be shown explicitly that coefficients of the interpolating
+ function in B-spline basis are c = [1, 1, 1, 1, 1, 1, 1]
+ '''
+ x = np.array([0, 1, 2, 3, 4])
+ y = np.array([1, 1, 1, 1, 1])
+ bspl = BSpline.from_power_basis(CubicSpline(x, y, bc_type='natural'),
+ bc_type='natural')
+ assert_allclose(bspl.c, [1, 1, 1, 1, 1, 1, 1], atol=1e-15)
+
+ def test_read_only(self):
+ # BSpline must work on read-only knots and coefficients.
+ t = np.array([0, 1])
+ c = np.array([3.0])
+ t.setflags(write=False)
+ c.setflags(write=False)
+
+ xx = np.linspace(0, 1, 10)
+ xx.setflags(write=False)
+
+ b = BSpline(t=t, c=c, k=0)
+ assert_allclose(b(xx), 3)
+
+
+class TestInsert:
+
+ @pytest.mark.parametrize('xval', [0.0, 1.0, 2.5, 4, 6.5, 7.0])
+ def test_insert(self, xval):
+ # insert a knot, incl edges (0.0, 7.0) and exactly at an existing knot (4.0)
+ x = np.arange(8)
+ y = np.sin(x)**3
+ spl = make_interp_spline(x, y, k=3)
+
+ spl_1f = insert(xval, spl) # FITPACK
+ spl_1 = spl.insert_knot(xval)
+
+ assert_allclose(spl_1.t, spl_1f.t, atol=1e-15)
+ assert_allclose(spl_1.c, spl_1f.c[:-spl.k-1], atol=1e-15)
+
+ # knot insertion preserves values, unless multiplicity >= k+1
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ # ... repeat with ndim > 1
+ y1 = np.cos(x)**3
+ spl_y1 = make_interp_spline(x, y1, k=3)
+ spl_yy = make_interp_spline(x, np.c_[y, y1], k=3)
+ spl_yy1 = spl_yy.insert_knot(xval)
+
+ assert_allclose(spl_yy1.t, spl_1.t, atol=1e-15)
+ assert_allclose(spl_yy1.c, np.c_[spl.insert_knot(xval).c,
+ spl_y1.insert_knot(xval).c], atol=1e-15)
+
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl_yy(xx), spl_yy1(xx), atol=1e-15)
+
+
+ @pytest.mark.parametrize(
+ 'xval, m', [(0.0, 2), (1.0, 3), (1.5, 5), (4, 2), (7.0, 2)]
+ )
+ def test_insert_multi(self, xval, m):
+ x = np.arange(8)
+ y = np.sin(x)**3
+ spl = make_interp_spline(x, y, k=3)
+
+ spl_1f = insert(xval, spl, m=m)
+ spl_1 = spl.insert_knot(xval, m)
+
+ assert_allclose(spl_1.t, spl_1f.t, atol=1e-15)
+ assert_allclose(spl_1.c, spl_1f.c[:-spl.k-1], atol=1e-15)
+
+ xx = x if xval != x[-1] else x[:-1]
+ xx = np.r_[xx, 0.5*(x[1:] + x[:-1])]
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ def test_insert_random(self):
+ rng = np.random.default_rng(12345)
+ n, k = 11, 3
+
+ t = np.sort(rng.uniform(size=n+k+1))
+ c = rng.uniform(size=(n, 3, 2))
+ spl = BSpline(t, c, k)
+
+ xv = rng.uniform(low=t[k+1], high=t[-k-1])
+ spl_1 = spl.insert_knot(xv)
+
+ xx = rng.uniform(low=t[k+1], high=t[-k-1], size=33)
+ assert_allclose(spl(xx), spl_1(xx), atol=1e-15)
+
+ @pytest.mark.parametrize('xv', [0, 0.1, 2.0, 4.0, 4.5, # l.h. edge
+ 5.5, 6.0, 6.1, 7.0] # r.h. edge
+ )
+ def test_insert_periodic(self, xv):
+ x = np.arange(8)
+ y = np.sin(x)**3
+ tck = splrep(x, y, k=3)
+ spl = BSpline(*tck, extrapolate="periodic")
+
+ spl_1 = spl.insert_knot(xv)
+ tf, cf, k = insert(xv, spl.tck, per=True)
+
+ assert_allclose(spl_1.t, tf, atol=1e-15)
+ assert_allclose(spl_1.c[:-k-1], cf[:-k-1], atol=1e-15)
+
+ xx = np.random.default_rng(1234).uniform(low=0, high=7, size=41)
+ assert_allclose(spl_1(xx), splev(xx, (tf, cf, k)), atol=1e-15)
+
+ def test_insert_periodic_too_few_internal_knots(self):
+ # both FITPACK and spl.insert_knot raise when there's not enough
+ # internal knots to make a periodic extension.
+ # Below the internal knots are 2, 3, , 4, 5
+ # ^
+ # 2, 3, 3.5, 4, 5
+ # so two knots from each side from the new one, while need at least
+ # from either left or right.
+ xv = 3.5
+ k = 3
+ t = np.array([0]*(k+1) + [2, 3, 4, 5] + [7]*(k+1))
+ c = np.ones(len(t) - k - 1)
+ spl = BSpline(t, c, k, extrapolate="periodic")
+
+ with assert_raises(ValueError):
+ insert(xv, (t, c, k), per=True)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(xv)
+
+ def test_insert_no_extrap(self):
+ k = 3
+ t = np.array([0]*(k+1) + [2, 3, 4, 5] + [7]*(k+1))
+ c = np.ones(len(t) - k - 1)
+ spl = BSpline(t, c, k)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(-1)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(8)
+
+ with assert_raises(ValueError):
+ spl.insert_knot(3, m=0)
+
+
+def test_knots_multiplicity():
+ # Take a spline w/ random coefficients, throw in knots of varying
+ # multiplicity.
+
+ def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
+ # check evaluations against FITPACK, incl extrapolations
+ t, c, k = b.tck
+ x = np.unique(t)
+ x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:1]), t[-1]+0.1]
+ assert_allclose(splev(x, (t, c, k), der), b(x, der),
+ atol=atol, rtol=rtol, err_msg=f'der = {der} k = {b.k}')
+
+ # test loop itself
+ # [the index `j` is for interpreting the traceback in case of a failure]
+ for k in [1, 2, 3, 4, 5]:
+ b = _make_random_spline(k=k)
+ for j, b1 in enumerate(_make_multiples(b)):
+ check_splev(b1, j)
+ for der in range(1, k+1):
+ check_splev(b1, j, der, 1e-12, 1e-12)
+
+
+### stolen from @pv, verbatim
+def _naive_B(x, k, i, t):
+ """
+ Naive way to compute B-spline basis functions. Useful only for testing!
+ computes B(x; t[i],..., t[i+k+1])
+ """
+ if k == 0:
+ return 1.0 if t[i] <= x < t[i+1] else 0.0
+ if t[i+k] == t[i]:
+ c1 = 0.0
+ else:
+ c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
+ if t[i+k+1] == t[i+1]:
+ c2 = 0.0
+ else:
+ c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
+ return (c1 + c2)
+
+
+### stolen from @pv, verbatim
+def _naive_eval(x, t, c, k):
+ """
+ Naive B-spline evaluation. Useful only for testing!
+ """
+ if x == t[k]:
+ i = k
+ else:
+ i = np.searchsorted(t, x) - 1
+ assert t[i] <= x <= t[i+1]
+ assert i >= k and i < len(t) - k
+ return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
+
+
+def _naive_eval_2(x, t, c, k):
+ """Naive B-spline evaluation, another way."""
+ n = len(t) - (k+1)
+ assert n >= k+1
+ assert len(c) >= n
+ assert t[k] <= x <= t[n]
+ return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
+
+
+def _sum_basis_elements(x, t, c, k):
+ n = len(t) - (k+1)
+ assert n >= k+1
+ assert len(c) >= n
+ s = 0.
+ for i in range(n):
+ b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
+ s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
+ return s
+
+
+def B_012(x):
+ """ A linear B-spline function B(x | 0, 1, 2)."""
+ x = np.atleast_1d(x)
+ return np.piecewise(x, [(x < 0) | (x > 2),
+ (x >= 0) & (x < 1),
+ (x >= 1) & (x <= 2)],
+ [lambda x: 0., lambda x: x, lambda x: 2.-x])
+
+
+def B_0123(x, der=0):
+ """A quadratic B-spline function B(x | 0, 1, 2, 3)."""
+ x = np.atleast_1d(x)
+ conds = [x < 1, (x > 1) & (x < 2), x > 2]
+ if der == 0:
+ funcs = [lambda x: x*x/2.,
+ lambda x: 3./4 - (x-3./2)**2,
+ lambda x: (3.-x)**2 / 2]
+ elif der == 2:
+ funcs = [lambda x: 1.,
+ lambda x: -2.,
+ lambda x: 1.]
+ else:
+ raise ValueError('never be here: der=%s' % der)
+ pieces = np.piecewise(x, conds, funcs)
+ return pieces
+
+
+def _make_random_spline(n=35, k=3):
+ np.random.seed(123)
+ t = np.sort(np.random.random(n+k+1))
+ c = np.random.random(n)
+ return BSpline.construct_fast(t, c, k)
+
+
+def _make_multiples(b):
+ """Increase knot multiplicity."""
+ c, k = b.c, b.k
+
+ t1 = b.t.copy()
+ t1[17:19] = t1[17]
+ t1[22] = t1[21]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[:k+1] = t1[0]
+ yield BSpline(t1, c, k)
+
+ t1 = b.t.copy()
+ t1[-k-1:] = t1[-1]
+ yield BSpline(t1, c, k)
+
+
class TestInterop:
    #
    # Test that FITPACK-based spl* functions can deal with BSpline objects
    #
    def setup_method(self):
        # cubic interpolant of cos(x) on [0, 4*pi], kept both as a BSpline
        # object and as a raw (t, c, k) tuple
        xx = np.linspace(0, 4.*np.pi, 41)
        yy = np.cos(xx)
        b = make_interp_spline(xx, yy)
        self.tck = (b.t, b.c, b.k)
        self.xx, self.yy, self.b = xx, yy, b

        self.xnew = np.linspace(0, 4.*np.pi, 21)

        # same coefficients replicated into trailing dimensions (3, 2)
        c2 = np.c_[b.c, b.c, b.c]
        self.c2 = np.dstack((c2, c2))
        self.b2 = BSpline(b.t, self.c2, b.k)

    def test_splev(self):
        xnew, b, b2 = self.xnew, self.b, self.b2

        # check that splev works with 1-D array of coefficients
        # for array and scalar `x`
        assert_allclose(splev(xnew, b),
                        b(xnew), atol=1e-15, rtol=1e-15)
        assert_allclose(splev(xnew, b.tck),
                        b(xnew), atol=1e-15, rtol=1e-15)
        assert_allclose([splev(x, b) for x in xnew],
                        b(xnew), atol=1e-15, rtol=1e-15)

        # With N-D coefficients, there's a quirk:
        # splev(x, BSpline) is equivalent to BSpline(x)
        with assert_raises(ValueError, match="Calling splev.. with BSpline"):
            splev(xnew, b2)

        # However, splev(x, BSpline.tck) needs some transposes. This is because
        # BSpline interpolates along the first axis, while the legacy FITPACK
        # wrapper does list(map(...)) which effectively interpolates along the
        # last axis. Like so:
        sh = tuple(range(1, b2.c.ndim)) + (0,)   # sh = (1, 2, 0)
        cc = b2.c.transpose(sh)
        tck = (b2.t, cc, b2.k)
        assert_allclose(splev(xnew, tck),
                        b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)

    def test_splrep(self):
        x, y = self.xx, self.yy
        # test that "new" splrep is equivalent to _impl.splrep
        tck = splrep(x, y)
        t, c, k = _impl.splrep(x, y)
        assert_allclose(tck[0], t, atol=1e-15)
        assert_allclose(tck[1], c, atol=1e-15)
        assert_equal(tck[2], k)

        # also cover the `full_output=True` branch
        tck_f, _, _, _ = splrep(x, y, full_output=True)
        assert_allclose(tck_f[0], t, atol=1e-15)
        assert_allclose(tck_f[1], c, atol=1e-15)
        assert_equal(tck_f[2], k)

        # test that the result of splrep roundtrips with splev:
        # evaluate the spline on the original `x` points
        yy = splev(x, tck)
        assert_allclose(y, yy, atol=1e-15)

        # ... and also it roundtrips if wrapped in a BSpline
        b = BSpline(*tck)
        assert_allclose(y, b(x), atol=1e-15)

    def test_splrep_errors(self):
        # test that both "old" and "new" splrep raise for an N-D ``y`` array
        # with n > 1
        x, y = self.xx, self.yy
        y2 = np.c_[y, y]
        with assert_raises(ValueError):
            splrep(x, y2)
        with assert_raises(ValueError):
            _impl.splrep(x, y2)

        # input below minimum size
        with assert_raises(TypeError, match="m > k must hold"):
            splrep(x[:3], y[:3])
        with assert_raises(TypeError, match="m > k must hold"):
            _impl.splrep(x[:3], y[:3])

    def test_splprep(self):
        x = np.arange(15).reshape((3, 5))
        b, u = splprep(x)
        tck, u1 = _impl.splprep(x)

        # test the roundtrip with splev for both "old" and "new" output
        assert_allclose(u, u1, atol=1e-15)
        assert_allclose(splev(u, b), x, atol=1e-15)
        assert_allclose(splev(u, tck), x, atol=1e-15)

        # cover the ``full_output=True`` branch
        (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
        assert_allclose(u, u_f, atol=1e-15)
        assert_allclose(splev(u_f, b_f), x, atol=1e-15)

    def test_splprep_errors(self):
        # test that both "old" and "new" code paths raise for x.ndim > 2
        x = np.arange(3*4*5).reshape((3, 4, 5))
        with assert_raises(ValueError, match="too many values to unpack"):
            splprep(x)
        with assert_raises(ValueError, match="too many values to unpack"):
            _impl.splprep(x)

        # input below minimum size
        x = np.linspace(0, 40, num=3)
        with assert_raises(TypeError, match="m > k must hold"):
            splprep([x])
        with assert_raises(TypeError, match="m > k must hold"):
            _impl.splprep([x])

        # automatically calculated parameters are non-increasing
        # see gh-7589
        x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
        with assert_raises(ValueError, match="Invalid inputs"):
            splprep([x])
        with assert_raises(ValueError, match="Invalid inputs"):
            _impl.splprep([x])

        # given non-increasing parameter values u
        x = [1, 3, 2, 4]
        u = [0, 0.3, 0.2, 1]
        with assert_raises(ValueError, match="Invalid inputs"):
            splprep(*[[x], None, u])

    def test_sproot(self):
        b, b2 = self.b, self.b2
        # cos(x) vanishes at odd multiples of pi/2 inside [0, 4*pi]
        roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
        # sproot accepts a BSpline obj w/ 1-D coef array
        assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
        assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)

        # ... and deals with trailing dimensions if coef array is N-D
        with assert_raises(ValueError, match="Calling sproot.. with BSpline"):
            sproot(b2, mest=50)

        # and legacy behavior is preserved for a tck tuple w/ N-D coef
        c2r = b2.c.transpose(1, 2, 0)
        rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
        assert_equal(rr.shape, (3, 2, 4))
        assert_allclose(rr - roots, 0, atol=1e-12)

    def test_splint(self):
        # test that splint accepts BSpline objects
        b, b2 = self.b, self.b2
        assert_allclose(splint(0, 1, b),
                        splint(0, 1, b.tck), atol=1e-14)
        assert_allclose(splint(0, 1, b),
                        b.integrate(0, 1), atol=1e-14)

        # ... and deals with N-D arrays of coefficients
        with assert_raises(ValueError, match="Calling splint.. with BSpline"):
            splint(0, 1, b2)

        # and the legacy behavior is preserved for a tck tuple w/ N-D coef
        c2r = b2.c.transpose(1, 2, 0)
        integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
        assert_equal(integr.shape, (3, 2))
        assert_allclose(integr,
                        splint(0, 1, b), atol=1e-14)

    def test_splder(self):
        for b in [self.b, self.b2]:
            # pad the c array (FITPACK convention)
            ct = len(b.t) - len(b.c)
            if ct > 0:
                b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]

            # NOTE(review): the loop variable ``n`` is unused — splder is
            # called without a derivative order, so each iteration repeats
            # the same check; presumably ``splder(b, n)`` was intended.
            for n in [1, 2, 3]:
                bd = splder(b)
                tck_d = _impl.splder((b.t, b.c, b.k))
                assert_allclose(bd.t, tck_d[0], atol=1e-15)
                assert_allclose(bd.c, tck_d[1], atol=1e-15)
                assert_equal(bd.k, tck_d[2])
                assert_(isinstance(bd, BSpline))
                assert_(isinstance(tck_d, tuple))  # back-compat: tck in and out

    def test_splantider(self):
        for b in [self.b, self.b2]:
            # pad the c array (FITPACK convention)
            ct = len(b.t) - len(b.c)
            if ct > 0:
                b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]

            # NOTE(review): loop variable ``n`` unused, same as in test_splder.
            for n in [1, 2, 3]:
                bd = splantider(b)
                tck_d = _impl.splantider((b.t, b.c, b.k))
                assert_allclose(bd.t, tck_d[0], atol=1e-15)
                assert_allclose(bd.c, tck_d[1], atol=1e-15)
                assert_equal(bd.k, tck_d[2])
                assert_(isinstance(bd, BSpline))
                assert_(isinstance(tck_d, tuple))  # back-compat: tck in and out

    def test_insert(self):
        b, b2, xx = self.b, self.b2, self.xx

        # insert a knot in the middle of the knot vector
        j = b.t.size // 2
        tn = 0.5*(b.t[j] + b.t[j+1])

        bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
        assert_allclose(splev(xx, bn),
                        splev(xx, tck_n), atol=1e-15)
        assert_(isinstance(bn, BSpline))
        assert_(isinstance(tck_n, tuple))   # back-compat: tck in, tck out

        # for N-D array of coefficients, BSpline.c needs to be transposed
        # after that, the results are equivalent.
        sh = tuple(range(b2.c.ndim))
        c_ = b2.c.transpose(sh[1:] + (0,))
        tck_n2 = insert(tn, (b2.t, c_, b2.k))

        bn2 = insert(tn, b2)

        # need a transpose for comparing the results, cf test_splev
        assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
                        bn2(xx), atol=1e-15)
        assert_(isinstance(bn2, BSpline))
        assert_(isinstance(tck_n2, tuple))   # back-compat: tck in, tck out
+
+
class TestInterp:
    #
    # Test basic ways of constructing interpolating splines.
    #
    xx = np.linspace(0., 2.*np.pi)
    yy = np.sin(xx)

    def test_non_int_order(self):
        with assert_raises(TypeError):
            make_interp_spline(self.xx, self.yy, k=2.5)

    def test_order_0(self):
        b = make_interp_spline(self.xx, self.yy, k=0)
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        b = make_interp_spline(self.xx, self.yy, k=0, axis=-1)
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)

    def test_linear(self):
        b = make_interp_spline(self.xx, self.yy, k=1)
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        b = make_interp_spline(self.xx, self.yy, k=1, axis=-1)
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)

    @pytest.mark.parametrize('k', [0, 1, 2, 3])
    def test_incompatible_x_y(self, k):
        x = [0, 1, 2, 3, 4, 5]
        y = [0, 1, 2, 3, 4, 5, 6, 7]
        with assert_raises(ValueError, match="Shapes of x"):
            make_interp_spline(x, y, k=k)

    @pytest.mark.parametrize('k', [0, 1, 2, 3])
    def test_broken_x(self, k):
        x = [0, 1, 1, 2, 3, 4]      # duplicates
        y = [0, 1, 2, 3, 4, 5]
        with assert_raises(ValueError, match="x to not have duplicates"):
            make_interp_spline(x, y, k=k)

        x = [0, 2, 1, 3, 4, 5]      # unsorted
        with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
            make_interp_spline(x, y, k=k)

        x = [0, 1, 2, 3, 4, 5]
        x = np.asarray(x).reshape((1, -1))      # x is not 1-D
        with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
            make_interp_spline(x, y, k=k)

    def test_not_a_knot(self):
        for k in [3, 5]:
            b = make_interp_spline(self.xx, self.yy, k)
            assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)

    def test_periodic(self):
        # k = 5 here for more derivatives
        b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic')
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        # in the periodic case the first k-1 derivatives are expected to
        # match at the boundaries
        for i in range(1, 5):
            assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)
        # tests for axis=-1
        b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic', axis=-1)
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        for i in range(1, 5):
            assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)

    @pytest.mark.parametrize('k', [2, 3, 4, 5, 6, 7])
    def test_periodic_random(self, k):
        # tests for both cases (k > n and k <= n)
        n = 5
        np.random.seed(1234)
        x = np.sort(np.random.random_sample(n) * 10)
        y = np.random.random_sample(n) * 100
        y[0] = y[-1]            # periodic data must close up
        b = make_interp_spline(x, y, k=k, bc_type='periodic')
        assert_allclose(b(x), y, atol=1e-14)

    def test_periodic_axis(self):
        n = self.xx.shape[0]
        np.random.seed(1234)
        x = np.random.random_sample(n) * 2 * np.pi
        x = np.sort(x)
        x[0] = 0.
        x[-1] = 2 * np.pi
        y = np.zeros((2, n))
        y[0] = np.sin(x)
        y[1] = np.cos(x)
        b = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
        for i in range(n):
            assert_allclose(b(x[i]), y[:, i], atol=1e-14)
        assert_allclose(b(x[0]), b(x[-1]), atol=1e-14)

    def test_periodic_points_exception(self):
        # first and last points should match when periodic case expected
        np.random.seed(1234)
        k = 5
        n = 8
        x = np.sort(np.random.random_sample(n))
        y = np.random.random_sample(n)
        y[0] = y[-1] - 1        # to be sure that they are not equal
        with assert_raises(ValueError):
            make_interp_spline(x, y, k=k, bc_type='periodic')

    def test_periodic_knots_exception(self):
        # `periodic` case does not work with passed vector of knots
        np.random.seed(1234)
        k = 3
        n = 7
        x = np.sort(np.random.random_sample(n))
        y = np.random.random_sample(n)
        t = np.zeros(n + 2 * k)
        with assert_raises(ValueError):
            make_interp_spline(x, y, k, t, 'periodic')

    @pytest.mark.parametrize('k', [2, 3, 4, 5])
    def test_periodic_splev(self, k):
        # comparison values of periodic b-spline with splev
        b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
        tck = splrep(self.xx, self.yy, per=True, k=k)
        spl = splev(self.xx, tck)
        assert_allclose(spl, b(self.xx), atol=1e-14)

        # comparison derivatives of periodic b-spline with splev
        for i in range(1, k):
            spl = splev(self.xx, tck, der=i)
            assert_allclose(spl, b(self.xx, nu=i), atol=1e-10)

    def test_periodic_cubic(self):
        # comparison values of cubic periodic b-spline with CubicSpline
        b = make_interp_spline(self.xx, self.yy, k=3, bc_type='periodic')
        cub = CubicSpline(self.xx, self.yy, bc_type='periodic')
        assert_allclose(b(self.xx), cub(self.xx), atol=1e-14)

        # edge case: Cubic interpolation on 3 points
        # NOTE(review): no seed is set here, so this part depends on the
        # global RNG state left by earlier tests — consider seeding.
        n = 3
        x = np.sort(np.random.random_sample(n) * 10)
        y = np.random.random_sample(n) * 100
        y[0] = y[-1]
        b = make_interp_spline(x, y, k=3, bc_type='periodic')
        cub = CubicSpline(x, y, bc_type='periodic')
        assert_allclose(b(x), cub(x), atol=1e-14)

    def test_periodic_full_matrix(self):
        # comparison values of cubic periodic b-spline with
        # solution of the system with full matrix
        k = 3
        b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
        t = _periodic_knots(self.xx, k)
        c = _make_interp_per_full_matr(self.xx, self.yy, t, k)
        b1 = np.vectorize(lambda x: _naive_eval(x, t, c, k))
        assert_allclose(b(self.xx), b1(self.xx), atol=1e-14)

    def test_quadratic_deriv(self):
        der = [(1, 8.)]  # order, value: f'(x) = 8.

        # derivative at right-hand edge
        b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)

        # derivative at left-hand edge
        b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)

    def test_cubic_deriv(self):
        k = 3

        # first derivatives at left & right edges:
        der_l, der_r = [(1, 3.)], [(1, 4.)]
        b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
                        [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)

        # 'natural' cubic spline, zero out 2nd derivatives at the boundaries
        der_l, der_r = [(2, 0)], [(2, 0)]
        b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)

    def test_quintic_derivs(self):
        k, n = 5, 7
        x = np.arange(n).astype(np.float64)
        y = np.sin(x)
        der_l = [(1, -12.), (2, 1)]
        der_r = [(1, 8.), (2, 3.)]
        b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
        assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
        assert_allclose([b(x[0], 1), b(x[0], 2)],
                        [val for (nu, val) in der_l])
        assert_allclose([b(x[-1], 1), b(x[-1], 2)],
                        [val for (nu, val) in der_r])

    @pytest.mark.xfail(reason='unstable')
    def test_cubic_deriv_unstable(self):
        # 1st and 2nd derivative at x[0], no derivative information at x[-1]
        # The problem is not that it fails [who would use this anyway],
        # the problem is that it fails *silently*, and I've no idea
        # how to detect this sort of instability.
        # In this particular case: it's OK for len(t) < 20, goes haywire
        # at larger `len(t)`.
        k = 3
        t = _augknt(self.xx, k)

        der_l = [(1, 3.), (2, 4.)]
        b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)

    def test_knots_not_data_sites(self):
        # Knots need not coincide with the data sites.
        # use a quadratic spline, knots are at data averages,
        # two additional constraints are zero 2nd derivatives at edges
        k = 2
        t = np.r_[(self.xx[0],)*(k+1),
                  (self.xx[1:] + self.xx[:-1]) / 2.,
                  (self.xx[-1],)*(k+1)]
        b = make_interp_spline(self.xx, self.yy, k, t,
                               bc_type=([(2, 0)], [(2, 0)]))

        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
        assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
                        atol=1e-14)

    def test_minimum_points_and_deriv(self):
        # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * x**2 and
        # f'(0) = 0, f'(1) = 3.
        k = 3
        x = [0., 1.]
        y = [0., 1.]
        b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))

        xx = np.linspace(0., 1.)
        yy = xx**3
        assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)

    def test_deriv_spec(self):
        # If one of the derivatives is omitted, the spline definition is
        # incomplete.
        x = y = [1.0, 2, 3, 4, 5, 6]

        with assert_raises(ValueError):
            make_interp_spline(x, y, bc_type=([(1, 0.)], None))

        with assert_raises(ValueError):
            make_interp_spline(x, y, bc_type=(1, 0.))

        with assert_raises(ValueError):
            make_interp_spline(x, y, bc_type=[(1, 0.)])

        with assert_raises(ValueError):
            make_interp_spline(x, y, bc_type=42)

        # CubicSpline expects ``bc_type=(left_pair, right_pair)``, while
        # here we expect ``bc_type=(iterable, iterable)``.
        l, r = (1, 0.0), (1, 0.0)
        with assert_raises(ValueError):
            make_interp_spline(x, y, bc_type=(l, r))

    def test_complex(self):
        k = 3
        xx = self.xx
        yy = self.yy + 1.j*self.yy

        # first derivatives at left & right edges:
        der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
        b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
        assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
        assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
                        [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)

        # also test zero and first order
        for k in (0, 1):
            b = make_interp_spline(xx, yy, k=k)
            assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)

    def test_int_xy(self):
        x = np.arange(10).astype(int)
        y = np.arange(10).astype(int)

        # Cython chokes on "buffer type mismatch" (construction) or
        # "no matching signature found" (evaluation)
        for k in (0, 1, 2, 3):
            b = make_interp_spline(x, y, k=k)
            b(x)

    def test_sliced_input(self):
        # Cython code chokes on non C contiguous arrays
        xx = np.linspace(-1, 1, 100)

        x = xx[::5]
        y = xx[::5]

        for k in (0, 1, 2, 3):
            make_interp_spline(x, y, k=k)

    def test_check_finite(self):
        # check_finite defaults to True; nans and such trigger a ValueError
        x = np.arange(10).astype(float)
        y = x**2

        for z in [np.nan, np.inf, -np.inf]:
            y[-1] = z
            assert_raises(ValueError, make_interp_spline, x, y)

    @pytest.mark.parametrize('k', [1, 2, 3, 5])
    def test_list_input(self, k):
        # regression test for gh-8714: TypeError for x, y being lists and k=2
        x = list(range(10))
        y = [a**2 for a in x]
        make_interp_spline(x, y, k=k)

    def test_multiple_rhs(self):
        yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
        der_l = [(1, [1., 2.])]
        der_r = [(1, [3., 4.])]

        b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
        assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
        assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
        assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)

    def test_shapes(self):
        np.random.seed(1234)
        k, n = 3, 22
        x = np.sort(np.random.random(size=n))
        y = np.random.random(size=(n, 5, 6, 7))

        b = make_interp_spline(x, y, k)
        assert_equal(b.c.shape, (n, 5, 6, 7))

        # now throw in some derivatives
        d_l = [(1, np.random.random((5, 6, 7)))]
        d_r = [(1, np.random.random((5, 6, 7)))]
        b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
        assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))

    def test_string_aliases(self):
        yy = np.sin(self.xx)

        # a single string is duplicated
        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
        assert_allclose(b1.c, b2.c, atol=1e-15)

        # two strings are handled
        b1 = make_interp_spline(self.xx, yy, k=3,
                                bc_type=('natural', 'clamped'))
        b2 = make_interp_spline(self.xx, yy, k=3,
                                bc_type=([(2, 0)], [(1, 0)]))
        assert_allclose(b1.c, b2.c, atol=1e-15)

        # one-sided BCs are OK
        b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
        b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
        assert_allclose(b1.c, b2.c, atol=1e-15)

        # 'not-a-knot' is equivalent to None
        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
        assert_allclose(b1.c, b2.c, atol=1e-15)

        # unknown strings do not pass
        with assert_raises(ValueError):
            make_interp_spline(self.xx, yy, k=3, bc_type='typo')

        # string aliases are handled for 2D values
        yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
        der_l = [(1, [0., 0.])]
        der_r = [(2, [0., 0.])]
        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
        b1 = make_interp_spline(self.xx, yy, k=3,
                                bc_type=('clamped', 'natural'))
        assert_allclose(b1.c, b2.c, atol=1e-15)

        # ... and for N-D values:
        np.random.seed(1234)
        k, n = 3, 22
        x = np.sort(np.random.random(size=n))
        y = np.random.random(size=(n, 5, 6, 7))

        # now throw in some derivatives
        d_l = [(1, np.zeros((5, 6, 7)))]
        d_r = [(1, np.zeros((5, 6, 7)))]
        b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
        b2 = make_interp_spline(x, y, k, bc_type='clamped')
        assert_allclose(b1.c, b2.c, atol=1e-15)

    def test_full_matrix(self):
        np.random.seed(1234)
        k, n = 3, 7
        x = np.sort(np.random.random(size=n))
        y = np.random.random(size=n)
        t = _not_a_knot(x, k)

        b = make_interp_spline(x, y, k, t)
        cf = make_interp_full_matr(x, y, t, k)
        assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)

    def test_woodbury(self):
        '''
        Random elements in diagonal matrix with blocks in the
        left lower and right upper corners checking the
        implementation of Woodbury algorithm.
        '''
        np.random.seed(1234)
        n = 201
        for k in range(3, 32, 2):
            offset = int((k - 1) / 2)
            # assemble a banded matrix with random diagonals ...
            a = np.diagflat(np.random.random((1, n)))
            for i in range(1, offset + 1):
                a[:-i, i:] += np.diagflat(np.random.random((1, n - i)))
                a[i:, :-i] += np.diagflat(np.random.random((1, n - i)))
            # ... plus dense corner blocks (the "periodic" wrap-around part)
            ur = np.random.random((offset, offset))
            a[:offset, -offset:] = ur
            ll = np.random.random((offset, offset))
            a[-offset:, :offset] = ll
            # pack the band of `a` into the (k, n) diagonal-ordered form
            d = np.zeros((k, n))
            for i, j in enumerate(range(offset, -offset - 1, -1)):
                if j < 0:
                    d[i, :j] = np.diagonal(a, offset=j)
                else:
                    d[i, j:] = np.diagonal(a, offset=j)
            b = np.random.random(n)
            assert_allclose(_woodbury_algorithm(d, ur, ll, b, k),
                            np.linalg.solve(a, b), atol=1e-14)
+
+
def make_interp_full_matr(x, y, t, k):
    """Assemble a spline of order k with knots t to interpolate
    y(x) using full matrices.
    Not-a-knot BC only.

    This routine is here for testing only (even though it's functional).
    """
    assert x.size == y.size
    assert t.size == x.size + k + 1
    n = x.size

    A = np.zeros((n, n), dtype=np.float64)
    for row, xval in enumerate(x):
        # locate the knot interval containing xval
        if xval == t[k]:
            left = k
        else:
            left = np.searchsorted(t, xval) - 1

        # the k+1 non-zero b-splines at xval occupy columns left-k .. left
        A[row, left-k:left+1] = _bspl.evaluate_all_bspl(t, k, xval, left)

    return sl.solve(A, y)
+
+
def make_lsq_full_matrix(x, y, t, k=3):
    """Make the least-square spline, full matrices.

    Returns the coefficients `c` plus the observation matrix and the
    right-hand side of the normal equations, ``(A, A.T @ y)``.
    """
    x, y, t = map(np.asarray, (x, y, t))
    m = x.size
    n = t.size - k - 1

    A = np.zeros((m, n), dtype=np.float64)
    for row, xval in enumerate(x):
        # find the knot interval containing xval
        if xval == t[k]:
            left = k
        else:
            left = np.searchsorted(t, xval) - 1

        # fill the row with the non-zero b-splines at xval
        A[row, left-k:left+1] = _bspl.evaluate_all_bspl(t, k, xval, left)

    # have observation matrix; solve the normal equations (A^T A) c = A^T y
    Y = np.dot(A.T, y)
    c = sl.solve(np.dot(A.T, A), Y)

    return c, (A, Y)
+
+
class TestLSQ:
    #
    # Test make_lsq_spline
    #
    # shared fixture: random data on [x[0], x[-1]] with an augmented
    # knot vector of 7 equally spaced interior break points
    np.random.seed(1234)
    n, k = 13, 3
    x = np.sort(np.random.random(n))
    y = np.random.random(n)
    t = _augknt(np.linspace(x[0], x[-1], 7), k)

    def test_lstsq(self):
        # check LSQ construction vs a full matrix version
        x, y, t, k = self.x, self.y, self.t, self.k

        c0, AY = make_lsq_full_matrix(x, y, t, k)
        b = make_lsq_spline(x, y, t, k)

        assert_allclose(b.c, c0)
        assert_equal(b.c.shape, (t.size - k - 1,))

        # also check against numpy.lstsq
        aa, yy = AY
        c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
        assert_allclose(b.c, c1)

    def test_weights(self):
        # weights = 1 is same as None
        x, y, t, k = self.x, self.y, self.t, self.k
        w = np.ones_like(x)

        b = make_lsq_spline(x, y, t, k)
        b_w = make_lsq_spline(x, y, t, k, w=w)

        assert_allclose(b.t, b_w.t, atol=1e-14)
        assert_allclose(b.c, b_w.c, atol=1e-14)
        assert_equal(b.k, b_w.k)

    def test_multiple_rhs(self):
        # N-D ``y``: coefficients keep the trailing dimensions
        x, t, k, n = self.x, self.t, self.k, self.n
        y = np.random.random(size=(n, 5, 6, 7))

        b = make_lsq_spline(x, y, t, k)
        assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))

    def test_complex(self):
        # cmplx-valued `y`: fitting is linear, so it should equal
        # fitting real and imaginary parts separately
        x, t, k = self.x, self.t, self.k
        yc = self.y * (1. + 2.j)

        b = make_lsq_spline(x, yc, t, k)
        b_re = make_lsq_spline(x, yc.real, t, k)
        b_im = make_lsq_spline(x, yc.imag, t, k)

        assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)

    def test_int_xy(self):
        x = np.arange(10).astype(int)
        y = np.arange(10).astype(int)
        t = _augknt(x, k=1)
        # Cython chokes on "buffer type mismatch"
        make_lsq_spline(x, y, t, k=1)

    def test_sliced_input(self):
        # Cython code chokes on non C contiguous arrays
        xx = np.linspace(-1, 1, 100)

        x = xx[::3]
        y = xx[::3]
        t = _augknt(x, 1)
        make_lsq_spline(x, y, t, k=1)

    def test_checkfinite(self):
        # check_finite defaults to True; nans and such trigger a ValueError
        x = np.arange(12).astype(float)
        y = x**2
        t = _augknt(x, 3)

        for z in [np.nan, np.inf, -np.inf]:
            y[-1] = z
            assert_raises(ValueError, make_lsq_spline, x, y, t)

    def test_read_only(self):
        # Check that make_lsq_spline works with read only arrays
        x, y, t = self.x, self.y, self.t
        x.setflags(write=False)
        y.setflags(write=False)
        t.setflags(write=False)
        make_lsq_spline(x=x, y=y, t=t)
+
+
def data_file(basename):
    """Return the absolute path of `basename` inside the tests' data directory."""
    test_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(test_dir, 'data', basename)
+
+
class TestSmoothingSpline:
    #
    # test make_smoothing_spline
    #
    def test_invalid_input(self):
        np.random.seed(1234)
        n = 100
        x = np.sort(np.random.random_sample(n) * 4 - 2)
        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)

        # ``x`` and ``y`` should have same shapes (1-D array)
        with assert_raises(ValueError):
            make_smoothing_spline(x, y[1:])
        with assert_raises(ValueError):
            make_smoothing_spline(x[1:], y)
        with assert_raises(ValueError):
            make_smoothing_spline(x.reshape(1, n), y)

        # ``x`` should be an ascending array
        with assert_raises(ValueError):
            make_smoothing_spline(x[::-1], y)

        # duplicate abscissae are rejected too
        x_dupl = np.copy(x)
        x_dupl[0] = x_dupl[1]

        with assert_raises(ValueError):
            make_smoothing_spline(x_dupl, y)

        # x and y length must be >= 5
        x = np.arange(4)
        y = np.ones(4)
        exception_message = "``x`` and ``y`` length must be at least 5"
        with pytest.raises(ValueError, match=exception_message):
            make_smoothing_spline(x, y)

    def test_compare_with_GCVSPL(self):
        """
        Data is generated in the following way:
        >>> np.random.seed(1234)
        >>> n = 100
        >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
        >>> y = np.sin(x) + np.random.normal(scale=.5, size=n)
        >>> np.savetxt('x.csv', x)
        >>> np.savetxt('y.csv', y)

        We obtain the result of performing the GCV smoothing splines
        package (by Woltring, gcvspl) on the sample data points
        using its version for Octave (https://github.com/srkuberski/gcvspl).
        In order to use this implementation, one should clone the repository
        and open the folder in Octave.
        In Octave, we load up ``x`` and ``y`` (generated from Python code
        above):

        >>> x = csvread('x.csv');
        >>> y = csvread('y.csv');

        Then, in order to access the implementation, we compile gcvspl files in
        Octave:

        >>> mex gcvsplmex.c gcvspl.c
        >>> mex spldermex.c gcvspl.c

        The first function computes the vector of unknowns from the dataset
        (x, y) while the second one evaluates the spline in certain points
        with known vector of coefficients.

        >>> c = gcvsplmex( x, y, 2 );
        >>> y0 = spldermex( x, c, 2, x, 0 );

        If we want to compare the results of the gcvspl code, we can save
        ``y0`` in csv file:

        >>> csvwrite('y0.csv', y0);

        """
        # load the data sample
        with np.load(data_file('gcvspl.npz')) as data:
            # data points
            x = data['x']
            y = data['y']

            y_GCVSPL = data['y_GCVSPL']
        y_compr = make_smoothing_spline(x, y)(x)

        # such tolerance is explained by the fact that the spline is built
        # using an iterative algorithm for minimizing the GCV criteria. These
        # algorithms may vary, so the tolerance should be rather low.
        assert_allclose(y_compr, y_GCVSPL, atol=1e-4, rtol=1e-4)

    def test_non_regularized_case(self):
        """
        In case the regularization parameter is 0, the resulting spline
        is an interpolation spline with natural boundary conditions.
        """
        # create data sample
        np.random.seed(1234)
        n = 100
        x = np.sort(np.random.random_sample(n) * 4 - 2)
        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)

        spline_GCV = make_smoothing_spline(x, y, lam=0.)
        spline_interp = make_interp_spline(x, y, 3, bc_type='natural')

        grid = np.linspace(x[0], x[-1], 2 * n)
        assert_allclose(spline_GCV(grid),
                        spline_interp(grid),
                        atol=1e-15)

    def test_weighted_smoothing_spline(self):
        # create data sample
        np.random.seed(1234)
        n = 100
        x = np.sort(np.random.random_sample(n) * 4 - 2)
        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)

        spl = make_smoothing_spline(x, y)

        # in order not to iterate over all of the indices, we select 10 of
        # them randomly
        for ind in np.random.choice(range(100), size=10):
            w = np.ones(n)
            w[ind] = 30.
            spl_w = make_smoothing_spline(x, y, w)
            # check that spline with weight in a certain point is closer to the
            # original point than the one without weights
            orig = abs(spl(x[ind]) - y[ind])
            weighted = abs(spl_w(x[ind]) - y[ind])

            # NOTE(review): a plain ``assert`` (or pytest.fail) would be more
            # idiomatic than raising ValueError from inside a test.
            if orig < weighted:
                raise ValueError(f'Spline with weights should be closer to the'
                                 f' points than the original one: {orig:.4} < '
                                 f'{weighted:.4}')
+
+
+################################
+# NdBSpline tests
def bspline2(xy, t, c, k):
    """A naive 2D tensor product spline evaluation (reference for tests)."""
    x, y = xy
    tx, ty = t
    nx = len(tx) - k - 1
    assert (nx >= k+1)
    ny = len(ty) - k - 1
    assert (ny >= k+1)
    total = 0.0
    for ix in range(nx):
        bx = B(x, k, ix, tx)
        for iy in range(ny):
            total += c[ix, iy] * bx * B(y, k, iy, ty)
    return total
+
+
def B(x, k, i, t):
    """Evaluate the i-th degree-k b-spline basis element on knots `t` at `x`
    via the naive Cox-de Boor recursion (reference implementation)."""
    if k == 0:
        # indicator of the half-open knot interval [t[i], t[i+1])
        return 1.0 if t[i] <= x < t[i+1] else 0.0
    denom_l = t[i+k] - t[i]
    left = 0.0 if denom_l == 0 else (x - t[i]) / denom_l * B(x, k-1, i, t)
    denom_r = t[i+k+1] - t[i+1]
    right = 0.0 if denom_r == 0 else (t[i+k+1] - x) / denom_r * B(x, k-1, i+1, t)
    return left + right
+
+
def bspline(x, t, c, k):
    """Naive 1D b-spline evaluation: sum of basis elements times coefficients."""
    n = len(t) - k - 1
    assert (n >= k+1) and (len(c) >= n)
    total = 0.0
    for i in range(n):
        total += c[i] * B(x, k, i, t)
    return total
+
+
class NdBSpline0:
    def __init__(self, t, c, k=3):
        """Tensor product spline object.

        c[i1, i2, ..., id] * B(x1, i1) * B(x2, i2) * ... * B(xd, id)

        Parameters
        ----------
        t : tuple of 1D ndarrays
            knot vectors in directions 1, 2, ... d
            ``len(t[i]) == n[i] + k + 1``
        c : ndarray, shape (n1, n2, ..., nd, ...)
            b-spline coefficients
        k : int or length-d tuple of integers
            spline degrees.
        """
        ndim = len(t)
        # `c` may have trailing (batch) dimensions beyond the spline ones
        assert ndim <= len(c.shape)

        try:
            len(k)
        except TypeError:
            # make k a tuple: one degree per dimension
            k = (k,)*ndim

        self.k = tuple(operator.index(ki) for ki in k)
        self.t = tuple(np.asarray(ti, dtype=float) for ti in t)
        self.c = c

    def __call__(self, x):
        ndim = len(self.t)
        # a single evaluation point: `x` is a 1D array_like, shape (ndim,)
        assert len(x) == ndim

        # get the indices in an ndim-dimensional vector
        i = ['none', ]*ndim
        for d in range(ndim):
            td, xd = self.t[d], x[d]
            k = self.k[d]

            # find the index for x[d]: the knot interval containing xd
            if xd == td[k]:
                i[d] = k
            else:
                i[d] = np.searchsorted(td, xd) - 1
            assert td[i[d]] <= xd <= td[i[d]+1]
            assert i[d] >= k and i[d] < len(td) - k
        i = tuple(i)

        # iterate over the dimensions, form linear combinations of
        # products B(x_1) * B(x_2) * ... B(x_N) of (k+1)**N b-splines
        # which are non-zero at `i = (i_1, i_2, ..., i_N)`.
        result = 0
        iters = [range(i[d] - self.k[d], i[d] + 1) for d in range(ndim)]
        for idx in itertools.product(*iters):
            term = self.c[idx] * np.prod([B(x[d], self.k[d], idx[d], self.t[d])
                                          for d in range(ndim)])
            result += term
        return result
+
+
+class TestNdBSpline:
+
+ def test_1D(self):
+ # test ndim=1 agrees with BSpline
+ rng = np.random.default_rng(12345)
+ n, k = 11, 3
+ n_tr = 7
+ t = np.sort(rng.uniform(size=n + k + 1))
+ c = rng.uniform(size=(n, n_tr))
+
+ b = BSpline(t, c, k)
+ nb = NdBSpline((t,), c, k)
+
+ xi = rng.uniform(size=21)
+ # NdBSpline expects xi.shape=(npts, ndim)
+ assert_allclose(nb(xi[:, None]),
+ b(xi), atol=1e-14)
+ assert nb(xi[:, None]).shape == (xi.shape[0], c.shape[1])
+
+ def make_2d_case(self):
+ # make a 2D separable spline
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ y_1 = x**3 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=3)
+
+ t2 = (spl.t, spl_1.t)
+ c2 = spl.c[:, None] * spl_1.c[None, :]
+
+ return t2, c2, 3
+
+ def make_2d_mixed(self):
+ # make a 2D separable spline w/ kx=3, ky=2
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ x = np.arange(5) + 1.5
+ y_1 = x**2 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=2)
+
+ t2 = (spl.t, spl_1.t)
+ c2 = spl.c[:, None] * spl_1.c[None, :]
+
+ return t2, c2, spl.k, spl_1.k
+
+ def test_2D_separable(self):
+ xi = [(1.5, 2.5), (2.5, 1), (0.5, 1.5)]
+ t2, c2, k = self.make_2d_case()
+ target = [x**3 * (y**3 + 2*y) for (x, y) in xi]
+
+ # sanity check: bspline2 gives the product as constructed
+ assert_allclose([bspline2(xy, t2, c2, k) for xy in xi],
+ target,
+ atol=1e-14)
+
+ # check evaluation on a 2D array: the 1D array of 2D points
+ bspl2 = NdBSpline(t2, c2, k=3)
+ assert bspl2(xi).shape == (len(xi), )
+ assert_allclose(bspl2(xi),
+ target, atol=1e-14)
+
+ # now check on a multidim xi
+ rng = np.random.default_rng(12345)
+ xi = rng.uniform(size=(4, 3, 2)) * 5
+ result = bspl2(xi)
+ assert result.shape == (4, 3)
+
+ # also check the values
+ x, y = xi.reshape((-1, 2)).T
+ assert_allclose(result.ravel(),
+ x**3 * (y**3 + 2*y), atol=1e-14)
+
+ def test_2D_separable_2(self):
+ # test `c` with trailing dimensions, i.e. c.ndim > ndim
+ ndim = 2
+ xi = [(1.5, 2.5), (2.5, 1), (0.5, 1.5)]
+ target = [x**3 * (y**3 + 2*y) for (x, y) in xi]
+
+ t2, c2, k = self.make_2d_case()
+ c2_4 = np.dstack((c2, c2, c2, c2)) # c22.shape = (6, 6, 4)
+
+ xy = (1.5, 2.5)
+ bspl2_4 = NdBSpline(t2, c2_4, k=3)
+ result = bspl2_4(xy)
+ val_single = NdBSpline(t2, c2, k)(xy)
+ assert result.shape == (4,)
+ assert_allclose(result,
+ [val_single, ]*4, atol=1e-14)
+
+ # now try the array xi : the output.shape is (3, 4) where 3
+ # is the number of points in xi and 4 is the trailing dimension of c
+ assert bspl2_4(xi).shape == np.shape(xi)[:-1] + bspl2_4.c.shape[ndim:]
+ assert_allclose(bspl2_4(xi) - np.asarray(target)[:, None],
+ 0, atol=5e-14)
+
+ # two trailing dimensions
+ c2_22 = c2_4.reshape((6, 6, 2, 2))
+ bspl2_22 = NdBSpline(t2, c2_22, k=3)
+
+ result = bspl2_22(xy)
+ assert result.shape == (2, 2)
+ assert_allclose(result,
+ [[val_single, val_single],
+ [val_single, val_single]], atol=1e-14)
+
+ # now try the array xi : the output shape is (3, 2, 2)
+ # for 3 points in xi and c trailing dimensions being (2, 2)
+ assert (bspl2_22(xi).shape ==
+ np.shape(xi)[:-1] + bspl2_22.c.shape[ndim:])
+ assert_allclose(bspl2_22(xi) - np.asarray(target)[:, None, None],
+ 0, atol=5e-14)
+
+ def test_2D_random(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size-k-1, ty.size-k-1))
+
+ spl = NdBSpline((tx, ty), c, k=k)
+
+ xi = (1., 1.)
+ assert_allclose(spl(xi),
+ bspline2(xi, (tx, ty), c, k), atol=1e-14)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+ assert_allclose(spl(xi),
+ [bspline2(xy, (tx, ty), c, k) for xy in xi],
+ atol=1e-14)
+
+ def test_2D_mixed(self):
+ t2, c2, kx, ky = self.make_2d_mixed()
+ xi = [(1.4, 4.5), (2.5, 2.4), (4.5, 3.5)]
+ target = [x**3 * (y**2 + 2*y) for (x, y) in xi]
+ bspl2 = NdBSpline(t2, c2, k=(kx, ky))
+ assert bspl2(xi).shape == (len(xi), )
+ assert_allclose(bspl2(xi),
+ target, atol=1e-14)
+
+ def test_2D_derivative(self):
+ t2, c2, kx, ky = self.make_2d_mixed()
+ xi = [(1.4, 4.5), (2.5, 2.4), (4.5, 3.5)]
+ bspl2 = NdBSpline(t2, c2, k=(kx, ky))
+
+ der = bspl2(xi, nu=(1, 0))
+ assert_allclose(der,
+ [3*x**2 * (y**2 + 2*y) for x, y in xi], atol=1e-14)
+
+ der = bspl2(xi, nu=(1, 1))
+ assert_allclose(der,
+ [3*x**2 * (2*y + 2) for x, y in xi], atol=1e-14)
+
+ der = bspl2(xi, nu=(0, 0))
+ assert_allclose(der,
+ [x**3 * (y**2 + 2*y) for x, y in xi], atol=1e-14)
+
+ with assert_raises(ValueError):
+ # all(nu >= 0)
+ der = bspl2(xi, nu=(-1, 0))
+
+ with assert_raises(ValueError):
+ # len(nu) == ndim
+ der = bspl2(xi, nu=(-1, 0, 1))
+
+ def test_2D_mixed_random(self):
+ rng = np.random.default_rng(12345)
+ kx, ky = 2, 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size - kx - 1, ty.size - ky - 1))
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+
+ bspl2 = NdBSpline((tx, ty), c, k=(kx, ky))
+ bspl2_0 = NdBSpline0((tx, ty), c, k=(kx, ky))
+
+ assert_allclose(bspl2(xi),
+ [bspl2_0(xp) for xp in xi], atol=1e-14)
+
+ def test_tx_neq_ty(self):
+ # 2D separable spline w/ len(tx) != len(ty)
+ x = np.arange(6)
+ y = np.arange(7) + 1.5
+
+ spl_x = make_interp_spline(x, x**3, k=3)
+ spl_y = make_interp_spline(y, y**2 + 2*y, k=3)
+ cc = spl_x.c[:, None] * spl_y.c[None, :]
+ bspl = NdBSpline((spl_x.t, spl_y.t), cc, (spl_x.k, spl_y.k))
+
+ values = (x**3)[:, None] * (y**2 + 2*y)[None, :]
+ rgi = RegularGridInterpolator((x, y), values)
+
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+ bxi = bspl(xi)
+
+ assert not np.isnan(bxi).any()
+ assert_allclose(bxi, rgi(xi), atol=1e-14)
+ assert_allclose(bxi.reshape(values.shape), values, atol=1e-14)
+
+ def make_3d_case(self):
+ # make a 3D separable spline
+ x = np.arange(6)
+ y = x**3
+ spl = make_interp_spline(x, y, k=3)
+
+ y_1 = x**3 + 2*x
+ spl_1 = make_interp_spline(x, y_1, k=3)
+
+ y_2 = x**3 + 3*x + 1
+ spl_2 = make_interp_spline(x, y_2, k=3)
+
+ t2 = (spl.t, spl_1.t, spl_2.t)
+ c2 = (spl.c[:, None, None] *
+ spl_1.c[None, :, None] *
+ spl_2.c[None, None, :])
+
+ return t2, c2, 3
+
+ def test_3D_separable(self):
+ rng = np.random.default_rng(12345)
+ x, y, z = rng.uniform(size=(3, 11)) * 5
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ xi = [_ for _ in zip(x, y, z)]
+ result = bspl3(xi)
+ assert result.shape == (11,)
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_3D_derivative(self):
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+ rng = np.random.default_rng(12345)
+ x, y, z = rng.uniform(size=(3, 11)) * 5
+ xi = [_ for _ in zip(x, y, z)]
+
+ assert_allclose(bspl3(xi, nu=(1, 0, 0)),
+ 3*x**2 * (y**3 + 2*y) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 0, 0)),
+ 6*x * (y**3 + 2*y) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 0)),
+ 6*x * (3*y**2 + 2) * (z**3 + 3*z + 1), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 3)),
+ 6*x * (3*y**2 + 2) * (6), atol=1e-14)
+
+ assert_allclose(bspl3(xi, nu=(2, 1, 4)),
+ np.zeros(len(xi)), atol=1e-14)
+
+ def test_3D_random(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ tz = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1))
+
+ spl = NdBSpline((tx, ty, tz), c, k=k)
+ spl_0 = NdBSpline0((tx, ty, tz), c, k=k)
+
+ xi = (1., 1., 1)
+ assert_allclose(spl(xi), spl_0(xi), atol=1e-14)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1],
+ [0.9, 1.4, 1.9]]
+ assert_allclose(spl(xi), [spl_0(xp) for xp in xi], atol=1e-14)
+
+ def test_3D_random_complex(self):
+ rng = np.random.default_rng(12345)
+ k = 3
+ tx = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=7)) * 3, 3, 3, 3, 3]
+ ty = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ tz = np.r_[0, 0, 0, 0, np.sort(rng.uniform(size=8)) * 4, 4, 4, 4, 4]
+ c = (rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1)) +
+ rng.uniform(size=(tx.size-k-1, ty.size-k-1, tz.size-k-1))*1j)
+
+ spl = NdBSpline((tx, ty, tz), c, k=k)
+ spl_re = NdBSpline((tx, ty, tz), c.real, k=k)
+ spl_im = NdBSpline((tx, ty, tz), c.imag, k=k)
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1],
+ [0.9, 1.4, 1.9]]
+ assert_allclose(spl(xi),
+ spl_re(xi) + 1j*spl_im(xi), atol=1e-14)
+
+ @pytest.mark.parametrize('cls_extrap', [None, True])
+ @pytest.mark.parametrize('call_extrap', [None, True])
+ def test_extrapolate_3D_separable(self, cls_extrap, call_extrap):
+ # test that extrapolate=True does extrapolate
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3, extrapolate=cls_extrap)
+
+ # evaluate out of bounds
+ x, y, z = [-2, -1, 7], [-3, -0.5, 6.5], [-1, -1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = [_ for _ in zip(x, y, z)]
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=call_extrap)
+ assert_allclose(result, target, atol=1e-14)
+
+ @pytest.mark.parametrize('extrap', [(False, True), (True, None)])
+ def test_extrapolate_3D_separable_2(self, extrap):
+ # test that call(..., extrapolate=None) defers to self.extrapolate,
+ # otherwise supersedes self.extrapolate
+ t3, c3, k = self.make_3d_case()
+ cls_extrap, call_extrap = extrap
+ bspl3 = NdBSpline(t3, c3, k=3, extrapolate=cls_extrap)
+
+ # evaluate out of bounds
+ x, y, z = [-2, -1, 7], [-3, -0.5, 6.5], [-1, -1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = [_ for _ in zip(x, y, z)]
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=call_extrap)
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_extrapolate_false_3D_separable(self):
+ # test that extrapolate=False produces nans for out-of-bounds values
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ # evaluate out of bounds and inside
+ x, y, z = [-2, 1, 7], [-3, 0.5, 6.5], [-1, 1.5, 7.5]
+ x, y, z = map(np.asarray, (x, y, z))
+ xi = [_ for _ in zip(x, y, z)]
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+
+ result = bspl3(xi, extrapolate=False)
+ assert np.isnan(result[0])
+ assert np.isnan(result[-1])
+ assert_allclose(result[1:-1], target[1:-1], atol=1e-14)
+
+ def test_x_nan_3D(self):
+ # test that spline(nan) is nan
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ # evaluate out of bounds and inside
+ x = np.asarray([-2, 3, np.nan, 1, 2, 7, np.nan])
+ y = np.asarray([-3, 3.5, 1, np.nan, 3, 6.5, 6.5])
+ z = np.asarray([-1, 3.5, 2, 3, np.nan, 7.5, 7.5])
+ xi = [_ for _ in zip(x, y, z)]
+ target = x**3 * (y**3 + 2*y) * (z**3 + 3*z + 1)
+ mask = np.isnan(x) | np.isnan(y) | np.isnan(z)
+ target[mask] = np.nan
+
+ result = bspl3(xi)
+ assert np.isnan(result[mask]).all()
+ assert_allclose(result, target, atol=1e-14)
+
+ def test_non_c_contiguous(self):
+ # check that non C-contiguous inputs are OK
+ rng = np.random.default_rng(12345)
+ kx, ky = 3, 3
+ tx = np.sort(rng.uniform(low=0, high=4, size=16))
+ tx = np.r_[(tx[0],)*kx, tx, (tx[-1],)*kx]
+ ty = np.sort(rng.uniform(low=0, high=4, size=16))
+ ty = np.r_[(ty[0],)*ky, ty, (ty[-1],)*ky]
+
+ assert not tx[::2].flags.c_contiguous
+ assert not ty[::2].flags.c_contiguous
+
+ c = rng.uniform(size=(tx.size//2 - kx - 1, ty.size//2 - ky - 1))
+ c = c.T
+ assert not c.flags.c_contiguous
+
+ xi = np.c_[[1, 1.5, 2],
+ [1.1, 1.6, 2.1]]
+
+ bspl2 = NdBSpline((tx[::2], ty[::2]), c, k=(kx, ky))
+ bspl2_0 = NdBSpline0((tx[::2], ty[::2]), c, k=(kx, ky))
+
+ assert_allclose(bspl2(xi),
+ [bspl2_0(xp) for xp in xi], atol=1e-14)
+
+ def test_readonly(self):
+ t3, c3, k = self.make_3d_case()
+ bspl3 = NdBSpline(t3, c3, k=3)
+
+ for i in range(3):
+ t3[i].flags.writeable = False
+ c3.flags.writeable = False
+
+ bspl3_ = NdBSpline(t3, c3, k=3)
+
+ assert bspl3((1, 2, 3)) == bspl3_((1, 2, 3))
+
+ def test_design_matrix(self):
+ t3, c3, k = self.make_3d_case()
+
+ xi = np.asarray([[1, 2, 3], [4, 5, 6]])
+ dm = NdBSpline(t3, c3, k).design_matrix(xi, t3, k)
+ dm1 = NdBSpline.design_matrix(xi, t3, [k, k, k])
+ assert dm.shape[0] == xi.shape[0]
+ assert_allclose(dm.todense(), dm1.todense(), atol=1e-16)
+
+ with assert_raises(ValueError):
+ NdBSpline.design_matrix([1, 2, 3], t3, [k]*3)
+
+ with assert_raises(ValueError, match="Data and knots*"):
+ NdBSpline.design_matrix([[1, 2]], t3, [k]*3)
+
+
+class TestMakeND:
+ def test_2D_separable_simple(self):
+ x = np.arange(6)
+ y = np.arange(6) + 0.5
+ values = x[:, None]**3 * (y**3 + 2*y)[None, :]
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ bspl = make_ndbspl((x, y), values, k=1)
+ assert_allclose(bspl(xi), values.ravel(), atol=1e-15)
+
+ # test the coefficients vs outer product of 1D coefficients
+ spl_x = make_interp_spline(x, x**3, k=1)
+ spl_y = make_interp_spline(y, y**3 + 2*y, k=1)
+ cc = spl_x.c[:, None] * spl_y.c[None, :]
+ assert_allclose(cc, bspl.c, atol=1e-11, rtol=0)
+
+ # test against RGI
+ from scipy.interpolate import RegularGridInterpolator as RGI
+ rgi = RGI((x, y), values, method='linear')
+ assert_allclose(rgi(xi), bspl(xi), atol=1e-14)
+
+ def test_2D_separable_trailing_dims(self):
+ # test `c` with trailing dimensions, i.e. c.ndim > ndim
+ x = np.arange(6)
+ y = np.arange(6)
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ # make values4.shape = (6, 6, 4)
+ values = x[:, None]**3 * (y**3 + 2*y)[None, :]
+ values4 = np.dstack((values, values, values, values))
+ bspl = make_ndbspl((x, y), values4, k=3, solver=ssl.spsolve)
+
+ result = bspl(xi)
+ target = np.dstack((values, values, values, values))
+ assert result.shape == (36, 4)
+ assert_allclose(result.reshape(6, 6, 4),
+ target, atol=1e-14)
+
+ # now two trailing dimensions
+ values22 = values4.reshape((6, 6, 2, 2))
+ bspl = make_ndbspl((x, y), values22, k=3, solver=ssl.spsolve)
+
+ result = bspl(xi)
+ assert result.shape == (36, 2, 2)
+ assert_allclose(result.reshape(6, 6, 2, 2),
+ target.reshape((6, 6, 2, 2)), atol=1e-14)
+
+ @pytest.mark.parametrize('k', [(3, 3), (1, 1), (3, 1), (1, 3), (3, 5)])
+ def test_2D_mixed(self, k):
+ # make a 2D separable spline w/ len(tx) != len(ty)
+ x = np.arange(6)
+ y = np.arange(7) + 1.5
+ xi = [(a, b) for a, b in itertools.product(x, y)]
+
+ values = (x**3)[:, None] * (y**2 + 2*y)[None, :]
+ bspl = make_ndbspl((x, y), values, k=k, solver=ssl.spsolve)
+ assert_allclose(bspl(xi), values.ravel(), atol=1e-15)
+
+ def _get_sample_2d_data(self):
+ # from test_rgi.py::TestIntepN
+ x = np.array([.5, 2., 3., 4., 5.5, 6.])
+ y = np.array([.5, 2., 3., 4., 5.5, 6.])
+ z = np.array(
+ [
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 3, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ ]
+ )
+ return x, y, z
+
+ def test_2D_vs_RGI_linear(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=1)
+ rgi = RegularGridInterpolator((x, y), z, method='linear')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ def test_2D_vs_RGI_cubic(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=3, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ @pytest.mark.parametrize('solver', [ssl.gmres, ssl.gcrotmk])
+ def test_2D_vs_RGI_cubic_iterative(self, solver):
+ # same as `test_2D_vs_RGI_cubic`, only with an iterative solver.
+ # Note the need to add an explicit `rtol` solver_arg to achieve the
+ # target accuracy of 1e-14. (the relation between solver atol/rtol
+ # and the accuracy of the final result is not direct and needs experimenting)
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=3, solver=solver, rtol=1e-6)
+ rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ def test_2D_vs_RGI_quintic(self):
+ x, y, z = self._get_sample_2d_data()
+ bspl = make_ndbspl((x, y), z, k=5, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y), z, method='quintic_legacy')
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ @pytest.mark.parametrize(
+ 'k, meth', [(1, 'linear'), (3, 'cubic_legacy'), (5, 'quintic_legacy')]
+ )
+ def test_3D_random_vs_RGI(self, k, meth):
+ rndm = np.random.default_rng(123456)
+ x = np.cumsum(rndm.uniform(size=6))
+ y = np.cumsum(rndm.uniform(size=7))
+ z = np.cumsum(rndm.uniform(size=8))
+ values = rndm.uniform(size=(6, 7, 8))
+
+ bspl = make_ndbspl((x, y, z), values, k=k, solver=ssl.spsolve)
+ rgi = RegularGridInterpolator((x, y, z), values, method=meth)
+
+ xi = np.random.uniform(low=0.7, high=2.1, size=(11, 3))
+ assert_allclose(bspl(xi), rgi(xi), atol=1e-14)
+
+ def test_solver_err_not_converged(self):
+ x, y, z = self._get_sample_2d_data()
+ solver_args = {'maxiter': 1}
+ with assert_raises(ValueError, match='solver'):
+ make_ndbspl((x, y), z, k=3, **solver_args)
+
+ with assert_raises(ValueError, match='solver'):
+ make_ndbspl((x, y), np.dstack((z, z)), k=3, **solver_args)
+
+
+class TestFpchec:
+ # https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
+
+ def test_1D_x_t(self):
+ k = 1
+ t = np.arange(12).reshape(2, 6)
+ x = np.arange(12)
+
+ with pytest.raises(ValueError, match="1D sequence"):
+ _b.fpcheck(x, t, k)
+
+ with pytest.raises(ValueError, match="1D sequence"):
+ _b.fpcheck(t, x, k)
+
+ def test_condition_1(self):
+ # c 1) k+1 <= n-k-1 <= m
+ k = 3
+ n = 2*(k + 1) - 1 # not OK
+ m = n + 11 # OK
+ t = np.arange(n)
+ x = np.arange(m)
+
+ assert dfitpack.fpchec(x, t, k) == 10
+ with pytest.raises(ValueError, match="Need k+1*"):
+ _b.fpcheck(x, t, k)
+
+ n = 2*(k+1) + 1 # OK
+ m = n - k - 2 # not OK
+ t = np.arange(n)
+ x = np.arange(m)
+
+ assert dfitpack.fpchec(x, t, k) == 10
+ with pytest.raises(ValueError, match="Need k+1*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_2(self):
+ # c 2) t(1) <= t(2) <= ... <= t(k+1)
+ # c t(n-k) <= t(n-k+1) <= ... <= t(n)
+ k = 3
+ t = [0]*(k+1) + [2] + [5]*(k+1) # this is OK
+ x = [1, 2, 3, 4, 4.5]
+
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None # does not raise
+
+ tt = t.copy()
+ tt[-1] = tt[0] # not OK
+ assert dfitpack.fpchec(x, tt, k) == 20
+ with pytest.raises(ValueError, match="Last k knots*"):
+ _b.fpcheck(x, tt, k)
+
+ tt = t.copy()
+ tt[0] = tt[-1] # not OK
+ assert dfitpack.fpchec(x, tt, k) == 20
+ with pytest.raises(ValueError, match="First k knots*"):
+ _b.fpcheck(x, tt, k)
+
+ def test_condition_3(self):
+ # c 3) t(k+1) < t(k+2) < ... < t(n-k)
+ k = 3
+ t = [0]*(k+1) + [2, 3] + [5]*(k+1) # this is OK
+ x = [1, 2, 3, 3.5, 4, 4.5]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ t = [0]*(k+1) + [2, 2] + [5]*(k+1) # this is not OK
+ assert dfitpack.fpchec(x, t, k) == 30
+ with pytest.raises(ValueError, match="Internal knots*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_4(self):
+ # c 4) t(k+1) <= x(i) <= t(n-k)
+ # NB: FITPACK's fpchec only checks x[0] & x[-1], so we follow.
+ k = 3
+ t = [0]*(k+1) + [5]*(k+1)
+ x = [1, 2, 3, 3.5, 4, 4.5] # this is OK
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ xx = x.copy()
+ xx[0] = t[0] # still OK
+ assert dfitpack.fpchec(xx, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ xx = x.copy()
+ xx[0] = t[0] - 1 # not OK
+ assert dfitpack.fpchec(xx, t, k) == 40
+ with pytest.raises(ValueError, match="Out of bounds*"):
+ _b.fpcheck(xx, t, k)
+
+ xx = x.copy()
+ xx[-1] = t[-1] + 1 # not OK
+ assert dfitpack.fpchec(xx, t, k) == 40
+ with pytest.raises(ValueError, match="Out of bounds*"):
+ _b.fpcheck(xx, t, k)
+
+ # ### Test the S-W condition (no 5)
+ # c 5) the conditions specified by schoenberg and whitney must hold
+ # c for at least one subset of data points, i.e. there must be a
+ # c subset of data points y(j) such that
+ # c t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
+ def test_condition_5_x1xm(self):
+ # x(1).ge.t(k2) .or. x(m).le.t(nk1)
+ k = 1
+ t = [0, 0, 1, 2, 2]
+ x = [1.1, 1.1, 1.1]
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ x = [0.5, 0.5, 0.5]
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_5_k1(self):
+ # special case nk3 (== n - k - 2) < 2
+ k = 1
+ t = [0, 0, 1, 1]
+ x = [0.5, 0.6]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ def test_condition_5_1(self):
+ # basically, there can't be an interval of t[j]..t[j+k+1] with no x
+ k = 3
+ t = [0]*(k+1) + [2] + [5]*(k+1)
+ x = [3]*5
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ t = [0]*(k+1) + [2] + [5]*(k+1)
+ x = [1]*5
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ def test_condition_5_2(self):
+ # same as _5_1, only the empty interval is in the middle
+ k = 3
+ t = [0]*(k+1) + [2, 3] + [5]*(k+1)
+ x = [1.1]*5 + [4]
+
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
+ # and this one is OK
+ x = [1.1]*4 + [4, 4]
+ assert dfitpack.fpchec(x, t, k) == 0
+ assert _b.fpcheck(x, t, k) is None
+
+ def test_condition_5_3(self):
+ # similar to _5_2, covers a different failure branch
+ k = 1
+ t = [0, 0, 2, 3, 4, 5, 6, 7, 7]
+ x = [1, 1, 1, 5.2, 5.2, 5.2, 6.5]
+
+ assert dfitpack.fpchec(x, t, k) == 50
+ with pytest.raises(ValueError, match="Schoenberg-Whitney*"):
+ _b.fpcheck(x, t, k)
+
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76178681de063e21fda5afedd4759248e8e19bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack.py
@@ -0,0 +1,503 @@
+import itertools
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_,
+ assert_almost_equal, assert_array_almost_equal)
+from pytest import raises as assert_raises
+import pytest
+from scipy._lib._testutils import check_free_memory
+
+from scipy.interpolate import RectBivariateSpline
+
+from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
+ sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
+from scipy.interpolate.dfitpack import regrid_smth
+from scipy.interpolate._fitpack2 import dfitpack_int
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+def norm2(x):
+ return np.sqrt(np.dot(x.T, x))
+
+
+def f1(x, d=0):
+ """Derivatives of sin->cos->-sin->-cos."""
+ if d % 4 == 0:
+ return np.sin(x)
+ if d % 4 == 1:
+ return np.cos(x)
+ if d % 4 == 2:
+ return -np.sin(x)
+ if d % 4 == 3:
+ return -np.cos(x)
+
+
+def makepairs(x, y):
+ """Helper function to create an array of pairs of x and y."""
+ xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
+ return xy.T
+
+
+class TestSmokeTests:
+ """
+ Smoke tests (with a few asserts) for fitpack routines -- mostly
+ check that they are runnable
+ """
+ def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
+ xb=None, xe=None):
+ if xb is None:
+ xb = a
+ if xe is None:
+ xe = b
+
+ N = 20
+ # nodes and middle points of the nodes
+ x = np.linspace(a, b, N + 1)
+ x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
+ v = f1(x)
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0 / N
+ tol = 5 * h**(.75*(k-d))
+ if s > 0:
+ tol += 1e5*s
+ return tol
+
+ for k in range(1, 6):
+ tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
+ tt = tck[0][k:-k] if at_nodes else x1
+
+ for d in range(k+1):
+ tol = err_est(k, d)
+ err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
+ assert err < tol
+
+ def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
+ a, b, dx = 0, 2*np.pi, 0.2*np.pi
+ x = np.linspace(a, b, N+1) # nodes
+ v = np.sin(x)
+
+ def err_est(k, d):
+ # Assume f has all derivatives < 1
+ h = 1.0 / N
+ tol = 5 * h**(.75*(k-d))
+ return tol
+
+ nk = []
+ for k in range(1, 6):
+ tck = splrep(x, v, s=0, per=per, k=k, xe=b)
+ nk.append([splint(ia, ib, tck), spalde(dx, tck)])
+
+ k = 1
+ for r in nk:
+ d = 0
+ for dr in r[1]:
+ tol = err_est(k, d)
+ assert_allclose(dr, f1(dx, d), atol=0, rtol=tol)
+ d = d+1
+ k = k+1
+
+ def test_smoke_splrep_splev(self):
+ self.check_1(s=1e-6)
+ self.check_1(b=1.5*np.pi)
+ self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)
+
+ @pytest.mark.parametrize('per', [0, 1])
+ @pytest.mark.parametrize('at_nodes', [True, False])
+ def test_smoke_splrep_splev_2(self, per, at_nodes):
+ self.check_1(per=per, at_nodes=at_nodes)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('per', [0, 1])
+ def test_smoke_splint_spalde(self, N, per):
+ self.check_2(per=per, N=N)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('per', [0, 1])
+ def test_smoke_splint_spalde_iaib(self, N, per):
+ self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)
+
+ def test_smoke_sproot(self):
+ # sproot is only implemented for k=3
+ a, b = 0.1, 15
+ x = np.linspace(a, b, 20)
+ v = np.sin(x)
+
+ for k in [1, 2, 4, 5]:
+ tck = splrep(x, v, s=0, per=0, k=k, xe=b)
+ with assert_raises(ValueError):
+ sproot(tck)
+
+ k = 3
+ tck = splrep(x, v, s=0, k=3)
+ roots = sproot(tck)
+ assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
+ assert_allclose(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)
+
+ @pytest.mark.parametrize('N', [20, 50])
+ @pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
+ def test_smoke_splprep_splrep_splev(self, N, k):
+ a, b, dx = 0, 2.*np.pi, 0.2*np.pi
+ x = np.linspace(a, b, N+1) # nodes
+ v = np.sin(x)
+
+ tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+ uv = splev(dx, tckp)
+ err1 = abs(uv[1] - np.sin(uv[0]))
+ assert err1 < 1e-2
+
+ tck = splrep(x, v, s=0, per=0, k=k)
+ err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
+ assert err2 < 1e-2
+
+ # Derivatives of parametric cubic spline at u (first function)
+ if k == 3:
+ tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+ for d in range(1, k+1):
+ uv = splev(dx, tckp, d)
+
+ def test_smoke_bisplrep_bisplev(self):
+ xb, xe = 0, 2.*np.pi
+ yb, ye = 0, 2.*np.pi
+ kx, ky = 3, 3
+ Nx, Ny = 20, 20
+
+ def f2(x, y):
+ return np.sin(x+y)
+
+ x = np.linspace(xb, xe, Nx + 1)
+ y = np.linspace(yb, ye, Ny + 1)
+ xy = makepairs(x, y)
+ tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
+
+ tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
+ t2 = makepairs(tt[0], tt[1])
+ v1 = bisplev(tt[0], tt[1], tck)
+ v2 = f2(t2[0], t2[1])
+ v2.shape = len(tt[0]), len(tt[1])
+
+ assert norm2(np.ravel(v1 - v2)) < 1e-2
+
+
+class TestSplev:
+ def test_1d_shape(self):
+ x = [1,2,3,4,5]
+ y = [4,5,6,7,8]
+ tck = splrep(x, y)
+ z = splev([1], tck)
+ assert_equal(z.shape, (1,))
+ z = splev(1, tck)
+ assert_equal(z.shape, ())
+
+ def test_2d_shape(self):
+ x = [1, 2, 3, 4, 5]
+ y = [4, 5, 6, 7, 8]
+ tck = splrep(x, y)
+ t = np.array([[1.0, 1.5, 2.0, 2.5],
+ [3.0, 3.5, 4.0, 4.5]])
+ z = splev(t, tck)
+ z0 = splev(t[0], tck)
+ z1 = splev(t[1], tck)
+ assert_equal(z, np.vstack((z0, z1)))
+
+ def test_extrapolation_modes(self):
+ # test extrapolation modes
+ # * if ext=0, return the extrapolated value.
+ # * if ext=1, return 0
+ # * if ext=2, raise a ValueError
+ # * if ext=3, return the boundary value.
+ x = [1,2,3]
+ y = [0,2,4]
+ tck = splrep(x, y, k=1)
+
+ rstl = [[-2, 6], [0, 0], None, [0, 4]]
+ for ext in (0, 1, 3):
+ assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
+
+ assert_raises(ValueError, splev, [0, 4], tck, ext=2)
+
+
+class TestSplder:
+ def setup_method(self):
+ # non-uniform grid, just to make it sure
+ x = np.linspace(0, 1, 100)**3
+ y = np.sin(20 * x)
+ self.spl = splrep(x, y)
+
+ # double check that knots are non-uniform
+ assert_(np.ptp(np.diff(self.spl[0])) > 0)
+
+ def test_inverse(self):
+ # Check that antiderivative + derivative is identity.
+ for n in range(5):
+ spl2 = splantider(self.spl, n)
+ spl3 = splder(spl2, n)
+ assert_allclose(self.spl[0], spl3[0])
+ assert_allclose(self.spl[1], spl3[1])
+ assert_equal(self.spl[2], spl3[2])
+
+ def test_splder_vs_splev(self):
+ # Check derivative vs. FITPACK
+
+ for n in range(3+1):
+ # Also extrapolation!
+ xx = np.linspace(-1, 2, 2000)
+ if n == 3:
+ # ... except that FITPACK extrapolates strangely for
+ # order 0, so let's not check that.
+ xx = xx[(xx >= 0) & (xx <= 1)]
+
+ dy = splev(xx, self.spl, n)
+ spl2 = splder(self.spl, n)
+ dy2 = splev(xx, spl2)
+ if n == 1:
+ assert_allclose(dy, dy2, rtol=2e-6)
+ else:
+ assert_allclose(dy, dy2)
+
+ def test_splantider_vs_splint(self):
+ # Check antiderivative vs. FITPACK
+ spl2 = splantider(self.spl)
+
+ # no extrapolation, splint assumes function is zero outside
+ # range
+ xx = np.linspace(0, 1, 20)
+
+ for x1 in xx:
+ for x2 in xx:
+ y1 = splint(x1, x2, self.spl)
+ y2 = splev(x2, spl2) - splev(x1, spl2)
+ assert_allclose(y1, y2)
+
+ def test_order0_diff(self):
+ assert_raises(ValueError, splder, self.spl, 4)
+
+ def test_kink(self):
+ # Should refuse to differentiate splines with kinks
+
+ spl2 = insert(0.5, self.spl, m=2)
+ splder(spl2, 2) # Should work
+ assert_raises(ValueError, splder, spl2, 3)
+
+ spl2 = insert(0.5, self.spl, m=3)
+ splder(spl2, 1) # Should work
+ assert_raises(ValueError, splder, spl2, 2)
+
+ spl2 = insert(0.5, self.spl, m=4)
+ assert_raises(ValueError, splder, spl2, 1)
+
+ def test_multidim(self):
+ # c can have trailing dims
+ for n in range(3):
+ t, c, k = self.spl
+ c2 = np.c_[c, c, c]
+ c2 = np.dstack((c2, c2))
+
+ spl2 = splantider((t, c2, k), n)
+ spl3 = splder(spl2, n)
+
+ assert_allclose(t, spl3[0])
+ assert_allclose(c2, spl3[1])
+ assert_equal(k, spl3[2])
+
+
+class TestSplint:
+ def test_len_c(self):
+ n, k = 7, 3
+ x = np.arange(n)
+ y = x**3
+ t, c, k = splrep(x, y, s=0)
+
+ # note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
+ assert len(t) == len(c) == n + 2*(k-1)
+
+ # integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
+ res = splint(0, 6, (t, c, k))
+ assert_allclose(res, 6**4 / 4, atol=1e-15)
+
+ # check that the coefficients past len(t) - k - 1 are ignored
+ c0 = c.copy()
+ c0[len(t)-k-1:] = np.nan
+ res0 = splint(0, 6, (t, c0, k))
+ assert_allclose(res0, 6**4 / 4, atol=1e-15)
+
+ # however, all other coefficients *are* used
+ c0[6] = np.nan
+ assert np.isnan(splint(0, 6, (t, c0, k)))
+
+ # check that the coefficient array can have length `len(t) - k - 1`
+ c1 = c[:len(t) - k - 1]
+ res1 = splint(0, 6, (t, c1, k))
+ assert_allclose(res1, 6**4 / 4, atol=1e-15)
+
+ # however shorter c arrays raise. The error from f2py is a
+ # `dftipack.error`, which is an Exception but not ValueError etc.
+ with assert_raises(Exception, match=r">=n-k-1"):
+ splint(0, 1, (np.ones(10), np.ones(5), 3))
+
+
+class TestBisplrep:
+ def test_overflow(self):
+ from numpy.lib.stride_tricks import as_strided
+ if dfitpack_int.itemsize == 8:
+ size = 1500000**2
+ else:
+ size = 400**2
+ # Don't allocate a real array, as it's very big, but rely
+ # on that it's not referenced
+ x = as_strided(np.zeros(()), shape=(size,))
+ assert_raises(OverflowError, bisplrep, x, x, x, w=x,
+ xb=0, xe=1, yb=0, ye=1, s=0)
+
+ def test_regression_1310(self):
+ # Regression test for gh-1310
+ with np.load(data_file('bug-1310.npz')) as loaded_data:
+ data = loaded_data['data']
+
+ # Shouldn't crash -- the input data triggers work array sizes
+ # that caused previously some data to not be aligned on
+ # sizeof(double) boundaries in memory, which made the Fortran
+ # code to crash when compiled with -O3
+ bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
+ full_output=True)
+
+ @pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
+ def test_ilp64_bisplrep(self):
+ check_free_memory(28000) # VM size, doesn't actually use the pages
+ x = np.linspace(0, 1, 400)
+ y = np.linspace(0, 1, 400)
+ x, y = np.meshgrid(x, y)
+ z = np.zeros_like(x)
+ tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
+ assert_allclose(bisplev(0.5, 0.5, tck), 0.0)
+
+
+def test_dblint():
+ # Basic test to see it runs and gives the correct result on a trivial
+ # problem. Note that `dblint` is not exposed in the interpolate namespace.
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ xx, yy = np.meshgrid(x, y)
+ rect = RectBivariateSpline(x, y, 4 * xx * yy)
+ tck = list(rect.tck)
+ tck.extend(rect.degrees)
+
+ assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
+ assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
+ assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
+ assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
+
+
+def test_splev_der_k():
+ # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
+ # for x outside of knot range
+
+ # test case from gh-2188
+ tck = (np.array([0., 0., 2.5, 2.5]),
+ np.array([-1.56679978, 2.43995873, 0., 0.]),
+ 1)
+ t, c, k = tck
+ x = np.array([-3, 0, 2.5, 3])
+
+ # an explicit form of the linear spline
+ assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
+ assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
+
+ # now check a random spline vs splder
+ np.random.seed(1234)
+ x = np.sort(np.random.random(30))
+ y = np.random.random(30)
+ t, c, k = splrep(x, y)
+
+ x = [t[0] - 1., t[-1] + 1.]
+ tck2 = splder((t, c, k), k)
+ assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
+
+
+def test_splprep_segfault():
+ # regression test for gh-3847: splprep segfaults if knots are specified
+ # for task=-1
+ t = np.arange(0, 1.1, 0.1)
+ x = np.sin(2*np.pi*t)
+ y = np.cos(2*np.pi*t)
+ tck, u = splprep([x, y], s=0)
+ np.arange(0, 1.01, 0.01)
+
+ uknots = tck[0] # using the knots from the previous fitting
+ tck, u = splprep([x, y], task=-1, t=uknots) # here is the crash
+
+
+def test_bisplev_integer_overflow():
+ np.random.seed(1)
+
+ x = np.linspace(0, 1, 11)
+ y = x
+ z = np.random.randn(11, 11).ravel()
+ kx = 1
+ ky = 1
+
+ nx, tx, ny, ty, c, fp, ier = regrid_smth(
+ x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
+ tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
+
+ xp = np.zeros([2621440])
+ yp = np.zeros([2621440])
+
+ assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
+
+
+@pytest.mark.xslow
+def test_gh_1766():
+ # this should fail gracefully instead of segfaulting (int overflow)
+ size = 22
+ kx, ky = 3, 3
+ def f2(x, y):
+ return np.sin(x+y)
+
+ x = np.linspace(0, 10, size)
+ y = np.linspace(50, 700, size)
+ xy = makepairs(x, y)
+ tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
+ # the size value here can either segfault
+ # or produce a MemoryError on main
+ tx_ty_size = 500000
+ tck[0] = np.arange(tx_ty_size)
+ tck[1] = np.arange(tx_ty_size) * 4
+ tt_0 = np.arange(50)
+ tt_1 = np.arange(50) * 3
+ with pytest.raises(MemoryError):
+ bisplev(tt_0, tt_1, tck, 1, 1)
+
+
+def test_spalde_scalar_input():
+ # Ticket #629
+ x = np.linspace(0, 10)
+ y = x**3
+ tck = splrep(x, y, k=3, t=[5])
+ res = spalde(np.float64(1), tck)
+ des = np.array([1., 3., 6., 6.])
+ assert_almost_equal(res, des)
+
+
+def test_spalde_nc():
+ # regression test for https://github.com/scipy/scipy/issues/19002
+ # here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
+ x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
+ -1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
+ dtype="float")
+ t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
+ -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
+ 5.0, 6.0, 6.0, 6.0, 6.0]
+ c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+ k = 3
+
+ res = spalde(x, (t, c, k))
+ res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
+ assert_allclose(res, res_splev.T, atol=1e-15)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ebf0d26e8795c5230b4336a02126a8e1e53096
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_fitpack2.py
@@ -0,0 +1,1355 @@
+# Created by Pearu Peterson, June 2003
+import itertools
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+
+from numpy import array, diff, linspace, meshgrid, ones, pi, shape
+from scipy.interpolate._fitpack_py import bisplrep, bisplev, splrep, spalde
+from scipy.interpolate._fitpack2 import (UnivariateSpline,
+ LSQUnivariateSpline, InterpolatedUnivariateSpline,
+ LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
+ LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
+ RectSphereBivariateSpline)
+
+
+class TestUnivariateSpline:
+ def test_linear_constant(self):
+ x = [1,2,3]
+ y = [3,3,3]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
+
+ def test_preserve_shape(self):
+ x = [1, 2, 3]
+ y = [0, 2, 4]
+ lut = UnivariateSpline(x, y, k=1)
+ arg = 2
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+ arg = [1.5, 2, 2.5]
+ assert_equal(shape(arg), shape(lut(arg)))
+ assert_equal(shape(arg), shape(lut(arg, nu=1)))
+
+ def test_linear_1d(self):
+ x = [1,2,3]
+ y = [0,2,4]
+ lut = UnivariateSpline(x,y,k=1)
+ assert_array_almost_equal(lut.get_knots(),[1,3])
+ assert_array_almost_equal(lut.get_coeffs(),[0,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
+
+ def test_subclassing(self):
+ # See #731
+
+ class ZeroSpline(UnivariateSpline):
+ def __call__(self, x):
+ return 0*array(x)
+
+ sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
+ assert_array_equal(sp([1.5, 2.5]), [0., 0.])
+
+ def test_empty_input(self):
+ # Test whether empty input returns an empty output. Ticket 1014
+ x = [1,3,5,7,9]
+ y = [0,4,9,12,21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_array_equal(spl([]), array([]))
+
+ def test_roots(self):
+ x = [1, 3, 5, 7, 9]
+ y = [0, 4, 9, 12, 21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_almost_equal(spl.roots()[0], 1.050290639101332)
+
+ def test_roots_length(self): # for gh18335
+ x = np.linspace(0, 50 * np.pi, 1000)
+ y = np.cos(x)
+ spl = UnivariateSpline(x, y, s=0)
+ assert_equal(len(spl.roots()), 50)
+
+ def test_derivatives(self):
+ x = [1, 3, 5, 7, 9]
+ y = [0, 4, 9, 12, 21]
+ spl = UnivariateSpline(x, y, k=3)
+ assert_almost_equal(spl.derivatives(3.5),
+ [5.5152902, 1.7146577, -0.1830357, 0.3125])
+
+ def test_derivatives_2(self):
+ x = np.arange(8)
+ y = x**3 + 2.*x**2
+
+ tck = splrep(x, y, s=0)
+ ders = spalde(3, tck)
+ assert_allclose(ders, [45., # 3**3 + 2*(3)**2
+ 39., # 3*(3)**2 + 4*(3)
+ 22., # 6*(3) + 4
+ 6.], # 6*3**0
+ atol=1e-15)
+ spl = UnivariateSpline(x, y, s=0, k=3)
+ assert_allclose(spl.derivatives(3),
+ ders,
+ atol=1e-15)
+
+ def test_resize_regression(self):
+ """Regression test for #1375."""
+ x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
+ -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
+ 0.65016502, 1.]
+ y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
+ 0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
+ 0.62928599, 1.]
+ w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
+ 6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
+ 6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
+ 1.00000000e+12]
+ spl = UnivariateSpline(x=x, y=y, w=w, s=None)
+ desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
+ assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
+
+ def test_out_of_range_regression(self):
+ # Test different extrapolation modes. See ticket 3557
+ x = np.arange(5, dtype=float)
+ y = x**3
+
+ xp = linspace(-8, 13, 100)
+ xp_zeros = xp.copy()
+ xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
+ xp_clip = xp.copy()
+ xp_clip[xp_clip < x[0]] = x[0]
+ xp_clip[xp_clip > x[-1]] = x[-1]
+
+ for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
+ spl = cls(x=x, y=y)
+ for ext in [0, 'extrapolate']:
+ assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
+ for ext in [1, 'zeros']:
+ assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
+ for ext in [2, 'raise']:
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ for ext in [3, 'const']:
+ assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
+ assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
+
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ spl = LSQUnivariateSpline(x, y, t)
+ assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
+ assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
+ assert_raises(ValueError, spl, xp, **dict(ext=2))
+ assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
+
+ # also make sure that unknown values for `ext` are caught early
+ for ext in [-1, 'unknown']:
+ spl = UnivariateSpline(x, y)
+ assert_raises(ValueError, spl, xp, **dict(ext=ext))
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, ext=ext))
+
+ def test_lsq_fpchec(self):
+ xs = np.arange(100) * 1.
+ ys = np.arange(100) * 1.
+ knots = np.linspace(0, 99, 10)
+ bbox = (-1, 101)
+ assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
+ bbox=bbox)
+
+ def test_derivative_and_antiderivative(self):
+ # Thin wrappers to splder/splantider, so light smoke test only.
+ x = np.linspace(0, 1, 70)**3
+ y = np.cos(x)
+
+ spl = UnivariateSpline(x, y, s=0)
+ spl2 = spl.antiderivative(2).derivative(2)
+ assert_allclose(spl(0.3), spl2(0.3))
+
+ spl2 = spl.antiderivative(1)
+ assert_allclose(spl2(0.6) - spl2(0.2),
+ spl.integral(0.2, 0.6))
+
+ def test_derivative_extrapolation(self):
+ # Regression test for gh-10195: for a const-extrapolation spline
+ # its derivative evaluates to zero for extrapolation
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 5]
+ f = UnivariateSpline(x_values, y_values, ext='const', k=3)
+
+ x = [-1, 0, -0.5, 9, 9.5, 10]
+ assert_allclose(f.derivative()(x), 0, atol=1e-15)
+
+ def test_integral_out_of_bounds(self):
+ # Regression test for gh-7906: .integral(a, b) is wrong if both
+ # a and b are out-of-bounds
+ x = np.linspace(0., 1., 7)
+ for ext in range(4):
+ f = UnivariateSpline(x, x, s=0, ext=ext)
+ for (a, b) in [(1, 1), (1, 5), (2, 5),
+ (0, 0), (-2, 0), (-2, -1)]:
+ assert_allclose(f.integral(a, b), 0, atol=1e-15)
+
+ def test_nan(self):
+ # bail out early if the input data contains nans
+ x = np.arange(10, dtype=float)
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(x, y, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ y_end = y[-1]
+ for z in [np.nan, np.inf, -np.inf]:
+ y[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, check_finite=True))
+ y[-1] = y_end # check valid y but invalid w
+ w[-1] = z
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, w=w, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_strictly_increasing_x(self):
+ # Test the x is required to be strictly increasing for
+ # UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
+ # but merely increasing for UnivariateSpline if s>0
+ # and for LSQUnivariateSpline; see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0]
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
+ LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, s=0, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+
+ def test_increasing_x(self):
+ # Test that x is required to be increasing, see gh-8535
+ xx = np.arange(10, dtype=float)
+ yy = xx**3
+ x = np.arange(10, dtype=float)
+ x[1] = x[0] - 1.0
+ y = x**3
+ w = np.ones_like(x)
+ # also test LSQUnivariateSpline [which needs explicit knots]
+ spl = UnivariateSpline(xx, yy, check_finite=True)
+ t = spl.get_knots()[3:4] # interior knots w/ default k=3
+ assert_raises(ValueError, UnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, InterpolatedUnivariateSpline,
+ **dict(x=x, y=y, check_finite=True))
+ assert_raises(ValueError, LSQUnivariateSpline,
+ **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+ def test_invalid_input_for_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ UnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ UnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ UnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ UnivariateSpline(x_values, y_values, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_invalid_input_for_interpolated_univariate_spline(self):
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ InterpolatedUnivariateSpline(x_values, y_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [-1.0, 1.0, 1.0, 1.0]
+ InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ InterpolatedUnivariateSpline(x_values, y_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_invalid_input_for_lsq_univariate_spline(self):
+
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ spl = UnivariateSpline(x_values, y_values, check_finite=True)
+ t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5]
+ LSQUnivariateSpline(x_values, y_values, t_values)
+ assert "x and y should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x_values = [1, 2, 4, 6, 8.5]
+ y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+ w_values = [1.0, 1.0, 1.0, 1.0]
+ LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
+ assert "x, y, and w should have a same length" in str(info.value)
+
+ message = "Interior knots t must satisfy Schoenberg-Whitney conditions"
+ with assert_raises(ValueError, match=message) as info:
+ bbox = (100, -100)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-1)
+ LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+ assert "bbox shape should be (2,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQUnivariateSpline(x_values, y_values, t_values, k=6)
+ assert "k should be 1 <= k <= 5" in str(info.value)
+
+ def test_array_like_input(self):
+ x_values = np.array([1, 2, 4, 6, 8.5])
+ y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
+ w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+ bbox = np.array([-100, 100])
+ # np.array input
+ spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
+ bbox=bbox)
+ # list input
+ spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
+ w=w_values.tolist(), bbox=bbox.tolist())
+
+ assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
+ spl2([0.1, 0.5, 0.9, 0.99]))
+
+ def test_fpknot_oob_crash(self):
+ # https://github.com/scipy/scipy/issues/3691
+ x = range(109)
+ y = [0., 0., 0., 0., 0., 10.9, 0., 11., 0.,
+ 0., 0., 10.9, 0., 0., 0., 0., 0., 0.,
+ 10.9, 0., 0., 0., 11., 0., 0., 0., 10.9,
+ 0., 0., 0., 10.5, 0., 0., 0., 10.7, 0.,
+ 0., 0., 11., 0., 0., 0., 0., 0., 0.,
+ 10.9, 0., 0., 10.7, 0., 0., 0., 10.6, 0.,
+ 0., 0., 10.5, 0., 0., 10.7, 0., 0., 10.5,
+ 0., 0., 11.5, 0., 0., 0., 10.7, 0., 0.,
+ 10.7, 0., 0., 10.9, 0., 0., 10.8, 0., 0.,
+ 0., 10.7, 0., 0., 10.6, 0., 0., 0., 10.4,
+ 0., 0., 10.6, 0., 0., 10.5, 0., 0., 0.,
+ 10.7, 0., 0., 0., 10.4, 0., 0., 0., 10.8, 0.]
+ with suppress_warnings() as sup:
+ r = sup.record(
+ UserWarning,
+ r"""
+The maximal number of iterations maxit \(set to 20 by the program\)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs\(fp-s\)/s < tol.""")
+ UnivariateSpline(x, y, k=1)
+ assert_equal(len(r), 1)
+
+
+class TestLSQBivariateSpline:
+ # NOTE: The systems in this test class are rank-deficient
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+ assert_equal(len(r), 1)
+
+ assert_almost_equal(lut(2,2), 3.)
+
+ def test_bilinearity(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,7,8,3,4,7,1,3,4]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+
+ tx, ty = lut.get_knots()
+ for xa, xb in zip(tx[:-1], tx[1:]):
+ for ya, yb in zip(ty[:-1], ty[1:]):
+ for t in [0.1, 0.5, 0.9]:
+ for s in [0.3, 0.4, 0.7]:
+ xp = xa*(1-t) + xb*t
+ yp = ya*(1-s) + yb*s
+ zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ + lut(xb, ya)*t*(1-s)
+ + lut(xa, yb)*(1-t)*s
+ + lut(xb, yb)*t*s)
+ assert_almost_equal(lut(xp,yp), zp)
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,8,8,8]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+ tx, ty = lut.get_knots()
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
+ trpz)
+
+ def test_empty_input(self):
+ # Test whether empty inputs returns an empty output. Ticket 1014
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ s = 0.1
+ tx = [1+s,3-s]
+ ty = [1+s,3-s]
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+ assert_equal(len(r), 1)
+
+ assert_array_equal(lut([], []), np.zeros((0,0)))
+ assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ s = 0.1
+ tx = [1 + s, 3 - s]
+ ty = [1 + s, 3 - s]
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ LSQBivariateSpline(x, y, z, tx, ty)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ LSQBivariateSpline(x, y, z, tx, ty, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
+ str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ s = 0.1
+ tx = np.array([1 + s, 3 - s])
+ ty = np.array([1 + s, 3 - s])
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0)
+ bbox = np.array([1.0, 10.0, 1.0, 10.0])
+
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ # np.array input
+ spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
+ # list input
+ spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ tx.tolist(), ty.tolist(), w=w.tolist(),
+ bbox=bbox)
+ assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
+ assert_equal(len(r), 2)
+
+ def test_unequal_length_of_knots(self):
+ """Test for the case when the input knot-location arrays in x and y are
+ of different lengths.
+ """
+ x, y = np.mgrid[0:100, 0:100]
+ x = x.ravel()
+ y = y.ravel()
+ z = 3.0 * np.ones_like(x)
+ tx = np.linspace(0.1, 98.0, 29)
+ ty = np.linspace(0.1, 98.0, 33)
+ with suppress_warnings() as sup:
+ r = sup.record(UserWarning, "\nThe coefficients of the spline")
+ lut = LSQBivariateSpline(x,y,z,tx,ty)
+ assert_equal(len(r), 1)
+
+ assert_almost_equal(lut(x, y, grid=False), z)
+
+
+class TestSmoothBivariateSpline:
+ def test_linear_constant(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [3,3,3,3,3,3,3,3,3]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
+
+ def test_linear_1d(self):
+ x = [1,1,1,2,2,2,3,3,3]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = [0,0,0,2,2,2,4,4,4]
+ lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+ assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+ assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
+ assert_almost_equal(lut.get_residual(),0.0)
+ assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
+
+ def test_integral(self):
+ x = [1,1,1,2,2,2,4,4,4]
+ y = [1,2,3,1,2,3,1,2,3]
+ z = array([0,7,8,3,4,7,1,3,4])
+
+ with suppress_warnings() as sup:
+ # This seems to fail (ier=1, see ticket 1642).
+ sup.filter(UserWarning, "\nThe required storage space")
+ lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
+
+ tx = [1,2,4]
+ ty = [1,2,3]
+
+ tz = lut(tx, ty)
+ trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+
+ lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
+ assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
+ decimal=0) # the quadratures give 23.75 and 23.85
+
+ tz = lut(tx[:-1], ty[:-1])
+ trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
+ * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+ assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
+
+ def test_rerun_lwrk2_too_small(self):
+ # in this setting, lwrk2 is too small in the default run. Here we
+ # check for equality with the bisplrep/bisplev output because there,
+ # an automatic re-run of the spline representation is done if ier>10.
+ x = np.linspace(-2, 2, 80)
+ y = np.linspace(-2, 2, 80)
+ z = x + y
+ xi = np.linspace(-1, 1, 100)
+ yi = np.linspace(-2, 2, 100)
+ tck = bisplrep(x, y, z)
+ res1 = bisplev(xi, yi, tck)
+ interp_ = SmoothBivariateSpline(x, y, z)
+ res2 = interp_(xi, yi)
+ assert_almost_equal(res1, res2)
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0, num=10)
+ SmoothBivariateSpline(x, y, z)
+ assert "x, y, and z should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = np.linspace(1.0, 10.0)
+ y = np.linspace(1.0, 10.0)
+ z = np.linspace(1.0, 10.0)
+ w = np.linspace(1.0, 10.0, num=20)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "x, y, z, and w should have a same length" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ w = np.linspace(-1.0, 10.0)
+ SmoothBivariateSpline(x, y, z, w=w)
+ assert "w should be positive" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ bbox = (-100, 100, -100)
+ SmoothBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, kx=10, ky=10)
+ assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
+ str(info.value)
+
+ with assert_raises(ValueError) as info:
+ SmoothBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothBivariateSpline(x, y, z, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+ y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+ bbox = np.array([1.0, 3.0, 1.0, 3.0])
+ # np.array input
+ spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
+ # list input
+ spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist(), w=w.tolist(),
+ kx=1, ky=1)
+ assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
+
+
+class TestLSQSphereBivariateSpline:
+ def setup_method(self):
+ # define the input data and coordinates
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
+ phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ knotdata = data[::5, ::5]
+ # calculate spline coefficients
+ lats, lons = meshgrid(theta, phi)
+ lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ self.lut_lsq = lut_lsq
+ self.data = knotdata
+ self.new_lons, self.new_lats = knotsp, knotst
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
+ assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
+ self.data)
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
+ invalid_lats, lons = meshgrid(invalid_theta, phi)
+ LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
+ lats, invalid_lons = meshgrid(theta, invalid_phi)
+ LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+ data.T.ravel(), knotst, knotsp)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ lats, lons = meshgrid(theta, phi)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotst = np.copy(knotst)
+ invalid_knotst[0] = pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), invalid_knotst, knotsp)
+ assert "tt should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = -0.1
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_knotsp = np.copy(knotsp)
+ invalid_knotsp[0] = 2 * pi
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, invalid_knotsp)
+ assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, w=invalid_w)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=0.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+ knotst, knotsp, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ ntheta, nphi = 70, 90
+ theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+ ntheta) * pi
+ phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
+ nphi) * 2. * pi
+ lats, lons = meshgrid(theta, phi)
+ data = ones((theta.shape[0], phi.shape[0]))
+ # define knots and extract data values at the knots
+ knotst = theta[::5]
+ knotsp = phi[::5]
+ w = ones(lats.ravel().shape[0])
+
+ # np.array input
+ spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+ data.T.ravel(), knotst, knotsp, w=w)
+ # list input
+ spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
+ lons.ravel().tolist(),
+ data.T.ravel().tolist(),
+ knotst.tolist(),
+ knotsp.tolist(), w=w.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestSmoothSphereBivariateSpline:
+ def setup_method(self):
+ theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
+ .75*pi, .75*pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
+
+ def test_linear_constant(self):
+ assert_almost_equal(self.lut.get_residual(), 0.)
+ assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
+ [[3, 3], [3, 3], [3, 3]])
+
+ def test_empty_input(self):
+ assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
+ assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
+
+ def test_invalid_input(self):
+ theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
+ .75 * pi, .75 * pi, .75 * pi])
+ phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+ 1.5 * pi])
+ r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ .75 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
+ .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+ 1.1 * pi])
+ SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+ assert "theta should be between [0, pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 1.5 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+ .5 * pi, pi, 2.1 * pi])
+ SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+ assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+ SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
+ assert "w should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
+ assert "s should be positive" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
+ assert "eps should be between (0, 1)" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
+ .5 * pi, .75 * pi, .75 * pi, .75 * pi])
+ phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
+ pi, 1.5 * pi])
+ r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+ w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+
+ # np.array input
+ spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
+
+ # list input
+ spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
+ r.tolist(), w=w.tolist(), s=1E10)
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestRectBivariateSpline:
+ def test_defaults(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+
+ xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
+ yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+
+ assert_almost_equal(zi, zi2)
+
+ def test_derivatives_grid(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
+ [0,0,-11,0,0],[0,0,4,0,0]])/6.
+ dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
+ [2,.25,0,-.25,-2],[4,-1,0,1,-4]])
+ dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
+ [-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1),dx)
+ assert_array_almost_equal(lut(x,y,dy=1),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
+
+ def test_derivatives(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ dx = array([0,0,2./3,0,0])
+ dy = array([4,-1,0,-.25,-4])
+ dxdy = array([160,65,0,55,32])/24.
+ lut = RectBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
+ assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
+ assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
+
+ def test_partial_derivative_method_grid(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1],
+ [1, 2, 1, 2, 1]])
+ dx = array([[0, 0, -20, 0, 0],
+ [0, 0, 13, 0, 0],
+ [0, 0, 4, 0, 0],
+ [0, 0, -11, 0, 0],
+ [0, 0, 4, 0, 0]]) / 6.
+ dy = array([[4, -1, 0, 1, -4],
+ [4, -1, 0, 1, -4],
+ [0, 1.5, 0, -1.5, 0],
+ [2, .25, 0, -.25, -2],
+ [4, -1, 0, 1, -4]])
+ dxdy = array([[40, -25, 0, 25, -40],
+ [-26, 16.25, 0, -16.25, 26],
+ [-8, 5, 0, -5, 8],
+ [22, -13.75, 0, 13.75, -22],
+ [-8, 5, 0, -5, 8]]) / 6.
+ lut = RectBivariateSpline(x, y, z)
+ assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y), dx)
+ assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y), dy)
+ assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y), dxdy)
+
+ def test_partial_derivative_method(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1],
+ [1, 2, 1, 2, 1]])
+ dx = array([0, 0, 2./3, 0, 0])
+ dy = array([4, -1, 0, -.25, -4])
+ dxdy = array([160, 65, 0, 55, 32]) / 24.
+ lut = RectBivariateSpline(x, y, z)
+ assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y,
+ grid=False),
+ dx)
+ assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y,
+ grid=False),
+ dy)
+ assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y,
+ grid=False),
+ dxdy)
+
+ def test_partial_derivative_order_too_large(self):
+ x = array([0, 1, 2, 3, 4], dtype=float)
+ y = x.copy()
+ z = ones((x.size, y.size))
+ lut = RectBivariateSpline(x, y, z)
+ with assert_raises(ValueError):
+ lut.partial_derivative(4, 1)
+
+ def test_broadcast(self):
+ x = array([1,2,3,4,5])
+ y = array([1,2,3,4,5])
+ z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+ lut = RectBivariateSpline(x,y,z)
+ assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
+
+ def test_invalid_input(self):
+
+ with assert_raises(ValueError) as info:
+ x = array([6, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([2, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "y must be strictly increasing" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1]])
+ RectBivariateSpline(x, y, z)
+ assert "x dimension of z must have same number of elements as x"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
+ [1, 2, 2, 2], [1, 2, 1, 2]])
+ RectBivariateSpline(x, y, z)
+ assert "y dimension of z must have same number of elements as y"\
+ in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = (-100, 100, -100)
+ RectBivariateSpline(x, y, z, bbox=bbox)
+ assert "bbox shape should be (4,)" in str(info.value)
+
+ with assert_raises(ValueError) as info:
+ RectBivariateSpline(x, y, z, s=-1.0)
+ assert "s should be s >= 0.0" in str(info.value)
+
+ def test_array_like_input(self):
+ x = array([1, 2, 3, 4, 5])
+ y = array([1, 2, 3, 4, 5])
+ z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ bbox = array([1, 5, 1, 5])
+
+ spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
+ spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+ bbox=bbox.tolist())
+ assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+ def test_not_increasing_input(self):
+ # gh-8565
+ NSamp = 20
+ Theta = np.random.uniform(0, np.pi, NSamp)
+ Phi = np.random.uniform(0, 2 * np.pi, NSamp)
+ Data = np.ones(NSamp)
+
+ Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
+
+ NLon = 6
+ NLat = 3
+ GridPosLats = np.arange(NLat) / NLat * np.pi
+ GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
+
+ # No error
+ Interpolator(GridPosLats, GridPosLons)
+
+ nonGridPosLats = GridPosLats.copy()
+ nonGridPosLats[2] = 0.001
+ with assert_raises(ValueError) as exc_info:
+ Interpolator(nonGridPosLats, GridPosLons)
+ assert "x must be strictly increasing" in str(exc_info.value)
+
+ nonGridPosLons = GridPosLons.copy()
+ nonGridPosLons[2] = 0.001
+ with assert_raises(ValueError) as exc_info:
+ Interpolator(GridPosLats, nonGridPosLons)
+ assert "y must be strictly increasing" in str(exc_info.value)
+
+
+class TestRectSphereBivariateSpline:
+ def test_defaults(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ assert_array_almost_equal(lut(x,y),z)
+
+ def test_evaluate(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+ lut = RectSphereBivariateSpline(x,y,z)
+ yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
+ xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
+ zi = lut.ev(xi, yi)
+ zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+ assert_almost_equal(zi, zi2)
+
+ def test_invalid_input(self):
+ data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(-1, 170, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 181, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-181, 10, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-10, 360, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(10, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data, s=-1)
+ assert "s should be positive" in str(exc_info.value)
+
+ def test_derivatives_grid(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1),
+ _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ assert_array_equal(lut(x, y, dtheta=1),
+ lut.partial_derivative(1, 0)(x, y))
+ assert_array_equal(lut(x, y, dphi=1),
+ lut.partial_derivative(0, 1)(x, y))
+ assert_array_equal(lut(x, y, dtheta=1, dphi=1),
+ lut.partial_derivative(1, 1)(x, y))
+
+ assert_array_equal(lut(x, y, dtheta=1, grid=False),
+ lut.partial_derivative(1, 0)(x, y, grid=False))
+ assert_array_equal(lut(x, y, dphi=1, grid=False),
+ lut.partial_derivative(0, 1)(x, y, grid=False))
+ assert_array_equal(lut(x, y, dtheta=1, dphi=1, grid=False),
+ lut.partial_derivative(1, 1)(x, y, grid=False))
+
+ def test_derivatives(self):
+ y = linspace(0.01, 2*pi-0.01, 7)
+ x = linspace(0.01, pi-0.01, 7)
+ z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+ [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+ [1,2,1,2,1,2,1]])
+
+ lut = RectSphereBivariateSpline(x,y,z)
+
+ y = linspace(0.02, 2*pi-0.02, 7)
+ x = linspace(0.02, pi-0.02, 7)
+
+ assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
+ assert_allclose(lut(x, y, dtheta=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
+ rtol=1e-4, atol=1e-4)
+ assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
+ _numdiff_2d(lambda x,y: lut(x,y,grid=False),
+ x, y, dx=1, dy=1, eps=1e-6),
+ rtol=1e-3, atol=1e-3)
+
+ def test_invalid_input_2(self):
+ data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+ np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(0, 170, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 180, 9) * np.pi / 180.
+ lons = np.linspace(0, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "u should be between (0, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-181, 10, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(-10, 360, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data)
+ assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+ with assert_raises(ValueError) as exc_info:
+ lats = np.linspace(10, 170, 9) * np.pi / 180.
+ lons = np.linspace(10, 350, 18) * np.pi / 180.
+ RectSphereBivariateSpline(lats, lons, data, s=-1)
+ assert "s should be positive" in str(exc_info.value)
+
+ def test_array_like_input(self):
+ y = linspace(0.01, 2 * pi - 0.01, 7)
+ x = linspace(0.01, pi - 0.01, 7)
+ z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 3, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+ [1, 2, 2, 2, 1, 2, 1],
+ [1, 2, 1, 2, 1, 2, 1]])
+ # np.array input
+ spl1 = RectSphereBivariateSpline(x, y, z)
+ # list input
+ spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
+ assert_array_almost_equal(spl1(x, y), spl2(x, y))
+
+ def test_negative_evaluation(self):
+ lats = np.array([25, 30, 35, 40, 45])
+ lons = np.array([-90, -85, -80, -75, 70])
+ mesh = np.meshgrid(lats, lons)
+ data = mesh[0] + mesh[1] # lon + lat value
+ lat_r = np.radians(lats)
+ lon_r = np.radians(lons)
+ interpolator = RectSphereBivariateSpline(lat_r, lon_r, data)
+ query_lat = np.radians(np.array([35, 37.5]))
+ query_lon = np.radians(np.array([-80, -77.5]))
+ data_interp = interpolator(query_lat, query_lon)
+ ans = np.array([[-45.0, -42.480862],
+ [-49.0625, -46.54315]])
+ assert_array_almost_equal(data_interp, ans)
+
+ def test_pole_continuity_gh_14591(self):
+ # regression test for https://github.com/scipy/scipy/issues/14591
+ # with pole_continuty=(True, True), the internal work array size
+ # was too small, leading to a FITPACK data validation error.
+
+ # The reproducer in gh-14591 was using a NetCDF4 file with
+ # 361x507 arrays, so here we trivialize array sizes to a minimum
+ # which still demonstrates the issue.
+ u = np.arange(1, 10) * np.pi / 10
+ v = np.arange(1, 10) * np.pi / 10
+ r = np.zeros((9, 9))
+ for p in [(True, True), (True, False), (False, False)]:
+ RectSphereBivariateSpline(u, v, r, s=0, pole_continuity=p)
+
+
+def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
+ if dx == 0 and dy == 0:
+ return func(x, y)
+ elif dx == 1 and dy == 0:
+ return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
+ elif dx == 0 and dy == 1:
+ return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
+ elif dx == 1 and dy == 1:
+ return (func(x + eps, y + eps) - func(x - eps, y + eps)
+ - func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
+ else:
+ raise ValueError("invalid derivative order")
+
+
+class Test_DerivedBivariateSpline:
+ """Test the creation, usage, and attribute access of the (private)
+ _DerivedBivariateSpline class.
+ """
+ def setup_method(self):
+ x = np.concatenate(list(zip(range(10), range(10))))
+ y = np.concatenate(list(zip(range(10), range(1, 11))))
+ z = np.concatenate((np.linspace(3, 1, 10), np.linspace(1, 3, 10)))
+ with suppress_warnings() as sup:
+ sup.record(UserWarning, "\nThe coefficients of the spline")
+ self.lut_lsq = LSQBivariateSpline(x, y, z,
+ linspace(0.5, 19.5, 4),
+ linspace(1.5, 20.5, 4),
+ eps=1e-2)
+ self.lut_smooth = SmoothBivariateSpline(x, y, z)
+ xx = linspace(0, 1, 20)
+ yy = xx + 1.0
+ zz = array([np.roll(z, i) for i in range(z.size)])
+ self.lut_rect = RectBivariateSpline(xx, yy, zz)
+ self.orders = list(itertools.product(range(3), range(3)))
+
+ def test_creation_from_LSQ(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_lsq.partial_derivative(nux, nuy)
+ a = lut_der(3.5, 3.5, grid=False)
+ b = self.lut_lsq(3.5, 3.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_creation_from_Smooth(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_smooth.partial_derivative(nux, nuy)
+ a = lut_der(5.5, 5.5, grid=False)
+ b = self.lut_smooth(5.5, 5.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_creation_from_Rect(self):
+ for nux, nuy in self.orders:
+ lut_der = self.lut_rect.partial_derivative(nux, nuy)
+ a = lut_der(0.5, 1.5, grid=False)
+ b = self.lut_rect(0.5, 1.5, dx=nux, dy=nuy, grid=False)
+ assert_equal(a, b)
+
+ def test_invalid_attribute_fp(self):
+ der = self.lut_rect.partial_derivative(1, 1)
+ with assert_raises(AttributeError):
+ der.fp
+
+ def test_invalid_attribute_get_residual(self):
+ der = self.lut_smooth.partial_derivative(1, 1)
+ with assert_raises(AttributeError):
+ der.get_residual()
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py
new file mode 100644
index 0000000000000000000000000000000000000000..0902308fb6af6802ba216e3aeec499d0ddfb1407
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_gil.py
@@ -0,0 +1,65 @@
+import itertools
+import threading
+import time
+
+import numpy as np
+from numpy.testing import assert_equal
+import pytest
+import scipy.interpolate
+
+
+class TestGIL:
+ """Check if the GIL is properly released by scipy.interpolate functions."""
+
+ def setup_method(self):
+ self.messages = []
+
+ def log(self, message):
+ self.messages.append(message)
+
+ def make_worker_thread(self, target, args):
+ log = self.log
+
+ class WorkerThread(threading.Thread):
+ def run(self):
+ log('interpolation started')
+ target(*args)
+ log('interpolation complete')
+
+ return WorkerThread()
+
+ @pytest.mark.slow
+ @pytest.mark.xfail(reason='race conditions, may depend on system load')
+ def test_rectbivariatespline(self):
+ def generate_params(n_points):
+ x = y = np.linspace(0, 1000, n_points)
+ x_grid, y_grid = np.meshgrid(x, y)
+ z = x_grid * y_grid
+ return x, y, z
+
+ def calibrate_delay(requested_time):
+ for n_points in itertools.count(5000, 1000):
+ args = generate_params(n_points)
+ time_started = time.time()
+ interpolate(*args)
+ if time.time() - time_started > requested_time:
+ return args
+
+ def interpolate(x, y, z):
+ scipy.interpolate.RectBivariateSpline(x, y, z)
+
+ args = calibrate_delay(requested_time=3)
+ worker_thread = self.make_worker_thread(interpolate, args)
+ worker_thread.start()
+ for i in range(3):
+ time.sleep(0.5)
+ self.log('working')
+ worker_thread.join()
+ assert_equal(self.messages, [
+ 'interpolation started',
+ 'working',
+ 'working',
+ 'working',
+ 'interpolation complete',
+ ])
+
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c7d52b422fb79971ddf86247196e92ea606a22d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpnd.py
@@ -0,0 +1,387 @@
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+ suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+import scipy.interpolate.interpnd as interpnd
+import scipy.spatial._qhull as qhull
+
+import pickle
+
+
+def data_file(basename):
+ return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', basename)
+
+
+class TestLinearNDInterpolation:
+ def test_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_smoketest_alternate(self):
+ # Test at single points, alternate calling convention
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
+ assert_almost_equal(y, yi)
+
+ def test_complex_smoketest(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ yi = interpnd.LinearNDInterpolator(x, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square(self):
+ # Test barycentric interpolation on a square against a manual
+ # implementation
+
+ points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64)
+ values = np.array([1., 2., -3., 5.], dtype=np.float64)
+
+ # NB: assume triangles (0, 1, 3) and (1, 2, 3)
+ #
+ # 1----2
+ # | \ |
+ # | \ |
+ # 0----3
+
+ def ip(x, y):
+ t1 = (x + y <= 1)
+ t2 = ~t1
+
+ x1 = x[t1]
+ y1 = y[t1]
+
+ x2 = x[t2]
+ y2 = y[t2]
+
+ z = 0*x
+
+ z[t1] = (values[0]*(1 - x1 - y1)
+ + values[1]*y1
+ + values[3]*x1)
+
+ z[t2] = (values[2]*(x2 + y2 - 1)
+ + values[1]*(1 - x2)
+ + values[3]*(1 - y2))
+ return z
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
+ np.linspace(0, 1, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+
+ assert_almost_equal(zi, ip(xx, yy))
+
+ def test_smoketest_rescale(self):
+ # Test at single points
+ x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
+ assert_almost_equal(y, yi)
+
+ def test_square_rescale(self):
+ # Test barycentric interpolation on a rectangle with rescaling
+ # agaings the same implementation without rescaling
+
+ points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.float64)
+ values = np.array([1., 2., -3., 5.], dtype=np.float64)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+ zi = interpnd.LinearNDInterpolator(points, values)(xi)
+ zi_rescaled = interpnd.LinearNDInterpolator(points, values,
+ rescale=True)(xi)
+
+ assert_almost_equal(zi, zi_rescaled)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
+ rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.LinearNDInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+
+class TestEstimateGradients2DGlobal:
+ def test_smoketest(self):
+ x = np.array([(0, 0), (0, 2),
+ (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
+ tri = qhull.Delaunay(x)
+
+ # Should be exact for linear functions, independent of triangulation
+
+ funcs = [
+ (lambda x, y: 0*x + 1, (0, 0)),
+ (lambda x, y: 0 + x, (1, 0)),
+ (lambda x, y: -2 + y, (0, 1)),
+ (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
+ ]
+
+ for j, (func, grad) in enumerate(funcs):
+ z = func(x[:,0], x[:,1])
+ dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
+
+ assert_equal(dz.shape, (6, 2))
+ assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
+ rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
+
+ def test_regression_2359(self):
+ # Check regression --- for certain point sets, gradient
+ # estimation could end up in an infinite loop
+ points = np.load(data_file('estimate_gradients_hang.npy'))
+ values = np.random.rand(points.shape[0])
+ tri = qhull.Delaunay(points)
+
+ # This should not hang
+ with suppress_warnings() as sup:
+ sup.filter(interpnd.GradientEstimationWarning,
+ "Gradient estimation did not converge")
+ interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
+
+
+class TestCloughTocher2DInterpolator:
+
+ def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False,
+ rescale=False, **kw):
+ np.random.seed(1234)
+ if x is None:
+ x = np.array([(0, 0), (0, 1),
+ (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
+ (0.5, 0.2)],
+ dtype=float)
+
+ if not alternate:
+ ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+ else:
+ ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
+ func(x[:,0], x[:,1]),
+ tol=1e-6, rescale=rescale)
+
+ p = np.random.rand(50, 2)
+
+ if not alternate:
+ a = ip(p)
+ else:
+ a = ip(p[:,0], p[:,1])
+ b = func(p[:,0], p[:,1])
+
+ try:
+ assert_allclose(a, b, **kw)
+ except AssertionError:
+ print("_check_accuracy: abs(a-b):", abs(a - b))
+ print("ip.grad:", ip.grad)
+ raise
+
+ def test_linear_smoketest(self):
+ # Should be exact for linear functions, independent of triangulation
+ funcs = [
+ lambda x, y: 0*x + 1,
+ lambda x, y: 0 + x,
+ lambda x, y: -2 + y,
+ lambda x, y: 3 + 3*x + 14.15*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True,
+ err_msg="Function (alternate) %d" % j)
+ # check rescaling
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ err_msg="Function (rescaled) %d" % j, rescale=True)
+ self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+ alternate=True, rescale=True,
+ err_msg="Function (alternate, rescaled) %d" % j)
+
+ def test_quadratic_smoketest(self):
+ # Should be reasonably accurate for quadratic functions
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ ]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_tri_input(self):
+ # Test at single points
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
+ assert_almost_equal(y, yi)
+
+ def test_tri_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ match = ("Rescaling is not supported when passing a "
+ "Delaunay triangulation as ``points``.")
+ with pytest.raises(ValueError, match=match):
+ interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
+
+ def test_tripoints_input_rescale(self):
+ # Test at single points
+ x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 3j*y
+
+ tri = qhull.Delaunay(x)
+ yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
+ yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
+ assert_almost_equal(yi, yi_rescale)
+
+ def test_dense(self):
+ # Should be more accurate for dense meshes
+ funcs = [
+ lambda x, y: x**2,
+ lambda x, y: y**2,
+ lambda x, y: x**2 - y**2,
+ lambda x, y: x*y,
+ lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
+ ]
+
+ np.random.seed(4321) # use a different seed than the check!
+ grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
+ np.random.rand(30*30, 2)]
+
+ for j, func in enumerate(funcs):
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j)
+ self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+ err_msg="Function %d" % j, rescale=True)
+
+ def test_wrong_ndim(self):
+ x = np.random.randn(30, 3)
+ y = np.random.randn(30)
+ assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
+
+ def test_pickle(self):
+ # Test at single points
+ np.random.seed(1234)
+ x = np.random.rand(30, 2)
+ y = np.random.rand(30) + 1j*np.random.rand(30)
+
+ ip = interpnd.CloughTocher2DInterpolator(x, y)
+ ip2 = pickle.loads(pickle.dumps(ip))
+
+ assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+ def test_boundary_tri_symmetry(self):
+ # Interpolation at neighbourless triangles should retain
+ # symmetry with mirroring the triangle.
+
+ # Equilateral triangle
+ points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
+ values = np.array([1, 0, 0])
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+
+ # Set gradient to zero at vertices
+ ip.grad[...] = 0
+
+ # Interpolation should be symmetric vs. bisector
+ alpha = 0.3
+ p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
+ p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
+
+ v1 = ip(p1)
+ v2 = ip(p2)
+ assert_allclose(v1, v2)
+
+ # ... and affine invariant
+ np.random.seed(1)
+ A = np.random.randn(2, 2)
+ b = np.random.randn(2)
+
+ points = A.dot(points.T).T + b[None,:]
+ p1 = A.dot(p1) + b
+ p2 = A.dot(p2) + b
+
+ ip = interpnd.CloughTocher2DInterpolator(points, values)
+ ip.grad[...] = 0
+
+ w1 = ip(p1)
+ w2 = ip(p2)
+ assert_allclose(w1, v1)
+ assert_allclose(w2, v2)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5a1dd600c16cbe1886a31d979b561438df7c13a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_interpolate.py
@@ -0,0 +1,2584 @@
+from numpy.testing import (assert_, assert_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_array_equal,
+ assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
+import numpy as np
+
+from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
+ splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
+ NdPPoly, BSpline)
+
+from scipy.special import poch, gamma
+
+from scipy.interpolate import _ppoly
+
+from scipy._lib._gcutils import assert_deallocated, IS_PYPY
+
+from scipy.integrate import nquad
+
+from scipy.special import binom
+
+
+class TestInterp2D:
+ def test_interp2d(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x+0.5*y)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ II = interp2d(x, y, z)
+ assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+ v, u = ogrid[0:2:24j, 0:pi:25j]
+ assert_almost_equal(II(u.ravel(), v.ravel()),
+ sin(u+0.5*v), decimal=2)
+
+ def test_interp2d_meshgrid_input(self):
+ # Ticket #703
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+ z = sin(x[None, :] + y[:, None]/2.)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ II = interp2d(x, y, z)
+ assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+ def test_interp2d_meshgrid_input_unsorted(self):
+ np.random.seed(1234)
+ x = linspace(0, 2, 16)
+ y = linspace(0, pi, 21)
+
+ z = sin(x[None, :] + y[:, None] / 2.)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ z = sin(x[None, :] + y[:, None]/2.)
+ ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+ np.random.shuffle(x)
+ np.random.shuffle(y)
+ z = sin(x[None, :] + y[:, None] / 2.)
+ ip3 = interp2d(x, y, z, kind='cubic')
+
+ x = linspace(0, 2, 31)
+ y = linspace(0, pi, 30)
+
+ assert_equal(ip1(x, y), ip2(x, y))
+ assert_equal(ip1(x, y), ip3(x, y))
+
+ def test_interp2d_eval_unsorted(self):
+ y, x = mgrid[0:2:20j, 0:pi:21j]
+ z = sin(x + 0.5*y)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ func = interp2d(x, y, z)
+
+ xe = np.array([3, 4, 5])
+ ye = np.array([5.3, 7.1])
+ assert_allclose(func(xe, ye), func(xe, ye[::-1]))
+
+ assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
+
+ def test_interp2d_linear(self):
+ # Ticket #898
+ a = np.zeros([5, 5])
+ a[2, 2] = 1.0
+ x = y = np.arange(5)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ b = interp2d(x, y, a, 'linear')
+ assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
+ assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
+
+ def test_interp2d_bounds(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 2, 7)
+ z = x[None, :]**2 + y[:, None]
+
+ ix = np.linspace(-1, 3, 31)
+ iy = np.linspace(-1, 3, 33)
+
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+
+ b = interp2d(x, y, z, bounds_error=True)
+ assert_raises(ValueError, b, ix, iy)
+
+ b = interp2d(x, y, z, fill_value=np.nan)
+ iz = b(ix, iy)
+ mx = (ix < 0) | (ix > 1)
+ my = (iy < 0) | (iy > 2)
+ assert_(np.isnan(iz[my, :]).all())
+ assert_(np.isnan(iz[:, mx]).all())
+ assert_(np.isfinite(iz[~my, :][:, ~mx]).all())
+
+
+class TestInterp1D:
+
+ def setup_method(self):
+ self.x5 = np.arange(5.)
+ self.x10 = np.arange(10.)
+ self.y10 = np.arange(10.)
+ self.x25 = self.x10.reshape((2,5))
+ self.x2 = np.arange(2.)
+ self.y2 = np.arange(2.)
+ self.x1 = np.array([0.])
+ self.y1 = np.array([0.])
+
+ self.y210 = np.arange(20.).reshape((2, 10))
+ self.y102 = np.arange(20.).reshape((10, 2))
+ self.y225 = np.arange(20.).reshape((2, 2, 5))
+ self.y25 = np.arange(10.).reshape((2, 5))
+ self.y235 = np.arange(30.).reshape((2, 3, 5))
+ self.y325 = np.arange(30.).reshape((3, 2, 5))
+
+ # Edge updated test matrix 1
+ # array([[ 30, 1, 2, 3, 4, 5, 6, 7, 8, -30],
+ # [ 30, 11, 12, 13, 14, 15, 16, 17, 18, -30]])
+ self.y210_edge_updated = np.arange(20.).reshape((2, 10))
+ self.y210_edge_updated[:, 0] = 30
+ self.y210_edge_updated[:, -1] = -30
+
+ # Edge updated test matrix 2
+ # array([[ 30, 30],
+ # [ 2, 3],
+ # [ 4, 5],
+ # [ 6, 7],
+ # [ 8, 9],
+ # [ 10, 11],
+ # [ 12, 13],
+ # [ 14, 15],
+ # [ 16, 17],
+ # [-30, -30]])
+ self.y102_edge_updated = np.arange(20.).reshape((10, 2))
+ self.y102_edge_updated[0, :] = 30
+ self.y102_edge_updated[-1, :] = -30
+
+ self.fill_value = -100.0
+
+ def test_validation(self):
+ # Make sure that appropriate exceptions are raised when invalid values
+ # are given to the constructor.
+
+ # These should all work.
+ for kind in ('nearest', 'nearest-up', 'zero', 'linear', 'slinear',
+ 'quadratic', 'cubic', 'previous', 'next'):
+ interp1d(self.x10, self.y10, kind=kind)
+ interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
+ interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=np.array([-1]))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1,))
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=-1)
+ interp1d(self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1))
+ interp1d(self.x10, self.y10, kind=0)
+ interp1d(self.x10, self.y10, kind=1)
+ interp1d(self.x10, self.y10, kind=2)
+ interp1d(self.x10, self.y10, kind=3)
+ interp1d(self.x10, self.y210, kind='linear', axis=-1,
+ fill_value=(-1, -1))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=np.ones(10))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), np.ones(10)))
+ interp1d(self.x2, self.y210, kind='linear', axis=0,
+ fill_value=(np.ones(10), -1))
+
+ # x array must be 1D.
+ assert_raises(ValueError, interp1d, self.x25, self.y10)
+
+ # y array cannot be a scalar.
+ assert_raises(ValueError, interp1d, self.x10, np.array(0))
+
+ # Check for x and y arrays having the same length.
+ assert_raises(ValueError, interp1d, self.x10, self.y2)
+ assert_raises(ValueError, interp1d, self.x2, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y102)
+ interp1d(self.x10, self.y210)
+ interp1d(self.x10, self.y102, axis=0)
+
+ # Check for x and y having at least 1 element.
+ assert_raises(ValueError, interp1d, self.x1, self.y10)
+ assert_raises(ValueError, interp1d, self.x10, self.y1)
+
+ # Bad fill values
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=(-1, -1, -1)) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array((-1, -1, -1))) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[[-1]]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=np.array([])) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+ fill_value=()) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=[-1, -1]) # doesn't broadcast
+ assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+ axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
+
+ def test_init(self):
+ # Check that the attributes are initialized appropriately by the
+ # constructor.
+ assert_(interp1d(self.x10, self.y10).copy)
+ assert_(not interp1d(self.x10, self.y10, copy=False).copy)
+ assert_(interp1d(self.x10, self.y10).bounds_error)
+ assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
+ assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
+ assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
+ 3.0)
+ assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
+ (1.0, 2.0))
+ assert_equal(interp1d(self.x10, self.y10).axis, 0)
+ assert_equal(interp1d(self.x10, self.y210).axis, 1)
+ assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
+ assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
+ assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
+ assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
+
+ def test_assume_sorted(self):
+ # Check for unsorted arrays
+ interp10 = interp1d(self.x10, self.y10)
+ interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
+
+ assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
+ assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
+ interp10([2.4, 5.6, 6.0]))
+
+ # Check assume_sorted keyword (defaults to False)
+ interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=False)
+ assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
+
+ interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
+ assume_sorted=True)
+ # Should raise an error for unsorted input if assume_sorted=True
+ assert_raises(ValueError, interp10_assume_kw2, self.x10)
+
+ # Check that if y is a 2-D array, things are still consistent
+ interp10_y_2d = interp1d(self.x10, self.y210)
+ interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
+ assert_array_almost_equal(interp10_y_2d(self.x10),
+ interp10_y_2d_unsorted(self.x10))
+
+ def test_linear(self):
+ for kind in ['linear', 'slinear']:
+ self._check_linear(kind)
+
+ def _check_linear(self, kind):
+ # Check the actual implementation of linear interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind=kind)
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]))
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind=kind,
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [-1, 0, 9, 11], rtol=1e-14)
+
+ opts = dict(kind=kind,
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_linear_dtypes(self):
+ # regression test for gh-5898, where 1D linear interpolation has been
+ # delegated to numpy.interp for all float dtypes, and the latter was
+ # not handling e.g. np.float128.
+ for dtyp in [np.float16,
+ np.float32,
+ np.float64,
+ np.longdouble]:
+ x = np.arange(8, dtype=dtyp)
+ y = x
+ yp = interp1d(x, y, kind='linear')(x)
+ assert_equal(yp.dtype, dtyp)
+ assert_allclose(yp, y, atol=1e-15)
+
+ # regression test for gh-14531, where 1D linear interpolation has been
+ # has been extended to delegate to numpy.interp for integer dtypes
+ x = [0, 1, 2]
+ y = [np.nan, 0, 1]
+ yp = interp1d(x, y)(x)
+ assert_allclose(yp, y, atol=1e-15)
+
+ def test_slinear_dtypes(self):
+ # regression test for gh-7273: 1D slinear interpolation fails with
+ # float32 inputs
+ dt_r = [np.float16, np.float32, np.float64]
+ dt_rc = dt_r + [np.complex64, np.complex128]
+ spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
+ for dtx in dt_r:
+ x = np.arange(0, 10, dtype=dtx)
+ for dty in dt_rc:
+ y = np.exp(-x/3.0).astype(dty)
+ for dtn in dt_r:
+ xnew = x.astype(dtn)
+ for kind in spline_kinds:
+ f = interp1d(x, y, kind=kind, bounds_error=False)
+ assert_allclose(f(xnew), y, atol=1e-7,
+ err_msg=f"{dtx}, {dty} {dtn}")
+
+ def test_cubic(self):
+ # Check the actual implementation of spline interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='cubic')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+ assert_array_almost_equal(interp10(1.5), np.array([1.5]))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2.4, 5.6, 6.0]),)
+
+ def test_nearest(self):
+ # Check the actual implementation of nearest-neighbour interpolation.
+ # Nearest asserts that half-integer case (1.5) rounds down to 1
+ interp10 = interp1d(self.x10, self.y10, kind='nearest')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='nearest',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, 9], rtol=1e-14)
+
+ opts = dict(kind='nearest',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_nearest_up(self):
+ # Check the actual implementation of nearest-neighbour interpolation.
+ # Nearest-up asserts that half-integer case (1.5) rounds up to 2
+ interp10 = interp1d(self.x10, self.y10, kind='nearest-up')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(2.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='nearest-up',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, 9], rtol=1e-14)
+
+ opts = dict(kind='nearest-up',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ def test_previous(self):
+ # Check the actual implementation of previous interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='previous')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 5., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='previous',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [np.nan, 0, 9, 9], rtol=1e-14)
+
+ # Tests for gh-9591
+ interpolator1D = interp1d(self.x10, self.y10, kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+ [np.nan, np.nan, 5, 8, 9, 9])
+
+ interpolator2D = interp1d(self.x10, self.y210, kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[np.nan, np.nan, 5, 8, 9, 9],
+ [np.nan, np.nan, 15, 18, 19, 19]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="previous",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[np.nan, np.nan],
+ [10, 11],
+ [18, 19]])
+
+ opts = dict(kind='previous',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ # Tests for gh-16813
+ interpolator1D = interp1d([0, 1, 2],
+ [0, 1, -1], kind="previous",
+ fill_value='extrapolate',
+ assume_sorted=True)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+ interpolator1D = interp1d([2, 0, 1], # x is not ascending
+ [-1, 0, 1], kind="previous",
+ fill_value='extrapolate',
+ assume_sorted=False)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+ interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+ kind="previous",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[np.nan, np.nan, 5, 8, -30, -30],
+ [np.nan, np.nan, 15, 18, -30, -30]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+ kind="previous",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[np.nan, np.nan],
+ [10, 11],
+ [-30, -30]])
+
+ def test_next(self):
+ # Check the actual implementation of next interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='next')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(2.))
+ assert_array_almost_equal(interp10(1.5), np.array(2.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([3., 6., 6.]),)
+
+ # test fill_value="extrapolate"
+ extrapolator = interp1d(self.x10, self.y10, kind='next',
+ fill_value='extrapolate')
+ assert_allclose(extrapolator([-1., 0, 9, 11]),
+ [0, 0, 9, np.nan], rtol=1e-14)
+
+ # Tests for gh-9591
+ interpolator1D = interp1d(self.x10, self.y10, kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+ [0, 0, 5, 8, np.nan, np.nan])
+
+ interpolator2D = interp1d(self.x10, self.y210, kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[0, 0, 5, 8, np.nan, np.nan],
+ [10, 10, 15, 18, np.nan, np.nan]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="next",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[0, 1],
+ [10, 11],
+ [np.nan, np.nan]])
+
+ opts = dict(kind='next',
+ fill_value='extrapolate',
+ bounds_error=True)
+ assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+ # Tests for gh-16813
+ interpolator1D = interp1d([0, 1, 2],
+ [0, 1, -1], kind="next",
+ fill_value='extrapolate',
+ assume_sorted=True)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [0, 0, 0, 1, -1, np.nan, np.nan])
+
+ interpolator1D = interp1d([2, 0, 1], # x is not ascending
+ [-1, 0, 1], kind="next",
+ fill_value='extrapolate',
+ assume_sorted=False)
+ assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+ [0, 0, 0, 1, -1, np.nan, np.nan])
+
+ interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+ kind="next",
+ fill_value='extrapolate')
+ assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+ [[30, 30, 5, 8, np.nan, np.nan],
+ [30, 30, 15, 18, np.nan, np.nan]])
+
+ interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+ kind="next",
+ axis=0, fill_value='extrapolate')
+ assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+ [[30, 30],
+ [10, 11],
+ [np.nan, np.nan]])
+
+ def test_zero(self):
+ # Check the actual implementation of zero-order spline interpolation.
+ interp10 = interp1d(self.x10, self.y10, kind='zero')
+ assert_array_almost_equal(interp10(self.x10), self.y10)
+ assert_array_almost_equal(interp10(1.2), np.array(1.))
+ assert_array_almost_equal(interp10(1.5), np.array(1.))
+ assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+ np.array([2., 5., 6.]))
+
+ def bounds_check_helper(self, interpolant, test_array, fail_value):
+ # Asserts that a ValueError is raised and that the error message
+ # contains the value causing this exception.
+ assert_raises(ValueError, interpolant, test_array)
+ try:
+ interpolant(test_array)
+ except ValueError as err:
+ assert (f"{fail_value}" in str(err))
+
+ def _bounds_check(self, kind='linear'):
+ # Test that our handling of out-of-bounds input is correct.
+ extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
+ bounds_error=False, kind=kind)
+
+ assert_array_equal(extrap10(11.2), np.array(self.fill_value))
+ assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
+ assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
+ np.array(self.fill_value),)
+ assert_array_equal(extrap10._check_bounds(
+ np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
+ np.array([[True, False, False, False, False],
+ [False, False, False, False, True]]))
+
+ raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
+ kind=kind)
+
+ self.bounds_check_helper(raises_bounds_error, -1.0, -1.0)
+ self.bounds_check_helper(raises_bounds_error, 11.0, 11.0)
+ self.bounds_check_helper(raises_bounds_error, [0.0, -1.0, 0.0], -1.0)
+ self.bounds_check_helper(raises_bounds_error, [0.0, 1.0, 21.0], 21.0)
+
+ raises_bounds_error([0.0, 5.0, 9.0])
+
+ def _bounds_check_int_nan_fill(self, kind='linear'):
+ x = np.arange(10).astype(int)
+ y = np.arange(10).astype(int)
+ c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
+ yi = c(x - 1)
+ assert_(np.isnan(yi[0]))
+ assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
+
+ def test_bounds(self):
+ for kind in ('linear', 'cubic', 'nearest', 'previous', 'next',
+ 'slinear', 'zero', 'quadratic'):
+ self._bounds_check(kind)
+ self._bounds_check_int_nan_fill(kind)
+
+ def _check_fill_value(self, kind):
+ interp = interp1d(self.x10, self.y10, kind=kind,
+ fill_value=(-100, 100), bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), -100)
+ assert_array_almost_equal(interp([-10, 10]), [-100, 100])
+
+ # Proper broadcasting:
+ # interp along axis of length 5
+ # other dim=(2, 3), (3, 2), (2, 2), or (2,)
+
+ # one singleton fill_value (works for all)
+ for y in (self.y235, self.y325, self.y225, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=100, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), 100)
+ assert_array_almost_equal(interp([-10, 10]), 100)
+
+ # singleton lower, singleton upper
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=(-100, 100), bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), -100)
+ if y.ndim == 3:
+ result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
+ else:
+ result = [[-100, 100]] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # one broadcastable (3,) fill_value
+ fill_value = [100, 200, 300]
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+ [200, 200],
+ [300, 300]]] * 2)
+
+ # one broadcastable (2,) fill_value
+ fill_value = [100, 200]
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y225, self.y325, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ result = [100, 200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(10), result)
+ assert_array_almost_equal(interp(-10), result)
+ result = [[100, 100], [200, 200]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # broadcastable (3,) lower, singleton upper
+ fill_value = (np.array([-100, -200, -300]), 100)
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 100],
+ [-300, 100]]] * 2)
+
+ # broadcastable (2,) lower, singleton upper
+ fill_value = (np.array([-100, -200]), 100)
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y225, self.y325, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), 100)
+ result = [-100, -200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(-10), result)
+ result = [[-100, 100], [-200, 100]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # broadcastable (3,) lower, broadcastable (3,) upper
+ fill_value = ([-100, -200, -300], [100, 200, 300])
+ for y in (self.y325, self.y225):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2): # check ndarray as well as list here
+ if ii == 1:
+ fill_value = tuple(np.array(f) for f in fill_value)
+ interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+ assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 200],
+ [-300, 300]]] * 2)
+ # broadcastable (2,) lower, broadcastable (2,) upper
+ fill_value = ([-100, -200], [100, 200])
+ assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for y in (self.y325, self.y225, self.y25):
+ interp = interp1d(self.x5, y, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ result = [100, 200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(10), result)
+ result = [-100, -200]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp(-10), result)
+ result = [[-100, 100], [-200, 200]]
+ if y.ndim == 3:
+ result = [result] * y.shape[0]
+ assert_array_almost_equal(interp([-10, 10]), result)
+
+ # one broadcastable (2, 2) array-like
+ fill_value = [[100, 200], [1000, 2000]]
+ for y in (self.y235, self.y325, self.y25):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2):
+ if ii == 1:
+ fill_value = np.array(fill_value)
+ interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+ [200, 200]],
+ [[1000, 1000],
+ [2000, 2000]]])
+
+ # broadcastable (2, 2) lower, broadcastable (2, 2) upper
+ fill_value = ([[-100, -200], [-1000, -2000]],
+ [[100, 200], [1000, 2000]])
+ for y in (self.y235, self.y325, self.y25):
+ assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+ axis=-1, fill_value=fill_value, bounds_error=False)
+ for ii in range(2):
+ if ii == 1:
+ fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
+ interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+ fill_value=fill_value, bounds_error=False)
+ assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+ assert_array_almost_equal(interp(-10), [[-100, -200],
+ [-1000, -2000]])
+ assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+ [-200, 200]],
+ [[-1000, 1000],
+ [-2000, 2000]]])
+
+ def test_fill_value(self):
+ # test that two-element fill value works
+ for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+ 'zero', 'previous', 'next'):
+ self._check_fill_value(kind)
+
+ def test_fill_value_writeable(self):
+ # backwards compat: fill_value is a public writeable attribute
+ interp = interp1d(self.x10, self.y10, fill_value=123.0)
+ assert_equal(interp.fill_value, 123.0)
+ interp.fill_value = 321.0
+ assert_equal(interp.fill_value, 321.0)
+
+ def _nd_check_interp(self, kind='linear'):
+ # Check the behavior when the inputs and outputs are multidimensional.
+
+ # Multidimensional input.
+ interp10 = interp1d(self.x10, self.y10, kind=kind)
+ assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
+ np.array([[3., 5.], [2., 7.]]))
+
+ # Scalar input -> 0-dim scalar array output
+ assert_(isinstance(interp10(1.2), np.ndarray))
+ assert_equal(interp10(1.2).shape, ())
+
+ # Multidimensional outputs.
+ interp210 = interp1d(self.x10, self.y210, kind=kind)
+ assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
+ assert_array_almost_equal(interp210(np.array([1., 2.])),
+ np.array([[1., 2.], [11., 12.]]))
+
+ interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
+ assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
+ assert_array_almost_equal(interp102(np.array([1., 3.])),
+ np.array([[2., 3.], [6., 7.]]))
+
+ # Both at the same time!
+ x_new = np.array([[3., 5.], [2., 7.]])
+ assert_array_almost_equal(interp210(x_new),
+ np.array([[[3., 5.], [2., 7.]],
+ [[13., 15.], [12., 17.]]]))
+ assert_array_almost_equal(interp102(x_new),
+ np.array([[[6., 7.], [10., 11.]],
+ [[4., 5.], [14., 15.]]]))
+
+ def _nd_check_shape(self, kind='linear'):
+ # Check large N-D output shape
+ a = [4, 5, 6, 7]
+ y = np.arange(np.prod(a)).reshape(*a)
+ for n, s in enumerate(a):
+ x = np.arange(s)
+ z = interp1d(x, y, axis=n, kind=kind)
+ assert_array_almost_equal(z(x), y, err_msg=kind)
+
+ x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
+ b = list(a)
+ b[n:n+1] = [2,3,1]
+ assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
+
+ def test_nd(self):
+ for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
+ 'zero', 'previous', 'next'):
+ self._nd_check_interp(kind)
+ self._nd_check_shape(kind)
+
+ def _check_complex(self, dtype=np.complex128, kind='linear'):
+ x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
+ y = x * x ** (1 + 2j)
+ y = y.astype(dtype)
+
+ # simple test
+ c = interp1d(x, y, kind=kind)
+ assert_array_almost_equal(y[:-1], c(x)[:-1])
+
+ # check against interpolating real+imag separately
+ xi = np.linspace(1, 10, 31)
+ cr = interp1d(x, y.real, kind=kind)
+ ci = interp1d(x, y.imag, kind=kind)
+ assert_array_almost_equal(c(xi).real, cr(xi))
+ assert_array_almost_equal(c(xi).imag, ci(xi))
+
+ def test_complex(self):
+ for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+ 'zero', 'previous', 'next'):
+ self._check_complex(np.complex64, kind)
+ self._check_complex(np.complex128, kind)
+
+ @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+ def test_circular_refs(self):
+ # Test interp1d can be automatically garbage collected
+ x = np.linspace(0, 1)
+ y = np.linspace(0, 1)
+ # Confirm interp can be released from memory after use
+ with assert_deallocated(interp1d, x, y) as interp:
+ interp([0.1, 0.2])
+ del interp
+
+ def test_overflow_nearest(self):
+ # Test that the x range doesn't overflow when given integers as input
+ for kind in ('nearest', 'previous', 'next'):
+ x = np.array([0, 50, 127], dtype=np.int8)
+ ii = interp1d(x, x, kind=kind)
+ assert_array_almost_equal(ii(x), x)
+
+ def test_local_nans(self):
+ # check that for local interpolation kinds (slinear, zero) a single nan
+ # only affects its local neighborhood
+ x = np.arange(10).astype(float)
+ y = x.copy()
+ y[6] = np.nan
+ for kind in ('zero', 'slinear'):
+ ir = interp1d(x, y, kind=kind)
+ vals = ir([4.9, 7.0])
+ assert_(np.isfinite(vals).all())
+
+ def test_spline_nans(self):
+ # Backwards compat: a single nan makes the whole spline interpolation
+ # return nans in an array of the correct shape. And it doesn't raise,
+ # just quiet nans because of backcompat.
+ x = np.arange(8).astype(float)
+ y = x.copy()
+ yn = y.copy()
+ yn[3] = np.nan
+
+ for kind in ['quadratic', 'cubic']:
+ ir = interp1d(x, y, kind=kind)
+ irn = interp1d(x, yn, kind=kind)
+ for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
+ xnew = np.asarray(xnew)
+ out, outn = ir(x), irn(x)
+ assert_(np.isnan(outn).all())
+ assert_equal(out.shape, outn.shape)
+
+ def test_all_nans(self):
+ # regression test for gh-11637: interp1d core dumps with all-nan `x`
+ x = np.ones(10) * np.nan
+ y = np.arange(10)
+ with assert_raises(ValueError):
+ interp1d(x, y, kind='cubic')
+
def test_read_only(self):
    """interp1d must accept read-only input arrays for all spline kinds."""
    x = np.arange(0, 10)
    y = np.exp(-x / 3.0)
    xnew = np.arange(0, 9, 0.1)
    # Check both read-only and not read-only:
    for xnew_writeable in (True, False):
        xnew.flags.writeable = xnew_writeable
        # `x` is always read-only here; the constructor must not write to it.
        x.flags.writeable = False
        for kind in ('linear', 'nearest', 'zero', 'slinear', 'quadratic',
                     'cubic'):
            f = interp1d(x, y, kind=kind)
            vals = f(xnew)
            assert_(np.isfinite(vals).all())
+
@pytest.mark.parametrize(
    "kind", ("linear", "nearest", "nearest-up", "previous", "next")
)
def test_single_value(self, kind):
    """A one-point data set works with fill values (gh-4043)."""
    # https://github.com/scipy/scipy/issues/4043
    f = interp1d([1.5], [6], kind=kind, bounds_error=False,
                 fill_value=(2, 10))
    # Below / at / above the single data point -> (low fill, value, high fill).
    assert_array_equal(f([1, 1.5, 2]), [2, 6, 10])
    # check still error if bounds_error=True
    f = interp1d([1.5], [6], kind=kind, bounds_error=True)
    with assert_raises(ValueError, match="x_new is above"):
        f(2.0)
+
+
class TestLagrange:
    """Check that `lagrange` exactly reproduces a sampled polynomial."""

    def test_lagrange(self):
        """Interpolating samples of a quartic recovers its coefficients."""
        reference = poly1d([5, 2, 1, 4, 3])
        nodes = np.arange(len(reference.coeffs))
        samples = reference(nodes)
        recovered = lagrange(nodes, samples)
        assert_array_almost_equal(reference.coeffs, recovered.coeffs)
+
+
class TestAkima1DInterpolator:
    """Tests for the Akima (and modified-Akima/makima) 1-D interpolator."""

    def test_eval(self):
        """Evaluate against precomputed high-precision reference values."""
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                       4.1363636363636366866103344, 5.9803623910336236590978842,
                       5.5067291516462386624652936, 5.2031367459745245795943447,
                       4.1796554159017080820603951, 3.4110386597938129327189927,
                       3.])
        assert_allclose(ak(xi), yi)

    def test_eval_mod(self):
        """method='makima' matches MATLAB's makima() output."""
        # Reference values generated with the following MATLAB code:
        # format longG
        # x = 0:10; y = [0. 2. 1. 3. 2. 6. 5.5 5.5 2.7 5.1 3.];
        # xi = [0. 0.5 1. 1.5 2.5 3.5 4.5 5.1 6.5 7.2 8.6 9.9 10.];
        # makima(x, y, xi)
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y, method="makima")
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([
            0.0, 1.34471153846154, 2.0, 1.44375, 1.94375, 2.51939102564103,
            4.10366931918656, 5.98501550899192, 5.51756330960439, 5.1757231914014,
            4.12326636931311, 3.32931513157895, 3.0])
        assert_allclose(ak(xi), yi)

    def test_eval_2d(self):
        """2-D `y` (two stacked columns) interpolates column-wise."""
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.column_stack((y, 2. * y))
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                       4.1363636363636366866103344,
                       5.9803623910336236590978842,
                       5.5067291516462386624652936,
                       5.2031367459745245795943447,
                       4.1796554159017080820603951,
                       3.4110386597938129327189927, 3.])
        yi = np.column_stack((yi, 2. * yi))
        assert_allclose(ak(xi), yi)

    def test_eval_3d(self):
        """3-D `y` interpolates along axis 0 independently per trailing slot."""
        x = np.arange(0., 11.)
        y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.empty((11, 2, 2))
        y[:, 0, 0] = y_
        y[:, 1, 0] = 2. * y_
        y[:, 0, 1] = 3. * y_
        y[:, 1, 1] = 4. * y_
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.empty((13, 2, 2))
        yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                        4.1363636363636366866103344,
                        5.9803623910336236590978842,
                        5.5067291516462386624652936,
                        5.2031367459745245795943447,
                        4.1796554159017080820603951,
                        3.4110386597938129327189927, 3.])
        yi[:, 0, 0] = yi_
        yi[:, 1, 0] = 2. * yi_
        yi[:, 0, 1] = 3. * yi_
        yi[:, 1, 1] = 4. * yi_
        assert_allclose(ak(xi), yi)

    def test_degenerate_case_multidimensional(self):
        """Three-point (minimal) data with 2-D `y` works (gh-5683)."""
        # This test is for issue #5683.
        x = np.array([0, 1, 2])
        y = np.vstack((x, x**2)).T
        ak = Akima1DInterpolator(x, y)
        x_eval = np.array([0.5, 1.5])
        y_eval = ak(x_eval)
        assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)

    def test_extend(self):
        """extend() is explicitly unsupported and must raise."""
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        match = "Extending a 1-D Akima interpolator is not yet implemented"
        with pytest.raises(NotImplementedError, match=match):
            ak.extend(None, None)

    def test_mod_invalid_method(self):
        """An unknown `method` name must raise NotImplementedError."""
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        match = "`method`=invalid is unsupported."
        with pytest.raises(NotImplementedError, match=match):
            Akima1DInterpolator(x, y, method="invalid")  # type: ignore

    def test_complex(self):
        """Complex-valued `y` is deprecated and must warn."""
        # Complex-valued data deprecated
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = y - 2j*y
        # actually raises ComplexWarning, which subclasses RuntimeWarning, see
        # https://github.com/numpy/numpy/blob/main/numpy/exceptions.py
        msg = "Passing an array with a complex.*|Casting complex values to real.*"
        with pytest.warns((RuntimeWarning, DeprecationWarning), match=msg):
            Akima1DInterpolator(x, y)
+
+
class TestPPolyCommon:
    """Basic functionality shared by PPoly and BPoly (ctor checks, extend, shape, axis)."""

    def test_sort_check(self):
        """Breakpoints that are not monotonic must be rejected."""
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 1, 0.5])
        assert_raises(ValueError, PPoly, c, x)
        assert_raises(ValueError, BPoly, c, x)

    def test_ctor_c(self):
        """`c` must be at least 2-D (coefficients x intervals)."""
        # wrong shape: `c` must be at least 2D
        with assert_raises(ValueError):
            PPoly([1, 2], [0, 1])

    def test_extend(self):
        """Extending on the right or on the left reproduces the full polynomial."""
        # Test adding new points to the piecewise polynomial
        np.random.seed(1234)

        order = 3
        x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
        c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1

        for cls in (PPoly, BPoly):
            # extend to the right
            pp = cls(c[:, :9], x[:10])
            pp.extend(c[:, 9:], x[10:])

            # extend to the left
            pp2 = cls(c[:, 10:], x[10:])
            pp2.extend(c[:, :10], x[:10])

            # reference built in one shot
            pp3 = cls(c, x)

            assert_array_equal(pp.c, pp3.c)
            assert_array_equal(pp.x, pp3.x)
            assert_array_equal(pp2.c, pp3.c)
            assert_array_equal(pp2.x, pp3.x)

    def test_extend_diff_orders(self):
        """Extending with pieces of a different polynomial order works."""
        # Test extending polynomial with different order one
        np.random.seed(1234)

        x = np.linspace(0, 1, 6)
        c = np.random.rand(2, 5)

        x2 = np.linspace(1, 2, 6)
        c2 = np.random.rand(4, 5)

        for cls in (PPoly, BPoly):
            pp1 = cls(c, x)
            pp2 = cls(c2, x2)

            pp_comb = cls(c, x)
            pp_comb.extend(c2, x2[1:])

            # NB. doesn't match to pp1 at the endpoint, because pp1 is not
            # continuous with pp2 as we took random coefs.
            xi1 = np.linspace(0, 1, 300, endpoint=False)
            xi2 = np.linspace(1, 2, 300)

            assert_allclose(pp1(xi1), pp_comb(xi1))
            assert_allclose(pp2(xi2), pp_comb(xi2))

    def test_extend_descending(self):
        """extend() also works when breakpoints are given in descending order."""
        np.random.seed(0)

        order = 3
        x = np.sort(np.random.uniform(0, 10, 20))
        c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)

        for cls in (PPoly, BPoly):
            p = cls(c, x)

            p1 = cls(c[:, :9], x[:10])
            p1.extend(c[:, 9:], x[10:])

            p2 = cls(c[:, 10:], x[10:])
            p2.extend(c[:, :10], x[:10])

            assert_array_equal(p1.c, p.c)
            assert_array_equal(p1.x, p.x)
            assert_array_equal(p2.c, p.c)
            assert_array_equal(p2.x, p.x)

    def test_shape(self):
        """Evaluation broadcasts: output shape is xp.shape + trailing c dims."""
        np.random.seed(1234)
        c = np.random.rand(8, 12, 5, 6, 7)
        x = np.sort(np.random.rand(13))
        xp = np.random.rand(3, 4)
        for cls in (PPoly, BPoly):
            p = cls(c, x)
            assert_equal(p(xp).shape, (3, 4, 5, 6, 7))

        # 'scalars'
        for cls in (PPoly, BPoly):
            p = cls(c[..., 0, 0, 0], x)

            assert_equal(np.shape(p(0.5)), ())
            assert_equal(np.shape(p(np.array(0.5))), ())

            # ragged object arrays are not valid evaluation points
            assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]], dtype=object))

    def test_complex_coef(self):
        """Complex coefficients evaluate as (real part) + 1j*(imag part)."""
        np.random.seed(12345)
        x = np.sort(np.random.random(13))
        c = np.random.random((8, 12)) * (1. + 0.3j)
        c_re, c_im = c.real, c.imag
        xp = np.random.random(5)
        for cls in (PPoly, BPoly):
            p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
            for nu in [0, 1, 2]:
                assert_allclose(p(xp, nu).real, p_re(xp, nu))
                assert_allclose(p(xp, nu).imag, p_im(xp, nu))

    def test_axis(self):
        """`axis` selects which c dimension is interpolated; shape bookkeeping."""
        np.random.seed(12345)
        c = np.random.rand(3, 4, 5, 6, 7, 8)
        c_s = c.shape
        xp = np.random.random((1, 2))
        for axis in (0, 1, 2, 3):
            m = c.shape[axis+1]
            x = np.sort(np.random.rand(m+1))
            for cls in (PPoly, BPoly):
                p = cls(c, x, axis=axis)
                # internally the (order, intervals) axes are moved to the front
                assert_equal(p.c.shape,
                             c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
                res = p(xp)
                targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
                assert_equal(res.shape, targ_shape)

                # deriv/antideriv does not drop the axis
                for p1 in [cls(c, x, axis=axis).derivative(),
                           cls(c, x, axis=axis).derivative(2),
                           cls(c, x, axis=axis).antiderivative(),
                           cls(c, x, axis=axis).antiderivative(2)]:
                    assert_equal(p1.axis, p.axis)

        # c array needs two axes for the coefficients and intervals, so
        # 0 <= axis < c.ndim-1; raise otherwise
        for axis in (-1, 4, 5, 6):
            for cls in (BPoly, PPoly):
                assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
+
+
class TestPolySubclassing:
    """Subclasses of PPoly/BPoly must survive calculus ops and basis conversions."""

    class P(PPoly):
        pass

    class B(BPoly):
        pass

    def _make_polynomials(self):
        """Return one cubic subclass instance of each flavor on random data."""
        np.random.seed(1234)
        breakpoints = np.sort(np.random.random(3))
        coeffs = np.random.random((4, 2))
        return self.P(coeffs, breakpoints), self.B(coeffs, breakpoints)

    def test_derivative(self):
        """derivative() and antiderivative() preserve the subclass type."""
        power_poly, bern_poly = self._make_polynomials()
        for poly in (power_poly, bern_poly):
            assert_equal(poly.__class__, poly.derivative().__class__)

        assert_equal(power_poly.__class__,
                     power_poly.antiderivative().__class__)

    def test_from_spline(self):
        """from_spline() constructs the subclass, not the base class."""
        np.random.seed(1234)
        knots = np.sort(np.r_[0, np.random.rand(11), 1])
        values = np.random.rand(len(knots))

        tck = splrep(knots, values, s=0)
        assert_equal(self.P.from_spline(tck).__class__, self.P)

    def test_conversions(self):
        """Basis conversions land in the subclass they were invoked on."""
        power_poly, bern_poly = self._make_polynomials()

        assert_equal(self.P.from_bernstein_basis(bern_poly).__class__, self.P)
        assert_equal(self.B.from_power_basis(power_poly).__class__, self.B)

    def test_from_derivatives(self):
        """from_derivatives() also returns the subclass."""
        xi = [0, 1, 2]
        yi = [[1], [2], [3]]
        assert_equal(self.B.from_derivatives(xi, yi).__class__, self.B)
+
+
class TestPPoly:
    """Tests for piecewise polynomials in the power basis (PPoly)."""

    def test_simple(self):
        """Two-interval quadratic evaluated against hand-computed values."""
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 0.5, 1])
        p = PPoly(c, x)
        # coefficients are local to each interval: p(x) = c0*(x-xi)**2 + ...
        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)

    def test_periodic(self):
        """extrapolate='periodic' wraps evaluation points into the base period."""
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 0.5, 1])
        p = PPoly(c, x, extrapolate='periodic')

        assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
        assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)

        # first derivative wraps the same way
        assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
        assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)

    def test_read_only(self):
        """Construction and evaluation accept read-only input arrays."""
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 0.5, 1])
        xnew = np.array([0, 0.1, 0.2])
        PPoly(c, x, extrapolate='periodic')

        for writeable in (True, False):
            x.flags.writeable = writeable
            c.flags.writeable = writeable
            f = PPoly(c, x)
            vals = f(xnew)
            assert_(np.isfinite(vals).all())

    def test_descending(self):
        """Descending breakpoints give identical values, derivatives and roots."""
        def binom_matrix(power):
            # binomial coefficient matrix used to re-expand the local
            # polynomials around the other end of each interval
            n = np.arange(power + 1).reshape(-1, 1)
            k = np.arange(power + 1)
            B = binom(n, k)
            return B[::-1, ::-1]

        np.random.seed(0)

        power = 3
        for m in [10, 20, 30]:
            x = np.sort(np.random.uniform(0, 10, m + 1))
            ca = np.random.uniform(-2, 2, size=(power + 1, m))

            h = np.diff(x)
            h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
            B = binom_matrix(power)
            cap = ca * h_powers
            cdp = np.dot(B.T, cap)
            cd = cdp / h_powers

            pa = PPoly(ca, x, extrapolate=True)
            pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)

            x_test = np.random.uniform(-10, 20, 100)
            assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
            assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)

            pa_d = pa.derivative()
            pd_d = pd.derivative()

            assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)

            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but surely the differences should be
            # equal.
            pa_i = pa.antiderivative()
            pd_i = pd.antiderivative()
            for a, b in np.random.uniform(-10, 20, (5, 2)):
                int_a = pa.integrate(a, b)
                int_d = pd.integrate(a, b)
                assert_allclose(int_a, int_d, rtol=1e-13)
                assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
                                rtol=1e-13)

            roots_d = pd.roots()
            roots_a = pa.roots()
            assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)

    def test_multi_shape(self):
        """Trailing c dimensions propagate through evaluation and calculus."""
        c = np.random.rand(6, 2, 1, 2, 3)
        x = np.array([0, 0.5, 1])
        p = PPoly(c, x)
        assert_equal(p.x.shape, x.shape)
        assert_equal(p.c.shape, c.shape)
        assert_equal(p(0.3).shape, c.shape[2:])

        assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])

        # derivative drops one coefficient row; antiderivative adds one
        dp = p.derivative()
        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
        ip = p.antiderivative()
        assert_equal(ip.c.shape, (7, 2, 1, 2, 3))

    def test_construct_fast(self):
        """construct_fast (no validation) yields a working polynomial."""
        np.random.seed(1234)
        c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
        x = np.array([0, 0.5, 1])
        p = PPoly.construct_fast(c, x)
        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)

    def test_vs_alternative_implementations(self):
        """Cross-check evaluation against two independent reference evaluators."""
        np.random.seed(1234)
        c = np.random.rand(3, 12, 22)
        x = np.sort(np.r_[0, np.random.rand(11), 1])

        p = PPoly(c, x)

        xp = np.r_[0.3, 0.5, 0.33, 0.6]
        expected = _ppoly_eval_1(c, x, xp)
        assert_allclose(p(xp), expected)

        expected = _ppoly_eval_2(c[:, :, 0], x, xp)
        assert_allclose(p(xp)[:, 0], expected)

    def test_from_spline(self):
        """from_spline accepts both tck tuples and BSpline objects."""
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        assert_allclose(pp(xi), splev(xi, spl))

        # make sure .from_spline accepts BSpline objects
        b = BSpline(*spl)
        ppp = PPoly.from_spline(b)
        assert_allclose(ppp(xi), b(xi))

        # BSpline's extrapolate attribute propagates unless overridden
        t, c, k = spl
        for extrap in (None, True, False):
            b = BSpline(t, c, k, extrapolate=extrap)
            p = PPoly.from_spline(b)
            assert_equal(p.extrapolate, b.extrapolate)

    def test_derivative_simple(self):
        """First/second derivatives of a cubic match hand-differentiated coefs."""
        np.random.seed(1234)
        c = np.array([[4, 3, 2, 1]]).T
        dc = np.array([[3*4, 2*3, 2]]).T
        ddc = np.array([[2*3*4, 1*2*3]]).T
        x = np.array([0, 1])

        pp = PPoly(c, x)
        dpp = PPoly(dc, x)
        ddpp = PPoly(ddc, x)

        assert_allclose(pp.derivative().c, dpp.c)
        assert_allclose(pp.derivative(2).c, ddpp.c)

    def test_derivative_eval(self):
        """p(x, nu) agrees with splev's nu-th derivative of the source spline."""
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        for dx in range(0, 3):
            assert_allclose(pp(xi, dx), splev(xi, spl, dx))

    def test_derivative(self):
        """p(x, nu) agrees with p.derivative(nu)(x) up to high orders."""
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        for dx in range(0, 10):
            assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
                            err_msg="dx=%d" % (dx,))

    def test_antiderivative_of_constant(self):
        """Antiderivative of a constant is linear (gh-4216)."""
        # https://github.com/scipy/scipy/issues/4216
        p = PPoly([[1.]], [0, 1])
        assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
        assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)

    def test_antiderivative_regression_4355(self):
        """Piecewise-constant antiderivative/integral regression (gh-4355)."""
        # https://github.com/scipy/scipy/issues/4355
        p = PPoly([[1., 0.5]], [0, 1, 2])
        q = p.antiderivative()
        assert_equal(q.c, [[1, 0.5], [0, 1]])
        assert_equal(q.x, [0, 1, 2])
        assert_allclose(p.integrate(0, 2), 1.5)
        assert_allclose(q(2) - q(0), 1.5)

    def test_antiderivative_simple(self):
        """First and second antiderivatives match hand-integrated coefficients."""
        np.random.seed(1234)
        # [ p1(x) = 3*x**2 + 2*x + 1,
        #   p2(x) = 1.6875]
        c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
        # [ pp1(x) = x**3 + x**2 + x,
        #   pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
        ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
        # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
        #   ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
        iic = np.array([[1/4, 1/3, 1/2, 0, 0],
                        [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
        x = np.array([0, 0.25, 1])

        pp = PPoly(c, x)
        ipp = pp.antiderivative()
        iipp = pp.antiderivative(2)
        iipp2 = ipp.antiderivative()

        assert_allclose(ipp.x, x)
        assert_allclose(ipp.c.T, ic.T)
        assert_allclose(iipp.c.T, iic.T)
        assert_allclose(iipp2.c.T, iic.T)

    def test_antiderivative_vs_derivative(self):
        """derivative() inverts antiderivative(); intermediate orders are continuous."""
        np.random.seed(1234)
        x = np.linspace(0, 1, 30)**2
        y = np.random.rand(len(x))
        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        for dx in range(0, 10):
            ipp = pp.antiderivative(dx)

            # check that derivative is inverse op
            pp2 = ipp.derivative(dx)
            assert_allclose(pp.c, pp2.c)

            # check continuity
            for k in range(dx):
                pp2 = ipp.derivative(k)

                r = 1e-13
                endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]

                assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
                                rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))

    def test_antiderivative_vs_spline(self):
        """Antiderivative agrees with splantider of the source spline."""
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        for dx in range(0, 10):
            pp2 = pp.antiderivative(dx)
            spl2 = splantider(spl, dx)

            xi = np.linspace(0, 1, 200)
            assert_allclose(pp2(xi), splev(xi, spl2),
                            rtol=1e-7)

    def test_antiderivative_continuity(self):
        """Antiderivative is continuous at breaks; only constants are adjusted."""
        c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
        x = np.array([0, 0.5, 1])

        p = PPoly(c, x)
        ip = p.antiderivative()

        # check continuity
        assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)

        # check that only lowest order coefficients were changed
        p2 = ip.derivative()
        assert_allclose(p2.c, p.c)

    def test_integrate(self):
        """integrate() matches the antiderivative difference and splint."""
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        a, b = 0.3, 0.9
        ig = pp.integrate(a, b)

        ipp = pp.antiderivative()
        assert_allclose(ig, ipp(b) - ipp(a))
        assert_allclose(ig, splint(a, b, spl))

        # out-of-domain lower limit: extrapolate=True integrates anyway,
        # extrapolate=False yields nan
        a, b = -0.3, 0.9
        ig = pp.integrate(a, b, extrapolate=True)
        assert_allclose(ig, ipp(b) - ipp(a))

        assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())

    def test_integrate_readonly(self):
        """integrate() works when breakpoints are read-only."""
        x = np.array([1, 2, 4])
        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])

        for writeable in (True, False):
            x.flags.writeable = writeable

            P = PPoly(c, x)
            vals = P.integrate(1, 4)

            assert_(np.isfinite(vals).all())

    def test_integrate_periodic(self):
        """Periodic integration: whole periods, partial windows, reversed limits."""
        x = np.array([1, 2, 4])
        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])

        P = PPoly(c, x, extrapolate='periodic')
        I = P.antiderivative()

        period_int = I(4) - I(1)

        assert_allclose(P.integrate(1, 4), period_int)
        assert_allclose(P.integrate(-10, -7), period_int)
        assert_allclose(P.integrate(-10, -4), 2 * period_int)

        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
                        I(2) - I(1) + I(4) - I(3.5))
        assert_allclose(P.integrate(3.5, 5 + 12),
                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)

        # reversed limits flip the sign
        assert_allclose(P.integrate(0, -1), I(2) - I(3))
        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)

    def test_roots(self):
        """roots() of a cubic spline agrees with sproot."""
        x = np.linspace(0, 1, 31)**2
        y = np.sin(30*x)

        spl = splrep(x, y, s=0, k=3)
        pp = PPoly.from_spline(spl)

        r = pp.roots()
        r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
        assert_allclose(r, sproot(spl), atol=1e-15)

    def test_roots_idzero(self):
        """Identically-zero sections are reported as (start, nan) pairs."""
        # Roots for piecewise polynomials with identically zero
        # sections.
        c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
        x = np.array([0, 0.4, 0.6, 1.0])

        pp = PPoly(c, x)
        assert_array_equal(pp.roots(),
                           [0.25, 0.4, np.nan, 0.6 + 0.25])

        # ditto for p.solve(const) with sections identically equal const
        const = 2.
        c1 = c.copy()
        c1[1, :] += const
        pp1 = PPoly(c1, x)

        assert_array_equal(pp1.solve(const),
                           [0.25, 0.4, np.nan, 0.6 + 0.25])

    def test_roots_all_zero(self):
        """Code path for a polynomial that is identically zero everywhere."""
        # test the code path for the polynomial being identically zero everywhere
        c = [[0], [0]]
        x = [0, 1]
        p = PPoly(c, x)
        assert_array_equal(p.roots(), [0, np.nan])
        assert_array_equal(p.solve(0), [0, np.nan])
        assert_array_equal(p.solve(1), [])

        c = [[0, 0], [0, 0]]
        x = [0, 1, 2]
        p = PPoly(c, x)
        assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
        assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
        assert_array_equal(p.solve(1), [])

    def test_roots_repeated(self):
        """Roots shared by adjacent sections are reported only once."""
        # Check roots repeated in multiple sections are reported only
        # once.

        # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
        c = np.array([[1, 0, -1], [-1, 0, 0]]).T
        x = np.array([-1, 0, 1])

        pp = PPoly(c, x)
        assert_array_equal(pp.roots(), [-2, 0])
        assert_array_equal(pp.roots(extrapolate=False), [0])

    def test_roots_discont(self):
        """Sign changes across a discontinuity count as roots unless disabled."""
        # Check that a discontinuity across zero is reported as root
        c = np.array([[1], [-1]]).T
        x = np.array([0, 0.5, 1])
        pp = PPoly(c, x)
        assert_array_equal(pp.roots(), [0.5])
        assert_array_equal(pp.roots(discontinuity=False), [])

        # ditto for a discontinuity across y:
        assert_array_equal(pp.solve(0.5), [0.5])
        assert_array_equal(pp.solve(0.5, discontinuity=False), [])

        assert_array_equal(pp.solve(1.5), [])
        assert_array_equal(pp.solve(1.5, discontinuity=False), [])

    def test_roots_random(self):
        """High-order random polynomials: every reported root really is one."""
        # Check high-order polynomials with random coefficients
        np.random.seed(1234)

        num = 0

        for extrapolate in (True, False):
            for order in range(0, 20):
                x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
                c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1

                pp = PPoly(c, x)
                for y in [0, np.random.random()]:
                    r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)

                    for i in range(2):
                        for j in range(3):
                            rr = r[i, j]
                            if rr.size > 0:
                                # Check that the reported roots indeed are roots
                                num += rr.size
                                val = pp(rr, extrapolate=extrapolate)[:, i, j]
                                cmpval = pp(rr, nu=1,
                                            extrapolate=extrapolate)[:, i, j]
                                msg = f"({extrapolate!r}) r = {repr(rr)}"
                                # residual scaled by the derivative (Newton step)
                                assert_allclose((val-y) / cmpval, 0, atol=1e-7,
                                                err_msg=msg)

        # Check that we checked a number of roots
        assert_(num > 100, repr(num))

    def test_roots_croots(self):
        """Exercise the low-level complex root finder across polynomial orders."""
        # Test the complex root finding algorithm
        np.random.seed(1234)

        for k in range(1, 15):
            c = np.random.rand(k, 1, 130)

            if k == 3:
                # add a case with zero discriminant
                c[:, 0, 0] = 1, 2, 1

            for y in [0, np.random.random()]:
                w = np.empty(c.shape, dtype=complex)
                _ppoly._croots_poly1(c, w)

                if k == 1:
                    # a constant polynomial has no roots
                    assert_(np.isnan(w).all())
                    continue

                # evaluate the polynomial at the claimed roots and normalize
                # by the sum of term magnitudes to get a relative residual
                res = 0
                cres = 0
                for i in range(k):
                    res += c[i, None] * w**(k-1-i)
                    cres += abs(c[i, None] * w**(k-1-i))
                with np.errstate(invalid='ignore'):
                    res /= cres
                res = res.ravel()
                res = res[~np.isnan(res)]
                assert_allclose(res, 0, atol=1e-10)

    def test_extrapolate_attr(self):
        """extrapolate flag propagates to derivative/antiderivative and roots."""
        # [ 1 - x**2 ]
        c = np.array([[-1, 0, 1]]).T
        x = np.array([0, 1])

        for extrapolate in [True, False, None]:
            pp = PPoly(c, x, extrapolate=extrapolate)
            pp_d = pp.derivative()
            pp_i = pp.antiderivative()

            if extrapolate is False:
                assert_(np.isnan(pp([-0.1, 1.1])).all())
                assert_(np.isnan(pp_i([-0.1, 1.1])).all())
                assert_(np.isnan(pp_d([-0.1, 1.1])).all())
                assert_equal(pp.roots(), [1])
            else:
                assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
                assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
                assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
                assert_allclose(pp.roots(), [1, -1])
+
+
class TestBPoly:
    """Tests for piecewise polynomials in the Bernstein basis (BPoly)."""

    def test_simple(self):
        """Degree 0: a single Bernstein coefficient is a constant."""
        x = [0, 1]
        c = [[3]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.1), 3.)

    def test_simple2(self):
        """Degree 1 Bernstein expansion."""
        x = [0, 1]
        c = [[3], [1]]
        bp = BPoly(c, x)   # 3*(1-x) + 1*x
        assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)

    def test_simple3(self):
        """Degree 2 Bernstein expansion."""
        x = [0, 1]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)   # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
        assert_allclose(bp(0.2),
                        3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)

    def test_simple4(self):
        """Degree 3 Bernstein expansion."""
        x = [0, 1]
        c = [[1], [1], [1], [2]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**3 +
                                 3 * 0.7**2 * 0.3 +
                                 3 * 0.7 * 0.3**2 +
                                 2 * 0.3**3)

    def test_simple5(self):
        """Degree 4 Bernstein expansion."""
        x = [0, 1]
        c = [[1], [1], [8], [2], [1]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**4 +
                                 4 * 0.7**3 * 0.3 +
                                 8 * 6 * 0.7**2 * 0.3**2 +
                                 2 * 4 * 0.7 * 0.3**3 +
                                 0.3**4)

    def test_periodic(self):
        """extrapolate='periodic' wraps values and derivatives."""
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        # [3*(1-x)**2, 2*((x-1)/2)**2]
        bp = BPoly(c, x, extrapolate='periodic')

        assert_allclose(bp(3.4), 3 * 0.6**2)
        assert_allclose(bp(-1.3), 2 * (0.7/2)**2)

        assert_allclose(bp(3.4, 1), -6 * 0.6)
        assert_allclose(bp(-1.3, 1), 2 * (0.7/2))

    def test_descending(self):
        """Descending breakpoints: flipped coefficients give identical results."""
        np.random.seed(0)

        power = 3
        for m in [10, 20, 30]:
            x = np.sort(np.random.uniform(0, 10, m + 1))
            ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
            # We need only to flip coefficients to get it right!
            cd = ca[::-1].copy()

            pa = BPoly(ca, x, extrapolate=True)
            pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)

            x_test = np.random.uniform(-10, 20, 100)
            assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
            assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)

            pa_d = pa.derivative()
            pd_d = pd.derivative()

            assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)

            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but surely the differences should be
            # equal.
            pa_i = pa.antiderivative()
            pd_i = pd.antiderivative()
            for a, b in np.random.uniform(-10, 20, (5, 2)):
                int_a = pa.integrate(a, b)
                int_d = pd.integrate(a, b)
                assert_allclose(int_a, int_d, rtol=1e-12)
                assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
                                rtol=1e-12)

    def test_multi_shape(self):
        """Trailing c dimensions propagate through evaluation and derivative."""
        c = np.random.rand(6, 2, 1, 2, 3)
        x = np.array([0, 0.5, 1])
        p = BPoly(c, x)
        assert_equal(p.x.shape, x.shape)
        assert_equal(p.c.shape, c.shape)
        assert_equal(p(0.3).shape, c.shape[2:])
        assert_equal(p(np.random.rand(5, 6)).shape,
                     (5, 6)+c.shape[2:])

        dp = p.derivative()
        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))

    def test_interval_length(self):
        """Bernstein parameter is normalized: s = (x - xa) / (xb - xa)."""
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)
        xval = 0.1
        s = xval / 2  # s = (x - xa) / (xb - xa)
        assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)

    def test_two_intervals(self):
        """Each interval uses its own Bernstein coefficients."""
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]

        assert_allclose(bp(0.4), 3 * 0.6*0.6)
        assert_allclose(bp(1.7), 2 * (0.7/2)**2)

    def test_extrapolate_attr(self):
        """extrapolate flag propagates to the derivative."""
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)

        for extrapolate in (True, False, None):
            bp = BPoly(c, x, extrapolate=extrapolate)
            bp_d = bp.derivative()
            if extrapolate is False:
                assert_(np.isnan(bp([-0.1, 2.1])).all())
                assert_(np.isnan(bp_d([-0.1, 2.1])).all())
            else:
                assert_(not np.isnan(bp([-0.1, 2.1])).any())
                assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
+
+
class TestBPolyCalculus:
    """Differentiation and integration of Bernstein-basis polynomials."""

    def test_derivative(self):
        """derivative() and in-place p(x, nu) agree with hand-computed values."""
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
        bp_der = bp.derivative()
        assert_allclose(bp_der(0.4), -6*(0.6))
        assert_allclose(bp_der(1.7), 0.7)

        # derivatives in-place
        assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
                        [-6*(1-0.4), 6., 0.])
        assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
                        [0.7, 1., 0])

    def test_derivative_ppoly(self):
        """Repeated differentiation stays consistent with the power basis."""
        # make sure it's consistent w/ power basis
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)

        for d in range(k):
            bp = bp.derivative()
            pp = pp.derivative()
            xp = np.linspace(x[0], x[-1], 21)
            assert_allclose(bp(xp), pp(xp))

    def test_deriv_inplace(self):
        """bp(x, i) equals bp.derivative(i)(x) for real and complex coefs."""
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))

        # test both real and complex coefficients
        for cc in [c.copy(), c*(1. + 2.j)]:
            bp = BPoly(cc, x)
            xp = np.linspace(x[0], x[-1], 21)
            for i in range(k):
                assert_allclose(bp(xp, i), bp.derivative(i)(xp))

    def test_antiderivative_simple(self):
        """Antiderivative matches the closed form, continuous at the break."""
        # f(x) = x        for x \in [0, 1),
        #        (x-1)/2  for x \in [1, 3]
        #
        # antiderivative is then
        # F(x) = x**2 / 2            for x \in [0, 1),
        #        0.5*x*(x/2 - 1) + A for x \in [1, 3]
        # where A = 3/4 for continuity at x = 1.
        x = [0, 1, 3]
        c = [[0, 0], [1, 1]]

        bp = BPoly(c, x)
        bi = bp.antiderivative()

        xx = np.linspace(0, 3, 11)
        assert_allclose(bi(xx),
                        np.where(xx < 1, xx**2 / 2.,
                                 0.5 * xx * (xx/2. - 1) + 3./4),
                        atol=1e-12, rtol=1e-12)

    def test_der_antider(self):
        """derivative() inverts antiderivative()."""
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10, 2, 3))
        bp = BPoly(c, x)

        xx = np.linspace(x[0], x[-1], 100)
        assert_allclose(bp.antiderivative().derivative()(xx),
                        bp(xx), atol=1e-12, rtol=1e-12)

    def test_antider_ppoly(self):
        """Second antiderivative consistent with the power basis."""
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10, 2, 3))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)

        xx = np.linspace(x[0], x[-1], 10)

        assert_allclose(bp.antiderivative(2)(xx),
                        pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)

    def test_antider_continuous(self):
        """Antiderivative is continuous across interior breakpoints."""
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10))
        bp = BPoly(c, x).antiderivative()

        xx = bp.x[1:-1]
        assert_allclose(bp(xx - 1e-14),
                        bp(xx + 1e-14), atol=1e-12, rtol=1e-12)

    def test_integrate(self):
        """integrate() agrees with the power-basis integral."""
        np.random.seed(1234)
        x = np.sort(np.random.random(11))
        c = np.random.random((4, 10))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)
        assert_allclose(bp.integrate(0, 1),
                        pp.integrate(0, 1), atol=1e-12, rtol=1e-12)

    def test_integrate_extrap(self):
        """The `extrapolate` argument of integrate() overrides the attribute."""
        c = [[1]]
        x = [0, 1]
        b = BPoly(c, x)

        # default is extrapolate=True
        assert_allclose(b.integrate(0, 2), 2., atol=1e-14)

        # .integrate argument overrides self.extrapolate
        b1 = BPoly(c, x, extrapolate=False)
        assert_(np.isnan(b1.integrate(0, 2)))
        assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)

    def test_integrate_periodic(self):
        """Periodic integration: whole periods, partial windows, reversed limits."""
        x = np.array([1, 2, 4])
        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])

        P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
        I = P.antiderivative()

        period_int = I(4) - I(1)

        assert_allclose(P.integrate(1, 4), period_int)
        assert_allclose(P.integrate(-10, -7), period_int)
        assert_allclose(P.integrate(-10, -4), 2 * period_int)

        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
                        I(2) - I(1) + I(4) - I(3.5))
        assert_allclose(P.integrate(3.5, 5 + 12),
                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)

        # reversed limits flip the sign
        assert_allclose(P.integrate(0, -1), I(2) - I(3))
        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)

    def test_antider_neg(self):
        """Negative orders: .derivative(-nu) == .antiderivative(nu) and vice versa."""
        c = [[1]]
        x = [0, 1]
        b = BPoly(c, x)

        xx = np.linspace(0, 1, 21)

        assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
                        atol=1e-12, rtol=1e-12)
        assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
                        atol=1e-12, rtol=1e-12)
+
+
class TestPolyConversions:
    """Round-trips between the power (PPoly) and Bernstein (BPoly) bases."""

    def test_bp_from_pp(self):
        """Power -> Bernstein -> power round-trip agrees pointwise."""
        power = PPoly([[3, 2], [1, 8], [4, 3]], [0, 1, 3])
        bern = BPoly.from_power_basis(power)
        back = PPoly.from_bernstein_basis(bern)

        probes = [0.1, 1.4]
        assert_allclose(power(probes), bern(probes))
        assert_allclose(power(probes), back(probes))

    def test_bp_from_pp_random(self):
        """Round-trip on random coefficients over several intervals."""
        np.random.seed(1234)
        n_knots, order = 5, 8
        knots = np.sort(np.random.random(n_knots))
        coeffs = np.random.random((order, n_knots - 1))

        power = PPoly(coeffs, knots)
        bern = BPoly.from_power_basis(power)
        back = PPoly.from_bernstein_basis(bern)

        probes = np.linspace(knots[0], knots[-1], 21)
        assert_allclose(power(probes), bern(probes))
        assert_allclose(power(probes), back(probes))

    def test_pp_from_bp(self):
        """Bernstein -> power -> Bernstein round-trip agrees pointwise."""
        bern = BPoly([[3, 3], [1, 1], [4, 2]], [0, 1, 3])
        power = PPoly.from_bernstein_basis(bern)
        back = BPoly.from_power_basis(power)

        probes = [0.1, 1.4]
        assert_allclose(bern(probes), power(probes))
        assert_allclose(bern(probes), back(probes))

    def test_broken_conversions(self):
        """gh-10597: converting from the wrong basis class raises TypeError."""
        knots = [0, 1, 3]
        coeffs = [[3, 3], [1, 1], [4, 2]]

        with assert_raises(TypeError):
            PPoly.from_bernstein_basis(PPoly(coeffs, knots))

        with assert_raises(TypeError):
            BPoly.from_power_basis(BPoly(coeffs, knots))
+
+
+class TestBPolyFromDerivatives:
+ def test_make_poly_1(self):
+ c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
+ assert_allclose(c1, [2., 3.])
+
+ def test_make_poly_2(self):
+ c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
+ assert_allclose(c1, [1., 1., 1.])
+
+ # f'(0) = 3
+ c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
+ assert_allclose(c2, [2., 7./2, 1.])
+
+ # f'(1) = 3
+ c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
+ assert_allclose(c3, [2., -0.5, 1.])
+
+ def test_make_poly_3(self):
+ # f'(0)=2, f''(0)=3
+ c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
+ assert_allclose(c1, [1., 5./3, 17./6, 4.])
+
+ # f'(1)=2, f''(1)=3
+ c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
+ assert_allclose(c2, [1., 19./6, 10./3, 4.])
+
+ # f'(0)=2, f'(1)=3
+ c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
+ assert_allclose(c3, [1., 5./3, 3., 4.])
+
+ def test_make_poly_12(self):
+ np.random.seed(12345)
+ ya = np.r_[0, np.random.random(5)]
+ yb = np.r_[0, np.random.random(5)]
+
+ c = BPoly._construct_from_derivatives(0, 1, ya, yb)
+ pp = BPoly(c[:, None], [0, 1])
+ for j in range(6):
+ assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
+ pp = pp.derivative()
+
+ def test_raise_degree(self):
+ np.random.seed(12345)
+ x = [0, 1]
+ k, d = 8, 5
+ c = np.random.random((k, 1, 2, 3, 4))
+ bp = BPoly(c, x)
+
+ c1 = BPoly._raise_degree(c, d)
+ bp1 = BPoly(c1, x)
+
+ xp = np.linspace(0, 1, 11)
+ assert_allclose(bp(xp), bp1(xp))
+
+ def test_xi_yi(self):
+ assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
+
+ def test_coords_order(self):
+ xi = [0, 0, 1]
+ yi = [[0], [0], [0]]
+ assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
+
+ def test_zeros(self):
+ xi = [0, 1, 2, 3]
+ yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree
+ pp = BPoly.from_derivatives(xi, yi)
+ assert_(pp.c.shape == (4, 3))
+
+ ppd = pp.derivative()
+ for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
+ assert_allclose([pp(xp), ppd(xp)], [0., 0.])
+
+ def _make_random_mk(self, m, k):
+ # k derivatives at each breakpoint
+ np.random.seed(1234)
+ xi = np.asarray([1. * j**2 for j in range(m+1)])
+ yi = [np.random.random(k) for j in range(m+1)]
+ return xi, yi
+
+ def test_random_12(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+ pp = BPoly.from_derivatives(xi, yi)
+
+ for order in range(k//2):
+ assert_allclose(pp(xi), [yy[order] for yy in yi])
+ pp = pp.derivative()
+
+ def test_order_zero(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+ assert_raises(ValueError, BPoly.from_derivatives,
+ **dict(xi=xi, yi=yi, orders=0))
+
+ def test_orders_too_high(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+
+ BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok
+ assert_raises(ValueError, BPoly.from_derivatives, # but this is not
+ **dict(xi=xi, yi=yi, orders=2*k))
+
+ def test_orders_global(self):
+ m, k = 5, 12
+ xi, yi = self._make_random_mk(m, k)
+
+ # ok, this is confusing. Local polynomials will be of the order 5
+ # which means that up to the 2nd derivatives will be used at each point
+ order = 5
+ pp = BPoly.from_derivatives(xi, yi, orders=order)
+
+ for j in range(order//2+1):
+ assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+ # now repeat with `order` being even: on each interval, it uses
+ # order//2 'derivatives' @ the right-hand endpoint and
+ # order//2+1 @ 'derivatives' the left-hand endpoint
+ order = 6
+ pp = BPoly.from_derivatives(xi, yi, orders=order)
+ for j in range(order//2):
+ assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+ def test_orders_local(self):
+ m, k = 7, 12
+ xi, yi = self._make_random_mk(m, k)
+
+ orders = [o + 1 for o in range(m)]
+ for i, x in enumerate(xi[1:-1]):
+ pp = BPoly.from_derivatives(xi, yi, orders=orders)
+ for j in range(orders[i] // 2 + 1):
+ assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
+ pp = pp.derivative()
+ assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
+
+ def test_yi_trailing_dims(self):
+ m, k = 7, 5
+ xi = np.sort(np.random.random(m+1))
+ yi = np.random.random((m+1, k, 6, 7, 8))
+ pp = BPoly.from_derivatives(xi, yi)
+ assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
+
+ def test_gh_5430(self):
+ # At least one of these raises an error unless gh-5430 is
+ # fixed. In py2k an int is implemented using a C long, so
+ # which one fails depends on your system. In py3k there is only
+ # one arbitrary precision integer type, so both should fail.
+ orders = np.int32(1)
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+ orders = np.int64(1)
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+ orders = 1
+ # This worked before; make sure it still works
+ p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+ assert_almost_equal(p(0), 0)
+ orders = 1
+
+
+class TestNdPPoly:
+ def test_simple_1d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5)
+ x = np.linspace(0, 1, 5+1)
+
+ xi = np.random.rand(200)
+
+ p = NdPPoly(c, (x,))
+ v1 = p((xi,))
+
+ v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
+ assert_allclose(v1, v2)
+
+ def test_simple_2d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7)
+ x = np.linspace(0, 1, 6+1)
+ y = np.linspace(0, 1, 7+1)**2
+
+ xi = np.random.rand(200)
+ yi = np.random.rand(200)
+
+ v1 = np.empty([len(xi), 1], dtype=c.dtype)
+ v1.fill(np.nan)
+ _ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
+ (x, y),
+ np.array([4, 5], dtype=np.intc),
+ np.c_[xi, yi],
+ np.array([0, 0], dtype=np.intc),
+ 1,
+ v1)
+ v1 = v1.ravel()
+ v2 = _ppoly2d_eval(c, (x, y), xi, yi)
+ assert_allclose(v1, v2)
+
+ p = NdPPoly(c, (x, y))
+ for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
+ v1 = p(np.c_[xi, yi], nu=nu)
+ v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
+ assert_allclose(v1, v2, err_msg=repr(nu))
+
+ def test_simple_3d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9)
+ x = np.linspace(0, 1, 7+1)
+ y = np.linspace(0, 1, 8+1)**2
+ z = np.linspace(0, 1, 9+1)**3
+
+ xi = np.random.rand(40)
+ yi = np.random.rand(40)
+ zi = np.random.rand(40)
+
+ p = NdPPoly(c, (x, y, z))
+
+ for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
+ (6, 0, 2)):
+ v1 = p((xi, yi, zi), nu=nu)
+ v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
+ assert_allclose(v1, v2, err_msg=repr(nu))
+
+ def test_simple_4d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
+ x = np.linspace(0, 1, 8+1)
+ y = np.linspace(0, 1, 9+1)**2
+ z = np.linspace(0, 1, 10+1)**3
+ u = np.linspace(0, 1, 11+1)**4
+
+ xi = np.random.rand(20)
+ yi = np.random.rand(20)
+ zi = np.random.rand(20)
+ ui = np.random.rand(20)
+
+ p = NdPPoly(c, (x, y, z, u))
+ v1 = p((xi, yi, zi, ui))
+
+ v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
+ assert_allclose(v1, v2)
+
+ def test_deriv_1d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5)
+ x = np.linspace(0, 1, 5+1)
+
+ p = NdPPoly(c, (x,))
+
+ # derivative
+ dp = p.derivative(nu=[1])
+ p1 = PPoly(c, x)
+ dp1 = p1.derivative()
+ assert_allclose(dp.c, dp1.c)
+
+ # antiderivative
+ dp = p.antiderivative(nu=[2])
+ p1 = PPoly(c, x)
+ dp1 = p1.antiderivative(2)
+ assert_allclose(dp.c, dp1.c)
+
+ def test_deriv_3d(self):
+ np.random.seed(1234)
+
+ c = np.random.rand(4, 5, 6, 7, 8, 9)
+ x = np.linspace(0, 1, 7+1)
+ y = np.linspace(0, 1, 8+1)**2
+ z = np.linspace(0, 1, 9+1)**3
+
+ p = NdPPoly(c, (x, y, z))
+
+ # differentiate vs x
+ p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
+ dp = p.derivative(nu=[2])
+ dp1 = p1.derivative(2)
+ assert_allclose(dp.c,
+ dp1.c.transpose(0, 2, 3, 1, 4, 5))
+
+ # antidifferentiate vs y
+ p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
+ dp = p.antiderivative(nu=[0, 1, 0])
+ dp1 = p1.antiderivative(1)
+ assert_allclose(dp.c,
+ dp1.c.transpose(2, 0, 3, 4, 1, 5))
+
+ # differentiate vs z
+ p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
+ dp = p.derivative(nu=[0, 0, 3])
+ dp1 = p1.derivative(3)
+ assert_allclose(dp.c,
+ dp1.c.transpose(2, 3, 0, 4, 5, 1))
+
+ def test_deriv_3d_simple(self):
+ # Integrate to obtain function x y**2 z**4 / (2! 4!)
+
+ c = np.ones((1, 1, 1, 3, 4, 5))
+ x = np.linspace(0, 1, 3+1)**1
+ y = np.linspace(0, 1, 4+1)**2
+ z = np.linspace(0, 1, 5+1)**3
+
+ p = NdPPoly(c, (x, y, z))
+ ip = p.antiderivative((1, 0, 4))
+ ip = ip.antiderivative((0, 2, 0))
+
+ xi = np.random.rand(20)
+ yi = np.random.rand(20)
+ zi = np.random.rand(20)
+
+ assert_allclose(ip((xi, yi, zi)),
+ xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
+
+ def test_integrate_2d(self):
+ np.random.seed(1234)
+ c = np.random.rand(4, 5, 16, 17)
+ x = np.linspace(0, 1, 16+1)**1
+ y = np.linspace(0, 1, 17+1)**2
+
+ # make continuously differentiable so that nquad() has an
+ # easier time
+ c = c.transpose(0, 2, 1, 3)
+ cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+ _ppoly.fix_continuity(cx, x, 2)
+ c = cx.reshape(c.shape)
+ c = c.transpose(0, 2, 1, 3)
+ c = c.transpose(1, 3, 0, 2)
+ cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+ _ppoly.fix_continuity(cx, y, 2)
+ c = cx.reshape(c.shape)
+ c = c.transpose(2, 0, 3, 1).copy()
+
+ # Check integration
+ p = NdPPoly(c, (x, y))
+
+ for ranges in [[(0, 1), (0, 1)],
+ [(0, 0.5), (0, 1)],
+ [(0, 1), (0, 0.5)],
+ [(0.3, 0.7), (0.6, 0.2)]]:
+
+ ig = p.integrate(ranges)
+ ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
+ opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
+ assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
+ err_msg=repr(ranges))
+
+ def test_integrate_1d(self):
+ np.random.seed(1234)
+ c = np.random.rand(4, 5, 6, 16, 17, 18)
+ x = np.linspace(0, 1, 16+1)**1
+ y = np.linspace(0, 1, 17+1)**2
+ z = np.linspace(0, 1, 18+1)**3
+
+ # Check 1-D integration
+ p = NdPPoly(c, (x, y, z))
+
+ u = np.random.rand(200)
+ v = np.random.rand(200)
+ a, b = 0.2, 0.7
+
+ px = p.integrate_1d(a, b, axis=0)
+ pax = p.antiderivative((1, 0, 0))
+ assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
+
+ py = p.integrate_1d(a, b, axis=1)
+ pay = p.antiderivative((0, 1, 0))
+ assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
+
+ pz = p.integrate_1d(a, b, axis=2)
+ paz = p.antiderivative((0, 0, 1))
+ assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
+
+
+def _ppoly_eval_1(c, x, xps):
+ """Evaluate piecewise polynomial manually"""
+ out = np.zeros((len(xps), c.shape[2]))
+ for i, xp in enumerate(xps):
+ if xp < 0 or xp > 1:
+ out[i,:] = np.nan
+ continue
+ j = np.searchsorted(x, xp) - 1
+ d = xp - x[j]
+ assert_(x[j] <= xp < x[j+1])
+ r = sum(c[k,j] * d**(c.shape[0]-k-1)
+ for k in range(c.shape[0]))
+ out[i,:] = r
+ return out
+
+
+def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
+ """Evaluate piecewise polynomial manually (another way)"""
+ a = breaks[0]
+ b = breaks[-1]
+ K = coeffs.shape[0]
+
+ saveshape = np.shape(xnew)
+ xnew = np.ravel(xnew)
+ res = np.empty_like(xnew)
+ mask = (xnew >= a) & (xnew <= b)
+ res[~mask] = fill
+ xx = xnew.compress(mask)
+ indxs = np.searchsorted(breaks, xx)-1
+ indxs = indxs.clip(0, len(breaks))
+ pp = coeffs
+ diff = xx - breaks.take(indxs)
+ V = np.vander(diff, N=K)
+ values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
+ res[mask] = values
+ res.shape = saveshape
+ return res
+
+
+def _dpow(x, y, n):
+ """
+ d^n (x**y) / dx^n
+ """
+ if n < 0:
+ raise ValueError("invalid derivative order")
+ elif n > y:
+ return 0
+ else:
+ return poch(y - n + 1, n) * x**(y - n)
+
+
+def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
+ """
+ Straightforward evaluation of 2-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ nx, ny = c.shape[:2]
+
+ for jout, (x, y) in enumerate(zip(xnew, ynew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+
+ val = 0
+
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ val += (c[nx-k1-1,ny-k2-1,j1,j2]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1]))
+
+ out[jout] = val
+
+ return out
+
+
+def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
+ """
+ Straightforward evaluation of 3-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ nx, ny, nz = c.shape[:3]
+
+ for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1]) and
+ (xs[2][0] <= z <= xs[2][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+ j3 = np.searchsorted(xs[2], z) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+ s3 = z - xs[2][j3]
+
+ val = 0
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ for k3 in range(c.shape[2]):
+ val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1])
+ * _dpow(s3, k3, nu[2]))
+
+ out[jout] = val
+
+ return out
+
+
+def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
+ """
+ Straightforward evaluation of 4-D piecewise polynomial
+ """
+ if nu is None:
+ nu = (0, 0, 0, 0)
+
+ out = np.empty((len(xnew),), dtype=c.dtype)
+
+ mx, my, mz, mu = c.shape[:4]
+
+ for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
+ if not ((xs[0][0] <= x <= xs[0][-1]) and
+ (xs[1][0] <= y <= xs[1][-1]) and
+ (xs[2][0] <= z <= xs[2][-1]) and
+ (xs[3][0] <= u <= xs[3][-1])):
+ out[jout] = np.nan
+ continue
+
+ j1 = np.searchsorted(xs[0], x) - 1
+ j2 = np.searchsorted(xs[1], y) - 1
+ j3 = np.searchsorted(xs[2], z) - 1
+ j4 = np.searchsorted(xs[3], u) - 1
+
+ s1 = x - xs[0][j1]
+ s2 = y - xs[1][j2]
+ s3 = z - xs[2][j3]
+ s4 = u - xs[3][j4]
+
+ val = 0
+ for k1 in range(c.shape[0]):
+ for k2 in range(c.shape[1]):
+ for k3 in range(c.shape[2]):
+ for k4 in range(c.shape[3]):
+ val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
+ * _dpow(s1, k1, nu[0])
+ * _dpow(s2, k2, nu[1])
+ * _dpow(s3, k3, nu[2])
+ * _dpow(s4, k4, nu[3]))
+
+ out[jout] = val
+
+ return out
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3e1ed8968ae5fd55e2f55d8a24c0ee2fa4ade45
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_ndgriddata.py
@@ -0,0 +1,284 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (griddata, NearestNDInterpolator,
+ LinearNDInterpolator,
+ CloughTocher2DInterpolator)
+
+
+parametrize_interpolators = pytest.mark.parametrize(
+ "interpolator", [NearestNDInterpolator, LinearNDInterpolator,
+ CloughTocher2DInterpolator]
+)
+
+class TestGriddata:
+ def test_fill_value(self):
+ x = [(0,0), (0,1), (1,0)]
+ y = [1, 2, 3]
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
+ assert_array_equal(yi, [-1., -1, 1])
+
+ yi = griddata(x, y, [(1,1), (1,2), (0,0)])
+ assert_array_equal(yi, [np.nan, np.nan, 1])
+
+ def test_alternative_call(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
+ rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multivalue_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ + np.array([0,1])[None,:])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, x, method=method, rescale=rescale)
+ assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+ def test_multipoint_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_complex_2d(self):
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 2j*y[::-1]
+
+ xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+ for method in ('nearest', 'linear', 'cubic'):
+ for rescale in (True, False):
+ msg = repr((method, rescale))
+ yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+ assert_equal(yi.shape, (5, 3), err_msg=msg)
+ assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+ atol=1e-14, err_msg=msg)
+
+ def test_1d(self):
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-14)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-14)
+
+ def test_1d_borders(self):
+ # Test for nearest neighbor case with xi outside
+ # the range of the values.
+ x = np.array([1, 2.5, 3, 4.5, 5, 6])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+ xi = np.array([0.9, 6.5])
+ yi_should = np.array([1.0, 1.0])
+
+ method = 'nearest'
+ assert_allclose(griddata(x, y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata(x.reshape(6, 1), y, xi,
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+ assert_allclose(griddata((x, ), y, (xi, ),
+ method=method), yi_should,
+ err_msg=method,
+ atol=1e-14)
+
+ def test_1d_unsorted(self):
+ x = np.array([2.5, 1, 4.5, 5, 6, 3])
+ y = np.array([1, 2, 0, 3.9, 2, 1])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ assert_allclose(griddata(x, y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+ err_msg=method, atol=1e-10)
+ assert_allclose(griddata((x,), y, (x,), method=method), y,
+ err_msg=method, atol=1e-10)
+
+ def test_square_rescale_manual(self):
+ points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.float64)
+ points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)],
+ dtype=np.float64)
+ values = np.array([1., 2., -3., 5., 9.], dtype=np.float64)
+
+ xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+ np.linspace(0, 100, 14)[None,:])
+ xx = xx.ravel()
+ yy = yy.ravel()
+ xi = np.array([xx, yy]).T.copy()
+
+ for method in ('nearest', 'linear', 'cubic'):
+ msg = method
+ zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
+ method=method)
+ zi_rescaled = griddata(points, values, xi, method=method,
+ rescale=True)
+ assert_allclose(zi, zi_rescaled, err_msg=msg,
+ atol=1e-12)
+
+ def test_xi_1d(self):
+ # Check that 1-D xi is interpreted as a coordinate
+ x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+ dtype=np.float64)
+ y = np.arange(x.shape[0], dtype=np.float64)
+ y = y - 2j*y[::-1]
+
+ xi = np.array([0.5, 0.5])
+
+ for method in ('nearest', 'linear', 'cubic'):
+ p1 = griddata(x, y, xi, method=method)
+ p2 = griddata(x, y, xi[None,:], method=method)
+ assert_allclose(p1, p2, err_msg=method)
+
+ xi1 = np.array([0.5])
+ xi3 = np.array([0.5, 0.5, 0.5])
+ assert_raises(ValueError, griddata, x, y, xi1,
+ method=method)
+ assert_raises(ValueError, griddata, x, y, xi3,
+ method=method)
+
+
+class TestNearestNDInterpolator:
+ def test_nearest_options(self):
+ # smoke test that NearestNDInterpolator accept cKDTree options
+ npts, nd = 4, 3
+ x = np.arange(npts*nd).reshape((npts, nd))
+ y = np.arange(npts)
+ nndi = NearestNDInterpolator(x, y)
+
+ opts = {'balanced_tree': False, 'compact_nodes': False}
+ nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
+ assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
+
+ def test_nearest_list_argument(self):
+ nd = np.array([[0, 0, 0, 0, 1, 0, 1],
+ [0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 2]])
+ d = nd[:, 3:]
+
+ # z is np.array
+ NI = NearestNDInterpolator((d[0], d[1]), d[2])
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+ # z is list
+ NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
+ assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+ def test_nearest_query_options(self):
+ nd = np.array([[0, 0.5, 0, 1],
+ [0, 0, 0.5, 1],
+ [0, 1, 1, 2]])
+ delta = 0.1
+ query_points = [0 + delta, 1 + delta], [0 + delta, 1 + delta]
+
+ # case 1 - query max_dist is smaller than
+ # the query points' nearest distance to nd.
+ NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
+ assert_array_equal(NI(query_points, distance_upper_bound=distance_upper_bound),
+ [np.nan, np.nan])
+
+ # case 2 - query p is inf, will return [0, 2]
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
+ p = np.inf
+ assert_array_equal(
+ NI(query_points, distance_upper_bound=distance_upper_bound, p=p),
+ [0, 2]
+ )
+
+ # case 3 - query max_dist is larger, so should return non np.nan
+ distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) + 1e-7
+ assert_array_equal(
+ NI(query_points, distance_upper_bound=distance_upper_bound),
+ [0, 2]
+ )
+
+ def test_nearest_query_valid_inputs(self):
+ nd = np.array([[0, 1, 0, 1],
+ [0, 0, 1, 1],
+ [0, 1, 1, 2]])
+ NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
+ with assert_raises(TypeError):
+ NI([0.5, 0.5], query_options="not a dictionary")
+
+
+class TestNDInterpolators:
+ @parametrize_interpolators
+ def test_broadcastable_input(self, interpolator):
+ # input data
+ np.random.seed(0)
+ x = np.random.random(10)
+ y = np.random.random(10)
+ z = np.hypot(x, y)
+
+ # x-y grid for interpolation
+ X = np.linspace(min(x), max(x))
+ Y = np.linspace(min(y), max(y))
+ X, Y = np.meshgrid(X, Y)
+ XY = np.vstack((X.ravel(), Y.ravel())).T
+ interp = interpolator(list(zip(x, y)), z)
+ # single array input
+ interp_points0 = interp(XY)
+ # tuple input
+ interp_points1 = interp((X, Y))
+ interp_points2 = interp((X, 0.0))
+ # broadcastable input
+ interp_points3 = interp(X, Y)
+ interp_points4 = interp(X, 0.0)
+
+ assert_equal(interp_points0.size ==
+ interp_points1.size ==
+ interp_points2.size ==
+ interp_points3.size ==
+ interp_points4.size, True)
+
+ @parametrize_interpolators
+ def test_read_only(self, interpolator):
+ # input data
+ np.random.seed(0)
+ xy = np.random.random((10, 2))
+ x, y = xy[:, 0], xy[:, 1]
+ z = np.hypot(x, y)
+
+ # interpolation points
+ XY = np.random.random((50, 2))
+
+ xy.setflags(write=False)
+ z.setflags(write=False)
+ XY.setflags(write=False)
+
+ interp = interpolator(xy, z)
+ interp(XY)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py
new file mode 100644
index 0000000000000000000000000000000000000000..f58e01e5e730d2e5c4630f41933a0b77a17efc77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_pade.py
@@ -0,0 +1,104 @@
+from numpy.testing import (assert_array_equal, assert_array_almost_equal)
+from scipy.interpolate import pade
+
+def test_pade_trivial():
+ nump, denomp = pade([1.0], 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+ nump, denomp = pade([1.0], 0, 0)
+ assert_array_equal(nump.c, [1.0])
+ assert_array_equal(denomp.c, [1.0])
+
+
+def test_pade_4term_exp():
+ # First four Taylor coefficients of exp(x).
+ # Unlike poly1d, the first array element is the zero-order term.
+ an = [1.0, 1.0, 0.5, 1.0/6]
+
+ nump, denomp = pade(an, 0)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+ # Testing inclusion of optional parameter
+ nump, denomp = pade(an, 0, 3)
+ assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+ nump, denomp = pade(an, 2, 1)
+ assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+ nump, denomp = pade(an, 3, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+ # Testing reducing array.
+ nump, denomp = pade(an, 0, 2)
+ assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
+ assert_array_almost_equal(denomp.c, [1.0])
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [1.0/2, 1.0])
+ assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])
+
+ nump, denomp = pade(an, 2, 0)
+ assert_array_almost_equal(nump.c, [1.0])
+ assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])
+
+
+def test_pade_ints():
+ # Simple test sequences (one of ints, one of floats).
+ an_int = [1, 2, 3, 4]
+ an_flt = [1.0, 2.0, 3.0, 4.0]
+
+ # Make sure integer arrays give the same result as float arrays with same values.
+ for i in range(0, len(an_int)):
+ for j in range(0, len(an_int) - i):
+
+ # Create float and int pade approximation for given order.
+ nump_int, denomp_int = pade(an_int, i, j)
+ nump_flt, denomp_flt = pade(an_flt, i, j)
+
+ # Check that they are the same.
+ assert_array_equal(nump_int.c, nump_flt.c)
+ assert_array_equal(denomp_int.c, denomp_flt.c)
+
+
+def test_pade_complex():
+ # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
+ # Variable x is parameter - these tests will work with any complex number.
+ x = 0.2 + 0.6j
+ an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
+ -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]
+
+ nump, denomp = pade(an, 1, 1)
+ assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 1, 2)
+ assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
+ assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])
+
+ nump, denomp = pade(an, 2, 2)
+ assert_array_almost_equal(
+ nump.c,
+ [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0]
+ )
+ assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py
new file mode 100644
index 0000000000000000000000000000000000000000..31215b1e986a9fc57a3b0f18823bcc6ffd9e81f9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_polyint.py
@@ -0,0 +1,941 @@
+import warnings
+import io
+import numpy as np
+
+from numpy.testing import (
+ assert_almost_equal, assert_array_equal, assert_array_almost_equal,
+ assert_allclose, assert_equal, assert_)
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (
+ KroghInterpolator, krogh_interpolate,
+ BarycentricInterpolator, barycentric_interpolate,
+ approximate_taylor_polynomial, CubicHermiteSpline, pchip,
+ PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
+ make_interp_spline)
+
+
+def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
+ extra_args={}):
+ np.random.seed(1234)
+
+ x = [-1, 0, 1, 2, 3, 4]
+ s = list(range(1, len(y_shape)+1))
+ s.insert(axis % (len(y_shape)+1), 0)
+ y = np.random.rand(*((6,) + y_shape)).transpose(s)
+
+ xi = np.zeros(x_shape)
+ if interpolator_cls is CubicHermiteSpline:
+ dydx = np.random.rand(*((6,) + y_shape)).transpose(s)
+ yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
+ else:
+ yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
+
+ target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ + x_shape + y.shape[axis:][1:])
+ assert_equal(yi.shape, target_shape)
+
+ # check it works also with lists
+ if x_shape and y.size > 0:
+ if interpolator_cls is CubicHermiteSpline:
+ interpolator_cls(list(x), list(y), list(dydx), axis=axis,
+ **extra_args)(list(xi))
+ else:
+ interpolator_cls(list(x), list(y), axis=axis,
+ **extra_args)(list(xi))
+
+ # check also values
+ if xi.size > 0 and deriv_shape is None:
+ bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
+ yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
+ yv = yv.reshape(bs_shape)
+
+ yi, y = np.broadcast_arrays(yi, yv)
+ assert_allclose(yi, y)
+
+
+SHAPES = [(), (0,), (1,), (6, 2, 5)]
+
+
+def test_shapes():
+
+ def spl_interp(x, y, axis):
+ return make_interp_spline(x, y, axis=axis)
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
+ pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ if ip != CubicSpline:
+ check_shape(ip, s1, s2, None, axis)
+ else:
+ for bc in ['natural', 'clamped']:
+ extra = {'bc_type': bc}
+ check_shape(ip, s1, s2, None, axis, extra)
+
+def test_derivs_shapes():
+ for ip in [KroghInterpolator, BarycentricInterpolator]:
+ def interpolator_derivs(x, y, axis=0):
+ return ip(x, y, axis).derivatives
+
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(interpolator_derivs, s1, s2, (6,), axis)
+
+
+def test_deriv_shapes():
+ def krogh_deriv(x, y, axis=0):
+ return KroghInterpolator(x, y, axis).derivative
+
+ def bary_deriv(x, y, axis=0):
+ return BarycentricInterpolator(x, y, axis).derivative
+
+ def pchip_deriv(x, y, axis=0):
+ return pchip(x, y, axis).derivative()
+
+ def pchip_deriv2(x, y, axis=0):
+ return pchip(x, y, axis).derivative(2)
+
+ def pchip_antideriv(x, y, axis=0):
+ return pchip(x, y, axis).antiderivative()
+
+ def pchip_antideriv2(x, y, axis=0):
+ return pchip(x, y, axis).antiderivative(2)
+
+ def pchip_deriv_inplace(x, y, axis=0):
+ class P(PchipInterpolator):
+ def __call__(self, x):
+ return PchipInterpolator.__call__(self, x, 1)
+ pass
+ return P(x, y, axis)
+
+ def akima_deriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).derivative()
+
+ def akima_antideriv(x, y, axis=0):
+ return Akima1DInterpolator(x, y, axis).antiderivative()
+
+ def cspline_deriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).derivative()
+
+ def cspline_antideriv(x, y, axis=0):
+ return CubicSpline(x, y, axis).antiderivative()
+
+ def bspl_deriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).derivative()
+
+ def bspl_antideriv(x, y, axis=0):
+ return make_interp_spline(x, y, axis=axis).antiderivative()
+
+ for ip in [krogh_deriv, bary_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
+ pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
+ cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
+ for s1 in SHAPES:
+ for s2 in SHAPES:
+ for axis in range(-len(s2), len(s2)):
+ check_shape(ip, s1, s2, (), axis)
+
+
+def test_complex():
+ x = [1, 2, 3, 4]
+ y = [1, 2, 1j, 3]
+
+ for ip in [KroghInterpolator, BarycentricInterpolator, CubicSpline]:
+ p = ip(x, y)
+ assert_allclose(y, p(x))
+
+ dydx = [0, -1j, 2, 3j]
+ p = CubicHermiteSpline(x, y, dydx)
+ assert_allclose(y, p(x))
+ assert_allclose(dydx, p(x, 1))
+
+
+class TestKrogh:
+ def setup_method(self):
+ self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
+ self.test_xs = np.linspace(-1,1,100)
+ self.xs = np.linspace(-1,1,5)
+ self.ys = self.true_poly(self.xs)
+
+ def test_lagrange(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_scalar(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(7),P(7))
+ assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
+
+ def test_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_low_derivatives(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ D = P.derivatives(self.test_xs,len(self.xs)+2)
+ for i in range(D.shape[0]):
+ assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+ D[i])
+
+ def test_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ m = 10
+ r = P.derivatives(self.test_xs,m)
+ for i in range(m):
+ assert_almost_equal(P.derivative(self.test_xs,i),r[i])
+
+ def test_high_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ for i in range(len(self.xs), 2*len(self.xs)):
+ assert_almost_equal(P.derivative(self.test_xs,i),
+ np.zeros(len(self.test_xs)))
+
+ def test_ndim_derivatives(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = KroghInterpolator(self.xs, ys, axis=0)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(D[i],
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1))
+
+ def test_ndim_derivative(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = KroghInterpolator(self.xs, ys, axis=0)
+ for i in range(P.n):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1))
+
+ def test_hermite(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+ def test_vector(self):
+ xs = [0, 1, 2]
+ ys = np.array([[0,1],[1,0],[2,1]])
+ P = KroghInterpolator(xs,ys)
+ Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
+ test_xs = np.linspace(-1,3,100)
+ assert_almost_equal(P(test_xs),
+ np.asarray([p(test_xs) for p in Pi]).T)
+ assert_almost_equal(P.derivatives(test_xs),
+ np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
+ (1,2,0)))
+
+ def test_empty(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(P([]), [])
+
+ def test_shapes_scalarvalue(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ assert_array_equal(np.shape(P(0)), ())
+ assert_array_equal(np.shape(P(np.array(0))), ())
+ assert_array_equal(np.shape(P([0])), (1,))
+ assert_array_equal(np.shape(P([0,1])), (2,))
+
+ def test_shapes_scalarvalue_derivative(self):
+ P = KroghInterpolator(self.xs,self.ys)
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,))
+ assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+ def test_shapes_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ assert_array_equal(np.shape(P(0)), (3,))
+ assert_array_equal(np.shape(P([0])), (1,3))
+ assert_array_equal(np.shape(P([0,1])), (2,3))
+
+ def test_shapes_1d_vectorvalue(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
+ assert_array_equal(np.shape(P(0)), (1,))
+ assert_array_equal(np.shape(P([0])), (1,1))
+ assert_array_equal(np.shape(P([0,1])), (2,1))
+
+ def test_shapes_vectorvalue_derivative(self):
+ P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+ def test_wrapper(self):
+ P = KroghInterpolator(self.xs, self.ys)
+ ki = krogh_interpolate
+ assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
+ assert_almost_equal(P.derivative(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=2))
+ assert_almost_equal(P.derivatives(self.test_xs, 2),
+ ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+ def test_int_inputs(self):
+ # Check input args are cast correctly to floats, gh-3669
+ x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
+ 13104, 60000]
+ offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
+ -0.48002351, -0.34925329, -0.26503107,
+ -0.13148093, -0.12988833, -0.12979296,
+ -0.12973574, -0.08582937, 0.05])
+ f = KroghInterpolator(x, offset_cdf)
+
+ assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
+ 0, atol=1e-10)
+
+ def test_derivatives_complex(self):
+ # regression test for gh-7381: krogh.derivatives(0) fails complex y
+ x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
+ func = KroghInterpolator(x, y)
+ cmplx = func.derivatives(0)
+
+ cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
+ 1j*KroghInterpolator(x, y.imag).derivatives(0))
+ assert_allclose(cmplx, cmplx2, atol=1e-15)
+
+ def test_high_degree_warning(self):
+ with pytest.warns(UserWarning, match="40 degrees provided,"):
+ KroghInterpolator(np.arange(40), np.ones(40))
+
+
+class TestTaylor:
+ def test_exponential(self):
+ degree = 5
+ p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
+ for i in range(degree+1):
+ assert_almost_equal(p(0),1)
+ p = p.deriv()
+ assert_almost_equal(p(0),0)
+
+
+class TestBarycentric:
+ def setup_method(self):
+ self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
+ self.test_xs = np.linspace(-1, 1, 100)
+ self.xs = np.linspace(-1, 1, 5)
+ self.ys = self.true_poly(self.xs)
+
+ def test_lagrange(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_allclose(P(self.test_xs), self.true_poly(self.test_xs))
+
+ def test_scalar(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_allclose(P(7), self.true_poly(7))
+ assert_allclose(P(np.array(7)), self.true_poly(np.array(7)))
+
+ def test_derivatives(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(self.true_poly.deriv(i)(self.test_xs), D[i])
+
+ def test_low_derivatives(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ D = P.derivatives(self.test_xs, len(self.xs)+2)
+ for i in range(D.shape[0]):
+ assert_allclose(self.true_poly.deriv(i)(self.test_xs),
+ D[i],
+ atol=1e-12)
+
+ def test_derivative(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ m = 10
+ r = P.derivatives(self.test_xs, m)
+ for i in range(m):
+ assert_allclose(P.derivative(self.test_xs, i), r[i])
+
+ def test_high_derivative(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ for i in range(len(self.xs), 5*len(self.xs)):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.zeros(len(self.test_xs)))
+
+ def test_ndim_derivatives(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = BarycentricInterpolator(self.xs, ys, axis=0)
+ D = P.derivatives(self.test_xs)
+ for i in range(D.shape[0]):
+ assert_allclose(D[i],
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1),
+ atol=1e-12)
+
+ def test_ndim_derivative(self):
+ poly1 = self.true_poly
+ poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
+ poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
+ ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
+
+ P = BarycentricInterpolator(self.xs, ys, axis=0)
+ for i in range(P.n):
+ assert_allclose(P.derivative(self.test_xs, i),
+ np.stack((poly1.deriv(i)(self.test_xs),
+ poly2.deriv(i)(self.test_xs),
+ poly3.deriv(i)(self.test_xs)),
+ axis=-1),
+ atol=1e-12)
+
+ def test_delayed(self):
+ P = BarycentricInterpolator(self.xs)
+ P.set_yi(self.ys)
+ assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+ def test_append(self):
+ P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
+ P.add_xi(self.xs[3:], self.ys[3:])
+ assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+ def test_vector(self):
+ xs = [0, 1, 2]
+ ys = np.array([[0, 1], [1, 0], [2, 1]])
+ BI = BarycentricInterpolator
+ P = BI(xs, ys)
+ Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
+ test_xs = np.linspace(-1, 3, 100)
+ assert_almost_equal(P(test_xs),
+ np.asarray([p(test_xs) for p in Pi]).T)
+
+ def test_shapes_scalarvalue(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ assert_array_equal(np.shape(P(0)), ())
+ assert_array_equal(np.shape(P(np.array(0))), ())
+ assert_array_equal(np.shape(P([0])), (1,))
+ assert_array_equal(np.shape(P([0, 1])), (2,))
+
+ def test_shapes_scalarvalue_derivative(self):
+ P = BarycentricInterpolator(self.xs,self.ys)
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,))
+ assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+ def test_shapes_vectorvalue(self):
+ P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
+ assert_array_equal(np.shape(P(0)), (3,))
+ assert_array_equal(np.shape(P([0])), (1, 3))
+ assert_array_equal(np.shape(P([0, 1])), (2, 3))
+
+ def test_shapes_1d_vectorvalue(self):
+ P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
+ assert_array_equal(np.shape(P(0)), (1,))
+ assert_array_equal(np.shape(P([0])), (1, 1))
+ assert_array_equal(np.shape(P([0,1])), (2, 1))
+
+ def test_shapes_vectorvalue_derivative(self):
+ P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+ n = P.n
+ assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+ assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+ assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+ def test_wrapper(self):
+ P = BarycentricInterpolator(self.xs, self.ys)
+ bi = barycentric_interpolate
+ assert_allclose(P(self.test_xs), bi(self.xs, self.ys, self.test_xs))
+ assert_allclose(P.derivative(self.test_xs, 2),
+ bi(self.xs, self.ys, self.test_xs, der=2))
+ assert_allclose(P.derivatives(self.test_xs, 2),
+ bi(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+ def test_int_input(self):
+ x = 1000 * np.arange(1, 11) # np.prod(x[-1] - x[:-1]) overflows
+ y = np.arange(1, 11)
+ value = barycentric_interpolate(x, y, 1000 * 9.5)
+ assert_almost_equal(value, 9.5)
+
+ def test_large_chebyshev(self):
+ # The weights for Chebyshev points of the second kind have analytically
+ # solvable weights. Naive calculation of barycentric weights will fail
+ # for large N because of numerical underflow and overflow. We test
+ # correctness for large N against analytical Chebyshev weights.
+
+ # Without capacity scaling or permutation, n=800 fails,
+ # With just capacity scaling, n=1097 fails
+ # With both capacity scaling and random permutation, n=30000 succeeds
+ n = 1100
+ j = np.arange(n + 1).astype(np.float64)
+ x = np.cos(j * np.pi / n)
+
+ # See page 506 of Berrut and Trefethen 2004 for this formula
+ w = (-1) ** j
+ w[0] *= 0.5
+ w[-1] *= 0.5
+
+ P = BarycentricInterpolator(x)
+
+ # It's okay to have a constant scaling factor in the weights because it
+ # cancels out in the evaluation of the polynomial.
+ factor = P.wi[0]
+ assert_almost_equal(P.wi / (2 * factor), w)
+
+ def test_warning(self):
+ # Test if the divide-by-zero warning is properly ignored when computing
+ # interpolated values equals to interpolation points
+ P = BarycentricInterpolator([0, 1], [1, 2])
+ with np.errstate(divide='raise'):
+ yi = P(P.xi)
+
+ # Check if the interpolated values match the input values
+ # at the nodes
+ assert_almost_equal(yi, P.yi.ravel())
+
+ def test_repeated_node(self):
+ # check that a repeated node raises a ValueError
+ # (computing the weights requires division by xi[i] - xi[j])
+ xis = np.array([0.1, 0.5, 0.9, 0.5])
+ ys = np.array([1, 2, 3, 4])
+ with pytest.raises(ValueError,
+ match="Interpolation points xi must be distinct."):
+ BarycentricInterpolator(xis, ys)
+
+
+class TestPCHIP:
+ def _make_random(self, npts=20):
+ np.random.seed(1234)
+ xi = np.sort(np.random.random(npts))
+ yi = np.random.random(npts)
+ return pchip(xi, yi), xi, yi
+
+ def test_overshoot(self):
+ # PCHIP should not overshoot
+ p, xi, yi = self._make_random()
+ for i in range(len(xi)-1):
+ x1, x2 = xi[i], xi[i+1]
+ y1, y2 = yi[i], yi[i+1]
+ if y1 > y2:
+ y1, y2 = y2, y1
+ xp = np.linspace(x1, x2, 10)
+ yp = p(xp)
+ assert_(((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all())
+
+ def test_monotone(self):
+ # PCHIP should preserve monotonicty
+ p, xi, yi = self._make_random()
+ for i in range(len(xi)-1):
+ x1, x2 = xi[i], xi[i+1]
+ y1, y2 = yi[i], yi[i+1]
+ xp = np.linspace(x1, x2, 10)
+ yp = p(xp)
+ assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())
+
+ def test_cast(self):
+ # regression test for integer input data, see gh-3453
+ data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
+ [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
+ xx = np.arange(100)
+ curve = pchip(data[0], data[1])(xx)
+
+ data1 = data * 1.0
+ curve1 = pchip(data1[0], data1[1])(xx)
+
+ assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
+
+ def test_nag(self):
+ # Example from NAG C implementation,
+ # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
+ # suggested in gh-5326 as a smoke test for the way the derivatives
+ # are computed (see also gh-3453)
+ dataStr = '''
+ 7.99 0.00000E+0
+ 8.09 0.27643E-4
+ 8.19 0.43750E-1
+ 8.70 0.16918E+0
+ 9.20 0.46943E+0
+ 10.00 0.94374E+0
+ 12.00 0.99864E+0
+ 15.00 0.99992E+0
+ 20.00 0.99999E+0
+ '''
+ data = np.loadtxt(io.StringIO(dataStr))
+ pch = pchip(data[:,0], data[:,1])
+
+ resultStr = '''
+ 7.9900 0.0000
+ 9.1910 0.4640
+ 10.3920 0.9645
+ 11.5930 0.9965
+ 12.7940 0.9992
+ 13.9950 0.9998
+ 15.1960 0.9999
+ 16.3970 1.0000
+ 17.5980 1.0000
+ 18.7990 1.0000
+ 20.0000 1.0000
+ '''
+ result = np.loadtxt(io.StringIO(resultStr))
+ assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
+
+ def test_endslopes(self):
+ # this is a smoke test for gh-3453: PCHIP interpolator should not
+ # set edge slopes to zero if the data do not suggest zero edge derivatives
+ x = np.array([0.0, 0.1, 0.25, 0.35])
+ y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
+ y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
+ for pp in (pchip(x, y1), pchip(x, y2)):
+ for t in (x[0], x[-1]):
+ assert_(pp(t, 1) != 0)
+
+ def test_all_zeros(self):
+ x = np.arange(10)
+ y = np.zeros_like(x)
+
+ # this should work and not generate any warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error')
+ pch = pchip(x, y)
+
+ xx = np.linspace(0, 9, 101)
+ assert_equal(pch(xx), 0.)
+
+ def test_two_points(self):
+ # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
+ # it tries to use a three-point scheme to estimate edge derivatives,
+ # while there are only two points available.
+ # Instead, it should construct a linear interpolator.
+ x = np.linspace(0, 1, 11)
+ p = pchip([0, 1], [0, 2])
+ assert_allclose(p(x), 2*x, atol=1e-15)
+
+ def test_pchip_interpolate(self):
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
+ [1.])
+
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
+ [3.5])
+
+ assert_array_almost_equal(
+ pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
+ [[3.5], [1]])
+
+ def test_roots(self):
+ # regression test for gh-6357: .roots method should work
+ p = pchip([0, 1], [-1, 1])
+ r = p.roots()
+ assert_allclose(r, 0.5)
+
+
+class TestCubicSpline:
+ @staticmethod
+ def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
+ tol=1e-14):
+ """Check that spline coefficients satisfy the continuity and boundary
+ conditions."""
+ x = S.x
+ c = S.c
+ dx = np.diff(x)
+ dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
+ dxi = dx[:-1]
+
+ # Check C2 continuity.
+ assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
+ c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
+ assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
+ 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
+ assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
+ rtol=tol, atol=tol)
+
+ # Check that we found a parabola, the third derivative is 0.
+ if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
+ assert_allclose(c[0], 0, rtol=tol, atol=tol)
+ return
+
+ # Check periodic boundary conditions.
+ if bc_start == 'periodic':
+ assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
+ assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
+ assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
+ return
+
+ # Check other boundary conditions.
+ if bc_start == 'not-a-knot':
+ if x.size == 2:
+ slope = (S(x[1]) - S(x[0])) / dx[0]
+ assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
+ else:
+ assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
+ elif bc_start == 'clamped':
+ assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
+ elif bc_start == 'natural':
+ assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
+ else:
+ order, value = bc_start
+ assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
+
+ if bc_end == 'not-a-knot':
+ if x.size == 2:
+ slope = (S(x[1]) - S(x[0])) / dx[0]
+ assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
+ else:
+ assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
+ elif bc_end == 'clamped':
+ assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
+ elif bc_end == 'natural':
+ assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)
+ else:
+ order, value = bc_end
+ assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
+
+ def check_all_bc(self, x, y, axis):
+ deriv_shape = list(y.shape)
+ del deriv_shape[axis]
+ first_deriv = np.empty(deriv_shape)
+ first_deriv.fill(2)
+ second_deriv = np.empty(deriv_shape)
+ second_deriv.fill(-1)
+ bc_all = [
+ 'not-a-knot',
+ 'natural',
+ 'clamped',
+ (1, first_deriv),
+ (2, second_deriv)
+ ]
+ for bc in bc_all[:3]:
+ S = CubicSpline(x, y, axis=axis, bc_type=bc)
+ self.check_correctness(S, bc, bc)
+
+ for bc_start in bc_all:
+ for bc_end in bc_all:
+ S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
+ self.check_correctness(S, bc_start, bc_end, tol=2e-14)
+
+ def test_general(self):
+ x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
+ y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
+ for n in [2, 3, x.size]:
+ self.check_all_bc(x[:n], y[:n], 0)
+
+ Y = np.empty((2, n, 2))
+ Y[0, :, 0] = y[:n]
+ Y[0, :, 1] = y[:n] - 1
+ Y[1, :, 0] = y[:n] + 2
+ Y[1, :, 1] = y[:n] + 3
+ self.check_all_bc(x[:n], Y, 1)
+
+ def test_periodic(self):
+ for n in [2, 3, 5]:
+ x = np.linspace(0, 2 * np.pi, n)
+ y = np.cos(x)
+ S = CubicSpline(x, y, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ Y = np.empty((2, n, 2))
+ Y[0, :, 0] = y
+ Y[0, :, 1] = y + 2
+ Y[1, :, 0] = y - 1
+ Y[1, :, 1] = y + 5
+ S = CubicSpline(x, Y, axis=1, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ def test_periodic_eval(self):
+ x = np.linspace(0, 2 * np.pi, 10)
+ y = np.cos(x)
+ S = CubicSpline(x, y, bc_type='periodic')
+ assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
+
+ def test_second_derivative_continuity_gh_11758(self):
+ # gh-11758: C2 continuity fail
+ x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
+ 7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
+ y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
+ 2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
+ S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+
+ def test_three_points(self):
+ # gh-11758: Fails computing a_m2_m1
+ # In this case, s (first derivatives) could be found manually by solving
+ # system of 2 linear equations. Due to solution of this system,
+ # s[i] = (h1m2 + h2m1) / (h1 + h2), where h1 = x[1] - x[0], h2 = x[2] - x[1],
+ # m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2
+ x = np.array([1.0, 2.75, 3.0])
+ y = np.array([1.0, 15.0, 1.0])
+ S = CubicSpline(x, y, bc_type='periodic')
+ self.check_correctness(S, 'periodic', 'periodic')
+ assert_allclose(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
+
+ def test_periodic_three_points_multidim(self):
+ # make sure one multidimensional interpolator does the same as multiple
+ # one-dimensional interpolators
+ x = np.array([0.0, 1.0, 3.0])
+ y = np.array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
+ S = CubicSpline(x, y, bc_type="periodic")
+ self.check_correctness(S, 'periodic', 'periodic')
+ S0 = CubicSpline(x, y[:, 0], bc_type="periodic")
+ S1 = CubicSpline(x, y[:, 1], bc_type="periodic")
+ q = np.linspace(0, 2, 5)
+ assert_allclose(S(q)[:, 0], S0(q))
+ assert_allclose(S(q)[:, 1], S1(q))
+
+ def test_dtypes(self):
+ x = np.array([0, 1, 2, 3], dtype=int)
+ y = np.array([-5, 2, 3, 1], dtype=int)
+ S = CubicSpline(x, y)
+ self.check_correctness(S)
+
+ y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
+ S = CubicSpline(x, y)
+ self.check_correctness(S)
+
+ S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
+ self.check_correctness(S, "natural", (1, 2j))
+
+ y = np.array([-5, 2, 3, 1])
+ S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
+ self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
+
+ def test_small_dx(self):
+ rng = np.random.RandomState(0)
+ x = np.sort(rng.uniform(size=100))
+ y = 1e4 + rng.uniform(size=100)
+ S = CubicSpline(x, y)
+ self.check_correctness(S, tol=1e-13)
+
+ def test_incorrect_inputs(self):
+ x = np.array([1, 2, 3, 4])
+ y = np.array([1, 2, 3, 4])
+ xc = np.array([1 + 1j, 2, 3, 4])
+ xn = np.array([np.nan, 2, 3, 4])
+ xo = np.array([2, 1, 3, 4])
+ yn = np.array([np.nan, 2, 3, 4])
+ y3 = [1, 2, 3]
+ x1 = [1]
+ y1 = [1]
+
+ assert_raises(ValueError, CubicSpline, xc, y)
+ assert_raises(ValueError, CubicSpline, xn, y)
+ assert_raises(ValueError, CubicSpline, x, yn)
+ assert_raises(ValueError, CubicSpline, xo, y)
+ assert_raises(ValueError, CubicSpline, x, y3)
+ assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
+ assert_raises(ValueError, CubicSpline, x1, y1)
+
+ wrong_bc = [('periodic', 'clamped'),
+ ((2, 0), (3, 10)),
+ ((1, 0), ),
+ (0., 0.),
+ 'not-a-typo']
+
+ for bc_type in wrong_bc:
+ assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
+
+ # Shapes mismatch when giving arbitrary derivative values:
+ Y = np.c_[y, y]
+ bc1 = ('clamped', (1, 0))
+ bc2 = ('clamped', (1, [0, 0, 0]))
+ bc3 = ('clamped', (1, [[0, 0]]))
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
+ assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
+
+ # periodic condition, y[-1] must be equal to y[0]:
+ assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
+
+
+def test_CubicHermiteSpline_correctness():
+ x = [0, 2, 7]
+ y = [-1, 2, 3]
+ dydx = [0, 3, 7]
+ s = CubicHermiteSpline(x, y, dydx)
+ assert_allclose(s(x), y, rtol=1e-15)
+ assert_allclose(s(x, 1), dydx, rtol=1e-15)
+
+
+def test_CubicHermiteSpline_error_handling():
+ x = [1, 2, 3]
+ y = [0, 3, 5]
+ dydx = [1, -1, 2, 3]
+ assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
+
+ dydx_with_nan = [1, 0, np.nan]
+ assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
+
+
+def test_roots_extrapolate_gh_11185():
+ x = np.array([0.001, 0.002])
+ y = np.array([1.66066935e-06, 1.10410807e-06])
+ dy = np.array([-1.60061854, -1.600619])
+ p = CubicHermiteSpline(x, y, dy)
+
+ # roots(extrapolate=True) for a polynomial with a single interval
+ # should return all three real roots
+ r = p.roots(extrapolate=True)
+ assert_equal(p.c.shape[1], 1)
+ assert_equal(r.size, 3)
+
+
+class TestZeroSizeArrays:
+ # regression tests for gh-17241 : CubicSpline et al must not segfault
+ # when y.size == 0
+ # The two methods below are _almost_ the same, but not quite:
+ # one is for objects which have the `bc_type` argument (CubicSpline)
+ # and the other one is for those which do not (Pchip, Akima1D)
+
+ @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+ np.zeros((10, 5, 0))])
+ @pytest.mark.parametrize('bc_type',
+ ['not-a-knot', 'periodic', 'natural', 'clamped'])
+ @pytest.mark.parametrize('axis', [0, 1, 2])
+ @pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
+ def test_zero_size(self, cls, y, bc_type, axis):
+ x = np.arange(10)
+ xval = np.arange(3)
+
+ obj = cls(x, y, bc_type=bc_type)
+ assert obj(xval).size == 0
+ assert obj(xval).shape == xval.shape + y.shape[1:]
+
+ # Also check with an explicit non-default axis
+ yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+ obj = cls(x, yt, bc_type=bc_type, axis=axis)
+ sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+ assert obj(xval).size == 0
+ assert obj(xval).shape == sh
+
+ @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+ np.zeros((10, 5, 0))])
+ @pytest.mark.parametrize('axis', [0, 1, 2])
+ @pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
+ def test_zero_size_2(self, cls, y, axis):
+ x = np.arange(10)
+ xval = np.arange(3)
+
+ obj = cls(x, y)
+ assert obj(xval).size == 0
+ assert obj(xval).shape == xval.shape + y.shape[1:]
+
+ # Also check with an explicit non-default axis
+ yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+ obj = cls(x, yt, axis=axis)
+ sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+ assert obj(xval).size == 0
+ assert obj(xval).shape == sh
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py
new file mode 100644
index 0000000000000000000000000000000000000000..418042c65a906430ddecc5dabd98af2777747184
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbf.py
@@ -0,0 +1,222 @@
+# Created by John Travers, Robert Hetland, 2007
+""" Test functions for rbf module """
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_almost_equal,
+ assert_almost_equal)
+from numpy import linspace, sin, cos, random, exp, allclose
+from scipy.interpolate._rbf import Rbf
+
# All radial basis function names accepted by Rbf; every test below sweeps
# over this tuple.
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
             'cubic', 'quintic', 'thin-plate', 'linear')
+
+
def check_rbf1d_interpolation(function):
    """Verify that a 1-D Rbf interpolant passes exactly through its nodes."""
    nodes = linspace(0, 10, 9)
    targets = sin(nodes)
    interpolant = Rbf(nodes, targets, function=function)
    assert_array_almost_equal(targets, interpolant(nodes))
    # A scalar query placed on a node must also reproduce the nodal value.
    assert_almost_equal(interpolant(float(nodes[0])), targets[0])
+
+
def check_rbf2d_interpolation(function):
    """Check that the Rbf function interpolates through the nodes (2D).

    Uses a locally seeded RandomState instead of the unseeded global RNG so
    the node layout is reproducible across runs and cannot (rarely) produce
    near-coincident nodes that make the Rbf linear system singular.
    """
    rng = np.random.RandomState(1234)
    x = rng.rand(50, 1)*4 - 2
    y = rng.rand(50, 1)*4 - 2
    z = x*exp(-x**2-1j*y**2)
    rbf = Rbf(x, y, z, epsilon=2, function=function)
    zi = rbf(x, y)
    zi.shape = x.shape
    assert_array_almost_equal(z, zi)
+
+
def check_rbf3d_interpolation(function):
    """Check that the Rbf function interpolates through the nodes (3D).

    Uses a locally seeded RandomState instead of the unseeded global RNG so
    the node layout is deterministic and the test cannot fail intermittently
    on an unlucky (near-singular) point set.
    """
    rng = np.random.RandomState(1234)
    x = rng.rand(50, 1)*4 - 2
    y = rng.rand(50, 1)*4 - 2
    z = rng.rand(50, 1)*4 - 2
    d = x*exp(-x**2 - y**2)
    rbf = Rbf(x, y, z, d, epsilon=2, function=function)
    di = rbf(x, y, z)
    di.shape = x.shape
    assert_array_almost_equal(di, d)
+
+
def test_rbf_interpolation():
    """Run the 1-D/2-D/3-D node-interpolation checks for every basis."""
    for basis in FUNCTIONS:
        for check in (check_rbf1d_interpolation,
                      check_rbf2d_interpolation,
                      check_rbf3d_interpolation):
            check(basis)
+
+
def check_2drbf1d_interpolation(function):
    """An Rbf with mode='N-D' and two output components must reproduce its
    node data exactly (1-D input)."""
    x = linspace(0, 10, 9)
    y = np.stack([sin(x), cos(x)], axis=1)
    rbf = Rbf(x, y, function=function, mode='N-D')
    assert_array_almost_equal(y, rbf(x))
    # Scalar query at a node reproduces that node's vector value.
    assert_almost_equal(rbf(float(x[0])), y[0])
+
+
def check_2drbf2d_interpolation(function):
    """Check that the 2-D Rbf function interpolates through the nodes (2D).

    Uses a locally seeded RandomState instead of the unseeded global RNG so
    the node layout is reproducible and cannot produce a near-singular
    system by chance.
    """
    rng = np.random.RandomState(1234)
    x = rng.rand(50, ) * 4 - 2
    y = rng.rand(50, ) * 4 - 2
    z0 = x * exp(-x ** 2 - 1j * y ** 2)
    z1 = y * exp(-y ** 2 - 1j * x ** 2)
    z = np.vstack([z0, z1]).T
    rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
    zi = rbf(x, y)
    zi.shape = z.shape
    assert_array_almost_equal(z, zi)
+
+
def check_2drbf3d_interpolation(function):
    """Check that the 2-D Rbf function interpolates through the nodes (3D).

    Uses a locally seeded RandomState instead of the unseeded global RNG so
    the node layout is reproducible and cannot produce a near-singular
    system by chance.
    """
    rng = np.random.RandomState(1234)
    x = rng.rand(50, ) * 4 - 2
    y = rng.rand(50, ) * 4 - 2
    z = rng.rand(50, ) * 4 - 2
    d0 = x * exp(-x ** 2 - y ** 2)
    d1 = y * exp(-y ** 2 - x ** 2)
    d = np.vstack([d0, d1]).T
    rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
    di = rbf(x, y, z)
    di.shape = d.shape
    assert_array_almost_equal(di, d)
+
+
def test_2drbf_interpolation():
    """Run the N-D (vector-output) interpolation checks for every basis."""
    for basis in FUNCTIONS:
        for check in (check_2drbf1d_interpolation,
                      check_2drbf2d_interpolation,
                      check_2drbf3d_interpolation):
            check(basis)
+
+
def check_rbf1d_regularity(function, atol):
    """Away from its nodes, the interpolant must stay within `atol` of the
    smooth underlying function (sin)."""
    nodes = linspace(0, 10, 9)
    rbf = Rbf(nodes, sin(nodes), function=function)
    dense = linspace(0, 10, 100)
    approx = rbf(dense)
    exact = sin(dense)
    msg = "abs-diff: %f" % abs(approx - exact).max()
    assert_(allclose(approx, exact, atol=atol), msg)
+
+
def test_rbf_regularity():
    """Each basis must approximate sin() to its empirically-known tolerance."""
    tolerances = dict(multiquadric=0.1, gaussian=0.15, cubic=0.15,
                      quintic=0.1, linear=0.2)
    tolerances['inverse multiquadric'] = 0.15
    tolerances['thin-plate'] = 0.1
    for function in FUNCTIONS:
        # Unknown bases get a strict default tolerance.
        check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
def check_2drbf1d_regularity(function, atol):
    """Away from its nodes, a mode='N-D' interpolant must stay within `atol`
    of the smooth underlying vector function (sin, cos)."""
    x = linspace(0, 10, 9)
    y = np.stack([sin(x), cos(x)], axis=1)
    rbf = Rbf(x, y, function=function, mode='N-D')
    xi = linspace(0, 10, 100)
    yi = rbf(xi)
    exact = np.stack([sin(xi), cos(xi)], axis=1)
    msg = "abs-diff: %f" % abs(yi - exact).max()
    assert_(allclose(yi, exact, atol=atol), msg)
+
+
def test_2drbf_regularity():
    """Vector-output variant of test_rbf_regularity."""
    tolerances = dict(multiquadric=0.1, gaussian=0.15, cubic=0.15,
                      quintic=0.1, linear=0.2)
    tolerances['inverse multiquadric'] = 0.15
    tolerances['thin-plate'] = 0.15
    for function in FUNCTIONS:
        # Unknown bases get a strict default tolerance.
        check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
def check_rbf1d_stability(function):
    """With the default epsilon, the interpolant must not overshoot noisy
    data. Regression test for issue #4523."""
    # Fixed seed -> deterministic noise realization.
    np.random.seed(1234)
    x = np.linspace(0, 10, 50)
    z = x + 4.0 * np.random.randn(len(x))

    rbf = Rbf(x, z, function=function)
    fine = np.linspace(0, 10, 1000)
    interpolated = rbf(fine)

    # After removing the linear trend, residuals should not blow up relative
    # to the noise amplitude of the data itself.
    assert_(np.abs(interpolated - fine).max() / np.abs(z - x).max() < 1.1)
+
def test_rbf_stability():
    """No basis function may overshoot noisy data with the default epsilon."""
    for basis in FUNCTIONS:
        check_rbf1d_stability(basis)
+
+
def test_default_construction():
    """Rbf must be constructible with the default multiquadric basis and
    still interpolate through its nodes. Regression test for ticket #1228."""
    nodes = linspace(0, 10, 9)
    targets = sin(nodes)
    assert_array_almost_equal(targets, Rbf(nodes, targets)(nodes))
+
+
def test_function_is_callable():
    """Rbf accepts a user-supplied single-argument callable as its basis."""
    nodes = linspace(0, 10, 9)
    targets = sin(nodes)

    def identity_basis(r):
        return r

    interpolant = Rbf(nodes, targets, function=identity_basis)
    assert_array_almost_equal(targets, interpolant(nodes))
+
+
def test_two_arg_function_is_callable():
    """Rbf also accepts a two-argument callable (self, r), giving the basis
    access to instance attributes such as epsilon."""
    def shifted_linear(self, r):
        return self.epsilon + r

    nodes = linspace(0, 10, 9)
    targets = sin(nodes)
    interpolant = Rbf(nodes, targets, function=shifted_linear)
    assert_array_almost_equal(targets, interpolant(nodes))
+
+
def test_rbf_epsilon_none():
    """epsilon=None must be accepted; Rbf then derives one from the data."""
    nodes = linspace(0, 10, 9)
    Rbf(nodes, sin(nodes), epsilon=None)
+
+
def test_rbf_epsilon_none_collinear():
    """Points collinear in one coordinate must not yield epsilon == 0."""
    interpolant = Rbf([1, 2, 3], [4, 4, 4], [5, 6, 7], epsilon=None)
    assert_(interpolant.epsilon > 0)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py
new file mode 100644
index 0000000000000000000000000000000000000000..188d5e1d8ad9b6e0d93ba0d02736dc11042dacaf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rbfinterp.py
@@ -0,0 +1,516 @@
+import pickle
+import pytest
+import numpy as np
+from numpy.linalg import LinAlgError
+from numpy.testing import assert_allclose, assert_array_equal
+from scipy.stats.qmc import Halton
+from scipy.spatial import cKDTree
+from scipy.interpolate._rbfinterp import (
+ _AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
+ RBFInterpolator
+ )
+from scipy.interpolate import _rbfinterp_pythran
+
+
+def _vandermonde(x, degree):
+ # Returns a matrix of monomials that span polynomials with the specified
+ # degree evaluated at x.
+ powers = _monomial_powers(x.shape[1], degree)
+ return _rbfinterp_pythran._polynomial_matrix(x, powers)
+
+
+def _1d_test_function(x):
+ # Test function used in Wahba's "Spline Models for Observational Data".
+ # domain ~= (0, 3), range ~= (-1.0, 0.2)
+ x = x[:, 0]
+ y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
+ return y
+
+
+def _2d_test_function(x):
+ # Franke's test function.
+ # domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
+ x1, x2 = x[:, 0], x[:, 1]
+ term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
+ term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
+ term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
+ term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
+ y = term1 + term2 + term3 + term4
+ return y
+
+
def _is_conditionally_positive_definite(kernel, m):
    """Empirically test whether `kernel` is conditionally positive definite
    of order `m`, over many random point sets in 1 through 5 dimensions.

    Returns False at the first point set whose projected kernel matrix fails
    a Cholesky factorization, True if every trial passes. See chapter 7 of
    Fasshauer's "Meshfree Approximation Methods with MATLAB".
    """
    nx = 10        # points per trial
    ntests = 100   # trials per dimension
    for ndim in [1, 2, 3, 4, 5]:
        # Generate sample points with a Halton sequence to avoid samples that
        # are too close to each other, which can make the matrix singular.
        seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
        for _ in range(ntests):
            x = 2*seq.random(nx) - 1
            A = _rbfinterp_pythran._kernel_matrix(x, kernel)
            P = _vandermonde(x, m - 1)
            Q, R = np.linalg.qr(P, mode='complete')
            # Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
            # A onto this space, and then see if it is positive definite using
            # the Cholesky decomposition. If not, then the kernel is not c.p.d.
            # of order m.
            Q2 = Q[:, P.shape[1]:]
            B = Q2.T.dot(A).dot(Q2)
            try:
                np.linalg.cholesky(B)
            except np.linalg.LinAlgError:
                return False

    return True
+
+
# Sorting the parametrize arguments is necessary to avoid a parallelization
# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_conditionally_positive_definite(kernel):
    """Every available kernel must be conditionally positive definite of the
    order implied by its minimum polynomial degree -- a necessary condition
    for the smoothed RBF interpolant to be well-posed in general."""
    order = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
    assert _is_conditionally_positive_definite(kernel, order)
+
+
class _TestRBFInterpolator:
    """Shared test battery for RBFInterpolator.

    Concrete subclasses provide ``build(*args, **kwargs)``, which constructs
    the interpolator variant under test (neighbors=None, 20, or np.inf);
    every test below exercises the interpolator only through that hook.
    """

    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
    def test_scale_invariance_1d(self, kernel):
        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
        # shape parameter (when smoothing == 0) in 1d.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        x = 3*seq.random(50)
        y = _1d_test_function(x)
        xitp = 3*seq.random(50)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        assert_allclose(yitp1, yitp2, atol=1e-8)

    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
    def test_scale_invariance_2d(self, kernel):
        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
        # shape parameter (when smoothing == 0) in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        assert_allclose(yitp1, yitp2, atol=1e-8)

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_extreme_domains(self, kernel):
        # Make sure the interpolant remains numerically stable for very
        # large/small domains.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        scale = 1e50
        shift = 1e55

        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)

        # Scale-dependent kernels need epsilon rescaled along with the domain.
        if kernel in _SCALE_INVARIANT:
            yitp1 = self.build(x, y, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                kernel=kernel
                )(xitp*scale + shift)
        else:
            yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                epsilon=5.0/scale,
                kernel=kernel
                )(xitp*scale + shift)

        assert_allclose(yitp1, yitp2, atol=1e-8)

    def test_polynomial_reproduction(self):
        # If the observed data comes from a polynomial, then the interpolant
        # should be able to reproduce the polynomial exactly, provided that
        # `degree` is sufficiently high.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3

        x = seq.random(50)
        xitp = seq.random(50)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)

        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])

        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        yitp2 = self.build(x, y, degree=degree)(xitp)

        assert_allclose(yitp1, yitp2, atol=1e-8)

    @pytest.mark.slow
    def test_chunking(self, monkeypatch):
        # If the observed data comes from a polynomial, then the interpolant
        # should be able to reproduce the polynomial exactly, provided that
        # `degree` is sufficiently high.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3

        largeN = 1000 + 33
        # this is large to check that chunking of the RBFInterpolator is tested
        x = seq.random(50)
        xitp = seq.random(largeN)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)

        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])

        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        interp = self.build(x, y, degree=degree)
        ce_real = interp._chunk_evaluator

        # Force a tiny memory budget so the chunked code path is exercised.
        def _chunk_evaluator(*args, **kwargs):
            kwargs.update(memory_budget=100)
            return ce_real(*args, **kwargs)

        monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
        yitp2 = interp(xitp)
        assert_allclose(yitp1, yitp2, atol=1e-8)

    def test_vector_data(self):
        # Make sure interpolating a vector field is the same as interpolating
        # each component separately.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = np.array([_2d_test_function(x),
                      _2d_test_function(x[:, ::-1])]).T

        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y[:, 0])(xitp)
        yitp3 = self.build(x, y[:, 1])(xitp)

        assert_allclose(yitp1[:, 0], yitp2)
        assert_allclose(yitp1[:, 1], yitp3)

    def test_complex_data(self):
        # Interpolating complex input should be the same as interpolating the
        # real and complex components.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])

        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y.real)(xitp)
        yitp3 = self.build(x, y.imag)(xitp)

        assert_allclose(yitp1.real, yitp2)
        assert_allclose(yitp1.imag, yitp3)

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_1d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 1d.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)
        ytrue = _1d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)

        mse = np.mean((yitp - ytrue)**2)
        assert mse < 1.0e-4

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_2d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)
        ytrue = _2d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)

        mse = np.mean((yitp - ytrue)**2)
        assert mse < 2.0e-4

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_smoothing_misfit(self, kernel):
        # Make sure we can find a smoothing parameter for each kernel that
        # removes a sufficient amount of noise.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)

        noise = 0.2
        rmse_tol = 0.1
        smoothing_range = 10**np.linspace(-4, 1, 20)

        x = 3*seq.random(100)
        y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
        ytrue = _1d_test_function(x)
        rmse_within_tol = False
        for smoothing in smoothing_range:
            ysmooth = self.build(
                x, y,
                epsilon=1.0,
                smoothing=smoothing,
                kernel=kernel)(x)
            rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
            if rmse < rmse_tol:
                rmse_within_tol = True
                break

        assert rmse_within_tol

    def test_array_smoothing(self):
        # Test using an array for `smoothing` to give less weight to a known
        # outlier.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)
        degree = 2

        x = seq.random(50)
        P = _vandermonde(x, degree)
        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
        y = P.dot(poly_coeffs)
        y_with_outlier = np.copy(y)
        y_with_outlier[10] += 1.0
        smoothing = np.zeros((50,))
        smoothing[10] = 1000.0
        yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
        # Should be able to reproduce the uncorrupted data almost exactly.
        assert_allclose(yitp, y, atol=1e-4)

    def test_inconsistent_x_dimensions_error(self):
        # ValueError should be raised if the observation points and evaluation
        # points have a different number of dimensions.
        y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
        d = _2d_test_function(y)
        x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
        match = 'Expected the second axis of `x`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)

    def test_inconsistent_d_length_error(self):
        # Data array shorter than the observation array must be rejected.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(1)
        match = 'Expected the first axis of `d`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)

    def test_y_not_2d_error(self):
        # Observation points must be passed as a 2-D (n, ndim) array.
        y = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`y` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)

    def test_inconsistent_smoothing_length_error(self):
        # A per-point `smoothing` array must match the number of points.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        smoothing = np.ones(1)
        match = 'Expected `smoothing` to be'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, smoothing=smoothing)

    def test_invalid_kernel_name_error(self):
        # Unknown kernel names must be rejected.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        match = '`kernel` must be one of'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='test')

    def test_epsilon_not_specified_error(self):
        # Kernels that are not scale invariant require an explicit epsilon.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel in _AVAILABLE:
            if kernel in _SCALE_INVARIANT:
                continue

            match = '`epsilon` must be specified'
            with pytest.raises(ValueError, match=match):
                self.build(y, d, kernel=kernel)

    def test_x_not_2d_error(self):
        # Evaluation points must be passed as a 2-D (n, ndim) array.
        y = np.linspace(0, 1, 5)[:, None]
        x = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`x` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)

    def test_not_enough_observations_error(self):
        # thin_plate_spline needs at least 2 data points.
        y = np.linspace(0, 1, 1)[:, None]
        d = np.zeros(1)
        match = 'At least 2 data points are required'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='thin_plate_spline')

    def test_degree_warning(self):
        # Degrees below the kernel's minimum should trigger a warning.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, deg in _NAME_TO_MIN_DEGREE.items():
            # Only test for kernels that its minimum degree is not 0.
            if deg >= 1:
                match = f'`degree` should not be below {deg}'
                with pytest.warns(Warning, match=match):
                    self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)

    def test_minus_one_degree(self):
        # Make sure a degree of -1 is accepted without any warning.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, _ in _NAME_TO_MIN_DEGREE.items():
            self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)

    def test_rank_error(self):
        # An error should be raised when `kernel` is "thin_plate_spline" and
        # observations are 2-D and collinear.
        y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
        d = np.array([0.0, 0.0, 0.0])
        match = 'does not have full column rank'
        with pytest.raises(LinAlgError, match=match):
            self.build(y, d, kernel='thin_plate_spline')(y)

    def test_single_point(self):
        # Make sure interpolation still works with only one point (in 1, 2, and
        # 3 dimensions).
        for dim in [1, 2, 3]:
            y = np.zeros((1, dim))
            d = np.ones((1,))
            f = self.build(y, d, kernel='linear')(y)
            assert_allclose(d, f)

    def test_pickleable(self):
        # Make sure we can pickle and unpickle the interpolant without any
        # changes in the behavior.
        seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        interp = self.build(x, y)

        yitp1 = interp(xitp)
        yitp2 = pickle.loads(pickle.dumps(interp))(xitp)

        assert_array_equal(yitp1, yitp2)
+
+
class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
    """Run the shared battery with neighbors=None (global interpolation)."""

    def build(self, *args, **kwargs):
        # Construct the interpolator using all observation points at once.
        return RBFInterpolator(*args, **kwargs)

    def test_smoothing_limit_1d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
            )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)

    def test_smoothing_limit_2d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
            )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
    # RBFInterpolator using 20 nearest neighbors.

    def build(self, *args, **kwargs):
        # Construct the local-interpolation variant of the interpolator.
        return RBFInterpolator(*args, **kwargs, neighbors=20)

    def test_equivalent_to_rbf_interpolator(self):
        """Each evaluation must match a global interpolant built from that
        point's own 20 nearest observations."""
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(x, y)(xitp)

        yitp2 = []
        tree = cKDTree(x)
        for xi in xitp:
            _, nbr = tree.query(xi, 20)
            yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])

        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
    # RBFInterpolator using neighbors=np.inf. This should give exactly the same
    # results as neighbors=None, but it will be slower.

    def build(self, *args, **kwargs):
        # neighbors=np.inf goes through the nearest-neighbor code path but
        # uses every observation point.
        return RBFInterpolator(*args, **kwargs, neighbors=np.inf)

    def test_equivalent_to_rbf_interpolator(self):
        """neighbors=np.inf must reproduce the neighbors=None result."""
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)
        yitp1 = self.build(x, y)(xitp)
        yitp2 = RBFInterpolator(x, y)(xitp)

        assert_allclose(yitp1, yitp2, atol=1e-8)
diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..5503b39dc67eb3c1d50e338ac26708095aff55d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/test_rgi.py
@@ -0,0 +1,1111 @@
+import itertools
+
+import pytest
+import numpy as np
+
+from numpy.testing import (assert_allclose, assert_equal, assert_warns,
+ assert_array_almost_equal, assert_array_equal)
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (RegularGridInterpolator, interpn,
+ RectBivariateSpline,
+ NearestNDInterpolator, LinearNDInterpolator)
+
+from scipy.sparse._sputils import matrix
+from scipy._lib._util import ComplexWarning
+
+
# Reusable decorator: parametrizes a test over every interpolation method
# that RegularGridInterpolator supports.
parametrize_rgi_interp_methods = pytest.mark.parametrize(
    "method", RegularGridInterpolator._ALL_METHODS
)
+
+class TestRegularGridInterpolator:
+ def _get_sample_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points = [(0., .5, 1.)] * 4
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_2(self):
+ # create another 4-D grid of 3 points in each dimension
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_3(self):
+ # create another 4-D grid of 7 points in each dimension
+ points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0)] * 4
+ values = np.asarray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def _get_sample_4d_4(self):
+ # create another 4-D grid of 2 points in each dimension
+ points = [(0.0, 1.0)] * 4
+ values = np.asarray([0.0, 1.0])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
    @parametrize_rgi_interp_methods
    def test_list_input(self, method):
        """Plain Python lists for values and sample points must give the same
        result as ndarrays, for every interpolation method."""
        points, values = self._get_sample_4d_3()

        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        interp = RegularGridInterpolator(points,
                                         values.tolist(),
                                         method=method)
        v1 = interp(sample.tolist())
        interp = RegularGridInterpolator(points,
                                         values,
                                         method=method)
        v2 = interp(sample)
        assert_allclose(v1, v2)
+
    @pytest.mark.parametrize('method', ['cubic', 'quintic', 'pchip'])
    def test_spline_dim_error(self, method):
        """Spline methods require more grid points per dimension than the
        2-point grid provides; the error must surface both at construction
        time and when overriding `method` at call time."""
        points, values = self._get_sample_4d_4()
        match = "points in dimension"

        # Check error raise when creating interpolator
        with pytest.raises(ValueError, match=match):
            RegularGridInterpolator(points, values, method=method)

        # Check error raise when calling the interpolator with a per-call
        # method override
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        with pytest.raises(ValueError, match=match):
            interp(sample, method=method)
+
    @pytest.mark.parametrize(
        "points_values, sample",
        [
            (
                _get_sample_4d,
                np.asarray(
                    [[0.1, 0.1, 1.0, 0.9],
                     [0.2, 0.1, 0.45, 0.8],
                     [0.5, 0.5, 0.5, 0.5]]
                ),
            ),
            (_get_sample_4d_2, np.asarray([0.1, 0.1, 10.0, 9.0])),
        ],
    )
    def test_linear_and_slinear_close(self, points_values, sample):
        """'linear' and 'slinear' implement the same interpolant via different
        code paths and must agree to floating-point tolerance."""
        points, values = points_values(self)
        interp = RegularGridInterpolator(points, values, method="linear")
        v1 = interp(sample)
        interp = RegularGridInterpolator(points, values, method="slinear")
        v2 = interp(sample)
        assert_allclose(v1, v2)
+
    def test_derivatives(self):
        """Spot-check the `nu=` derivative orders of the 'slinear' method on
        data that is linear along every axis (value = x0 + 10*x1 + ...)."""
        points, values = self._get_sample_4d()
        sample = np.array([[0.1 , 0.1 , 1. , 0.9 ],
                           [0.2 , 0.1 , 0.45, 0.8 ],
                           [0.5 , 0.5 , 0.5 , 0.5 ]])
        interp = RegularGridInterpolator(points, values, method="slinear")

        with assert_raises(ValueError):
            # wrong number of derivatives (need 4)
            interp(sample, nu=1)

        # d/dx0 of (x0 + 10*x1 + ...) is 1, d/dx1 is 10
        assert_allclose(interp(sample, nu=(1, 0, 0, 0)),
                        [1, 1, 1], atol=1e-15)
        assert_allclose(interp(sample, nu=(0, 1, 0, 0)),
                        [10, 10, 10], atol=1e-15)

        # 2nd derivatives of a linear function are zero
        assert_allclose(interp(sample, nu=(0, 1, 1, 0)),
                        [0, 0, 0], atol=1e-12)
+
    @parametrize_rgi_interp_methods
    def test_complex(self, method):
        """Complex-valued data must interpolate exactly as the real and
        imaginary parts interpolated independently."""
        if method == "pchip":
            pytest.skip("pchip does not make sense for complex data")
        points, values = self._get_sample_4d_3()
        values = values - 2j*values
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        interp = RegularGridInterpolator(points, values, method=method)
        rinterp = RegularGridInterpolator(points, values.real, method=method)
        iinterp = RegularGridInterpolator(points, values.imag, method=method)

        v1 = interp(sample)
        v2 = rinterp(sample) + 1j*iinterp(sample)
        assert_allclose(v1, v2)
+
    def test_cubic_vs_pchip(self):
        """'cubic' and 'pchip' are genuinely different interpolants: on
        x**4 * y**4 data their results must NOT coincide."""
        x, y = [1, 2, 3, 4], [1, 2, 3, 4]
        xg, yg = np.meshgrid(x, y, indexing='ij')

        values = (lambda x, y: x**4 * y**4)(xg, yg)
        cubic = RegularGridInterpolator((x, y), values, method='cubic')
        pchip = RegularGridInterpolator((x, y), values, method='pchip')

        vals_cubic = cubic([1.5, 2])
        vals_pchip = pchip([1.5, 2])
        assert not np.allclose(vals_cubic, vals_pchip, atol=1e-14, rtol=0)
+
    def test_linear_xi1d(self):
        """A single sample point given as a flat 1-D array is accepted and
        interpolated to the index-encoded value."""
        points, values = self._get_sample_4d_2()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([0.1, 0.1, 10., 9.])
        wanted = 1001.1
        assert_array_almost_equal(interp(sample), wanted)
+
    def test_linear_xi3d(self):
        """A batch of sample points interpolates to the expected
        index-encoded values under the default 'linear' method."""
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        wanted = np.asarray([1001.1, 846.2, 555.5])
        assert_array_almost_equal(interp(sample), wanted)
+
    @pytest.mark.parametrize(
        "sample, wanted",
        [
            (np.asarray([0.1, 0.1, 0.9, 0.9]), 1100.0),
            (np.asarray([0.1, 0.1, 0.1, 0.1]), 0.0),
            (np.asarray([0.0, 0.0, 0.0, 0.0]), 0.0),
            (np.asarray([1.0, 1.0, 1.0, 1.0]), 1111.0),
            (np.asarray([0.1, 0.4, 0.6, 0.9]), 1055.0),
        ],
    )
    def test_nearest(self, sample, wanted):
        """method='nearest' snaps each coordinate to its closest grid point;
        expected values follow from the index-encoded grid data."""
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, method="nearest")
        assert_array_almost_equal(interp(sample), wanted)
+
    def test_linear_edges(self):
        """Evaluating exactly on grid corners returns the corner values."""
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        wanted = np.asarray([0., 1111.])
        assert_array_almost_equal(interp(sample), wanted)
+
    def test_valid_create(self):
        """Malformed constructor arguments must raise ValueError: non-scalar
        grid entries, unsorted axes, axis/values shape mismatches, and an
        unknown method name."""
        # create a 2-D grid of 3 points in each dimension
        points = [(0., .5, 1.), (0., 1., .5)]
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis]
        values1 = values[np.newaxis, :]
        values = (values0 + values1 * 10)
        # second axis is not strictly ascending
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [((0., .5, 1.), ), (0., .5, 1.)]
        # first axis entry is not 1-D
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, .75, 1.), (0., .5, 1.)]
        # axis length does not match values.shape[0]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
        # more axes than values dimensions
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values,
                      method="undefmethod")
+
    def test_valid_call(self):
        """Invalid call arguments must raise ValueError: unknown method,
        wrong sample dimensionality, and out-of-bounds samples (with the
        default bounds_error=True)."""
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        assert_raises(ValueError, interp, sample, "undefmethod")
        sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
        assert_raises(ValueError, interp, sample)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
        assert_raises(ValueError, interp, sample)
+
    def test_out_of_bounds_extrap(self):
        """With bounds_error=False and fill_value=None, out-of-range samples
        are extrapolated (nearest clamps; linear extends the trend)."""
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=None)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([0., 1111., 11., 11.])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
        assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_extrap2(self):
+ points, values = self._get_sample_4d_2()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=None)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([0., 11., 11., 11.])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+ def test_out_of_bounds_fill(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, bounds_error=False,
+ fill_value=np.nan)
+ sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+ [2.1, 2.1, -1.1, -1.1]])
+ wanted = np.asarray([np.nan, np.nan, np.nan])
+ assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+ assert_array_almost_equal(interp(sample, method="linear"), wanted)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ wanted = np.asarray([1001.1, 846.2, 555.5])
+ assert_array_almost_equal(interp(sample), wanted)
+
+ def test_nearest_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values, method="nearest")
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+ def test_linear_compare_qhull(self):
+ points, values = self._get_sample_4d()
+ interp = RegularGridInterpolator(points, values)
+ points_qhull = itertools.product(*points)
+ points_qhull = [p for p in points_qhull]
+ points_qhull = np.asarray(points_qhull)
+ values_qhull = values.reshape(-1)
+ interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
+ sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+ [0.5, 0.5, .5, .5]])
+ assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
    @pytest.mark.parametrize("method", ["nearest", "linear"])
    def test_duck_typed_values(self, method):
        # A duck-typed, indexable `values` object (MyValue, defined later in
        # this file) must give the same result as the ndarray it wraps.
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)

        values = MyValue((5, 7))

        interp = RegularGridInterpolator((x, y), values, method=method)
        v1 = interp([0.4, 0.7])

        # same interpolation on the underlying ndarray
        interp = RegularGridInterpolator((x, y), values._v, method=method)
        v2 = interp([0.4, 0.7])
        assert_allclose(v1, v2)
+
+ def test_invalid_fill_value(self):
+ np.random.seed(1234)
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+ values = np.random.rand(5, 7)
+
+ # integers can be cast to floats
+ RegularGridInterpolator((x, y), values, fill_value=1)
+
+ # complex values cannot
+ assert_raises(ValueError, RegularGridInterpolator,
+ (x, y), values, fill_value=1+2j)
+
+ def test_fillvalue_type(self):
+ # from #3703; test that interpolator object construction succeeds
+ values = np.ones((10, 20, 30), dtype='>f4')
+ points = [np.arange(n) for n in values.shape]
+ # xi = [(1, 1, 1)]
+ RegularGridInterpolator(points, values)
+ RegularGridInterpolator(points, values, fill_value=0.)
+
    def test_length_one_axis(self):
        # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
        # Along the axis it's linear interpolation; away from the length-1
        # axis, it's an extrapolation, so fill_value should be used.
        def f(x, y):
            return x + y
        x = np.linspace(1, 1, 1)   # a single grid point on the first axis
        y = np.linspace(1, 10, 10)
        data = f(*np.meshgrid(x, y, indexing="ij", sparse=True))

        interp = RegularGridInterpolator((x, y), data, method="linear",
                                         bounds_error=False, fill_value=101)

        # check values at the grid
        assert_allclose(interp(np.array([[1, 1], [1, 5], [1, 10]])),
                        [2, 6, 11],
                        atol=1e-14)

        # check off-grid interpolation is indeed linear
        assert_allclose(interp(np.array([[1, 1.4], [1, 5.3], [1, 10]])),
                        [2.4, 6.3, 11],
                        atol=1e-14)

        # check extrapolation w/ fill_value
        assert_allclose(interp(np.array([1.1, 2.4])),
                        interp.fill_value,
                        atol=1e-14)

        # check extrapolation: linear along the `y` axis, const along `x`
        interp.fill_value = None
        assert_allclose(interp([[1, 0.3], [1, 11.5]]),
                        [1.3, 12.5], atol=1e-15)

        assert_allclose(interp([[1.5, 0.3], [1.9, 11.5]]),
                        [1.3, 12.5], atol=1e-15)

        # extrapolation with method='nearest'
        interp = RegularGridInterpolator((x, y), data, method="nearest",
                                         bounds_error=False, fill_value=None)
        assert_allclose(interp([[1.5, 1.8], [-4, 5.1]]),
                        [3, 6],
                        atol=1e-15)
+
    @pytest.mark.parametrize("fill_value", [None, np.nan, np.pi])
    @pytest.mark.parametrize("method", ['linear', 'nearest'])
    def test_length_one_axis2(self, fill_value, method):
        # A trailing length-1 axis must not change results when queried at
        # the single grid coordinate, and must honour fill_value elsewhere.
        options = {"fill_value": fill_value, "bounds_error": False,
                   "method": method}

        x = np.linspace(0, 2*np.pi, 20)
        z = np.sin(x)

        fa = RegularGridInterpolator((x,), z[:], **options)
        fb = RegularGridInterpolator((x, [0]), z[:, None], **options)

        x1a = np.linspace(-1, 2*np.pi+1, 100)
        za = fa(x1a)

        # evaluated at provided y-value, fb should behave exactly as fa
        y1b = np.zeros(100)
        zb = fb(np.vstack([x1a, y1b]).T)
        assert_allclose(zb, za)

        # evaluated at a different y-value, fb should return fill value
        y1b = np.ones(100)
        zb = fb(np.vstack([x1a, y1b]).T)
        if fill_value is None:
            # fill_value=None means extrapolate, which along a length-1
            # axis is constant, so fb still matches fa
            assert_allclose(zb, za)
        else:
            assert_allclose(zb, fill_value)
+
    @pytest.mark.parametrize("method", ['nearest', 'linear'])
    def test_nan_x_1d(self, method):
        # gh-6624 : if x is nan, result should be nan
        f = RegularGridInterpolator(([1, 2, 3],), [10, 20, 30], fill_value=1,
                                    bounds_error=False, method=method)
        assert np.isnan(f([np.nan]))

        # test arbitrary nan pattern
        rng = np.random.default_rng(8143215468)
        x = rng.random(size=100)*4
        i = rng.random(size=100) > 0.5
        x[i] = np.nan
        with np.errstate(invalid='ignore'):
            # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
            # generate numpy warnings if `x` contains nans.
            # These warnings should propagate to user (since `x` is user
            # input) and we simply filter them out.
            res = f(x)

        # nan inputs yield nan; non-nan inputs are unaffected by the nans
        assert_equal(res[i], np.nan)
        assert_equal(res[~i], f(x[~i]))

        # also test the length-one axis f(nan)
        x = [1, 2, 3]
        y = [1, ]
        data = np.ones((3, 1))
        f = RegularGridInterpolator((x, y), data, fill_value=1,
                                    bounds_error=False, method=method)
        assert np.isnan(f([np.nan, 1]))
        assert np.isnan(f([1, np.nan]))
+
    @pytest.mark.parametrize("method", ['nearest', 'linear'])
    def test_nan_x_2d(self, method):
        # nan coordinates must also produce nan results in 2-D
        x, y = np.array([0, 1, 2]), np.array([1, 3, 7])

        def f(x, y):
            return x**2 + y**2

        xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
        data = f(xg, yg)
        interp = RegularGridInterpolator((x, y), data,
                                         method=method, bounds_error=False)

        with np.errstate(invalid='ignore'):
            res = interp([[1.5, np.nan], [1, 1]])
        assert_allclose(res[1], 2, atol=1e-14)
        assert np.isnan(res[0])

        # test arbitrary nan pattern
        rng = np.random.default_rng(8143215468)
        x = rng.random(size=100)*4-1
        y = rng.random(size=100)*8
        i1 = rng.random(size=100) > 0.5
        i2 = rng.random(size=100) > 0.5
        i = i1 | i2   # a row is "poisoned" if either coordinate is nan
        x[i1] = np.nan
        y[i2] = np.nan
        z = np.array([x, y]).T
        with np.errstate(invalid='ignore'):
            # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
            # generate numpy warnings if `x` contains nans.
            # These warnings should propagate to user (since `x` is user
            # input) and we simply filter them out.
            res = interp(z)

        assert_equal(res[i], np.nan)
        assert_equal(res[~i], interp(z[~i]))
+
    @parametrize_rgi_interp_methods
    @pytest.mark.parametrize(("ndims", "func"), [
        (2, lambda x, y: 2 * x ** 3 + 3 * y ** 2),
        (3, lambda x, y, z: 2 * x ** 3 + 3 * y ** 2 - z),
        (4, lambda x, y, z, a: 2 * x ** 3 + 3 * y ** 2 - z + a),
        (5, lambda x, y, z, a, b: 2 * x ** 3 + 3 * y ** 2 - z + a * b),
    ])
    def test_descending_points_nd(self, method, ndims, func):
        # Descending axes must give results identical to ascending axes
        # for the same underlying data, in any number of dimensions.

        if ndims == 5 and method in {"cubic", "quintic"}:
            pytest.skip("too slow; OOM (quintic); or nearly so (cubic)")

        rng = np.random.default_rng(42)
        sample_low = 1
        sample_high = 5
        test_points = rng.uniform(sample_low, sample_high, size=(2, ndims))

        ascending_points = [np.linspace(sample_low, sample_high, 12)
                            for _ in range(ndims)]

        ascending_values = func(*np.meshgrid(*ascending_points,
                                             indexing="ij",
                                             sparse=True))

        ascending_interp = RegularGridInterpolator(ascending_points,
                                                   ascending_values,
                                                   method=method)
        ascending_result = ascending_interp(test_points)

        # reverse every axis and rebuild the data to match
        descending_points = [xi[::-1] for xi in ascending_points]
        descending_values = func(*np.meshgrid(*descending_points,
                                              indexing="ij",
                                              sparse=True))
        descending_interp = RegularGridInterpolator(descending_points,
                                                    descending_values,
                                                    method=method)
        descending_result = descending_interp(test_points)

        # must agree exactly, not merely to rounding
        assert_array_equal(ascending_result, descending_result)
+
+ def test_invalid_points_order(self):
+ def val_func_2d(x, y):
+ return 2 * x ** 3 + 3 * y ** 2
+
+ x = np.array([.5, 2., 0., 4., 5.5]) # not ascending or descending
+ y = np.array([.5, 2., 3., 4., 5.5])
+ points = (x, y)
+ values = val_func_2d(*np.meshgrid(*points, indexing='ij',
+ sparse=True))
+ match = "must be strictly ascending or descending"
+ with pytest.raises(ValueError, match=match):
+ RegularGridInterpolator(points, values)
+
+ @parametrize_rgi_interp_methods
+ def test_fill_value(self, method):
+ interp = RegularGridInterpolator([np.arange(6)], np.ones(6),
+ method=method, bounds_error=False)
+ assert np.isnan(interp([10]))
+
    @parametrize_rgi_interp_methods
    def test_nonscalar_values(self, method):
        # Trailing dimensions of `values` must interpolate componentwise.

        if method == "quintic":
            pytest.skip("Way too slow.")

        # Verify that non-scalar valued values also works
        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
            (0.0, 5.0, 10.0, 15.0, 20, 25.0)
        ] * 2

        rng = np.random.default_rng(1234)
        values = rng.random((6, 6, 6, 6, 8))
        sample = rng.random((7, 3, 4))

        interp = RegularGridInterpolator(points, values, method=method,
                                         bounds_error=False)
        v = interp(sample)
        assert_equal(v.shape, (7, 3, 8), err_msg=method)

        # compare against one scalar-valued interpolator per trailing
        # component; transpose moves the component axis last again
        vs = []
        for j in range(8):
            interp = RegularGridInterpolator(points, values[..., j],
                                             method=method,
                                             bounds_error=False)
            vs.append(interp(sample))
        v2 = np.array(vs).transpose(1, 2, 0)

        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
    @parametrize_rgi_interp_methods
    @pytest.mark.parametrize("flip_points", [False, True])
    def test_nonscalar_values_2(self, method, flip_points):
        # Trailing dimensions with distinct axis lengths, for both
        # ascending and descending grids.

        if method in {"cubic", "quintic"}:
            pytest.skip("Way too slow.")

        # Verify that non-scalar valued values also work : use different
        # lengths of axes to simplify tracing the internals
        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]

        # verify, that strictly decreasing dimensions work
        if flip_points:
            points = [tuple(reversed(p)) for p in points]

        rng = np.random.default_rng(1234)

        trailing_points = (3, 2)
        # NB: values has a `num_trailing_dims` trailing dimension
        values = rng.random((6, 7, 8, 9, *trailing_points))
        sample = rng.random(4)  # a single sample point !

        interp = RegularGridInterpolator(points, values, method=method,
                                         bounds_error=False)
        v = interp(sample)

        # v has a single sample point *per entry in the trailing dimensions*
        assert v.shape == (1, *trailing_points)

        # check the values, too : manually loop over the trailing dimensions
        vs = np.empty(values.shape[-2:])
        for i in range(values.shape[-2]):
            for j in range(values.shape[-1]):
                interp = RegularGridInterpolator(points, values[..., i, j],
                                                 method=method,
                                                 bounds_error=False)
                vs[i, j] = interp(sample).item()
        v2 = np.expand_dims(vs, axis=0)
        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
    def test_nonscalar_values_linear_2D(self):
        # Verify that non-scalar values work in the 2D fast path
        # (the specialised 2-D linear code must match the generic path).
        method = 'linear'
        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0), ]

        rng = np.random.default_rng(1234)

        trailing_points = (3, 4)
        # NB: values has a `num_trailing_dims` trailing dimension
        values = rng.random((6, 7, *trailing_points))
        sample = rng.random(2)  # a single sample point !

        interp = RegularGridInterpolator(points, values, method=method,
                                         bounds_error=False)
        v = interp(sample)

        # v has a single sample point *per entry in the trailing dimensions*
        assert v.shape == (1, *trailing_points)

        # check the values, too : manually loop over the trailing dimensions
        vs = np.empty(values.shape[-2:])
        for i in range(values.shape[-2]):
            for j in range(values.shape[-1]):
                interp = RegularGridInterpolator(points, values[..., i, j],
                                                 method=method,
                                                 bounds_error=False)
                vs[i, j] = interp(sample).item()
        v2 = np.expand_dims(vs, axis=0)
        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
    @pytest.mark.parametrize(
        "dtype",
        [np.float32, np.float64, np.complex64, np.complex128]
    )
    @pytest.mark.parametrize("xi_dtype", [np.float32, np.float64])
    def test_float32_values(self, dtype, xi_dtype):
        # regression test for gh-17718: values.dtype=float32 fails
        def f(x, y):
            return 2 * x**3 + 3 * y**2

        x = np.linspace(1, 4, 11)
        y = np.linspace(4, 7, 22)

        xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
        data = f(xg, yg)

        data = data.astype(dtype)

        interp = RegularGridInterpolator((x, y), data)

        pts = np.array([[2.1, 6.2],
                        [3.3, 5.2]], dtype=xi_dtype)

        # the values here are just what the call returns; the test checks
        # that the call succeeds at all, instead of failing with cython not
        # having a float32 kernel
        assert_allclose(interp(pts), [134.10469388, 153.40069388], atol=1e-7)
+
    def test_bad_solver(self):
        # `solver` / `solver_args` are only accepted by the spline methods;
        # wrong method, interface, or extra kwargs must raise.
        x = np.linspace(0, 3, 7)
        y = np.linspace(0, 3, 7)
        xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
        data = xg + yg

        # default method 'linear' does not accept 'solver'
        with assert_raises(ValueError):
            RegularGridInterpolator((x, y), data, solver=lambda x: x)

        with assert_raises(TypeError):
            # wrong solver interface
            RegularGridInterpolator(
                (x, y), data, method='slinear', solver=lambda x: x
            )

        with assert_raises(TypeError):
            # unknown argument
            RegularGridInterpolator(
                (x, y), data, method='slinear', solver=lambda x: x, woof='woof'
            )

        with assert_raises(TypeError):
            # unknown argument
            RegularGridInterpolator(
                (x, y), data, method='slinear', solver_args={'woof': 42}
            )
+
+
class MyValue:
    """Minimal duck-typed stand-in for an ndarray of the given shape.

    Supports only `ndim`, `shape` and indexing; deliberately refuses
    conversion to a real ndarray (`__array__` raises).
    """

    def __init__(self, shape):
        self.ndim = 2
        self.shape = shape
        size = np.prod(shape)
        self._v = np.arange(size).reshape(shape)

    def __getitem__(self, idx):
        # delegate all indexing to the wrapped ndarray
        return self._v[idx]

    def __array_interface__(self):
        # intentionally no usable array interface
        return None

    def __array__(self, dtype=None, copy=None):
        raise RuntimeError("No array representation")
+
+
+class TestInterpN:
+ def _sample_2d_data(self):
+ x = np.array([.5, 2., 3., 4., 5.5, 6.])
+ y = np.array([.5, 2., 3., 4., 5.5, 6.])
+ z = np.array(
+ [
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 3, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ [1, 2, 1, 2, 1, 1],
+ [1, 2, 2, 2, 1, 1],
+ ]
+ )
+ return x, y, z
+
+ def test_spline_2d(self):
+ x, y, z = self._sample_2d_data()
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
+ lut.ev(xi[:, 0], xi[:, 1]))
+
    @parametrize_rgi_interp_methods
    def test_list_input(self, method):
        # plain Python lists for points/values/xi must match ndarray inputs
        x, y, z = self._sample_2d_data()
        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
        v1 = interpn((x, y), z, xi, method=method)
        v2 = interpn(
            (x.tolist(), y.tolist()), z.tolist(), xi.tolist(), method=method
        )
        assert_allclose(v1, v2, err_msg=method)
+
+ def test_spline_2d_outofbounds(self):
+ x = np.array([.5, 2., 3., 4., 5.5])
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ lut = RectBivariateSpline(x, y, z)
+
+ xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+ actual = interpn((x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=999.99)
+ expected = lut.ev(xi[:, 0], xi[:, 1])
+ expected[2:4] = 999.99
+ assert_array_almost_equal(actual, expected)
+
+ # no extrapolation for splinef2d
+ assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
+ bounds_error=False, fill_value=None)
+
+ def _sample_4d_data(self):
+ points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+ values = np.asarray([0., .5, 1.])
+ values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+ values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+ values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+ values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+ values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+ return points, values
+
+ def test_linear_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values)
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="linear")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_linear_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="linear",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_nearest_4d(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ interp_rg = RegularGridInterpolator(points, values, method="nearest")
+ sample = np.asarray([[0.1, 0.1, 10., 9.]])
+ wanted = interpn(points, values, sample, method="nearest")
+ assert_array_almost_equal(interp_rg(sample), wanted)
+
+ def test_4d_nearest_outofbounds(self):
+ # create a 4-D grid of 3 points in each dimension
+ points, values = self._sample_4d_data()
+ sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+ wanted = 999.99
+ actual = interpn(points, values, sample, method="nearest",
+ bounds_error=False, fill_value=999.99)
+ assert_array_almost_equal(actual, wanted)
+
+ def test_xi_1d(self):
+ # verify that 1-D xi works as expected
+ points, values = self._sample_4d_data()
+ sample = np.asarray([0.1, 0.1, 10., 9.])
+ v1 = interpn(points, values, sample, bounds_error=False)
+ v2 = interpn(points, values, sample[None,:], bounds_error=False)
+ assert_allclose(v1, v2)
+
+ def test_xi_nd(self):
+ # verify that higher-d xi works as expected
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ sample = np.random.rand(2, 3, 4)
+
+ v1 = interpn(points, values, sample, method='nearest',
+ bounds_error=False)
+ assert_equal(v1.shape, (2, 3))
+
+ v2 = interpn(points, values, sample.reshape(-1, 4),
+ method='nearest', bounds_error=False)
+ assert_allclose(v1, v2.reshape(v1.shape))
+
    @parametrize_rgi_interp_methods
    def test_xi_broadcast(self, method):
        # verify that the interpolators broadcast xi
        x, y, values = self._sample_2d_data()
        points = (x, y)

        xi = np.linspace(0, 1, 2)
        yi = np.linspace(0, 3, 3)

        # broadcastable tuple: (2, 1) x (1, 3) coordinates -> (2, 3) result
        sample = (xi[:, None], yi[None, :])
        v1 = interpn(points, values, sample, method=method, bounds_error=False)
        assert_equal(v1.shape, (2, 3))

        # the same points given explicitly as an (N, 2) array
        xx, yy = np.meshgrid(xi, yi)
        sample = np.c_[xx.T.ravel(), yy.T.ravel()]

        v2 = interpn(points, values, sample,
                     method=method, bounds_error=False)
        assert_allclose(v1, v2.reshape(v1.shape))
+
    @parametrize_rgi_interp_methods
    def test_nonscalar_values(self, method):
        # Trailing dimensions of `values` must interpolate componentwise.

        if method == "quintic":
            pytest.skip("Way too slow.")

        # Verify that non-scalar valued values also works
        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
            (0.0, 5.0, 10.0, 15.0, 20, 25.0)
        ] * 2

        rng = np.random.default_rng(1234)
        values = rng.random((6, 6, 6, 6, 8))
        sample = rng.random((7, 3, 4))

        v = interpn(points, values, sample, method=method,
                    bounds_error=False)
        assert_equal(v.shape, (7, 3, 8), err_msg=method)

        # compare against one scalar-valued call per trailing component;
        # transpose moves the component axis last again
        vs = [interpn(points, values[..., j], sample, method=method,
                      bounds_error=False) for j in range(8)]
        v2 = np.array(vs).transpose(1, 2, 0)

        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
    @parametrize_rgi_interp_methods
    def test_nonscalar_values_2(self, method):
        # Trailing dimensions with distinct axis lengths, via interpn.

        if method in {"cubic", "quintic"}:
            pytest.skip("Way too slow.")

        # Verify that non-scalar valued values also work : use different
        # lengths of axes to simplify tracing the internals
        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]

        rng = np.random.default_rng(1234)

        trailing_points = (3, 2)
        # NB: values has a `num_trailing_dims` trailing dimension
        values = rng.random((6, 7, 8, 9, *trailing_points))
        sample = rng.random(4)  # a single sample point !

        v = interpn(points, values, sample, method=method, bounds_error=False)

        # v has a single sample point *per entry in the trailing dimensions*
        assert v.shape == (1, *trailing_points)

        # check the values, too : manually loop over the trailing dimensions
        vs = [[
            interpn(points, values[..., i, j], sample, method=method,
                    bounds_error=False) for i in range(values.shape[-2])
        ] for j in range(values.shape[-1])]

        # .T compensates for the comprehension looping j (last dim) first
        assert_allclose(v, np.asarray(vs).T, atol=1e-14, err_msg=method)
+
+ def test_non_scalar_values_splinef2d(self):
+ # Vector-valued splines supported with fitpack
+ points, values = self._sample_4d_data()
+
+ np.random.seed(1234)
+ values = np.random.rand(3, 3, 3, 3, 6)
+ sample = np.random.rand(7, 11, 4)
+ assert_raises(ValueError, interpn, points, values, sample,
+ method='splinef2d')
+
    @parametrize_rgi_interp_methods
    def test_complex(self, method):
        # complex data must interpolate as real and imaginary parts separately
        if method == "pchip":
            pytest.skip("pchip does not make sense for complex data")

        x, y, values = self._sample_2d_data()
        points = (x, y)
        values = values - 2j*values

        sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                           [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T

        v1 = interpn(points, values, sample, method=method)
        v2r = interpn(points, values.real, sample, method=method)
        v2i = interpn(points, values.imag, sample, method=method)
        v2 = v2r + 1j*v2i

        assert_allclose(v1, v2)
+
+ def test_complex_pchip(self):
+ # Complex-valued data deprecated for pchip
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ with pytest.deprecated_call(match='complex'):
+ interpn(points, values, sample, method='pchip')
+
+ def test_complex_spline2fd(self):
+ # Complex-valued data not supported by spline2fd
+ x, y, values = self._sample_2d_data()
+ points = (x, y)
+ values = values - 2j*values
+
+ sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+ with assert_warns(ComplexWarning):
+ interpn(points, values, sample, method='splinef2d')
+
+ @pytest.mark.parametrize(
+ "method",
+ ["linear", "nearest"]
+ )
+ def test_duck_typed_values(self, method):
+ x = np.linspace(0, 2, 5)
+ y = np.linspace(0, 1, 7)
+
+ values = MyValue((5, 7))
+
+ v1 = interpn((x, y), values, [0.4, 0.7], method=method)
+ v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
+ assert_allclose(v1, v2)
+
    @parametrize_rgi_interp_methods
    def test_matrix_input(self, method):
        # np.matrix values must behave exactly like the equivalent ndarray
        x = np.linspace(0, 2, 6)
        y = np.linspace(0, 1, 7)

        values = matrix(np.random.rand(6, 7))

        sample = np.random.rand(3, 7, 2)

        v1 = interpn((x, y), values, sample, method=method)
        v2 = interpn((x, y), np.asarray(values), sample, method=method)
        assert_allclose(v1, v2)
+
+ def test_length_one_axis(self):
+ # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
+ # Along the axis it's linear interpolation; away from the length-1
+ # axis, it's an extrapolation, so fill_value should be used.
+
+ values = np.array([[0.1, 1, 10]])
+ xi = np.array([[1, 2.2], [1, 3.2], [1, 3.8]])
+
+ res = interpn(([1], [2, 3, 4]), values, xi)
+ wanted = [0.9*0.2 + 0.1, # on [2, 3) it's 0.9*(x-2) + 0.1
+ 9*0.2 + 1, # on [3, 4] it's 9*(x-3) + 1
+ 9*0.8 + 1]
+
+ assert_allclose(res, wanted, atol=1e-15)
+
+ # check extrapolation
+ xi = np.array([[1.1, 2.2], [1.5, 3.2], [-2.3, 3.8]])
+ res = interpn(([1], [2, 3, 4]), values, xi,
+ bounds_error=False, fill_value=None)
+
+ assert_allclose(res, wanted, atol=1e-15)
+
+ def test_descending_points(self):
+ def value_func_4d(x, y, z, a):
+ return 2 * x ** 3 + 3 * y ** 2 - z - a
+
+ x1 = np.array([0, 1, 2, 3])
+ x2 = np.array([0, 10, 20, 30])
+ x3 = np.array([0, 10, 20, 30])
+ x4 = np.array([0, .1, .2, .30])
+ points = (x1, x2, x3, x4)
+ values = value_func_4d(
+ *np.meshgrid(*points, indexing='ij', sparse=True))
+ pts = (0.1, 0.3, np.transpose(np.linspace(0, 30, 4)),
+ np.linspace(0, 0.3, 4))
+ correct_result = interpn(points, values, pts)
+
+ x1_descend = x1[::-1]
+ x2_descend = x2[::-1]
+ x3_descend = x3[::-1]
+ x4_descend = x4[::-1]
+ points_shuffled = (x1_descend, x2_descend, x3_descend, x4_descend)
+ values_shuffled = value_func_4d(
+ *np.meshgrid(*points_shuffled, indexing='ij', sparse=True))
+ test_result = interpn(points_shuffled, values_shuffled, pts)
+
+ assert_array_equal(correct_result, test_result)
+
+ def test_invalid_points_order(self):
+ x = np.array([.5, 2., 0., 4., 5.5]) # not ascending or descending
+ y = np.array([.5, 2., 3., 4., 5.5])
+ z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+ [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+ xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+ [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+
+ match = "must be strictly ascending or descending"
+ with pytest.raises(ValueError, match=match):
+ interpn((x, y), z, xi)
+
+ def test_invalid_xi_dimensions(self):
+ # https://github.com/scipy/scipy/issues/16519
+ points = [(0, 1)]
+ values = [0, 1]
+ xi = np.ones((1, 1, 3))
+ msg = ("The requested sample points xi have dimension 3, but this "
+ "RegularGridInterpolator has dimension 1")
+ with assert_raises(ValueError, match=msg):
+ interpn(points, values, xi)
+
+ def test_readonly_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ x = np.linspace(0, 4, 5)
+ y = np.linspace(0, 5, 6)
+ z = np.linspace(0, 6, 7)
+ points = (x, y, z)
+ values = np.ones((5, 6, 7))
+ point = np.array([2.21, 3.12, 1.15])
+ for d in points:
+ d.flags.writeable = False
+ values.flags.writeable = False
+ point.flags.writeable = False
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ def test_2d_readonly_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ # test special 2d case
+ x = np.linspace(0, 4, 5)
+ y = np.linspace(0, 5, 6)
+ points = (x, y)
+ values = np.ones((5, 6))
+ point = np.array([2.21, 3.12])
+ for d in points:
+ d.flags.writeable = False
+ values.flags.writeable = False
+ point.flags.writeable = False
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ def test_non_c_contiguous_grid(self):
+ # https://github.com/scipy/scipy/issues/17716
+ x = np.linspace(0, 4, 5)
+ x = np.vstack((x, np.empty_like(x))).T.copy()[:, 0]
+ assert not x.flags.c_contiguous
+ y = np.linspace(0, 5, 6)
+ z = np.linspace(0, 6, 7)
+ points = (x, y, z)
+ values = np.ones((5, 6, 7))
+ point = np.array([2.21, 3.12, 1.15])
+ interpn(points, values, point)
+ RegularGridInterpolator(points, values)(point)
+
+ @pytest.mark.parametrize("dtype", ['>f8', '