applied-ai-018 committed on
Commit
a8c1dd3
·
verified ·
1 Parent(s): c1dec76

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/1.word_embeddings.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/exp_avg.pt +3 -0
  3. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py +8 -0
  4. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py +290 -0
  14. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py +479 -0
  15. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py +440 -0
  16. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  17. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py +748 -0
  18. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py +224 -0
  19. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py +574 -0
  20. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py +601 -0
  21. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py +0 -0
  22. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py +1135 -0
  26. venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py +37 -0
  27. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/scipy/spatial/__init__.py +129 -0
  31. venv/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi +214 -0
  32. venv/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  33. venv/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so +0 -0
  34. venv/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py +240 -0
  35. venv/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/scipy/spatial/_kdtree.py +920 -0
  37. venv/lib/python3.10/site-packages/scipy/spatial/_plotutils.py +270 -0
  38. venv/lib/python3.10/site-packages/scipy/spatial/_procrustes.py +132 -0
  39. venv/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi +213 -0
  40. venv/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py +341 -0
  41. venv/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so +0 -0
  42. venv/lib/python3.10/site-packages/scipy/spatial/_voronoi.pyi +4 -0
  43. venv/lib/python3.10/site-packages/scipy/spatial/ckdtree.py +27 -0
  44. venv/lib/python3.10/site-packages/scipy/spatial/distance.py +2993 -0
  45. venv/lib/python3.10/site-packages/scipy/spatial/distance.pyi +211 -0
  46. venv/lib/python3.10/site-packages/scipy/spatial/kdtree.py +26 -0
  47. venv/lib/python3.10/site-packages/scipy/spatial/qhull.py +25 -0
  48. venv/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt +38 -0
  49. venv/lib/python3.10/site-packages/scipy/spatial/tests/__init__.py +0 -0
  50. venv/lib/python3.10/site-packages/scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/1.word_embeddings.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c26fa72452f81276635a61227ef3a65c6c432ce6d6abff91652b240eb0601c7
3
+ size 415237325
ckpts/universal/global_step40/zero/6.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce25be7492d38d2401e256ae8089ef032633f16bfa045b27e9a74b1a60828331
3
+ size 50332828
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """Suite of ODE solvers implemented in Python."""
2
+ from .ivp import solve_ivp
3
+ from .rk import RK23, RK45, DOP853
4
+ from .radau import Radau
5
+ from .bdf import BDF
6
+ from .lsoda import LSODA
7
+ from .common import OdeSolution
8
+ from .base import DenseOutput, OdeSolver
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (551 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc ADDED
Binary file (4.93 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc ADDED
Binary file (29.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc ADDED
Binary file (8.51 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
def check_arguments(fun, y0, support_complex):
    """Validate arguments common to all solvers.

    Coerces ``y0`` to a 1-D float (or complex, when supported) ndarray and
    wraps ``fun`` so that its output is always an ndarray of the matching
    dtype. Returns the pair ``(fun_wrapped, y0)``.
    """
    y0 = np.asarray(y0)
    is_complex = np.issubdtype(y0.dtype, np.complexfloating)
    if is_complex and not support_complex:
        raise ValueError("`y0` is complex, but the chosen solver does "
                         "not support integration in a complex domain.")
    target_dtype = complex if is_complex else float
    y0 = y0.astype(target_dtype, copy=False)

    if y0.ndim != 1:
        raise ValueError("`y0` must be 1-dimensional.")

    if not np.isfinite(y0).all():
        raise ValueError("All components of the initial state `y0` must be finite.")

    def fun_wrapped(t, y):
        # Guarantee an ndarray of the agreed dtype regardless of what the
        # user-supplied callable returns (list, scalar array, wrong dtype).
        return np.asarray(fun(t, y), dtype=target_dtype)

    return fun_wrapped, y0
26
+
27
+
28
class OdeSolver:
    """Base class for ODE solvers.

    In order to implement a new solver you need to follow the guidelines:

        1. A constructor must accept parameters presented in the base class
           (listed below) along with any other parameters specific to a solver.
        2. A constructor must accept arbitrary extraneous arguments
           ``**extraneous``, but warn that these arguments are irrelevant
           using `common.warn_extraneous` function. Do not pass these
           arguments to the base class.
        3. A solver must implement a private method `_step_impl(self)` which
           propagates a solver one step further. It must return tuple
           ``(success, message)``, where ``success`` is a boolean indicating
           whether a step was successful, and ``message`` is a string
           containing description of a failure if a step failed or None
           otherwise.
        4. A solver must implement a private method `_dense_output_impl(self)`,
           which returns a `DenseOutput` object covering the last successful
           step.
        5. A solver must have attributes listed below in Attributes section.
           Note that ``t_old`` and ``step_size`` are updated automatically.
        6. Use `fun(self, t, y)` method for the system rhs evaluation, this
           way the number of function evaluations (`nfev`) will be tracked
           automatically.
        7. For convenience, a base class provides `fun_single(self, t, y)` and
           `fun_vectorized(self, t, y)` for evaluating the rhs in
           non-vectorized and vectorized fashions respectively (regardless of
           how `fun` from the constructor is implemented). These calls don't
           increment `nfev`.
        8. If a solver uses a Jacobian matrix and LU decompositions, it should
           track the number of Jacobian evaluations (`njev`) and the number of
           LU decompositions (`nlu`).
        9. By convention, the function evaluations used to compute a finite
           difference approximation of the Jacobian should not be counted in
           `nfev`, thus use `fun_single(self, t, y)` or
           `fun_vectorized(self, t, y)` when computing a finite difference
           approximation of the Jacobian.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time --- the integration won't continue beyond it. It also
        determines the direction of the integration.
    vectorized : bool
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for other methods. It can also
        result in slower overall execution for 'Radau' and 'BDF' in some
        circumstances (e.g. small ``len(y0)``).
    support_complex : bool, optional
        Whether integration in a complex domain should be supported.
        Generally determined by a derived solver class capabilities.
        Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of the system's rhs evaluations.
    njev : int
        Number of the Jacobian evaluations.
    nlu : int
        Number of LU decompositions.
    """
    # Standard failure message shared by subclasses when a step would have
    # to be smaller than machine resolution around the current time.
    TOO_SMALL_STEP = "Required step size is less than spacing between numbers."

    def __init__(self, fun, t0, y0, t_bound, vectorized,
                 support_complex=False):
        self.t_old = None
        self.t = t0
        # Validate y0 and wrap fun so it always returns an ndarray of the
        # agreed dtype (float, or complex when support_complex is True).
        self._fun, self.y = check_arguments(fun, y0, support_complex)
        self.t_bound = t_bound
        self.vectorized = vectorized

        if vectorized:
            def fun_single(t, y):
                # Feed a single state through the vectorized rhs as one column.
                return self._fun(t, y[:, None]).ravel()
            fun_vectorized = self._fun
        else:
            fun_single = self._fun

            def fun_vectorized(t, y):
                # Emulate vectorization by evaluating column by column.
                f = np.empty_like(y)
                for i, yi in enumerate(y.T):
                    f[:, i] = self._fun(t, yi)
                return f

        def fun(t, y):
            # Public rhs entry point: counts evaluations in `nfev`.
            self.nfev += 1
            return self.fun_single(t, y)

        self.fun = fun
        self.fun_single = fun_single
        self.fun_vectorized = fun_vectorized

        # Direction of integration; +1 for a degenerate zero-length interval.
        self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
        self.n = self.y.size
        self.status = 'running'

        self.nfev = 0
        self.njev = 0
        self.nlu = 0

    @property
    def step_size(self):
        # Size of the last successful step; None before the first step.
        if self.t_old is None:
            return None
        else:
            return np.abs(self.t - self.t_old)

    def step(self):
        """Perform one integration step.

        Returns
        -------
        message : string or None
            Report from the solver. Typically a reason for a failure if
            `self.status` is 'failed' after the step was taken or None
            otherwise.
        """
        if self.status != 'running':
            raise RuntimeError("Attempt to step on a failed or finished "
                               "solver.")

        if self.n == 0 or self.t == self.t_bound:
            # Handle corner cases of empty solver or no integration.
            self.t_old = self.t
            self.t = self.t_bound
            message = None
            self.status = 'finished'
        else:
            t = self.t
            success, message = self._step_impl()

            if not success:
                self.status = 'failed'
            else:
                self.t_old = t
                # Reached or passed t_bound (in the direction of integration).
                if self.direction * (self.t - self.t_bound) >= 0:
                    self.status = 'finished'

        return message

    def dense_output(self):
        """Compute a local interpolant over the last successful step.

        Returns
        -------
        sol : `DenseOutput`
            Local interpolant over the last successful step.
        """
        if self.t_old is None:
            raise RuntimeError("Dense output is available after a successful "
                               "step was made.")

        if self.n == 0 or self.t == self.t_old:
            # Handle corner cases of empty solver and no integration.
            return ConstantDenseOutput(self.t_old, self.t, self.y)
        else:
            return self._dense_output_impl()

    def _step_impl(self):
        # To be provided by subclasses; see class docstring, guideline 3.
        raise NotImplementedError

    def _dense_output_impl(self):
        # To be provided by subclasses; see class docstring, guideline 4.
        raise NotImplementedError
231
+
232
+
233
class DenseOutput:
    """Base class for a local interpolant over one step made by an ODE solver.

    The interpolant is intended for use between `t_min` and `t_max` (see
    Attributes below). Evaluating outside this interval is not forbidden,
    but the accuracy is not guaranteed.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, t_old, t):
        self.t_old = t_old
        self.t = t
        # Normalize the interval bounds: integration may run in either
        # direction, so t may lie on either side of t_old.
        self.t_min = min(t, t_old)
        self.t_max = max(t, t_old)

    def __call__(self, t):
        """Evaluate the interpolant.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate the solution at.

        Returns
        -------
        y : ndarray, shape (n,) or (n, n_points)
            Computed values. Shape depends on whether `t` was a scalar or a
            1-D array.
        """
        t = np.asarray(t)
        if t.ndim > 1:
            raise ValueError("`t` must be a float or a 1-D array.")
        return self._call_impl(t)

    def _call_impl(self, t):
        # Subclasses implement the actual evaluation.
        raise NotImplementedError
272
+
273
+
274
class ConstantDenseOutput(DenseOutput):
    """Interpolant that returns a fixed value everywhere.

    Used for degenerate integration cases: equal integration limits or a
    system with 0 equations.
    """
    def __init__(self, t_old, t, value):
        super().__init__(t_old, t)
        self.value = value

    def _call_impl(self, t):
        if t.ndim == 0:
            return self.value
        # Broadcast the constant state across every requested time point.
        out = np.empty((self.value.shape[0], t.shape[0]))
        out[:] = self.value[:, None]
        return out
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py ADDED
@@ -0,0 +1,479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import issparse, csc_matrix, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, EPS, num_jac, validate_first_step,
8
+ warn_extraneous)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
+
12
+ MAX_ORDER = 5
13
+ NEWTON_MAXITER = 4
14
+ MIN_FACTOR = 0.2
15
+ MAX_FACTOR = 10
16
+
17
+
18
def compute_R(order, factor):
    """Return the matrix used to rescale the BDF differences array.

    The (order + 1) x (order + 1) matrix follows the quasi-constant
    step-size formulation: entry (i, j) is the product of
    ``(m - 1 - factor * j) / m`` for m = 1..i, with the first row all ones.
    """
    rows = np.arange(1, order + 1)[:, None]
    cols = np.arange(1, order + 1)
    ratios = np.zeros((order + 1, order + 1))
    ratios[1:, 1:] = (rows - 1 - factor * cols) / rows
    ratios[0] = 1
    # Cumulative products down each column build the final matrix.
    return np.cumprod(ratios, axis=0)
26
+
27
+
28
def change_D(D, order, factor):
    """Rescale the differences array `D` in place after a step-size change.

    Only the first ``order + 1`` rows are affected; the combined transform
    R(factor) @ R(1) maps differences for the old step to the new one.
    """
    transform = compute_R(order, factor).dot(compute_R(order, 1))
    D[:order + 1] = np.dot(transform.T, D[:order + 1])
34
+
35
+
36
def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method.

    Runs a simplified Newton iteration with a frozen factorization `LU`.
    Returns ``(converged, n_iter, y, d)`` where ``d`` is the accumulated
    correction to the predictor ``y_predict``.
    """
    d = 0  # accumulated correction added to the predictor
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        if dy_norm_old is None:
            rate = None
        else:
            # Estimated linear convergence rate of the iteration.
            rate = dy_norm / dy_norm_old

        # Give up early if diverging (rate >= 1) or if the error projected
        # over the remaining iterations would still exceed the tolerance.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        # Converged when the update vanished or the geometric tail of the
        # iteration error is below the tolerance.
        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d
70
+
71
+
72
class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in [3]_.
    An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to y,
        required by this method. The Jacobian matrix has shape (n, n) and its
        element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
        elements in *each* row, providing the sparsity structure will greatly
        speed up the computations [4]_. A zero entry means that a corresponding
        element in the Jacobian is always zero. If None (default), the Jacobian
        is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by this method, but may result in slower
        execution overall in some circumstances (e.g. small ``len(y0)``).

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Tolerance for the inner Newton iteration, tied to rtol.
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Pick sparse or dense LU machinery once, based on the Jacobian type.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # NDF modification coefficients and derived constants from
        # Shampine & Reichelt [2]_ (kappa = 0 recovers plain BDF).
        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

        # Backward differences array; rows 0..order are in use at any time,
        # with two spare rows for the order-raising error estimate.
        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0  # consecutive steps taken at the same h/order
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        """Normalize the user-supplied Jacobian into ``(callable_or_None, J0)``."""
        t0 = self.t
        y0 = self.y

        if jac is None:
            # Finite-difference approximation, optionally exploiting sparsity.
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian supplied as an array or sparse matrix.
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        t = self.t
        D = self.D

        max_step = self.max_step
        # Smallest step distinguishable from t in floating point.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip the step so we never integrate past t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)

            # Predictor: sum of backward differences.
            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    # Retry once with a fresh Jacobian before reducing h.
                    if current_jac:
                        break
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            # Safety factor shrinks when Newton needed many iterations.
            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained difference for previous interpolating polynomial and
        # d = D^{k + 1} y_n. Thus this elegant code follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]

        # Only consider changing the order after order + 1 equal steps.
        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

        # Pick the order (current - 1, current, current + 1) that allows
        # the largest step-size increase.
        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        with np.errstate(divide='ignore'):
            factors = error_norms ** (-1 / np.arange(order, order + 3))

        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())
455
+
456
+
457
class BdfDenseOutput(DenseOutput):
    """Interpolant over one BDF step built from the backward differences.

    Evaluates the interpolating polynomial in product (Newton-like) form,
    anchored at points spaced ``h`` apart behind ``t``.
    """
    def __init__(self, t_old, t, h, order, D):
        super().__init__(t_old, t)
        self.order = order
        self.t_shift = self.t - h * np.arange(self.order)
        self.denom = h * (1 + np.arange(self.order))
        self.D = D

    def _call_impl(self, t):
        # Cumulative products of (t - t_shift_m) / (h * (m + 1)) give the
        # basis factors multiplying each backward difference.
        if t.ndim == 0:
            factors = np.cumprod((t - self.t_shift) / self.denom)
        else:
            factors = np.cumprod(
                (t - self.t_shift[:, None]) / self.denom[:, None], axis=0)

        y = np.dot(self.D[1:].T, factors)
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py ADDED
@@ -0,0 +1,440 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import groupby
2
+ from warnings import warn
3
+ import numpy as np
4
+ from scipy.sparse import find, coo_matrix
5
+
6
+
7
+ EPS = np.finfo(float).eps
8
+
9
+
10
+ def validate_first_step(first_step, t0, t_bound):
11
+ """Assert that first_step is valid and return it."""
12
+ if first_step <= 0:
13
+ raise ValueError("`first_step` must be positive.")
14
+ if first_step > np.abs(t_bound - t0):
15
+ raise ValueError("`first_step` exceeds bounds.")
16
+ return first_step
17
+
18
+
19
+ def validate_max_step(max_step):
20
+ """Assert that max_Step is valid and return it."""
21
+ if max_step <= 0:
22
+ raise ValueError("`max_step` must be positive.")
23
+ return max_step
24
+
25
+
26
+ def warn_extraneous(extraneous):
27
+ """Display a warning for extraneous keyword arguments.
28
+
29
+ The initializer of each solver class is expected to collect keyword
30
+ arguments that it doesn't understand and warn about them. This function
31
+ prints a warning for each key in the supplied dictionary.
32
+
33
+ Parameters
34
+ ----------
35
+ extraneous : dict
36
+ Extraneous keyword arguments
37
+ """
38
+ if extraneous:
39
+ warn("The following arguments have no effect for a chosen solver: {}."
40
+ .format(", ".join(f"`{x}`" for x in extraneous)),
41
+ stacklevel=3)
42
+
43
+
44
+ def validate_tol(rtol, atol, n):
45
+ """Validate tolerance values."""
46
+
47
+ if np.any(rtol < 100 * EPS):
48
+ warn("At least one element of `rtol` is too small. "
49
+ f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.",
50
+ stacklevel=3)
51
+ rtol = np.maximum(rtol, 100 * EPS)
52
+
53
+ atol = np.asarray(atol)
54
+ if atol.ndim > 0 and atol.shape != (n,):
55
+ raise ValueError("`atol` has wrong shape.")
56
+
57
+ if np.any(atol < 0):
58
+ raise ValueError("`atol` must be positive.")
59
+
60
+ return rtol, atol
61
+
62
+
63
+ def norm(x):
64
+ """Compute RMS norm."""
65
+ return np.linalg.norm(x) / x.size ** 0.5
66
+
67
+
68
+ def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
69
+ """Empirically select a good initial step.
70
+
71
+ The algorithm is described in [1]_.
72
+
73
+ Parameters
74
+ ----------
75
+ fun : callable
76
+ Right-hand side of the system.
77
+ t0 : float
78
+ Initial value of the independent variable.
79
+ y0 : ndarray, shape (n,)
80
+ Initial value of the dependent variable.
81
+ f0 : ndarray, shape (n,)
82
+ Initial value of the derivative, i.e., ``fun(t0, y0)``.
83
+ direction : float
84
+ Integration direction.
85
+ order : float
86
+ Error estimator order. It means that the error controlled by the
87
+ algorithm is proportional to ``step_size ** (order + 1)`.
88
+ rtol : float
89
+ Desired relative tolerance.
90
+ atol : float
91
+ Desired absolute tolerance.
92
+
93
+ Returns
94
+ -------
95
+ h_abs : float
96
+ Absolute value of the suggested initial step.
97
+
98
+ References
99
+ ----------
100
+ .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
101
+ Equations I: Nonstiff Problems", Sec. II.4.
102
+ """
103
+ if y0.size == 0:
104
+ return np.inf
105
+
106
+ scale = atol + np.abs(y0) * rtol
107
+ d0 = norm(y0 / scale)
108
+ d1 = norm(f0 / scale)
109
+ if d0 < 1e-5 or d1 < 1e-5:
110
+ h0 = 1e-6
111
+ else:
112
+ h0 = 0.01 * d0 / d1
113
+
114
+ y1 = y0 + h0 * direction * f0
115
+ f1 = fun(t0 + h0 * direction, y1)
116
+ d2 = norm((f1 - f0) / scale) / h0
117
+
118
+ if d1 <= 1e-15 and d2 <= 1e-15:
119
+ h1 = max(1e-6, h0 * 1e-3)
120
+ else:
121
+ h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
122
+
123
+ return min(100 * h0, h1)
124
+
125
+
126
+ class OdeSolution:
127
+ """Continuous ODE solution.
128
+
129
+ It is organized as a collection of `DenseOutput` objects which represent
130
+ local interpolants. It provides an algorithm to select a right interpolant
131
+ for each given point.
132
+
133
+ The interpolants cover the range between `t_min` and `t_max` (see
134
+ Attributes below). Evaluation outside this interval is not forbidden, but
135
+ the accuracy is not guaranteed.
136
+
137
+ When evaluating at a breakpoint (one of the values in `ts`) a segment with
138
+ the lower index is selected.
139
+
140
+ Parameters
141
+ ----------
142
+ ts : array_like, shape (n_segments + 1,)
143
+ Time instants between which local interpolants are defined. Must
144
+ be strictly increasing or decreasing (zero segment with two points is
145
+ also allowed).
146
+ interpolants : list of DenseOutput with n_segments elements
147
+ Local interpolants. An i-th interpolant is assumed to be defined
148
+ between ``ts[i]`` and ``ts[i + 1]``.
149
+ alt_segment : boolean
150
+ Requests the alternative interpolant segment selection scheme. At each
151
+ solver integration point, two interpolant segments are available. The
152
+ default (False) and alternative (True) behaviours select the segment
153
+ for which the requested time corresponded to ``t`` and ``t_old``,
154
+ respectively. This functionality is only relevant for testing the
155
+ interpolants' accuracy: different integrators use different
156
+ construction strategies.
157
+
158
+ Attributes
159
+ ----------
160
+ t_min, t_max : float
161
+ Time range of the interpolation.
162
+ """
163
+ def __init__(self, ts, interpolants, alt_segment=False):
164
+ ts = np.asarray(ts)
165
+ d = np.diff(ts)
166
+ # The first case covers integration on zero segment.
167
+ if not ((ts.size == 2 and ts[0] == ts[-1])
168
+ or np.all(d > 0) or np.all(d < 0)):
169
+ raise ValueError("`ts` must be strictly increasing or decreasing.")
170
+
171
+ self.n_segments = len(interpolants)
172
+ if ts.shape != (self.n_segments + 1,):
173
+ raise ValueError("Numbers of time stamps and interpolants "
174
+ "don't match.")
175
+
176
+ self.ts = ts
177
+ self.interpolants = interpolants
178
+ if ts[-1] >= ts[0]:
179
+ self.t_min = ts[0]
180
+ self.t_max = ts[-1]
181
+ self.ascending = True
182
+ self.side = "right" if alt_segment else "left"
183
+ self.ts_sorted = ts
184
+ else:
185
+ self.t_min = ts[-1]
186
+ self.t_max = ts[0]
187
+ self.ascending = False
188
+ self.side = "left" if alt_segment else "right"
189
+ self.ts_sorted = ts[::-1]
190
+
191
+ def _call_single(self, t):
192
+ # Here we preserve a certain symmetry that when t is in self.ts,
193
+ # if alt_segment=False, then we prioritize a segment with a lower
194
+ # index.
195
+ ind = np.searchsorted(self.ts_sorted, t, side=self.side)
196
+
197
+ segment = min(max(ind - 1, 0), self.n_segments - 1)
198
+ if not self.ascending:
199
+ segment = self.n_segments - 1 - segment
200
+
201
+ return self.interpolants[segment](t)
202
+
203
+ def __call__(self, t):
204
+ """Evaluate the solution.
205
+
206
+ Parameters
207
+ ----------
208
+ t : float or array_like with shape (n_points,)
209
+ Points to evaluate at.
210
+
211
+ Returns
212
+ -------
213
+ y : ndarray, shape (n_states,) or (n_states, n_points)
214
+ Computed values. Shape depends on whether `t` is a scalar or a
215
+ 1-D array.
216
+ """
217
+ t = np.asarray(t)
218
+
219
+ if t.ndim == 0:
220
+ return self._call_single(t)
221
+
222
+ order = np.argsort(t)
223
+ reverse = np.empty_like(order)
224
+ reverse[order] = np.arange(order.shape[0])
225
+ t_sorted = t[order]
226
+
227
+ # See comment in self._call_single.
228
+ segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side)
229
+ segments -= 1
230
+ segments[segments < 0] = 0
231
+ segments[segments > self.n_segments - 1] = self.n_segments - 1
232
+ if not self.ascending:
233
+ segments = self.n_segments - 1 - segments
234
+
235
+ ys = []
236
+ group_start = 0
237
+ for segment, group in groupby(segments):
238
+ group_end = group_start + len(list(group))
239
+ y = self.interpolants[segment](t_sorted[group_start:group_end])
240
+ ys.append(y)
241
+ group_start = group_end
242
+
243
+ ys = np.hstack(ys)
244
+ ys = ys[:, reverse]
245
+
246
+ return ys
247
+
248
+
249
+ NUM_JAC_DIFF_REJECT = EPS ** 0.875
250
+ NUM_JAC_DIFF_SMALL = EPS ** 0.75
251
+ NUM_JAC_DIFF_BIG = EPS ** 0.25
252
+ NUM_JAC_MIN_FACTOR = 1e3 * EPS
253
+ NUM_JAC_FACTOR_INCREASE = 10
254
+ NUM_JAC_FACTOR_DECREASE = 0.1
255
+
256
+
257
+ def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
258
+ """Finite differences Jacobian approximation tailored for ODE solvers.
259
+
260
+ This function computes finite difference approximation to the Jacobian
261
+ matrix of `fun` with respect to `y` using forward differences.
262
+ The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
263
+ ``d f_i / d y_j``.
264
+
265
+ A special feature of this function is the ability to correct the step
266
+ size from iteration to iteration. The main idea is to keep the finite
267
+ difference significantly separated from its round-off error which
268
+ approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a
269
+ huge error and assures that the estimated derivative are reasonably close
270
+ to the true values (i.e., the finite difference approximation is at least
271
+ qualitatively reflects the structure of the true Jacobian).
272
+
273
+ Parameters
274
+ ----------
275
+ fun : callable
276
+ Right-hand side of the system implemented in a vectorized fashion.
277
+ t : float
278
+ Current time.
279
+ y : ndarray, shape (n,)
280
+ Current state.
281
+ f : ndarray, shape (n,)
282
+ Value of the right hand side at (t, y).
283
+ threshold : float
284
+ Threshold for `y` value used for computing the step size as
285
+ ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
286
+ absolute tolerance (atol) for a solver should be passed as `threshold`.
287
+ factor : ndarray with shape (n,) or None
288
+ Factor to use for computing the step size. Pass None for the very
289
+ evaluation, then use the value returned from this function.
290
+ sparsity : tuple (structure, groups) or None
291
+ Sparsity structure of the Jacobian, `structure` must be csc_matrix.
292
+
293
+ Returns
294
+ -------
295
+ J : ndarray or csc_matrix, shape (n, n)
296
+ Jacobian matrix.
297
+ factor : ndarray, shape (n,)
298
+ Suggested `factor` for the next evaluation.
299
+ """
300
+ y = np.asarray(y)
301
+ n = y.shape[0]
302
+ if n == 0:
303
+ return np.empty((0, 0)), factor
304
+
305
+ if factor is None:
306
+ factor = np.full(n, EPS ** 0.5)
307
+ else:
308
+ factor = factor.copy()
309
+
310
+ # Direct the step as ODE dictates, hoping that such a step won't lead to
311
+ # a problematic region. For complex ODEs it makes sense to use the real
312
+ # part of f as we use steps along real axis.
313
+ f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
314
+ y_scale = f_sign * np.maximum(threshold, np.abs(y))
315
+ h = (y + factor * y_scale) - y
316
+
317
+ # Make sure that the step is not 0 to start with. Not likely it will be
318
+ # executed often.
319
+ for i in np.nonzero(h == 0)[0]:
320
+ while h[i] == 0:
321
+ factor[i] *= 10
322
+ h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]
323
+
324
+ if sparsity is None:
325
+ return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
326
+ else:
327
+ structure, groups = sparsity
328
+ return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
329
+ structure, groups)
330
+
331
+
332
+ def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
333
+ n = y.shape[0]
334
+ h_vecs = np.diag(h)
335
+ f_new = fun(t, y[:, None] + h_vecs)
336
+ diff = f_new - f[:, None]
337
+ max_ind = np.argmax(np.abs(diff), axis=0)
338
+ r = np.arange(n)
339
+ max_diff = np.abs(diff[max_ind, r])
340
+ scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
341
+
342
+ diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
343
+ if np.any(diff_too_small):
344
+ ind, = np.nonzero(diff_too_small)
345
+ new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
346
+ h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
347
+ h_vecs[ind, ind] = h_new
348
+ f_new = fun(t, y[:, None] + h_vecs[:, ind])
349
+ diff_new = f_new - f[:, None]
350
+ max_ind = np.argmax(np.abs(diff_new), axis=0)
351
+ r = np.arange(ind.shape[0])
352
+ max_diff_new = np.abs(diff_new[max_ind, r])
353
+ scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
354
+
355
+ update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
356
+ if np.any(update):
357
+ update, = np.nonzero(update)
358
+ update_ind = ind[update]
359
+ factor[update_ind] = new_factor[update]
360
+ h[update_ind] = h_new[update]
361
+ diff[:, update_ind] = diff_new[:, update]
362
+ scale[update_ind] = scale_new[update]
363
+ max_diff[update_ind] = max_diff_new[update]
364
+
365
+ diff /= h
366
+
367
+ factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
368
+ factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
369
+ factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
370
+
371
+ return diff, factor
372
+
373
+
374
+ def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
375
+ n = y.shape[0]
376
+ n_groups = np.max(groups) + 1
377
+ h_vecs = np.empty((n_groups, n))
378
+ for group in range(n_groups):
379
+ e = np.equal(group, groups)
380
+ h_vecs[group] = h * e
381
+ h_vecs = h_vecs.T
382
+
383
+ f_new = fun(t, y[:, None] + h_vecs)
384
+ df = f_new - f[:, None]
385
+
386
+ i, j, _ = find(structure)
387
+ diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
388
+ max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
389
+ r = np.arange(n)
390
+ max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
391
+ scale = np.maximum(np.abs(f[max_ind]),
392
+ np.abs(f_new[max_ind, groups[r]]))
393
+
394
+ diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
395
+ if np.any(diff_too_small):
396
+ ind, = np.nonzero(diff_too_small)
397
+ new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
398
+ h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
399
+ h_new_all = np.zeros(n)
400
+ h_new_all[ind] = h_new
401
+
402
+ groups_unique = np.unique(groups[ind])
403
+ groups_map = np.empty(n_groups, dtype=int)
404
+ h_vecs = np.empty((groups_unique.shape[0], n))
405
+ for k, group in enumerate(groups_unique):
406
+ e = np.equal(group, groups)
407
+ h_vecs[k] = h_new_all * e
408
+ groups_map[group] = k
409
+ h_vecs = h_vecs.T
410
+
411
+ f_new = fun(t, y[:, None] + h_vecs)
412
+ df = f_new - f[:, None]
413
+ i, j, _ = find(structure[:, ind])
414
+ diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
415
+ (i, j)), shape=(n, ind.shape[0])).tocsc()
416
+
417
+ max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
418
+ r = np.arange(ind.shape[0])
419
+ max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
420
+ scale_new = np.maximum(
421
+ np.abs(f[max_ind_new]),
422
+ np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
423
+
424
+ update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
425
+ if np.any(update):
426
+ update, = np.nonzero(update)
427
+ update_ind = ind[update]
428
+ factor[update_ind] = new_factor[update]
429
+ h[update_ind] = h_new[update]
430
+ diff[:, update_ind] = diff_new[:, update]
431
+ scale[update_ind] = scale_new[update]
432
+ max_diff[update_ind] = max_diff_new[update]
433
+
434
+ diff.data /= np.repeat(h, np.diff(diff.indptr))
435
+
436
+ factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
437
+ factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
438
+ factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
439
+
440
+ return diff, factor
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ N_STAGES = 12
4
+ N_STAGES_EXTENDED = 16
5
+ INTERPOLATOR_POWER = 7
6
+
7
+ C = np.array([0.0,
8
+ 0.526001519587677318785587544488e-01,
9
+ 0.789002279381515978178381316732e-01,
10
+ 0.118350341907227396726757197510,
11
+ 0.281649658092772603273242802490,
12
+ 0.333333333333333333333333333333,
13
+ 0.25,
14
+ 0.307692307692307692307692307692,
15
+ 0.651282051282051282051282051282,
16
+ 0.6,
17
+ 0.857142857142857142857142857142,
18
+ 1.0,
19
+ 1.0,
20
+ 0.1,
21
+ 0.2,
22
+ 0.777777777777777777777777777778])
23
+
24
+ A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
25
+ A[1, 0] = 5.26001519587677318785587544488e-2
26
+
27
+ A[2, 0] = 1.97250569845378994544595329183e-2
28
+ A[2, 1] = 5.91751709536136983633785987549e-2
29
+
30
+ A[3, 0] = 2.95875854768068491816892993775e-2
31
+ A[3, 2] = 8.87627564304205475450678981324e-2
32
+
33
+ A[4, 0] = 2.41365134159266685502369798665e-1
34
+ A[4, 2] = -8.84549479328286085344864962717e-1
35
+ A[4, 3] = 9.24834003261792003115737966543e-1
36
+
37
+ A[5, 0] = 3.7037037037037037037037037037e-2
38
+ A[5, 3] = 1.70828608729473871279604482173e-1
39
+ A[5, 4] = 1.25467687566822425016691814123e-1
40
+
41
+ A[6, 0] = 3.7109375e-2
42
+ A[6, 3] = 1.70252211019544039314978060272e-1
43
+ A[6, 4] = 6.02165389804559606850219397283e-2
44
+ A[6, 5] = -1.7578125e-2
45
+
46
+ A[7, 0] = 3.70920001185047927108779319836e-2
47
+ A[7, 3] = 1.70383925712239993810214054705e-1
48
+ A[7, 4] = 1.07262030446373284651809199168e-1
49
+ A[7, 5] = -1.53194377486244017527936158236e-2
50
+ A[7, 6] = 8.27378916381402288758473766002e-3
51
+
52
+ A[8, 0] = 6.24110958716075717114429577812e-1
53
+ A[8, 3] = -3.36089262944694129406857109825
54
+ A[8, 4] = -8.68219346841726006818189891453e-1
55
+ A[8, 5] = 2.75920996994467083049415600797e1
56
+ A[8, 6] = 2.01540675504778934086186788979e1
57
+ A[8, 7] = -4.34898841810699588477366255144e1
58
+
59
+ A[9, 0] = 4.77662536438264365890433908527e-1
60
+ A[9, 3] = -2.48811461997166764192642586468
61
+ A[9, 4] = -5.90290826836842996371446475743e-1
62
+ A[9, 5] = 2.12300514481811942347288949897e1
63
+ A[9, 6] = 1.52792336328824235832596922938e1
64
+ A[9, 7] = -3.32882109689848629194453265587e1
65
+ A[9, 8] = -2.03312017085086261358222928593e-2
66
+
67
+ A[10, 0] = -9.3714243008598732571704021658e-1
68
+ A[10, 3] = 5.18637242884406370830023853209
69
+ A[10, 4] = 1.09143734899672957818500254654
70
+ A[10, 5] = -8.14978701074692612513997267357
71
+ A[10, 6] = -1.85200656599969598641566180701e1
72
+ A[10, 7] = 2.27394870993505042818970056734e1
73
+ A[10, 8] = 2.49360555267965238987089396762
74
+ A[10, 9] = -3.0467644718982195003823669022
75
+
76
+ A[11, 0] = 2.27331014751653820792359768449
77
+ A[11, 3] = -1.05344954667372501984066689879e1
78
+ A[11, 4] = -2.00087205822486249909675718444
79
+ A[11, 5] = -1.79589318631187989172765950534e1
80
+ A[11, 6] = 2.79488845294199600508499808837e1
81
+ A[11, 7] = -2.85899827713502369474065508674
82
+ A[11, 8] = -8.87285693353062954433549289258
83
+ A[11, 9] = 1.23605671757943030647266201528e1
84
+ A[11, 10] = 6.43392746015763530355970484046e-1
85
+
86
+ A[12, 0] = 5.42937341165687622380535766363e-2
87
+ A[12, 5] = 4.45031289275240888144113950566
88
+ A[12, 6] = 1.89151789931450038304281599044
89
+ A[12, 7] = -5.8012039600105847814672114227
90
+ A[12, 8] = 3.1116436695781989440891606237e-1
91
+ A[12, 9] = -1.52160949662516078556178806805e-1
92
+ A[12, 10] = 2.01365400804030348374776537501e-1
93
+ A[12, 11] = 4.47106157277725905176885569043e-2
94
+
95
+ A[13, 0] = 5.61675022830479523392909219681e-2
96
+ A[13, 6] = 2.53500210216624811088794765333e-1
97
+ A[13, 7] = -2.46239037470802489917441475441e-1
98
+ A[13, 8] = -1.24191423263816360469010140626e-1
99
+ A[13, 9] = 1.5329179827876569731206322685e-1
100
+ A[13, 10] = 8.20105229563468988491666602057e-3
101
+ A[13, 11] = 7.56789766054569976138603589584e-3
102
+ A[13, 12] = -8.298e-3
103
+
104
+ A[14, 0] = 3.18346481635021405060768473261e-2
105
+ A[14, 5] = 2.83009096723667755288322961402e-2
106
+ A[14, 6] = 5.35419883074385676223797384372e-2
107
+ A[14, 7] = -5.49237485713909884646569340306e-2
108
+ A[14, 10] = -1.08347328697249322858509316994e-4
109
+ A[14, 11] = 3.82571090835658412954920192323e-4
110
+ A[14, 12] = -3.40465008687404560802977114492e-4
111
+ A[14, 13] = 1.41312443674632500278074618366e-1
112
+
113
+ A[15, 0] = -4.28896301583791923408573538692e-1
114
+ A[15, 5] = -4.69762141536116384314449447206
115
+ A[15, 6] = 7.68342119606259904184240953878
116
+ A[15, 7] = 4.06898981839711007970213554331
117
+ A[15, 8] = 3.56727187455281109270669543021e-1
118
+ A[15, 12] = -1.39902416515901462129418009734e-3
119
+ A[15, 13] = 2.9475147891527723389556272149
120
+ A[15, 14] = -9.15095847217987001081870187138
121
+
122
+
123
+ B = A[N_STAGES, :N_STAGES]
124
+
125
+ E3 = np.zeros(N_STAGES + 1)
126
+ E3[:-1] = B.copy()
127
+ E3[0] -= 0.244094488188976377952755905512
128
+ E3[8] -= 0.733846688281611857341361741547
129
+ E3[11] -= 0.220588235294117647058823529412e-1
130
+
131
+ E5 = np.zeros(N_STAGES + 1)
132
+ E5[0] = 0.1312004499419488073250102996e-1
133
+ E5[5] = -0.1225156446376204440720569753e+1
134
+ E5[6] = -0.4957589496572501915214079952
135
+ E5[7] = 0.1664377182454986536961530415e+1
136
+ E5[8] = -0.3503288487499736816886487290
137
+ E5[9] = 0.3341791187130174790297318841
138
+ E5[10] = 0.8192320648511571246570742613e-1
139
+ E5[11] = -0.2235530786388629525884427845e-1
140
+
141
+ # First 3 coefficients are computed separately.
142
+ D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
143
+ D[0, 0] = -0.84289382761090128651353491142e+1
144
+ D[0, 5] = 0.56671495351937776962531783590
145
+ D[0, 6] = -0.30689499459498916912797304727e+1
146
+ D[0, 7] = 0.23846676565120698287728149680e+1
147
+ D[0, 8] = 0.21170345824450282767155149946e+1
148
+ D[0, 9] = -0.87139158377797299206789907490
149
+ D[0, 10] = 0.22404374302607882758541771650e+1
150
+ D[0, 11] = 0.63157877876946881815570249290
151
+ D[0, 12] = -0.88990336451333310820698117400e-1
152
+ D[0, 13] = 0.18148505520854727256656404962e+2
153
+ D[0, 14] = -0.91946323924783554000451984436e+1
154
+ D[0, 15] = -0.44360363875948939664310572000e+1
155
+
156
+ D[1, 0] = 0.10427508642579134603413151009e+2
157
+ D[1, 5] = 0.24228349177525818288430175319e+3
158
+ D[1, 6] = 0.16520045171727028198505394887e+3
159
+ D[1, 7] = -0.37454675472269020279518312152e+3
160
+ D[1, 8] = -0.22113666853125306036270938578e+2
161
+ D[1, 9] = 0.77334326684722638389603898808e+1
162
+ D[1, 10] = -0.30674084731089398182061213626e+2
163
+ D[1, 11] = -0.93321305264302278729567221706e+1
164
+ D[1, 12] = 0.15697238121770843886131091075e+2
165
+ D[1, 13] = -0.31139403219565177677282850411e+2
166
+ D[1, 14] = -0.93529243588444783865713862664e+1
167
+ D[1, 15] = 0.35816841486394083752465898540e+2
168
+
169
+ D[2, 0] = 0.19985053242002433820987653617e+2
170
+ D[2, 5] = -0.38703730874935176555105901742e+3
171
+ D[2, 6] = -0.18917813819516756882830838328e+3
172
+ D[2, 7] = 0.52780815920542364900561016686e+3
173
+ D[2, 8] = -0.11573902539959630126141871134e+2
174
+ D[2, 9] = 0.68812326946963000169666922661e+1
175
+ D[2, 10] = -0.10006050966910838403183860980e+1
176
+ D[2, 11] = 0.77771377980534432092869265740
177
+ D[2, 12] = -0.27782057523535084065932004339e+1
178
+ D[2, 13] = -0.60196695231264120758267380846e+2
179
+ D[2, 14] = 0.84320405506677161018159903784e+2
180
+ D[2, 15] = 0.11992291136182789328035130030e+2
181
+
182
+ D[3, 0] = -0.25693933462703749003312586129e+2
183
+ D[3, 5] = -0.15418974869023643374053993627e+3
184
+ D[3, 6] = -0.23152937917604549567536039109e+3
185
+ D[3, 7] = 0.35763911791061412378285349910e+3
186
+ D[3, 8] = 0.93405324183624310003907691704e+2
187
+ D[3, 9] = -0.37458323136451633156875139351e+2
188
+ D[3, 10] = 0.10409964950896230045147246184e+3
189
+ D[3, 11] = 0.29840293426660503123344363579e+2
190
+ D[3, 12] = -0.43533456590011143754432175058e+2
191
+ D[3, 13] = 0.96324553959188282948394950600e+2
192
+ D[3, 14] = -0.39177261675615439165231486172e+2
193
+ D[3, 15] = -0.14972683625798562581422125276e+3
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py ADDED
@@ -0,0 +1,748 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import numpy as np
3
+ from .bdf import BDF
4
+ from .radau import Radau
5
+ from .rk import RK23, RK45, DOP853
6
+ from .lsoda import LSODA
7
+ from scipy.optimize import OptimizeResult
8
+ from .common import EPS, OdeSolution
9
+ from .base import OdeSolver
10
+
11
+
12
+ METHODS = {'RK23': RK23,
13
+ 'RK45': RK45,
14
+ 'DOP853': DOP853,
15
+ 'Radau': Radau,
16
+ 'BDF': BDF,
17
+ 'LSODA': LSODA}
18
+
19
+
20
+ MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
21
+ 1: "A termination event occurred."}
22
+
23
+
24
+ class OdeResult(OptimizeResult):
25
+ pass
26
+
27
+
28
+ def prepare_events(events):
29
+ """Standardize event functions and extract attributes."""
30
+ if callable(events):
31
+ events = (events,)
32
+
33
+ max_events = np.empty(len(events))
34
+ direction = np.empty(len(events))
35
+ for i, event in enumerate(events):
36
+ terminal = getattr(event, 'terminal', None)
37
+ direction[i] = getattr(event, 'direction', 0)
38
+
39
+ message = ('The `terminal` attribute of each event '
40
+ 'must be a boolean or positive integer.')
41
+ if terminal is None or terminal == 0:
42
+ max_events[i] = np.inf
43
+ elif int(terminal) == terminal and terminal > 0:
44
+ max_events[i] = terminal
45
+ else:
46
+ raise ValueError(message)
47
+
48
+ return events, max_events, direction
49
+
50
+
51
+ def solve_event_equation(event, sol, t_old, t):
52
+ """Solve an equation corresponding to an ODE event.
53
+
54
+ The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
55
+ ODE solver using some sort of interpolation. It is solved by
56
+ `scipy.optimize.brentq` with xtol=atol=4*EPS.
57
+
58
+ Parameters
59
+ ----------
60
+ event : callable
61
+ Function ``event(t, y)``.
62
+ sol : callable
63
+ Function ``sol(t)`` which evaluates an ODE solution between `t_old`
64
+ and `t`.
65
+ t_old, t : float
66
+ Previous and new values of time. They will be used as a bracketing
67
+ interval.
68
+
69
+ Returns
70
+ -------
71
+ root : float
72
+ Found solution.
73
+ """
74
+ from scipy.optimize import brentq
75
+ return brentq(lambda t: event(t, sol(t)), t_old, t,
76
+ xtol=4 * EPS, rtol=4 * EPS)
77
+
78
+
79
+ def handle_events(sol, events, active_events, event_count, max_events,
80
+ t_old, t):
81
+ """Helper function to handle events.
82
+
83
+ Parameters
84
+ ----------
85
+ sol : DenseOutput
86
+ Function ``sol(t)`` which evaluates an ODE solution between `t_old`
87
+ and `t`.
88
+ events : list of callables, length n_events
89
+ Event functions with signatures ``event(t, y)``.
90
+ active_events : ndarray
91
+ Indices of events which occurred.
92
+ event_count : ndarray
93
+ Current number of occurrences for each event.
94
+ max_events : ndarray, shape (n_events,)
95
+ Number of occurrences allowed for each event before integration
96
+ termination is issued.
97
+ t_old, t : float
98
+ Previous and new values of time.
99
+
100
+ Returns
101
+ -------
102
+ root_indices : ndarray
103
+ Indices of events which take zero between `t_old` and `t` and before
104
+ a possible termination.
105
+ roots : ndarray
106
+ Values of t at which events occurred.
107
+ terminate : bool
108
+ Whether a terminal event occurred.
109
+ """
110
+ roots = [solve_event_equation(events[event_index], sol, t_old, t)
111
+ for event_index in active_events]
112
+
113
+ roots = np.asarray(roots)
114
+
115
+ if np.any(event_count[active_events] >= max_events[active_events]):
116
+ if t > t_old:
117
+ order = np.argsort(roots)
118
+ else:
119
+ order = np.argsort(-roots)
120
+ active_events = active_events[order]
121
+ roots = roots[order]
122
+ t = np.nonzero(event_count[active_events]
123
+ >= max_events[active_events])[0][0]
124
+ active_events = active_events[:t + 1]
125
+ roots = roots[:t + 1]
126
+ terminate = True
127
+ else:
128
+ terminate = False
129
+
130
+ return active_events, roots, terminate
131
+
132
+
133
+ def find_active_events(g, g_new, direction):
134
+ """Find which event occurred during an integration step.
135
+
136
+ Parameters
137
+ ----------
138
+ g, g_new : array_like, shape (n_events,)
139
+ Values of event functions at a current and next points.
140
+ direction : ndarray, shape (n_events,)
141
+ Event "direction" according to the definition in `solve_ivp`.
142
+
143
+ Returns
144
+ -------
145
+ active_events : ndarray
146
+ Indices of events which occurred during the step.
147
+ """
148
+ g, g_new = np.asarray(g), np.asarray(g_new)
149
+ up = (g <= 0) & (g_new >= 0)
150
+ down = (g >= 0) & (g_new <= 0)
151
+ either = up | down
152
+ mask = (up & (direction > 0) |
153
+ down & (direction < 0) |
154
+ either & (direction == 0))
155
+
156
+ return np.nonzero(mask)[0]
157
+
158
+
159
def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
              events=None, vectorized=False, args=None, **options):
    """Solve an initial value problem for a system of ODEs.

    This function numerically integrates a system of ordinary differential
    equations given an initial value::

        dy / dt = f(t, y)
        y(t0) = y0

    Here t is a 1-D independent variable (time), y(t) is an
    N-D vector-valued function (state), and an N-D
    vector-valued function f(t, y) determines the differential equations.
    The goal is to find y(t) approximately satisfying the differential
    equations, given an initial value y(t0)=y0.

    Some of the solvers support integration in the complex domain, but note
    that for stiff ODE solvers, the right-hand side must be
    complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
    To solve a problem in the complex domain, pass y0 with a complex data type.
    Another option always available is to rewrite your problem for real and
    imaginary parts separately.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. Additional
        arguments need to be passed if ``args`` is used (see documentation of
        ``args`` argument). ``fun`` must return an array of the same shape as
        ``y``. See `vectorized` for more information.
    t_span : 2-member sequence
        Interval of integration (t0, tf). The solver starts with t=t0 and
        integrates until it reaches t=tf. Both t0 and tf must be floats
        or values interpretable by the float conversion function.
    y0 : array_like, shape (n,)
        Initial state. For problems in the complex domain, pass `y0` with a
        complex data type (even if the initial value is purely real).
    method : string or `OdeSolver`, optional
        Integration method to use:

            * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
              The error is controlled assuming accuracy of the fourth-order
              method, but steps are taken using the fifth-order accurate
              formula (local extrapolation is done). A quartic interpolation
              polynomial is used for the dense output [2]_. Can be applied in
              the complex domain.
            * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
              is controlled assuming accuracy of the second-order method, but
              steps are taken using the third-order accurate formula (local
              extrapolation is done). A cubic Hermite polynomial is used for the
              dense output. Can be applied in the complex domain.
            * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
              Python implementation of the "DOP853" algorithm originally
              written in Fortran [14]_. A 7-th order interpolation polynomial
              accurate to 7-th order is used for the dense output.
              Can be applied in the complex domain.
            * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
              order 5 [4]_. The error is controlled with a third-order accurate
              embedded formula. A cubic polynomial which satisfies the
              collocation conditions is used for the dense output.
            * 'BDF': Implicit multi-step variable-order (1 to 5) method based
              on a backward differentiation formula for the derivative
              approximation [5]_. The implementation follows the one described
              in [6]_. A quasi-constant step scheme is used and accuracy is
              enhanced using the NDF modification. Can be applied in the
              complex domain.
            * 'LSODA': Adams/BDF method with automatic stiffness detection and
              switching [7]_, [8]_. This is a wrapper of the Fortran solver
              from ODEPACK.

        Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
        for non-stiff problems and implicit methods ('Radau', 'BDF') for
        stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
        for solving with high precision (low values of `rtol` and `atol`).

        If not sure, first try to run 'RK45'. If it makes unusually many
        iterations, diverges, or fails, your problem is likely to be stiff and
        you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
        choice, but it might be somewhat less convenient to work with as it
        wraps old Fortran code.

        You can also pass an arbitrary class derived from `OdeSolver` which
        implements the solver.
    t_eval : array_like or None, optional
        Times at which to store the computed solution, must be sorted and lie
        within `t_span`. If None (default), use points selected by the solver.
    dense_output : bool, optional
        Whether to compute a continuous solution. Default is False.
    events : callable, or list of callables, optional
        Events to track. If None (default), no events will be tracked.
        Each event occurs at the zeros of a continuous function of time and
        state. Each function must have the signature ``event(t, y)`` where
        additional argument have to be passed if ``args`` is used (see
        documentation of ``args`` argument). Each function must return a
        float. The solver will find an accurate value of `t` at which
        ``event(t, y(t)) = 0`` using a root-finding algorithm. By default,
        all zeros will be found. The solver looks for a sign change over
        each step, so if multiple zero crossings occur within one step,
        events may be missed. Additionally each `event` function might
        have the following attributes:

            terminal: bool or int, optional
                When boolean, whether to terminate integration if this event occurs.
                When integral, termination occurs after the specified number of
                occurrences of this event.
                Implicitly False if not assigned.
            direction: float, optional
                Direction of a zero crossing. If `direction` is positive,
                `event` will only trigger when going from negative to positive,
                and vice versa if `direction` is negative. If 0, then either
                direction will trigger event. Implicitly 0 if not assigned.

        You can assign attributes like ``event.terminal = True`` to any
        function in Python.
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for other methods and for 'Radau' and
        'BDF' in some circumstances (e.g. small ``len(y0)``).
    args : tuple, optional
        Additional arguments to pass to the user-defined functions. If given,
        the additional arguments are passed to all user-defined functions.
        So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
        then `jac` (if given) and any event functions must have the same
        signature, and `args` must be a tuple of length 3.
    **options
        Options passed to a chosen solver. All options available for already
        implemented solvers are listed below.
    first_step : float or None, optional
        Initial step size. Default is `None` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float or array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : array_like, sparse_matrix, callable or None, optional
        Jacobian matrix of the right-hand side of the system with respect
        to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
        Jacobian matrix has shape (n, n) and its element (i, j) is equal to
        ``d f_i / d y_j``. There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant. Not supported by 'LSODA'.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)``, as necessary.
              Additional arguments have to be passed if ``args`` is
              used (see documentation of ``args`` argument).
              For 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : array_like, sparse matrix or None, optional
        Defines a sparsity structure of the Jacobian matrix for a finite-
        difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few
        non-zero elements in *each* row, providing the sparsity structure
        will greatly speed up the computations [10]_. A zero entry means that
        a corresponding element in the Jacobian is always zero. If None
        (default), the Jacobian is assumed to be dense.
        Not supported by 'LSODA', see `lband` and `uband` instead.
    lband, uband : int or None, optional
        Parameters defining the bandwidth of the Jacobian for the 'LSODA'
        method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
        Default is None. Setting these requires your jac routine to return the
        Jacobian in the packed format: the returned array must have ``n``
        columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
        written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
        The same format is used in `scipy.linalg.solve_banded` (check for an
        illustration). These parameters can be also used with ``jac=None`` to
        reduce the number of Jacobian elements estimated by finite differences.
    min_step : float, optional
        The minimum allowed step size for 'LSODA' method.
        By default `min_step` is zero.

    Returns
    -------
    Bunch object with the following fields defined:
    t : ndarray, shape (n_points,)
        Time points.
    y : ndarray, shape (n, n_points)
        Values of the solution at `t`.
    sol : `OdeSolution` or None
        Found solution as `OdeSolution` instance; None if `dense_output` was
        set to False.
    t_events : list of ndarray or None
        Contains for each event type a list of arrays at which an event of
        that type event was detected. None if `events` was None.
    y_events : list of ndarray or None
        For each value of `t_events`, the corresponding value of the solution.
        None if `events` was None.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.
    status : int
        Reason for algorithm termination:

            * -1: Integration step failed.
            * 0: The solver successfully reached the end of `tspan`.
            * 1: A termination event occurred.

    message : string
        Human-readable description of the termination reason.
    success : bool
        True if the solver reached the interval end or a termination event
        occurred (``status >= 0``).

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [5] `Backward Differentiation Formula
            <https://en.wikipedia.org/wiki/Backward_differentiation_formula>`_
            on Wikipedia.
    .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    .. [9] `Stiff equation <https://en.wikipedia.org/wiki/Stiff_equation>`_ on
           Wikipedia.
    .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
            sparse Jacobian matrices", Journal of the Institute of Mathematics
            and its Applications, 13, pp. 117-120, 1974.
    .. [11] `Cauchy-Riemann equations
             <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
             Wikipedia.
    .. [12] `Lotka-Volterra equations
            <https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations>`_
            on Wikipedia.
    .. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
            Equations I: Nonstiff Problems", Sec. II.
    .. [14] `Page with original Fortran code of DOP853
            <http://www.unige.ch/~hairer/software.html>`_.

    Examples
    --------
    Basic exponential decay showing automatically chosen time points.

    >>> import numpy as np
    >>> from scipy.integrate import solve_ivp
    >>> def exponential_decay(t, y): return -0.5 * y
    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
    >>> print(sol.t)
    [ 0.          0.11487653  1.26364188  3.06061781  4.81611105  6.57445806
      8.33328988 10.        ]
    >>> print(sol.y)
    [[2.         1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
      0.03107158 0.01350781]
     [4.         3.7767207  2.12654355 0.86638624 0.36034507 0.14966091
      0.06214316 0.02701561]
     [8.         7.5534414  4.25308709 1.73277247 0.72069014 0.29932181
      0.12428631 0.05403123]]

    Specifying points where the solution is desired.

    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
    ...                 t_eval=[0, 1, 2, 4, 10])
    >>> print(sol.t)
    [ 0  1  2  4 10]
    >>> print(sol.y)
    [[2.         1.21305369 0.73534021 0.27066736 0.01350938]
     [4.         2.42610739 1.47068043 0.54133472 0.02701876]
     [8.         4.85221478 2.94136085 1.08266944 0.05403753]]

    Cannon fired upward with terminal event upon impact. The ``terminal`` and
    ``direction`` fields of an event are applied by monkey patching a function.
    Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
    at position 0 with velocity +10. Note that the integration never reaches
    t=100 because the event is terminal.

    >>> def upward_cannon(t, y): return [y[1], -0.5]
    >>> def hit_ground(t, y): return y[0]
    >>> hit_ground.terminal = True
    >>> hit_ground.direction = -1
    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
    >>> print(sol.t_events)
    [array([40.])]
    >>> print(sol.t)
    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]

    Use `dense_output` and `events` to find position, which is 100, at the apex
    of the cannonball's trajectory. Apex is not defined as terminal, so both
    apex and hit_ground are found. There is no information at t=20, so the sol
    attribute is used to evaluate the solution. The sol attribute is returned
    by setting ``dense_output=True``. Alternatively, the `y_events` attribute
    can be used to access the solution at the time of the event.

    >>> def apex(t, y): return y[1]
    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
    ...                 events=(hit_ground, apex), dense_output=True)
    >>> print(sol.t_events)
    [array([40.]), array([20.])]
    >>> print(sol.t)
    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
    >>> print(sol.sol(sol.t_events[1][0]))
    [100.   0.]
    >>> print(sol.y_events)
    [array([[-5.68434189e-14, -1.00000000e+01]]),
     array([[1.00000000e+02, 1.77635684e-15]])]

    As an example of a system with additional parameters, we'll implement
    the Lotka-Volterra equations [12]_.

    >>> def lotkavolterra(t, z, a, b, c, d):
    ...     x, y = z
    ...     return [a*x - b*x*y, -c*y + d*x*y]
    ...

    We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
    argument.

    >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
    ...                 dense_output=True)

    Compute a dense solution and plot it.

    >>> t = np.linspace(0, 15, 300)
    >>> z = sol.sol(t)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, z.T)
    >>> plt.xlabel('t')
    >>> plt.legend(['x', 'y'], shadow=True)
    >>> plt.title('Lotka-Volterra System')
    >>> plt.show()

    A couple examples of using solve_ivp to solve the differential
    equation ``y' = Ay`` with complex matrix ``A``.

    >>> A = np.array([[-0.25 + 0.14j, 0, 0.33 + 0.44j],
    ...               [0.25 + 0.58j, -0.2 + 0.14j, 0],
    ...               [0, 0.2 + 0.4j, -0.1 + 0.97j]])

    Solving an IVP with ``A`` from above and ``y`` as 3x1 vector:

    >>> def deriv_vec(t, y):
    ...     return A @ y
    >>> result = solve_ivp(deriv_vec, [0, 25],
    ...                    np.array([10 + 0j, 20 + 0j, 30 + 0j]),
    ...                    t_eval=np.linspace(0, 25, 101))
    >>> print(result.y[:, 0])
    [10.+0.j 20.+0.j 30.+0.j]
    >>> print(result.y[:, -1])
    [18.46291039+45.25653651j 10.01569306+36.23293216j
     -4.98662741+80.07360388j]

    Solving an IVP with ``A`` from above with ``y`` as 3x3 matrix :

    >>> def deriv_mat(t, y):
    ...     return (A @ y.reshape(3, 3)).flatten()
    >>> y0 = np.array([[2 + 0j, 3 + 0j, 4 + 0j],
    ...                [5 + 0j, 6 + 0j, 7 + 0j],
    ...                [9 + 0j, 34 + 0j, 78 + 0j]])

    >>> result = solve_ivp(deriv_mat, [0, 25], y0.flatten(),
    ...                    t_eval=np.linspace(0, 25, 101))
    >>> print(result.y[:, 0].reshape(3, 3))
    [[ 2.+0.j  3.+0.j  4.+0.j]
     [ 5.+0.j  6.+0.j  7.+0.j]
     [ 9.+0.j 34.+0.j 78.+0.j]]
    >>> print(result.y[:, -1].reshape(3, 3))
    [[ 5.67451179 +12.07938445j 17.2888073  +31.03278837j
      37.83405768 +63.25138759j]
     [ 3.39949503 +11.82123994j 21.32530996 +44.88668871j
      53.17531184+103.80400411j]
     [ -2.26105874 +22.19277664j -15.1255713  +70.19616341j
      -38.34616845+153.29039931j]]


    """
    # Accept either a known method name or a user-supplied OdeSolver subclass.
    if method not in METHODS and not (
            inspect.isclass(method) and issubclass(method, OdeSolver)):
        raise ValueError(f"`method` must be one of {METHODS} or OdeSolver class.")

    t0, tf = map(float, t_span)

    if args is not None:
        # Wrap the user's fun (and jac, if given) in lambdas to hide the
        # additional parameters. Pass in the original fun as a keyword
        # argument to keep it in the scope of the lambda.
        try:
            _ = [*(args)]
        except TypeError as exp:
            suggestion_tuple = (
                "Supplied 'args' cannot be unpacked. Please supply `args`"
                f" as a tuple (e.g. `args=({args},)`)"
            )
            raise TypeError(suggestion_tuple) from exp

        def fun(t, x, fun=fun):
            return fun(t, x, *args)
        jac = options.get('jac')
        if callable(jac):
            options['jac'] = lambda t, x: jac(t, x, *args)

    # Validate t_eval: 1-D, within t_span, and monotone in the integration
    # direction.
    if t_eval is not None:
        t_eval = np.asarray(t_eval)
        if t_eval.ndim != 1:
            raise ValueError("`t_eval` must be 1-dimensional.")

        if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
            raise ValueError("Values in `t_eval` are not within `t_span`.")

        d = np.diff(t_eval)
        if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
            raise ValueError("Values in `t_eval` are not properly sorted.")

        if tf > t0:
            t_eval_i = 0
        else:
            # Make order of t_eval decreasing to use np.searchsorted.
            t_eval = t_eval[::-1]
            # This will be an upper bound for slices.
            t_eval_i = t_eval.shape[0]

    if method in METHODS:
        method = METHODS[method]

    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)

    # Choose output accumulators depending on whether solver-selected points,
    # user-requested points, or both (dense output) are needed.
    if t_eval is None:
        ts = [t0]
        ys = [y0]
    elif t_eval is not None and dense_output:
        ts = []
        ti = [t0]
        ys = []
    else:
        ts = []
        ys = []

    interpolants = []

    if events is not None:
        events, max_events, event_dir = prepare_events(events)
        event_count = np.zeros(len(events))
        if args is not None:
            # Wrap user functions in lambdas to hide the additional parameters.
            # The original event function is passed as a keyword argument to the
            # lambda to keep the original function in scope (i.e., avoid the
            # late binding closure "gotcha").
            events = [lambda t, x, event=event: event(t, x, *args)
                      for event in events]
        g = [event(t0, y0) for event in events]
        t_events = [[] for _ in range(len(events))]
        y_events = [[] for _ in range(len(events))]
    else:
        t_events = None
        y_events = None

    # Main loop: advance the solver one internal step at a time until it
    # finishes, fails, or a terminal event fires.
    status = None
    while status is None:
        message = solver.step()

        if solver.status == 'finished':
            status = 0
        elif solver.status == 'failed':
            status = -1
            break

        t_old = solver.t_old
        t = solver.t
        y = solver.y

        if dense_output:
            sol = solver.dense_output()
            interpolants.append(sol)
        else:
            sol = None

        if events is not None:
            g_new = [event(t, y) for event in events]
            active_events = find_active_events(g, g_new, event_dir)
            if active_events.size > 0:
                # A local interpolant is needed to locate event roots
                # accurately within the step.
                if sol is None:
                    sol = solver.dense_output()

                event_count[active_events] += 1
                root_indices, roots, terminate = handle_events(
                    sol, events, active_events, event_count, max_events,
                    t_old, t)

                for e, te in zip(root_indices, roots):
                    t_events[e].append(te)
                    y_events[e].append(sol(te))

                if terminate:
                    status = 1
                    t = roots[-1]
                    y = sol(t)

            g = g_new

        if t_eval is None:
            ts.append(t)
            ys.append(y)
        else:
            # The value in t_eval equal to t will be included.
            if solver.direction > 0:
                t_eval_i_new = np.searchsorted(t_eval, t, side='right')
                t_eval_step = t_eval[t_eval_i:t_eval_i_new]
            else:
                t_eval_i_new = np.searchsorted(t_eval, t, side='left')
                # It has to be done with two slice operations, because
                # you can't slice to 0th element inclusive using backward
                # slicing.
                t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]

            if t_eval_step.size > 0:
                if sol is None:
                    sol = solver.dense_output()
                ts.append(t_eval_step)
                ys.append(sol(t_eval_step))
                t_eval_i = t_eval_i_new

        if t_eval is not None and dense_output:
            ti.append(t)

    message = MESSAGES.get(status, message)

    if t_events is not None:
        t_events = [np.asarray(te) for te in t_events]
        y_events = [np.asarray(ye) for ye in y_events]

    # Assemble the output arrays; ys is stored column-per-time-point.
    if t_eval is None:
        ts = np.array(ts)
        ys = np.vstack(ys).T
    elif ts:
        ts = np.hstack(ts)
        ys = np.hstack(ys)

    if dense_output:
        if t_eval is None:
            sol = OdeSolution(
                ts, interpolants, alt_segment=True if method in [BDF, LSODA] else False
            )
        else:
            sol = OdeSolution(
                ti, interpolants, alt_segment=True if method in [BDF, LSODA] else False
            )
    else:
        sol = None

    return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
                     nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
                     status=status, message=message, success=status >= 0)
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/lsoda.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.integrate import ode
3
+ from .common import validate_tol, validate_first_step, warn_extraneous
4
+ from .base import OdeSolver, DenseOutput
5
+
6
+
7
class LSODA(OdeSolver):
    """Adams/BDF method with automatic stiffness detection and switching.

    This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
    automatically between the nonstiff Adams method and the stiff BDF method.
    The method was originally detailed in [2]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    min_step : float, optional
        Minimum allowed step size. Default is 0.0, i.e., the step size is not
        bounded and determined solely by the solver.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : None or callable, optional
        Jacobian matrix of the right-hand side of the system with respect to
        ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
        equal to ``d f_i / d y_j``. The function will be called as
        ``jac(t, y)``. If None (default), the Jacobian will be
        approximated by finite differences. It is generally recommended to
        provide the Jacobian rather than relying on a finite-difference
        approximation.
    lband, uband : int or None
        Parameters defining the bandwidth of the Jacobian,
        i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
        these requires your jac routine to return the Jacobian in the packed format:
        the returned array must have ``n`` columns and ``uband + lband + 1``
        rows in which Jacobian diagonals are written. Specifically
        ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
        in `scipy.linalg.solve_banded` (check for an illustration).
        These parameters can be also used with ``jac=None`` to reduce the
        number of Jacobian elements estimated by finite differences.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.

    References
    ----------
    .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    """
    def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
                 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
                 uband=None, vectorized=False, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)

        # Translate the solve_ivp conventions (None / np.inf) into the
        # sentinel values ODEPACK's LSODA expects (0 means "let the solver
        # decide").
        if first_step is None:
            first_step = 0  # LSODA value for automatic selection.
        else:
            first_step = validate_first_step(first_step, t0, t_bound)

        # LSODA expects a signed initial step matching the integration
        # direction.
        first_step *= self.direction

        if max_step == np.inf:
            max_step = 0  # LSODA value for infinity.
        elif max_step <= 0:
            raise ValueError("`max_step` must be positive.")

        if min_step < 0:
            raise ValueError("`min_step` must be nonnegative.")

        rtol, atol = validate_tol(rtol, atol, self.n)

        # Delegate the actual integration to the legacy `scipy.integrate.ode`
        # wrapper around the Fortran LSODA routine.
        solver = ode(self.fun, jac)
        solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
                              min_step=min_step, first_step=first_step,
                              lband=lband, uband=uband)
        solver.set_initial_value(y0, t0)

        # Inject t_bound into rwork array as needed for itask=5.
        solver._integrator.rwork[0] = self.t_bound
        solver._integrator.call_args[4] = solver._integrator.rwork

        self._lsoda_solver = solver

    def _step_impl(self):
        """Advance one internal LSODA step; return (success, message)."""
        solver = self._lsoda_solver
        integrator = solver._integrator

        # From lsoda.step and lsoda.integrate itask=5 means take a single
        # step and do not go past t_bound.
        itask = integrator.call_args[2]
        integrator.call_args[2] = 5
        solver._y, solver.t = integrator.run(
            solver.f, solver.jac or (lambda: None), solver._y, solver.t,
            self.t_bound, solver.f_params, solver.jac_params)
        integrator.call_args[2] = itask

        if solver.successful():
            self.t = solver.t
            self.y = solver._y
            # From LSODA Fortran source njev is equal to nlu.
            self.njev = integrator.iwork[12]
            self.nlu = integrator.iwork[12]
            return True, None
        else:
            return False, 'Unexpected istate in LSODA.'

    def _dense_output_impl(self):
        """Build a `LsodaDenseOutput` interpolant for the last step taken."""
        iwork = self._lsoda_solver._integrator.iwork
        rwork = self._lsoda_solver._integrator.rwork

        # We want to produce the Nordsieck history array, yh, up to the order
        # used in the last successful iteration. The step size is unimportant
        # because it will be scaled out in LsodaDenseOutput. Some additional
        # work may be required because ODEPACK's LSODA implementation produces
        # the Nordsieck history in the state needed for the next iteration.

        # iwork[13] contains order from last successful iteration, while
        # iwork[14] contains order to be attempted next.
        order = iwork[13]

        # rwork[11] contains the step size to be attempted next, while
        # rwork[10] contains step size from last successful iteration.
        h = rwork[11]

        # rwork[20:20 + (iwork[14] + 1) * self.n] contains entries of the
        # Nordsieck array in state needed for next iteration. We want
        # the entries up to order for the last successful step so use the
        # following.
        yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
                        (self.n, order + 1), order='F').copy()
        if iwork[14] < order:
            # If the order is set to decrease then the final column of yh
            # has not been updated within ODEPACK's LSODA
            # implementation because this column will not be used in the
            # next iteration. We must rescale this column to make the
            # associated step size consistent with the other columns.
            yh[:, -1] *= (h / rwork[10]) ** order

        return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
+ return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
209
+
210
+
211
+ class LsodaDenseOutput(DenseOutput):
212
+ def __init__(self, t_old, t, h, order, yh):
213
+ super().__init__(t_old, t)
214
+ self.h = h
215
+ self.yh = yh
216
+ self.p = np.arange(order + 1)
217
+
218
+ def _call_impl(self, t):
219
+ if t.ndim == 0:
220
+ x = ((t - self.t) / self.h) ** self.p
221
+ else:
222
+ x = ((t - self.t) / self.h) ** self.p[:, None]
223
+
224
+ return np.dot(self.yh, x)
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import csc_matrix, issparse, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, num_jac, EPS, warn_extraneous,
8
+ validate_first_step)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
S6 = 6 ** 0.5  # sqrt(6); appears throughout the Radau IIA coefficients.

# Butcher tableau. A is not used directly, see below.
# C holds the collocation abscissae; E the embedded error-estimate weights.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3

# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))

# These are transformation matrices (T maps eigenbasis variables W back to Z,
# TI maps Z into the eigenbasis).
T = np.array([
    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
    [1, 1, 0]])
TI = np.array([
    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_COMPLEX pairs the two rows belonging to the complex-conjugate eigenvalues.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]

# Interpolator coefficients (cubic collocation polynomial for dense output).
P = np.array([
    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
    [1/3, -8/3, 10/3]])


NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
46
+
47
+
48
def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
                             LU_real, LU_complex, solve_lu):
    """Solve the collocation system.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
    scale : ndarray, shape (n)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which solve the system. This value is compared with
        the normalized by `scale` error.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    # Work in the eigenbasis of A (see module-level comment): W = TI @ Z
    # decouples the 3n-dimensional system into one real and one complex
    # n-dimensional system.
    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        # Evaluate the RHS at the three collocation points.
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        if not np.all(np.isfinite(F)):
            break

        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            rate = dW_norm / dW_norm_old

        # Abort early if the iteration diverges (rate >= 1) or the
        # geometric extrapolation of the residual over the remaining
        # iterations cannot reach `tol`.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        # Converged when the extrapolated remaining error is below `tol`.
        if (dW_norm == 0 or
                rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
137
+
138
+
139
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
    """Predict the factor by which to scale the step size.

    Implements the step-size controller from [1]_: with history available a
    two-step (predictive) formula is used, otherwise the standard one-step
    formula based on the current error norm alone.

    Parameters
    ----------
    h_abs, h_abs_old : float
        Current and previous step sizes; `h_abs_old` may be None.
    error_norm, error_norm_old : float
        Current and previous error norms; `error_norm_old` may be None.

    Returns
    -------
    factor : float
        Predicted step-size factor.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
    """
    have_history = (h_abs_old is not None and error_norm_old is not None
                    and error_norm != 0)
    if have_history:
        # Two-step predictive controller.
        multiplier = (h_abs / h_abs_old
                      * (error_norm_old / error_norm) ** 0.25)
    else:
        multiplier = 1

    # error_norm may be 0, in which case the division-by-zero warning from
    # the negative power is deliberately suppressed (factor becomes inf).
    with np.errstate(divide='ignore'):
        return min(1, multiplier) * error_norm ** -0.25
177
+
178
+
179
class Radau(OdeSolver):
    """Implicit Runge-Kutta method of Radau IIA family of order 5.

    The implementation follows [1]_. The error is controlled with a
    third-order accurate embedded formula. A cubic polynomial which satisfies
    the collocation conditions is used for the dense output.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by this method. The Jacobian matrix has shape (n, n) and
        its element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
        elements in *each* row, providing the sparsity structure will greatly
        speed up the computations [2]_. A zero entry means that a corresponding
        element in the Jacobian is always zero. If None (default), the Jacobian
        is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by this method, but may result in slower
        execution overall in some circumstances (e.g. small ``len(y0)``).

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        # Select initial step assuming the same order which is used to control
        # the error.
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                3, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Tolerance for the inner Newton iteration, tied to rtol.
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
        self.sol = None

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Pick sparse or dense linear-algebra kernels once, up front.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc')
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        self.current_jac = True
        self.LU_real = None
        self.LU_complex = None
        self.Z = None

    def _validate_jac(self, jac, sparsity):
        """Normalize the user-supplied Jacobian into (callable-or-None, J0)."""
        t0 = self.t
        y0 = self.y

        if jac is None:
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y, f):
                self.njev += 1
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0, self.f)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev = 1
            if issparse(J):
                J = csc_matrix(J)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=float)

            else:
                J = np.asarray(J, dtype=float)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian supplied as a matrix; no wrapper needed.
            if issparse(jac):
                J = csc_matrix(jac)
            else:
                J = np.asarray(jac, dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        """Attempt one integration step; returns (success, message)."""
        t = self.t
        y = self.y
        f = self.f

        max_step = self.max_step
        atol = self.atol
        rtol = self.rtol

        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            h_abs_old = None
            error_norm_old = None
        elif self.h_abs < min_step:
            h_abs = min_step
            h_abs_old = None
            error_norm_old = None
        else:
            h_abs = self.h_abs
            h_abs_old = self.h_abs_old
            error_norm_old = self.error_norm_old

        J = self.J
        LU_real = self.LU_real
        LU_complex = self.LU_complex

        current_jac = self.current_jac
        jac = self.jac

        rejected = False
        step_accepted = False
        message = None
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            # Use the previous dense output (if any) to seed the Newton
            # iteration at the collocation points.
            if self.sol is None:
                Z0 = np.zeros((3, y.shape[0]))
            else:
                Z0 = self.sol(t + h * C).T - y

            scale = atol + np.abs(y) * rtol

            converged = False
            while not converged:
                if LU_real is None or LU_complex is None:
                    LU_real = self.lu(MU_REAL / h * self.I - J)
                    LU_complex = self.lu(MU_COMPLEX / h * self.I - J)

                converged, n_iter, Z, rate = solve_collocation_system(
                    self.fun, t, y, h, Z0, scale, self.newton_tol,
                    LU_real, LU_complex, self.solve_lu)

                if not converged:
                    # Retry once with a fresh Jacobian before shrinking h.
                    if current_jac:
                        break

                    J = self.jac(t, y, f)
                    current_jac = True
                    LU_real = None
                    LU_complex = None

            if not converged:
                h_abs *= 0.5
                LU_real = None
                LU_complex = None
                continue

            y_new = y + Z[-1]
            ZE = Z.T.dot(E) / h
            error = self.solve_lu(LU_real, f + ZE)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = norm(error / scale)
            # Penalize steps whose Newton iteration needed many iterations.
            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            if rejected and error_norm > 1:
                # After a rejection, refine the error estimate with one
                # extra function evaluation.
                error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
                error_norm = norm(error / scale)

            if error_norm > 1:
                factor = predict_factor(h_abs, h_abs_old,
                                        error_norm, error_norm_old)
                h_abs *= max(MIN_FACTOR, safety * factor)

                LU_real = None
                LU_complex = None
                rejected = True
            else:
                step_accepted = True

        recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3

        factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
        factor = min(MAX_FACTOR, safety * factor)

        # Keep the step size (and cached LU factors) unless the change is
        # worthwhile; refactoring is the expensive part of implicit methods.
        if not recompute_jac and factor < 1.2:
            factor = 1
        else:
            LU_real = None
            LU_complex = None

        f_new = self.fun(t_new, y_new)
        if recompute_jac:
            J = jac(t_new, y_new, f_new)
            current_jac = True
        elif jac is not None:
            current_jac = False

        self.h_abs_old = self.h_abs
        self.error_norm_old = error_norm

        self.h_abs = h_abs * factor

        self.y_old = y

        self.t = t_new
        self.y = y_new
        self.f = f_new

        self.Z = Z

        self.LU_real = LU_real
        self.LU_complex = LU_complex
        self.current_jac = current_jac
        self.J = J

        self.t_old = t
        self.sol = self._compute_dense_output()

        return step_accepted, message

    def _compute_dense_output(self):
        """Build the collocation-polynomial interpolant for the last step."""
        Q = np.dot(self.Z.T, P)
        return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)

    def _dense_output_impl(self):
        return self.sol
549
+
550
+
551
class RadauDenseOutput(DenseOutput):
    """Cubic collocation-polynomial interpolant over a single Radau step."""
    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        # Successive powers x, x**2, ..., x**(order+1) stacked along the
        # first axis; cumprod handles scalar and array t alike.
        reps = self.order + 1 if t.ndim == 0 else (self.order + 1, 1)
        p = np.cumprod(np.tile(x, reps), axis=0)
        # Here we don't multiply by h, not a mistake.
        y = np.dot(self.Q, p)
        offset = self.y_old if y.ndim == 1 else self.y_old[:, None]
        return y + offset
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py ADDED
@@ -0,0 +1,601 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from .base import OdeSolver, DenseOutput
3
+ from .common import (validate_max_step, validate_tol, select_initial_step,
4
+ norm, warn_extraneous, validate_first_step)
5
+ from . import dop853_coefficients
6
+
7
# Multiply steps computed from asymptotic behaviour of errors by this
# safety margin so the next step is likely to be accepted.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
12
+
13
+
14
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Advance an explicit Runge-Kutta method by a single step of size ``h``.

    Evaluates the stages of the tableau ``(A, B, C)`` (notation as in [1]_),
    stores them in ``K`` (the extra last row receives the derivative at the
    new point, which error estimators and FSAL reuse), and returns the new
    state and derivative.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current derivative, i.e. ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Stage-combination coefficients; strictly lower triangular for
        explicit methods.
    B : ndarray, shape (n_stages,)
        Weights combining the stages into the final prediction.
    C : ndarray, shape (n_stages,)
        Time offsets of the stages; the first entry is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Stage storage, filled in place row by row.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at ``t + h``.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    for s in range(1, len(C)):
        # Combine the stages computed so far with row s of A.
        dy = h * np.dot(K[:s].T, A[s, :s])
        K[s] = fun(t + C[s] * h, y + dy)

    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)

    K[-1] = f_new

    return y_new, f_new
72
+
73
+
74
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods.

    Subclasses supply the Butcher tableau (``A``, ``B``, ``C``), the error
    estimator weights ``E``, the dense-output matrix ``P``, and the order
    information; this class implements the adaptive stepping loop.
    """
    C: np.ndarray = NotImplemented
    A: np.ndarray = NotImplemented
    B: np.ndarray = NotImplemented
    E: np.ndarray = NotImplemented
    P: np.ndarray = NotImplemented
    order: int = NotImplemented
    error_estimator_order: int = NotImplemented
    n_stages: int = NotImplemented

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        # Extra row holds the derivative at the end of the step (see rk_step).
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        """Local error estimate: weighted combination of the stages."""
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        """Scaled RMS norm of the local error estimate."""
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        """Attempt one adaptive step; returns (success, message)."""
        t = self.t
        y = self.y

        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol

        # Smallest step distinguishable from t in floating point.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)

        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs

        step_accepted = False
        step_rejected = False

        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip the step so we never integrate past t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)

            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)

                # Avoid growing the step right after a rejection.
                if step_rejected:
                    factor = min(1, factor)

                h_abs *= factor

                step_accepted = True
            else:
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True

        self.h_previous = h
        self.y_old = y

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.f = f_new

        return True, None

    def _dense_output_impl(self):
        """Interpolant over the last step from the stored stages."""
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
181
+
182
+
183
class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).

    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
    assuming accuracy of the second-order method, but steps are taken using the
    third-order accurate formula (local extrapolation is done). A cubic Hermite
    polynomial is used for the dense output.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    # Bogacki-Shampine Butcher tableau.
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0]
    ])
    B = np.array([2/9, 1/3, 4/9])
    # Weights of the embedded second-order error estimate (applied to K,
    # including the trailing derivative row).
    E = np.array([5/72, -1/12, -1/9, 1/8])
    # Cubic Hermite interpolation coefficients for the dense output.
    P = np.array([[1, -4 / 3, 5 / 9],
                  [0, 1, -2/3],
                  [0, 4/3, -8/9],
                  [0, -1, 1]])
291
+
292
+
293
class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4).

    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method accuracy, but steps are taken
    using the fifth-order accurate formula (local extrapolation is done).
    A quartic interpolation polynomial is used for the dense output [2]_.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    # Butcher tableau of the Dormand-Prince 5(4) pair: nodes C, stage
    # coupling matrix A, fifth-order weights B, and error-estimate
    # coefficients E (difference between the 5th- and 4th-order weights,
    # with an extra entry for the FSAL stage).
    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1/5, 0, 0, 0, 0],
        [3/40, 9/40, 0, 0, 0],
        [44/45, -56/15, 32/9, 0, 0],
        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
    ])
    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
                  1/40])
    # Interpolation matrix for the quartic dense output.
    # Corresponds to the optimum value of c_6 from [2]_.
    P = np.array([
        [1, -8048581381/2820520608, 8663915743/2820520608,
         -12715105075/11282082432],
        [0, 0, 0, 0],
        [0, 131558114200/32700410799, -68118460800/10900136933,
         87487479700/32700410799],
        [0, -1754552775/470086768, 14199869525/1410260304,
         -10690763975/1880347072],
        [0, 127303824393/49829197408, -318862633887/49829197408,
         701980252875 / 199316789632],
        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
405
+
406
+
407
class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8.

    This is a Python implementation of "DOP853" algorithm originally written
    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
    the algorithmic core and coefficients are the same.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e. the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
           <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    # Only the first n_stages rows/columns of the coefficient tables are used
    # for stepping; the remaining rows of A and C feed the extra stages
    # evaluated in `_dense_output_impl`.
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    # E3 and E5 are the coefficients of the 3rd- and 5th-order error
    # estimators that are combined in `_estimate_error_norm`.
    C = dop853_coefficients.C[:n_stages]
    E3 = dop853_coefficients.E3
    E5 = dop853_coefficients.E5
    D = dop853_coefficients.D

    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
                         vectorized, first_step, **extraneous)
        # Storage for all stages including the extra ones needed by the
        # dense-output interpolant; `self.K` is a view of the leading rows
        # used by the base RungeKutta stepper.
        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
                                    self.n), dtype=self.y.dtype)
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        """Per-component error estimate combining the 5th- and 3rd-order
        estimators, as in the original Fortran DOP853."""
        err5 = np.dot(K.T, self.E5)
        err3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
        correction_factor = np.ones_like(err5)
        # Where the combined magnitude is zero, keep the factor at 1 to
        # avoid dividing by zero.
        mask = denom > 0
        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
        return h * err5 * correction_factor

    def _estimate_error_norm(self, K, h, scale):
        """Scaled RMS norm of the combined error estimate."""
        err5 = np.dot(K.T, self.E5) / scale
        err3 = np.dot(K.T, self.E3) / scale
        err5_norm_2 = np.linalg.norm(err5)**2
        err3_norm_2 = np.linalg.norm(err3)**2
        # Both estimators vanish: the step is exact to this accuracy.
        if err5_norm_2 == 0 and err3_norm_2 == 0:
            return 0.0
        denom = err5_norm_2 + 0.01 * err3_norm_2
        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        """Evaluate the extra stages and build the DOP853 interpolant."""
        K = self.K_extended
        h = self.h_previous
        # Extra function evaluations required only for dense output.
        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                   start=self.n_stages + 1):
            dy = np.dot(K[:s].T, a[:s]) * h
            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)

        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)

        f_old = K[0]
        delta_y = self.y - self.y_old

        # First rows are fixed by continuity of value and slope at both
        # step endpoints; the remaining rows come from the D coefficients.
        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)

        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
550
+
551
+
552
class RkDenseOutput(DenseOutput):
    """Polynomial interpolant over one Runge-Kutta step.

    The interpolant is ``y_old + h * Q @ [x, x**2, ..., x**(order+1)]``
    where ``x`` is the normalized position within the step.
    """

    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        # Polynomial order is implied by the number of columns of Q.
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
        # Normalized position(s) within the step, 0 at t_old and 1 at t.
        theta = (t - self.t_old) / self.h
        n_terms = self.order + 1
        # Build [theta, theta**2, ...] via a cumulative product, handling
        # scalar and vector evaluation points separately.
        if t.ndim == 0:
            powers = np.cumprod(np.tile(theta, n_terms))
        else:
            powers = np.cumprod(np.tile(theta, (n_terms, 1)), axis=0)
        result = self.h * np.dot(self.Q, powers)
        offset = self.y_old[:, None] if result.ndim == 2 else self.y_old
        result += offset

        return result
575
+
576
+
577
class Dop853DenseOutput(DenseOutput):
    """Dense output for DOP853 using its dedicated interpolation scheme."""

    def __init__(self, t_old, t, y_old, F):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.F = F
        self.y_old = y_old

    def _call_impl(self, t):
        # Normalized position(s) within the step.
        s = (t - self.t_old) / self.h

        if t.ndim == 0:
            acc = np.zeros_like(self.y_old)
        else:
            s = s[:, None]
            acc = np.zeros((len(s), len(self.y_old)), dtype=self.y_old.dtype)

        # Horner-like accumulation over the interpolation coefficients,
        # alternating the factors s and (1 - s) between terms.
        for idx, term in enumerate(reversed(self.F)):
            acc += term
            acc *= s if idx % 2 == 0 else 1 - s
        acc += self.y_old

        return acc.T
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc ADDED
Binary file (29.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py ADDED
@@ -0,0 +1,1135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+ from numpy.testing import (assert_, assert_allclose, assert_array_less,
3
+ assert_equal, assert_no_warnings, suppress_warnings)
4
+ import pytest
5
+ from pytest import raises as assert_raises
6
+ import numpy as np
7
+ from scipy.optimize._numdiff import group_columns
8
+ from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA
9
+ from scipy.integrate import OdeSolution
10
+ from scipy.integrate._ivp.common import num_jac
11
+ from scipy.integrate._ivp.base import ConstantDenseOutput
12
+ from scipy.sparse import coo_matrix, csc_matrix
13
+
14
+
15
def fun_zero(t, y):
    """RHS of the trivial system y' = 0 (the solution stays constant)."""
    return np.zeros_like(y)
17
+
18
+
19
def fun_linear(t, y):
    """RHS of a linear 2-D system with constant matrix [[-1, -5], [1, 1]]."""
    u, v = y[0], y[1]
    return np.array([-u - 5 * v, u + v])
21
+
22
+
23
def jac_linear():
    """Constant Jacobian matrix of `fun_linear`."""
    return np.array([[-1, -5],
                     [1, 1]])
25
+
26
+
27
def sol_linear(t):
    """Analytic solution of the linear test system for the tests' y0."""
    s = np.sin(2 * t)
    c = np.cos(2 * t)
    return np.vstack((-5 * s, 2 * c + s))
30
+
31
+
32
def fun_rational(t, y):
    """RHS whose exact solution is given by `sol_rational`."""
    u, v = y[0], y[1]
    return np.array([v / t,
                     v * (u + 2 * v - 1) / (t * (u - 1))])
35
+
36
+
37
def fun_rational_vectorized(t, y):
    """Vectorized variant of `fun_rational`: accepts ``y`` of shape (2, k)."""
    top, bottom = y[0], y[1]
    return np.vstack((bottom / t,
                      bottom * (top + 2 * bottom - 1) / (t * (top - 1))))
40
+
41
+
42
def jac_rational(t, y):
    """Analytic (dense) Jacobian of `fun_rational`."""
    u, v = y[0], y[1]
    return np.array([
        [0, 1 / t],
        [-2 * v ** 2 / (t * (u - 1) ** 2),
         (u + 4 * v - 1) / (t * (u - 1))]
    ])
48
+
49
+
50
def jac_rational_sparse(t, y):
    """CSC-sparse analytic Jacobian of `fun_rational`."""
    u, v = y[0], y[1]
    dense = [
        [0, 1 / t],
        [-2 * v ** 2 / (t * (u - 1) ** 2),
         (u + 4 * v - 1) / (t * (u - 1))]
    ]
    return csc_matrix(dense)
56
+
57
+
58
def sol_rational(t):
    """Exact solution of `fun_rational` for the initial data used in tests."""
    shifted = t + 10
    return np.asarray((t / shifted, 10 * t / shifted ** 2))
60
+
61
+
62
def fun_medazko(t, y):
    """RHS of a two-species reaction-diffusion test problem.

    ``y`` interleaves the two species: even entries belong to the diffusing
    species, odd entries to the reacting one.  Spatial derivatives are
    evaluated with vectorized finite differences on n cells.
    """
    n = y.shape[0] // 2
    k = 100  # reaction rate constant
    c = 4

    # Left boundary value switches from 2 to 0 at t = 5; pad y with the
    # boundary cells on both ends before differencing.
    phi = 2 if t <= 5 else 0
    y = np.hstack((phi, 0, y, y[-2]))

    d = 1 / n  # cell width
    j = np.arange(n) + 1
    alpha = 2 * (j * d - 1) ** 3 / c ** 2
    beta = (j * d - 1) ** 4 / c ** 2

    # Index arrays into the padded y (shifted by 2 relative to original y).
    j_2_p1 = 2 * j + 2
    j_2_m3 = 2 * j - 2
    j_2_m1 = 2 * j
    j_2 = 2 * j + 1

    f = np.empty(2 * n)
    # First species: central first difference + second difference + reaction.
    f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
              beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
              k * y[j_2_m1] * y[j_2])
    # Second species: pure reaction term.
    f[1::2] = -k * y[j_2] * y[j_2_m1]

    return f
87
+
88
+
89
def medazko_sparsity(n):
    """Jacobian sparsity pattern (ones at nonzeros) for `fun_medazko`."""
    even = np.arange(n) * 2
    odd = even + 1

    # (column, row) index pairs of the structurally nonzero entries, in the
    # same concatenation order as the original construction.
    pairs = [
        (even[1:], even[1:] - 2),
        (even, even),
        (even, even + 1),
        (even[:-1], even[:-1] + 2),
        (odd, odd),
        (odd, odd - 1),
    ]

    cols = np.hstack([c for c, _ in pairs])
    rows = np.hstack([r for _, r in pairs])

    return coo_matrix((np.ones_like(cols), (cols, rows)))
119
+
120
+
121
def fun_complex(t, y):
    """RHS of y' = -y, exercised with complex initial data."""
    return np.negative(y)
123
+
124
+
125
def jac_complex(t, y):
    """Jacobian of `fun_complex`: minus the identity."""
    n = y.shape[0]
    return -np.eye(n)
127
+
128
+
129
def jac_complex_sparse(t, y):
    """CSC-sparse Jacobian of `fun_complex` (minus the identity)."""
    return csc_matrix(-np.eye(y.shape[0]))
131
+
132
+
133
def sol_complex(t):
    """Analytic solution of `fun_complex` with y0 = 0.5 + 1j, shape (1, k)."""
    values = (0.5 + 1j) * np.exp(-t)
    return np.reshape(values, (1, -1))
136
+
137
+
138
def fun_event_dense_output_LSODA(t, y):
    """RHS y' = y * (t - 2), used by the LSODA dense-output event test."""
    return (t - 2) * y
140
+
141
+
142
def jac_event_dense_output_LSODA(t, y):
    """Scalar Jacobian of `fun_event_dense_output_LSODA`."""
    return t - 2
144
+
145
+
146
def sol_event_dense_output_LSODA(t):
    """Exact solution of the LSODA dense-output event test problem."""
    exponent = t ** 2 / 2 - 2 * t + np.log(0.05) - 6
    return np.exp(exponent)
148
+
149
+
150
def compute_error(y, y_true, rtol, atol):
    """RMS of the error scaled per component by ``atol + rtol * |y_true|``."""
    scale = atol + rtol * np.abs(y_true)
    scaled = (y - y_true) / scale
    return np.linalg.norm(scaled, axis=0) / np.sqrt(scaled.shape[0])
153
+
154
+
155
def test_integration():
    """Integrate the rational test problem with every solver, forwards and
    backwards, with and without an analytic (possibly sparse) Jacobian, and
    check accuracy, solver statistics and the dense output."""
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]

    for vectorized, method, t_span, jac in product(
            [False, True],
            ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
            [[5, 9], [5, 1]],
            [None, jac_rational, jac_rational_sparse]):

        if vectorized:
            fun = fun_rational_vectorized
        else:
            fun = fun_rational

        # Explicit solvers warn that `jac` is ignored; silence just that.
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen "
                       "solver: `jac`")
            res = solve_ivp(fun, t_span, y0, rtol=rtol,
                            atol=atol, method=method, dense_output=True,
                            jac=jac, vectorized=vectorized)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.y_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        if method == 'DOP853':
            # DOP853 spends more functions evaluation because it doesn't
            # have enough time to develop big enough step size.
            assert_(res.nfev < 50)
        else:
            assert_(res.nfev < 40)

        # Explicit methods never evaluate the Jacobian nor factorize.
        if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        else:
            assert_(0 < res.njev < 3)
            assert_(0 < res.nlu < 10)

        # Accuracy at the solver's own time points.
        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))

        # Accuracy of the dense output on a uniform grid.
        tc = np.linspace(*t_span)
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # Dense output evaluated at a scalar time.
        tc = (t_span[0] + t_span[-1]) / 2
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # The interpolant must reproduce the solver's nodes exactly.
        assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
217
+
218
+
219
+ def test_integration_complex():
220
+ rtol = 1e-3
221
+ atol = 1e-6
222
+ y0 = [0.5 + 1j]
223
+ t_span = [0, 1]
224
+ tc = np.linspace(t_span[0], t_span[1])
225
+ for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],
226
+ [None, jac_complex, jac_complex_sparse]):
227
+ with suppress_warnings() as sup:
228
+ sup.filter(UserWarning,
229
+ "The following arguments have no effect for a chosen "
230
+ "solver: `jac`")
231
+ res = solve_ivp(fun_complex, t_span, y0, method=method,
232
+ dense_output=True, rtol=rtol, atol=atol, jac=jac)
233
+
234
+ assert_equal(res.t[0], t_span[0])
235
+ assert_(res.t_events is None)
236
+ assert_(res.y_events is None)
237
+ assert_(res.success)
238
+ assert_equal(res.status, 0)
239
+
240
+ if method == 'DOP853':
241
+ assert res.nfev < 35
242
+ else:
243
+ assert res.nfev < 25
244
+
245
+ if method == 'BDF':
246
+ assert_equal(res.njev, 1)
247
+ assert res.nlu < 6
248
+ else:
249
+ assert res.njev == 0
250
+ assert res.nlu == 0
251
+
252
+ y_true = sol_complex(res.t)
253
+ e = compute_error(res.y, y_true, rtol, atol)
254
+ assert np.all(e < 5)
255
+
256
+ yc_true = sol_complex(tc)
257
+ yc = res.sol(tc)
258
+ e = compute_error(yc, yc_true, rtol, atol)
259
+
260
+ assert np.all(e < 5)
261
+
262
+
263
def test_integration_sparse_difference():
    """Integrate the Medazko problem with a sparse finite-difference
    Jacobian (via `jac_sparsity`) and compare against reference values."""
    n = 200
    t_span = [0, 20]
    y0 = np.zeros(2 * n)
    y0[1::2] = 1
    sparsity = medazko_sparsity(n)

    for method in ['BDF', 'Radau']:
        res = solve_ivp(fun_medazko, t_span, y0, method=method,
                        jac_sparsity=sparsity)

        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.y_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        # Spot-check selected components of the final state against
        # reference values.
        assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
        assert_allclose(res.y[79, -1], 0, atol=1e-3)
        assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
        assert_allclose(res.y[149, -1], 0, atol=1e-3)
        assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
        assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
        assert_allclose(res.y[238, -1], 0, atol=1e-3)
        assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
288
+
289
+
290
def test_integration_const_jac():
    """Integrate a linear system passing a constant Jacobian matrix
    (dense and sparse) directly instead of a callable."""
    rtol = 1e-3
    atol = 1e-6
    y0 = [0, 2]
    t_span = [0, 2]
    J = jac_linear()
    J_sparse = csc_matrix(J)

    for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
        res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
                        method=method, dense_output=True, jac=jac)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.y_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        # A constant Jacobian is never re-evaluated (njev == 0) but LU
        # factorizations still happen.
        assert_(res.nfev < 100)
        assert_equal(res.njev, 0)
        assert_(0 < res.nlu < 15)

        # Accuracy at the solver's nodes.
        y_true = sol_linear(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 10))

        # Accuracy of the dense output on a uniform grid.
        tc = np.linspace(*t_span)
        yc_true = sol_linear(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 15))

        # The interpolant must reproduce the solver's nodes (almost) exactly.
        assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
323
+
324
+
325
@pytest.mark.slow
@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA'])
def test_integration_stiff(method):
    """Integrate the stiff Robertson chemical kinetics problem and check
    the implicit solvers stay within a reasonable work budget."""
    rtol = 1e-6
    atol = 1e-6
    y0 = [1e4, 0, 0]
    tspan = [0, 1e8]

    def fun_robertson(t, state):
        # Classic Robertson reaction system.
        x, y, z = state
        return [
            -0.04 * x + 1e4 * y * z,
            0.04 * x - 1e4 * y * z - 3e7 * y * y,
            3e7 * y * y,
        ]

    res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,
                    atol=atol, method=method)

    # If the stiff mode is not activated correctly, these numbers will be much bigger
    assert res.nfev < 5000
    assert res.njev < 200
347
+
348
+
349
def test_events():
    """Exercise event detection with every solver: multiple events, event
    directions, terminal events, and both integration directions."""
    def event_rational_1(t, y):
        return y[0] - y[1] ** 0.7

    def event_rational_2(t, y):
        return y[1] ** 0.6 - y[0]

    def event_rational_3(t, y):
        return t - 7.4

    event_rational_3.terminal = True

    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
        # Both events, any crossing direction.
        res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[1][0] < 7.7)

        assert_equal(res.y_events[0].shape, (1, 2))
        assert_equal(res.y_events[1].shape, (1, 2))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
        assert np.isclose(
            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)

        # Only upward crossings: event 2 should not trigger.
        event_rational_1.direction = 1
        event_rational_2.direction = 1
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_equal(res.y_events[0].shape, (1, 2))
        assert_equal(res.y_events[1].shape, (0,))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)

        # Only downward crossings: event 1 should not trigger.
        event_rational_1.direction = -1
        event_rational_2.direction = -1
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        assert_equal(res.y_events[0].shape, (0,))
        assert_equal(res.y_events[1].shape, (1, 2))
        assert np.isclose(
            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)

        event_rational_1.direction = 0
        event_rational_2.direction = 0

        # A terminal event stops the integration (status == 1) before
        # event 2 has a chance to trigger.
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2,
                                event_rational_3), dense_output=True)
        assert_equal(res.status, 1)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_equal(res.t_events[2].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[2][0] < 7.5)
        assert_equal(res.y_events[0].shape, (1, 2))
        assert_equal(res.y_events[1].shape, (0,))
        assert_equal(res.y_events[2].shape, (1, 2))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
        assert np.isclose(
            event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)

        # A single event may be passed as a bare callable, not a tuple.
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=event_rational_1, dense_output=True)
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)

        assert_equal(res.y_events[0].shape, (1, 2))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)

        # Also test that termination by event doesn't break interpolants.
        tc = np.linspace(res.t[0], res.t[-1])
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, 1e-3, 1e-6)
        assert_(np.all(e < 5))

        # Test that the y_event matches solution
        assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0],
                           rtol=1e-3, atol=1e-6)

    # Test in backward direction.
    event_rational_1.direction = 0
    event_rational_2.direction = 0
    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[1][0] < 7.7)

        assert_equal(res.y_events[0].shape, (1, 2))
        assert_equal(res.y_events[1].shape, (1, 2))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
        assert np.isclose(
            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)

        # Going backwards, the sign of the crossings is reversed relative
        # to forward integration.
        event_rational_1.direction = -1
        event_rational_2.direction = -1
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_(5.3 < res.t_events[0][0] < 5.7)

        assert_equal(res.y_events[0].shape, (1, 2))
        assert_equal(res.y_events[1].shape, (0,))
        assert np.isclose(
            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)

        event_rational_1.direction = 1
        event_rational_2.direction = 1
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)

        assert_equal(res.y_events[0].shape, (0,))
        assert_equal(res.y_events[1].shape, (1, 2))
        assert np.isclose(
            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)

        event_rational_1.direction = 0
        event_rational_2.direction = 0

        # Terminal event while integrating backwards.
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2,
                                event_rational_3), dense_output=True)
        assert_equal(res.status, 1)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_equal(res.t_events[2].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        assert_(7.3 < res.t_events[2][0] < 7.5)

        assert_equal(res.y_events[0].shape, (0,))
        assert_equal(res.y_events[1].shape, (1, 2))
        assert_equal(res.y_events[2].shape, (1, 2))
        assert np.isclose(
            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
        assert np.isclose(
            event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)

        # Also test that termination by event doesn't break interpolants.
        tc = np.linspace(res.t[-1], res.t[0])
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, 1e-3, 1e-6)
        assert_(np.all(e < 5))

        assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0],
                           rtol=1e-3, atol=1e-6)
        assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0],
                           rtol=1e-3, atol=1e-6)
523
+
524
+
525
+ def _get_harmonic_oscillator():
526
+ def f(t, y):
527
+ return [y[1], -y[0]]
528
+
529
+ def event(t, y):
530
+ return y[0]
531
+
532
+ return f, event
533
+
534
+
535
@pytest.mark.parametrize('n_events', [3, 4])
def test_event_terminal_integer(n_events):
    """An integer `terminal` attribute stops integration after that many
    occurrences of the event."""
    f, event = _get_harmonic_oscillator()
    event.terminal = n_events
    res = solve_ivp(f, (0, 100), [1, 0], events=event)
    assert len(res.t_events[0]) == n_events
    assert len(res.y_events[0]) == n_events
    # Events fire at zero crossings of the first component.
    assert_allclose(res.y_events[0][:, 0], 0, atol=1e-14)
543
+
544
+
545
def test_event_terminal_iv():
    """Input validation of the event `terminal` attribute: None behaves as
    0 (non-terminal); negative or non-integral values raise ValueError."""
    f, event = _get_harmonic_oscillator()
    args = (f, (0, 100), [1, 0])

    # terminal=None and terminal=0 must produce identical event times.
    event.terminal = None
    res = solve_ivp(*args, events=event)
    event.terminal = 0
    ref = solve_ivp(*args, events=event)
    assert_allclose(res.t_events, ref.t_events)

    message = "The `terminal` attribute..."
    event.terminal = -1
    with pytest.raises(ValueError, match=message):
        solve_ivp(*args, events=event)
    event.terminal = 3.5
    with pytest.raises(ValueError, match=message):
        solve_ivp(*args, events=event)
562
+
563
+
564
def test_max_step():
    """Check `max_step` bounds the step size, invalid values raise, and a
    vanishingly small bound makes the step fail cleanly."""
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
        for t_span in ([5, 9], [5, 1]):
            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
                            max_step=0.5, atol=atol, method=method,
                            dense_output=True)
            assert_equal(res.t[0], t_span[0])
            assert_equal(res.t[-1], t_span[-1])
            # No step may exceed max_step (allow a rounding margin).
            assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))
            assert_(res.t_events is None)
            assert_(res.success)
            assert_equal(res.status, 0)

            # Accuracy at the solver's nodes.
            y_true = sol_rational(res.t)
            e = compute_error(res.y, y_true, rtol, atol)
            assert_(np.all(e < 5))

            # Accuracy of the dense output on a uniform grid.
            tc = np.linspace(*t_span)
            yc_true = sol_rational(tc)
            yc = res.sol(tc)

            e = compute_error(yc, yc_true, rtol, atol)
            assert_(np.all(e < 5))

            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)

            # A negative max_step is rejected at construction time.
            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
                          t_span[1], max_step=-1)

            # With an absurdly small max_step the solver must fail cleanly
            # and refuse to step again. (LSODA handles this differently.)
            if method is not LSODA:
                solver = method(fun_rational, t_span[0], y0, t_span[1],
                                rtol=rtol, atol=atol, max_step=1e-20)
                message = solver.step()

                assert_equal(solver.status, 'failed')
                assert_("step size is less" in message)
                assert_raises(RuntimeError, solver.step)
604
+
605
+
606
+ def test_first_step():
607
+ rtol = 1e-3
608
+ atol = 1e-6
609
+ y0 = [1/3, 2/9]
610
+ first_step = 0.1
611
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
612
+ for t_span in ([5, 9], [5, 1]):
613
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
614
+ max_step=0.5, atol=atol, method=method,
615
+ dense_output=True, first_step=first_step)
616
+
617
+ assert_equal(res.t[0], t_span[0])
618
+ assert_equal(res.t[-1], t_span[-1])
619
+ assert_allclose(first_step, np.abs(res.t[1] - 5))
620
+ assert_(res.t_events is None)
621
+ assert_(res.success)
622
+ assert_equal(res.status, 0)
623
+
624
+ y_true = sol_rational(res.t)
625
+ e = compute_error(res.y, y_true, rtol, atol)
626
+ assert_(np.all(e < 5))
627
+
628
+ tc = np.linspace(*t_span)
629
+ yc_true = sol_rational(tc)
630
+ yc = res.sol(tc)
631
+
632
+ e = compute_error(yc, yc_true, rtol, atol)
633
+ assert_(np.all(e < 5))
634
+
635
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
636
+
637
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
638
+ t_span[1], first_step=-1)
639
+ assert_raises(ValueError, method, fun_rational, t_span[0], y0,
640
+ t_span[1], first_step=5)
641
+
642
+
643
+ def test_t_eval():
644
+ rtol = 1e-3
645
+ atol = 1e-6
646
+ y0 = [1/3, 2/9]
647
+ for t_span in ([5, 9], [5, 1]):
648
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
649
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
650
+ t_eval=t_eval)
651
+ assert_equal(res.t, t_eval)
652
+ assert_(res.t_events is None)
653
+ assert_(res.success)
654
+ assert_equal(res.status, 0)
655
+
656
+ y_true = sol_rational(res.t)
657
+ e = compute_error(res.y, y_true, rtol, atol)
658
+ assert_(np.all(e < 5))
659
+
660
+ t_eval = [5, 5.01, 7, 8, 8.01, 9]
661
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
662
+ t_eval=t_eval)
663
+ assert_equal(res.t, t_eval)
664
+ assert_(res.t_events is None)
665
+ assert_(res.success)
666
+ assert_equal(res.status, 0)
667
+
668
+ y_true = sol_rational(res.t)
669
+ e = compute_error(res.y, y_true, rtol, atol)
670
+ assert_(np.all(e < 5))
671
+
672
+ t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
673
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
674
+ t_eval=t_eval)
675
+ assert_equal(res.t, t_eval)
676
+ assert_(res.t_events is None)
677
+ assert_(res.success)
678
+ assert_equal(res.status, 0)
679
+
680
+ t_eval = [5.01, 7, 8, 8.01]
681
+ res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
682
+ t_eval=t_eval)
683
+ assert_equal(res.t, t_eval)
684
+ assert_(res.t_events is None)
685
+ assert_(res.success)
686
+ assert_equal(res.status, 0)
687
+
688
+ y_true = sol_rational(res.t)
689
+ e = compute_error(res.y, y_true, rtol, atol)
690
+ assert_(np.all(e < 5))
691
+
692
+ t_eval = [4.99, 3, 1.5, 1.1, 1.01]
693
+ res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
694
+ t_eval=t_eval)
695
+ assert_equal(res.t, t_eval)
696
+ assert_(res.t_events is None)
697
+ assert_(res.success)
698
+ assert_equal(res.status, 0)
699
+
700
+ t_eval = [4, 6]
701
+ assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
702
+ rtol=rtol, atol=atol, t_eval=t_eval)
703
+
704
+
705
+ def test_t_eval_dense_output():
706
+ rtol = 1e-3
707
+ atol = 1e-6
708
+ y0 = [1/3, 2/9]
709
+ t_span = [5, 9]
710
+ t_eval = np.linspace(t_span[0], t_span[1], 10)
711
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
712
+ t_eval=t_eval)
713
+ res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
714
+ t_eval=t_eval, dense_output=True)
715
+ assert_equal(res.t, t_eval)
716
+ assert_(res.t_events is None)
717
+ assert_(res.success)
718
+ assert_equal(res.status, 0)
719
+
720
+ assert_equal(res.t, res_d.t)
721
+ assert_equal(res.y, res_d.y)
722
+ assert_(res_d.t_events is None)
723
+ assert_(res_d.success)
724
+ assert_equal(res_d.status, 0)
725
+
726
+ # if t and y are equal only test values for one case
727
+ y_true = sol_rational(res.t)
728
+ e = compute_error(res.y, y_true, rtol, atol)
729
+ assert_(np.all(e < 5))
730
+
731
+
732
+ def test_t_eval_early_event():
733
+ def early_event(t, y):
734
+ return t - 7
735
+
736
+ early_event.terminal = True
737
+
738
+ rtol = 1e-3
739
+ atol = 1e-6
740
+ y0 = [1/3, 2/9]
741
+ t_span = [5, 9]
742
+ t_eval = np.linspace(7.5, 9, 16)
743
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
744
+ with suppress_warnings() as sup:
745
+ sup.filter(UserWarning,
746
+ "The following arguments have no effect for a chosen "
747
+ "solver: `jac`")
748
+ res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
749
+ method=method, t_eval=t_eval, events=early_event,
750
+ jac=jac_rational)
751
+ assert res.success
752
+ assert res.message == 'A termination event occurred.'
753
+ assert res.status == 1
754
+ assert not res.t and not res.y
755
+ assert len(res.t_events) == 1
756
+ assert res.t_events[0].size == 1
757
+ assert res.t_events[0][0] == 7
758
+
759
+
760
+ def test_event_dense_output_LSODA():
761
+ def event_lsoda(t, y):
762
+ return y[0] - 2.02e-5
763
+
764
+ rtol = 1e-3
765
+ atol = 1e-6
766
+ y0 = [0.05]
767
+ t_span = [-2, 2]
768
+ first_step = 1e-3
769
+ res = solve_ivp(
770
+ fun_event_dense_output_LSODA,
771
+ t_span,
772
+ y0,
773
+ method="LSODA",
774
+ dense_output=True,
775
+ events=event_lsoda,
776
+ first_step=first_step,
777
+ max_step=1,
778
+ rtol=rtol,
779
+ atol=atol,
780
+ jac=jac_event_dense_output_LSODA,
781
+ )
782
+
783
+ assert_equal(res.t[0], t_span[0])
784
+ assert_equal(res.t[-1], t_span[-1])
785
+ assert_allclose(first_step, np.abs(res.t[1] - t_span[0]))
786
+ assert res.success
787
+ assert_equal(res.status, 0)
788
+
789
+ y_true = sol_event_dense_output_LSODA(res.t)
790
+ e = compute_error(res.y, y_true, rtol, atol)
791
+ assert_array_less(e, 5)
792
+
793
+ tc = np.linspace(*t_span)
794
+ yc_true = sol_event_dense_output_LSODA(tc)
795
+ yc = res.sol(tc)
796
+ e = compute_error(yc, yc_true, rtol, atol)
797
+ assert_array_less(e, 5)
798
+
799
+ assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
800
+
801
+
802
+ def test_no_integration():
803
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
804
+ sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
805
+ method=method, dense_output=True)
806
+ assert_equal(sol.sol(4), [2, 3])
807
+ assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
808
+
809
+
810
+ def test_no_integration_class():
811
+ for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
812
+ solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
813
+ solver.step()
814
+ assert_equal(solver.status, 'finished')
815
+ sol = solver.dense_output()
816
+ assert_equal(sol(0.0), [10.0, 0.0])
817
+ assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])
818
+
819
+ solver = method(lambda t, y: -y, 0.0, [], np.inf)
820
+ solver.step()
821
+ assert_equal(solver.status, 'finished')
822
+ sol = solver.dense_output()
823
+ assert_equal(sol(100.0), [])
824
+ assert_equal(sol([0, 1, 2]), np.empty((0, 3)))
825
+
826
+
827
+ def test_empty():
828
+ def fun(t, y):
829
+ return np.zeros((0,))
830
+
831
+ y0 = np.zeros((0,))
832
+
833
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
834
+ sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,
835
+ method=method, dense_output=True)
836
+ assert_equal(sol.sol(10), np.zeros((0,)))
837
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
838
+
839
+ for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
840
+ sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,
841
+ method=method, dense_output=True)
842
+ assert_equal(sol.sol(10), np.zeros((0,)))
843
+ assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
844
+
845
+
846
+ def test_ConstantDenseOutput():
847
+ sol = ConstantDenseOutput(0, 1, np.array([1, 2]))
848
+ assert_allclose(sol(1.5), [1, 2])
849
+ assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])
850
+
851
+ sol = ConstantDenseOutput(0, 1, np.array([]))
852
+ assert_allclose(sol(1.5), np.empty(0))
853
+ assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))
854
+
855
+
856
+ def test_classes():
857
+ y0 = [1 / 3, 2 / 9]
858
+ for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
859
+ solver = cls(fun_rational, 5, y0, np.inf)
860
+ assert_equal(solver.n, 2)
861
+ assert_equal(solver.status, 'running')
862
+ assert_equal(solver.t_bound, np.inf)
863
+ assert_equal(solver.direction, 1)
864
+ assert_equal(solver.t, 5)
865
+ assert_equal(solver.y, y0)
866
+ assert_(solver.step_size is None)
867
+ if cls is not LSODA:
868
+ assert_(solver.nfev > 0)
869
+ assert_(solver.njev >= 0)
870
+ assert_equal(solver.nlu, 0)
871
+ else:
872
+ assert_equal(solver.nfev, 0)
873
+ assert_equal(solver.njev, 0)
874
+ assert_equal(solver.nlu, 0)
875
+
876
+ assert_raises(RuntimeError, solver.dense_output)
877
+
878
+ message = solver.step()
879
+ assert_equal(solver.status, 'running')
880
+ assert_equal(message, None)
881
+ assert_equal(solver.n, 2)
882
+ assert_equal(solver.t_bound, np.inf)
883
+ assert_equal(solver.direction, 1)
884
+ assert_(solver.t > 5)
885
+ assert_(not np.all(np.equal(solver.y, y0)))
886
+ assert_(solver.step_size > 0)
887
+ assert_(solver.nfev > 0)
888
+ assert_(solver.njev >= 0)
889
+ assert_(solver.nlu >= 0)
890
+ sol = solver.dense_output()
891
+ assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
892
+
893
+
894
+ def test_OdeSolution():
895
+ ts = np.array([0, 2, 5], dtype=float)
896
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
897
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
898
+
899
+ sol = OdeSolution(ts, [s1, s2])
900
+
901
+ assert_equal(sol(-1), [-1])
902
+ assert_equal(sol(1), [-1])
903
+ assert_equal(sol(2), [-1])
904
+ assert_equal(sol(3), [1])
905
+ assert_equal(sol(5), [1])
906
+ assert_equal(sol(6), [1])
907
+
908
+ assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
909
+ np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
910
+
911
+ ts = np.array([10, 4, -3])
912
+ s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
913
+ s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
914
+
915
+ sol = OdeSolution(ts, [s1, s2])
916
+ assert_equal(sol(11), [-1])
917
+ assert_equal(sol(10), [-1])
918
+ assert_equal(sol(5), [-1])
919
+ assert_equal(sol(4), [-1])
920
+ assert_equal(sol(0), [1])
921
+ assert_equal(sol(-3), [1])
922
+ assert_equal(sol(-4), [1])
923
+
924
+ assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
925
+ np.array([[-1, 1, -1, 1, -1, 1, -1]]))
926
+
927
+ ts = np.array([1, 1])
928
+ s = ConstantDenseOutput(1, 1, np.array([10]))
929
+ sol = OdeSolution(ts, [s])
930
+ assert_equal(sol(0), [10])
931
+ assert_equal(sol(1), [10])
932
+ assert_equal(sol(2), [10])
933
+
934
+ assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
935
+
936
+
937
+ def test_num_jac():
938
+ def fun(t, y):
939
+ return np.vstack([
940
+ -0.04 * y[0] + 1e4 * y[1] * y[2],
941
+ 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
942
+ 3e7 * y[1] ** 2
943
+ ])
944
+
945
+ def jac(t, y):
946
+ return np.array([
947
+ [-0.04, 1e4 * y[2], 1e4 * y[1]],
948
+ [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
949
+ [0, 6e7 * y[1], 0]
950
+ ])
951
+
952
+ t = 1
953
+ y = np.array([1, 0, 0])
954
+ J_true = jac(t, y)
955
+ threshold = 1e-5
956
+ f = fun(t, y).ravel()
957
+
958
+ J_num, factor = num_jac(fun, t, y, f, threshold, None)
959
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
960
+
961
+ J_num, factor = num_jac(fun, t, y, f, threshold, factor)
962
+ assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
963
+
964
+
965
+ def test_num_jac_sparse():
966
+ def fun(t, y):
967
+ e = y[1:]**3 - y[:-1]**2
968
+ z = np.zeros(y.shape[1])
969
+ return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))
970
+
971
+ def structure(n):
972
+ A = np.zeros((n, n), dtype=int)
973
+ A[0, 0] = 1
974
+ A[0, 1] = 1
975
+ for i in range(1, n - 1):
976
+ A[i, i - 1: i + 2] = 1
977
+ A[-1, -1] = 1
978
+ A[-1, -2] = 1
979
+
980
+ return A
981
+
982
+ np.random.seed(0)
983
+ n = 20
984
+ y = np.random.randn(n)
985
+ A = structure(n)
986
+ groups = group_columns(A)
987
+
988
+ f = fun(0, y[:, None]).ravel()
989
+
990
+ # Compare dense and sparse results, assuming that dense implementation
991
+ # is correct (as it is straightforward).
992
+ J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
993
+ sparsity=(A, groups))
994
+ J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
995
+ assert_allclose(J_num_dense, J_num_sparse.toarray(),
996
+ rtol=1e-12, atol=1e-14)
997
+ assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
998
+
999
+ # Take small factors to trigger their recomputing inside.
1000
+ factor = np.random.uniform(0, 1e-12, size=n)
1001
+ J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
1002
+ sparsity=(A, groups))
1003
+ J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
1004
+
1005
+ assert_allclose(J_num_dense, J_num_sparse.toarray(),
1006
+ rtol=1e-12, atol=1e-14)
1007
+ assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
1008
+
1009
+
1010
+ def test_args():
1011
+
1012
+ # sys3 is actually two decoupled systems. (x, y) form a
1013
+ # linear oscillator, while z is a nonlinear first order
1014
+ # system with equilibria at z=0 and z=1. If k > 0, z=1
1015
+ # is stable and z=0 is unstable.
1016
+
1017
+ def sys3(t, w, omega, k, zfinal):
1018
+ x, y, z = w
1019
+ return [-omega*y, omega*x, k*z*(1 - z)]
1020
+
1021
+ def sys3_jac(t, w, omega, k, zfinal):
1022
+ x, y, z = w
1023
+ J = np.array([[0, -omega, 0],
1024
+ [omega, 0, 0],
1025
+ [0, 0, k*(1 - 2*z)]])
1026
+ return J
1027
+
1028
+ def sys3_x0decreasing(t, w, omega, k, zfinal):
1029
+ x, y, z = w
1030
+ return x
1031
+
1032
+ def sys3_y0increasing(t, w, omega, k, zfinal):
1033
+ x, y, z = w
1034
+ return y
1035
+
1036
+ def sys3_zfinal(t, w, omega, k, zfinal):
1037
+ x, y, z = w
1038
+ return z - zfinal
1039
+
1040
+ # Set the event flags for the event functions.
1041
+ sys3_x0decreasing.direction = -1
1042
+ sys3_y0increasing.direction = 1
1043
+ sys3_zfinal.terminal = True
1044
+
1045
+ omega = 2
1046
+ k = 4
1047
+
1048
+ tfinal = 5
1049
+ zfinal = 0.99
1050
+ # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.
1051
+ # The condition z(tfinal) = zfinal is the terminal event.
1052
+ z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))
1053
+
1054
+ w0 = [0, -1, z0]
1055
+
1056
+ # Provide the jac argument and use the Radau method to ensure that the use
1057
+ # of the Jacobian function is exercised.
1058
+ # If event handling is working, the solution will stop at tfinal, not tend.
1059
+ tend = 2*tfinal
1060
+ sol = solve_ivp(sys3, [0, tend], w0,
1061
+ events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],
1062
+ dense_output=True, args=(omega, k, zfinal),
1063
+ method='Radau', jac=sys3_jac,
1064
+ rtol=1e-10, atol=1e-13)
1065
+
1066
+ # Check that we got the expected events at the expected times.
1067
+ x0events_t = sol.t_events[0]
1068
+ y0events_t = sol.t_events[1]
1069
+ zfinalevents_t = sol.t_events[2]
1070
+ assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])
1071
+ assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])
1072
+ assert_allclose(zfinalevents_t, [tfinal])
1073
+
1074
+ # Check that the solution agrees with the known exact solution.
1075
+ t = np.linspace(0, zfinalevents_t[0], 250)
1076
+ w = sol.sol(t)
1077
+ assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)
1078
+ assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)
1079
+ assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),
1080
+ rtol=1e-9, atol=1e-12)
1081
+
1082
+ # Check that the state variables have the expected values at the events.
1083
+ x0events = sol.sol(x0events_t)
1084
+ y0events = sol.sol(y0events_t)
1085
+ zfinalevents = sol.sol(zfinalevents_t)
1086
+ assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)
1087
+ assert_allclose(x0events[1], np.ones_like(x0events[1]))
1088
+ assert_allclose(y0events[0], np.ones_like(y0events[0]))
1089
+ assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)
1090
+ assert_allclose(zfinalevents[2], [zfinal])
1091
+
1092
+
1093
+ def test_array_rtol():
1094
+ # solve_ivp had a bug with array_like `rtol`; see gh-15482
1095
+ # check that it's fixed
1096
+ def f(t, y):
1097
+ return y[0], y[1]
1098
+
1099
+ # no warning (or error) when `rtol` is array_like
1100
+ sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1])
1101
+ err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
1102
+
1103
+ # warning when an element of `rtol` is too small
1104
+ with pytest.warns(UserWarning, match="At least one element..."):
1105
+ sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16])
1106
+ err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
1107
+
1108
+ # tighter rtol improves the error
1109
+ assert err2 < err1
1110
+
1111
+ @pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])
1112
+ def test_integration_zero_rhs(method):
1113
+ result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)
1114
+ assert_(result.success)
1115
+ assert_equal(result.status, 0)
1116
+ assert_allclose(result.y, 1.0, rtol=1e-15)
1117
+
1118
+
1119
+ def test_args_single_value():
1120
+ def fun_with_arg(t, y, a):
1121
+ return a*y
1122
+
1123
+ message = "Supplied 'args' cannot be unpacked."
1124
+ with pytest.raises(TypeError, match=message):
1125
+ solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1)
1126
+
1127
+ sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,))
1128
+ assert_allclose(sol.y[0, -1], np.exp(-0.1))
1129
+
1130
+ @pytest.mark.parametrize("f0_fill", [np.nan, np.inf])
1131
+ def test_initial_state_finiteness(f0_fill):
1132
+ # regression test for gh-17846
1133
+ msg = "All components of the initial state `y0` must be finite."
1134
+ with pytest.raises(ValueError, match=msg):
1135
+ solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill))
venv/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from numpy.testing import assert_allclose, assert_
3
+ import numpy as np
4
+ from scipy.integrate import RK23, RK45, DOP853
5
+ from scipy.integrate._ivp import dop853_coefficients
6
+
7
+
8
+ @pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
9
+ def test_coefficient_properties(solver):
10
+ assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
11
+ assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14)
12
+
13
+
14
+ def test_coefficient_properties_dop853():
15
+ assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15)
16
+ assert_allclose(np.sum(dop853_coefficients.A, axis=1),
17
+ dop853_coefficients.C,
18
+ rtol=1e-14)
19
+
20
+
21
+ @pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
22
+ def test_error_estimation(solver_class):
23
+ step = 0.2
24
+ solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
25
+ solver.step()
26
+ error_estimate = solver._estimate_error(solver.K, step)
27
+ error = solver.y - np.exp([step])
28
+ assert_(np.abs(error) < np.abs(error_estimate))
29
+
30
+
31
+ @pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
32
+ def test_error_estimation_complex(solver_class):
33
+ h = 0.2
34
+ solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
35
+ solver.step()
36
+ err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
37
+ assert np.isrealobj(err_norm)
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
venv/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/spatial/__init__.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============================================================
3
+ Spatial algorithms and data structures (:mod:`scipy.spatial`)
4
+ =============================================================
5
+
6
+ .. currentmodule:: scipy.spatial
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ spatial.distance
12
+
13
+ Spatial transformations
14
+ =======================
15
+
16
+ These are contained in the `scipy.spatial.transform` submodule.
17
+
18
+ Nearest-neighbor queries
19
+ ========================
20
+ .. autosummary::
21
+ :toctree: generated/
22
+
23
+ KDTree -- class for efficient nearest-neighbor queries
24
+ cKDTree -- class for efficient nearest-neighbor queries (faster implementation)
25
+ Rectangle
26
+
27
+ Distance metrics
28
+ ================
29
+
30
+ Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule.
31
+
32
+ Delaunay triangulation, convex hulls, and Voronoi diagrams
33
+ ==========================================================
34
+
35
+ .. autosummary::
36
+ :toctree: generated/
37
+
38
+ Delaunay -- compute Delaunay triangulation of input points
39
+ ConvexHull -- compute a convex hull for input points
40
+ Voronoi -- compute a Voronoi diagram hull from input points
41
+ SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere
42
+ HalfspaceIntersection -- compute the intersection points of input halfspaces
43
+
44
+ Plotting helpers
45
+ ================
46
+
47
+ .. autosummary::
48
+ :toctree: generated/
49
+
50
+ delaunay_plot_2d -- plot 2-D triangulation
51
+ convex_hull_plot_2d -- plot 2-D convex hull
52
+ voronoi_plot_2d -- plot 2-D Voronoi diagram
53
+
54
+ .. seealso:: :ref:`Tutorial <qhulltutorial>`
55
+
56
+
57
+ Simplex representation
58
+ ======================
59
+ The simplices (triangles, tetrahedra, etc.) appearing in the Delaunay
60
+ tessellation (N-D simplices), convex hull facets, and Voronoi ridges
61
+ (N-1-D simplices) are represented in the following scheme::
62
+
63
+ tess = Delaunay(points)
64
+ hull = ConvexHull(points)
65
+ voro = Voronoi(points)
66
+
67
+ # coordinates of the jth vertex of the ith simplex
68
+ tess.points[tess.simplices[i, j], :] # tessellation element
69
+ hull.points[hull.simplices[i, j], :] # convex hull facet
70
+ voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
71
+
72
+ For Delaunay triangulations and convex hulls, the neighborhood
73
+ structure of the simplices satisfies the condition:
74
+ ``tess.neighbors[i,j]`` is the neighboring simplex of the ith
75
+ simplex, opposite to the ``j``-vertex. It is -1 in case of no neighbor.
76
+
77
+ Convex hull facets also define a hyperplane equation::
78
+
79
+ (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
80
+
81
+ Similar hyperplane equations for the Delaunay triangulation correspond
82
+ to the convex hull facets on the corresponding N+1-D
83
+ paraboloid.
84
+
85
+ The Delaunay triangulation objects offer a method for locating the
86
+ simplex containing a given point, and barycentric coordinate
87
+ computations.
88
+
89
+ Functions
90
+ ---------
91
+
92
+ .. autosummary::
93
+ :toctree: generated/
94
+
95
+ tsearch
96
+ distance_matrix
97
+ minkowski_distance
98
+ minkowski_distance_p
99
+ procrustes
100
+ geometric_slerp
101
+
102
+ Warnings / Errors used in :mod:`scipy.spatial`
103
+ ----------------------------------------------
104
+ .. autosummary::
105
+ :toctree: generated/
106
+
107
+ QhullError
108
+ """ # noqa: E501
109
+
110
+ from ._kdtree import *
111
+ from ._ckdtree import *
112
+ from ._qhull import *
113
+ from ._spherical_voronoi import SphericalVoronoi
114
+ from ._plotutils import *
115
+ from ._procrustes import procrustes
116
+ from ._geometric_slerp import geometric_slerp
117
+
118
+ # Deprecated namespaces, to be removed in v2.0.0
119
+ from . import ckdtree, kdtree, qhull
120
+
121
+ __all__ = [s for s in dir() if not s.startswith('_')]
122
+
123
+ from . import distance, transform
124
+
125
+ __all__ += ['distance', 'transform']
126
+
127
+ from scipy._lib._testutils import PytestTester
128
+ test = PytestTester(__name__)
129
+ del PytestTester
venv/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import (
3
+ Any,
4
+ Generic,
5
+ overload,
6
+ TypeVar,
7
+ )
8
+
9
+ import numpy as np
10
+ import numpy.typing as npt
11
+ from scipy.sparse import coo_matrix, dok_matrix
12
+
13
+ from typing import Literal
14
+
15
+ # TODO: Replace `ndarray` with a 1D float64 array when possible
16
+ _BoxType = TypeVar("_BoxType", None, npt.NDArray[np.float64])
17
+
18
+ # Copied from `numpy.typing._scalar_like._ScalarLike`
19
+ # TODO: Expand with 0D arrays once we have shape support
20
+ _ArrayLike0D = bool | int | float | complex | str | bytes | np.generic
21
+
22
+ _WeightType = npt.ArrayLike | tuple[npt.ArrayLike | None, npt.ArrayLike | None]
23
+
24
+ class cKDTreeNode:
25
+ @property
26
+ def data_points(self) -> npt.NDArray[np.float64]: ...
27
+ @property
28
+ def indices(self) -> npt.NDArray[np.intp]: ...
29
+
30
+ # These are read-only attributes in cython, which behave like properties
31
+ @property
32
+ def level(self) -> int: ...
33
+ @property
34
+ def split_dim(self) -> int: ...
35
+ @property
36
+ def children(self) -> int: ...
37
+ @property
38
+ def start_idx(self) -> int: ...
39
+ @property
40
+ def end_idx(self) -> int: ...
41
+ @property
42
+ def split(self) -> float: ...
43
+ @property
44
+ def lesser(self) -> cKDTreeNode | None: ...
45
+ @property
46
+ def greater(self) -> cKDTreeNode | None: ...
47
+
48
+ class cKDTree(Generic[_BoxType]):
49
+ @property
50
+ def n(self) -> int: ...
51
+ @property
52
+ def m(self) -> int: ...
53
+ @property
54
+ def leafsize(self) -> int: ...
55
+ @property
56
+ def size(self) -> int: ...
57
+ @property
58
+ def tree(self) -> cKDTreeNode: ...
59
+
60
+ # These are read-only attributes in cython, which behave like properties
61
+ @property
62
+ def data(self) -> npt.NDArray[np.float64]: ...
63
+ @property
64
+ def maxes(self) -> npt.NDArray[np.float64]: ...
65
+ @property
66
+ def mins(self) -> npt.NDArray[np.float64]: ...
67
+ @property
68
+ def indices(self) -> npt.NDArray[np.float64]: ...
69
+ @property
70
+ def boxsize(self) -> _BoxType: ...
71
+
72
+ # NOTE: In practice `__init__` is used as constructor, not `__new__`.
73
+ # The latter gives us more flexibility in setting the generic parameter
74
+ # though.
75
+ @overload
76
+ def __new__( # type: ignore[misc]
77
+ cls,
78
+ data: npt.ArrayLike,
79
+ leafsize: int = ...,
80
+ compact_nodes: bool = ...,
81
+ copy_data: bool = ...,
82
+ balanced_tree: bool = ...,
83
+ boxsize: None = ...,
84
+ ) -> cKDTree[None]: ...
85
+ @overload
86
+ def __new__(
87
+ cls,
88
+ data: npt.ArrayLike,
89
+ leafsize: int = ...,
90
+ compact_nodes: bool = ...,
91
+ copy_data: bool = ...,
92
+ balanced_tree: bool = ...,
93
+ boxsize: npt.ArrayLike = ...,
94
+ ) -> cKDTree[npt.NDArray[np.float64]]: ...
95
+
96
+ # TODO: returns a 2-tuple of scalars if `x.ndim == 1` and `k == 1`,
97
+ # returns a 2-tuple of arrays otherwise
98
+ def query(
99
+ self,
100
+ x: npt.ArrayLike,
101
+ k: npt.ArrayLike = ...,
102
+ eps: float = ...,
103
+ p: float = ...,
104
+ distance_upper_bound: float = ...,
105
+ workers: int | None = ...,
106
+ ) -> tuple[Any, Any]: ...
107
+
108
+ # TODO: returns a list scalars if `x.ndim <= 1`,
109
+ # returns an object array of lists otherwise
110
+ def query_ball_point(
111
+ self,
112
+ x: npt.ArrayLike,
113
+ r: npt.ArrayLike,
114
+ p: float,
115
+ eps: float = ...,
116
+ workers: int | None = ...,
117
+ return_sorted: bool | None = ...,
118
+ return_length: bool = ...
119
+ ) -> Any: ...
120
+
121
+ def query_ball_tree(
122
+ self,
123
+ other: cKDTree,
124
+ r: float,
125
+ p: float,
126
+ eps: float = ...,
127
+ ) -> list[list[int]]: ...
128
+
129
+ @overload
130
+ def query_pairs( # type: ignore[misc]
131
+ self,
132
+ r: float,
133
+ p: float = ...,
134
+ eps: float = ...,
135
+ output_type: Literal["set"] = ...,
136
+ ) -> set[tuple[int, int]]: ...
137
+ @overload
138
+ def query_pairs(
139
+ self,
140
+ r: float,
141
+ p: float = ...,
142
+ eps: float = ...,
143
+ output_type: Literal["ndarray"] = ...,
144
+ ) -> npt.NDArray[np.intp]: ...
145
+
146
+ @overload
147
+ def count_neighbors( # type: ignore[misc]
148
+ self,
149
+ other: cKDTree,
150
+ r: _ArrayLike0D,
151
+ p: float = ...,
152
+ weights: None | tuple[None, None] = ...,
153
+ cumulative: bool = ...,
154
+ ) -> int: ...
155
+ @overload
156
+ def count_neighbors( # type: ignore[misc]
157
+ self,
158
+ other: cKDTree,
159
+ r: _ArrayLike0D,
160
+ p: float = ...,
161
+ weights: _WeightType = ...,
162
+ cumulative: bool = ...,
163
+ ) -> np.float64: ...
164
+ @overload
165
+ def count_neighbors( # type: ignore[misc]
166
+ self,
167
+ other: cKDTree,
168
+ r: npt.ArrayLike,
169
+ p: float = ...,
170
+ weights: None | tuple[None, None] = ...,
171
+ cumulative: bool = ...,
172
+ ) -> npt.NDArray[np.intp]: ...
173
+ @overload
174
+ def count_neighbors(
175
+ self,
176
+ other: cKDTree,
177
+ r: npt.ArrayLike,
178
+ p: float = ...,
179
+ weights: _WeightType = ...,
180
+ cumulative: bool = ...,
181
+ ) -> npt.NDArray[np.float64]: ...
182
+
183
+ @overload
184
+ def sparse_distance_matrix( # type: ignore[misc]
185
+ self,
186
+ other: cKDTree,
187
+ max_distance: float,
188
+ p: float = ...,
189
+ output_type: Literal["dok_matrix"] = ...,
190
+ ) -> dok_matrix: ...
191
+ @overload
192
+ def sparse_distance_matrix( # type: ignore[misc]
193
+ self,
194
+ other: cKDTree,
195
+ max_distance: float,
196
+ p: float = ...,
197
+ output_type: Literal["coo_matrix"] = ...,
198
+ ) -> coo_matrix: ...
199
+ @overload
200
+ def sparse_distance_matrix( # type: ignore[misc]
201
+ self,
202
+ other: cKDTree,
203
+ max_distance: float,
204
+ p: float = ...,
205
+ output_type: Literal["dict"] = ...,
206
+ ) -> dict[tuple[int, int], float]: ...
207
+ @overload
208
+ def sparse_distance_matrix(
209
+ self,
210
+ other: cKDTree,
211
+ max_distance: float,
212
+ p: float = ...,
213
+ output_type: Literal["ndarray"] = ...,
214
+ ) -> npt.NDArray[np.void]: ...
venv/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (641 kB). View file
 
venv/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (113 kB). View file
 
venv/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ __all__ = ['geometric_slerp']
4
+
5
+ import warnings
6
+ from typing import TYPE_CHECKING
7
+
8
+ import numpy as np
9
+ from scipy.spatial.distance import euclidean
10
+
11
+ if TYPE_CHECKING:
12
+ import numpy.typing as npt
13
+
14
+
15
+ def _geometric_slerp(start, end, t):
16
+ # create an orthogonal basis using QR decomposition
17
+ basis = np.vstack([start, end])
18
+ Q, R = np.linalg.qr(basis.T)
19
+ signs = 2 * (np.diag(R) >= 0) - 1
20
+ Q = Q.T * signs.T[:, np.newaxis]
21
+ R = R.T * signs.T[:, np.newaxis]
22
+
23
+ # calculate the angle between `start` and `end`
24
+ c = np.dot(start, end)
25
+ s = np.linalg.det(R)
26
+ omega = np.arctan2(s, c)
27
+
28
+ # interpolate
29
+ start, end = Q
30
+ s = np.sin(t * omega)
31
+ c = np.cos(t * omega)
32
+ return start * c[:, np.newaxis] + end * s[:, np.newaxis]
33
+
34
+
35
def geometric_slerp(
    start: npt.ArrayLike,
    end: npt.ArrayLike,
    t: npt.ArrayLike,
    tol: float = 1e-7,
) -> np.ndarray:
    """
    Geometric spherical linear interpolation.

    The interpolation occurs along a unit-radius
    great circle arc in arbitrary dimensional space.

    Parameters
    ----------
    start : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    end : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    t : float or (n_points,) 1D array-like
        A float or 1D array-like of doubles representing interpolation
        parameters, with values required in the inclusive interval
        between 0 and 1. A common approach is to generate the array
        with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
        Ascending, descending, and scrambled orders are permitted.
    tol : float
        The absolute tolerance for determining if the start and end
        coordinates are antipodes.

    Returns
    -------
    result : (t.size, D)
        An array of doubles containing the interpolated
        spherical path and including start and
        end when 0 and 1 t are used. The
        interpolated values should correspond to the
        same sort order provided in the t array. The result
        may be 1-dimensional if ``t`` is a float.

    Raises
    ------
    ValueError
        If ``start`` and ``end`` are antipodes, not on the
        unit n-sphere, if any value in ``t`` is outside ``[0, 1]``,
        or for a variety of degenerate conditions.

    See Also
    --------
    scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions

    Notes
    -----
    The implementation is based on the mathematical formula provided in [1]_,
    and the first known presentation of this algorithm, derived from study of
    4-D geometry, is credited to Glenn Davis in a footnote of the original
    quaternion Slerp publication by Ken Shoemake [2]_.

    .. versionadded:: 1.5.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
    .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
           ACM SIGGRAPH Computer Graphics, 19(3): 245-254.

    Examples
    --------
    Interpolate four linearly-spaced values on the circumference of
    a circle spanning 90 degrees:

    >>> import numpy as np
    >>> from scipy.spatial import geometric_slerp
    >>> start = np.array([1, 0])
    >>> end = np.array([0, 1])
    >>> t_vals = np.linspace(0, 1, 4)
    >>> geometric_slerp(start, end, t_vals)
    array([[1.       , 0.       ],
           [0.8660254, 0.5      ],
           [0.5      , 0.8660254],
           [0.       , 1.       ]])

    Attempting to interpolate between antipodes is ambiguous because
    there are two possible paths on a circle (and infinitely many on a
    sphere). One of the ambiguous paths is returned along with a warning:

    >>> opposite_pole = np.array([-1, 0])
    >>> with np.testing.suppress_warnings() as sup:
    ...     sup.filter(UserWarning)
    ...     geometric_slerp(start,
    ...                     opposite_pole,
    ...                     t_vals)
    array([[ 1.00000000e+00,  0.00000000e+00],
           [ 5.00000000e-01,  8.66025404e-01],
           [-5.00000000e-01,  8.66025404e-01],
           [-1.00000000e+00,  1.22464680e-16]])
    """
    start = np.asarray(start, dtype=np.float64)
    end = np.asarray(end, dtype=np.float64)
    # Convert t up front so shape/range validation applies uniformly.
    t = np.asarray(t, dtype=np.float64)

    if t.ndim > 1:
        raise ValueError("The interpolation parameter "
                         "value must be one dimensional.")

    if start.ndim != 1 or end.ndim != 1:
        raise ValueError("Start and end coordinates "
                         "must be one-dimensional")

    if start.size != end.size:
        raise ValueError("The dimensions of start and "
                         "end must match (have same size)")

    if start.size < 2 or end.size < 2:
        raise ValueError("The start and end coordinates must "
                         "both be in at least two-dimensional "
                         "space")

    # Validate t before any shortcut so that out-of-range parameters are
    # rejected even for degenerate (equal-endpoint) inputs.
    if t.size == 0:
        return np.empty((0, start.size))

    if t.min() < 0 or t.max() > 1:
        raise ValueError("interpolation parameter must be in [0, 1]")

    if np.array_equal(start, end):
        # Degenerate arc: every interpolant equals the shared endpoint.
        # Match the documented shape contract: 1-D result for scalar t.
        if t.ndim == 0:
            return start.copy()
        return np.linspace(start, start, t.size)

    # for points that violate equation for n-sphere
    for coord in [start, end]:
        if not np.allclose(np.linalg.norm(coord), 1.0,
                           rtol=1e-9,
                           atol=0):
            raise ValueError("start and end are not"
                             " on a unit n-sphere")

    if not isinstance(tol, float):
        raise ValueError("tol must be a float")
    else:
        tol = np.fabs(tol)

    coord_dist = euclidean(start, end)

    # diameter of 2 within tolerance means antipodes, which is a problem
    # for all unit n-spheres (even the 0-sphere would have an ambiguous path)
    if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
        warnings.warn("start and end are antipodes "
                      "using the specified tolerance; "
                      "this may cause ambiguous slerp paths",
                      stacklevel=2)

    if t.ndim == 0:
        # Scalar parameter: evaluate at a single point and squeeze to 1-D.
        return _geometric_slerp(start,
                                end,
                                np.atleast_1d(t)).ravel()
    else:
        return _geometric_slerp(start,
                                end,
                                t)
venv/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB). View file
 
venv/lib/python3.10/site-packages/scipy/spatial/_kdtree.py ADDED
@@ -0,0 +1,920 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Anne M. Archibald 2008
2
+ # Released under the scipy license
3
+ import numpy as np
4
+ from ._ckdtree import cKDTree, cKDTreeNode
5
+
6
+ __all__ = ['minkowski_distance_p', 'minkowski_distance',
7
+ 'distance_matrix',
8
+ 'Rectangle', 'KDTree']
9
+
10
+
11
def minkowski_distance_p(x, y, p=2):
    """Compute the pth power of the L**p distance between two arrays.

    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If `p` is 1 or infinity, this is equal to
    the actual L**p distance.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        pth power of the distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance_p
    >>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([2, 1])

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Promote both inputs to at least float64 (the documented return type)
    # while preserving complex inputs - addresses #10262.
    common_dtype = np.promote_types(np.promote_types(x.dtype, y.dtype),
                                    'float64')
    abs_diff = np.abs(y.astype(common_dtype) - x.astype(common_dtype))

    if p == np.inf:
        return np.amax(abs_diff, axis=-1)
    if p == 1:
        return np.sum(abs_diff, axis=-1)
    return np.sum(abs_diff**p, axis=-1)
61
+
62
+
63
def minkowski_distance(x, y, p=2):
    """Compute the L**p distance between two arrays.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        Distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance
    >>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([ 1.41421356, 1.        ])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    dist_p = minkowski_distance_p(x, y, p)
    # For p == 1 and p == inf the pth power *is* the distance; for all
    # other p the pth root must still be extracted.
    if p == np.inf or p == 1:
        return dist_p
    return dist_p**(1./p)
96
+
97
+
98
class Rectangle:
    """Hyperrectangle class.

    Represents a Cartesian product of intervals.
    """

    def __init__(self, maxes, mins):
        """Construct a hyperrectangle from two opposite corners.

        The corner coordinates are sorted componentwise, so the argument
        order does not matter.
        """
        self.maxes = np.maximum(maxes, mins).astype(float)
        self.mins = np.minimum(maxes, mins).astype(float)
        # Dimensionality of the rectangle.
        self.m, = self.maxes.shape

    def __repr__(self):
        return "<Rectangle %s>" % list(zip(self.mins, self.maxes))

    def volume(self):
        """Total volume (product of the side lengths)."""
        return np.prod(self.maxes - self.mins)

    def split(self, d, split):
        """Produce two hyperrectangles by splitting.

        In general, if you need to compute maximum and minimum
        distances to the children, it can be done more efficiently
        by updating the maximum and minimum distances to the parent.

        Parameters
        ----------
        d : int
            Axis to split hyperrectangle along.
        split : float
            Position along axis `d` to split at.

        """
        upper = np.copy(self.maxes)
        upper[d] = split
        lower = np.copy(self.mins)
        lower[d] = split
        return Rectangle(self.mins, upper), Rectangle(lower, self.maxes)

    def min_distance_point(self, x, p=2.):
        """
        Return the minimum distance between input and points in the
        hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input.
        p : float, optional
            Input.

        """
        # Componentwise gap between x and the rectangle (zero inside it).
        gaps = np.maximum(0, np.maximum(self.mins - x, x - self.maxes))
        return minkowski_distance(0, gaps, p)

    def max_distance_point(self, x, p=2.):
        """
        Return the maximum distance between input and points in the
        hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input array.
        p : float, optional
            Input.

        """
        # Farthest corner is reached along the larger span in each axis.
        spans = np.maximum(self.maxes - x, x - self.mins)
        return minkowski_distance(0, spans, p)

    def min_distance_rectangle(self, other, p=2.):
        """
        Compute the minimum distance between points in the two
        hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float
            Input.

        """
        gaps = np.maximum(0, np.maximum(self.mins - other.maxes,
                                        other.mins - self.maxes))
        return minkowski_distance(0, gaps, p)

    def max_distance_rectangle(self, other, p=2.):
        """
        Compute the maximum distance between points in the two
        hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float, optional
            Input.

        """
        spans = np.maximum(self.maxes - other.mins,
                           other.maxes - self.mins)
        return minkowski_distance(0, spans, p)
204
+
205
+
206
+ class KDTree(cKDTree):
207
+ """kd-tree for quick nearest-neighbor lookup.
208
+
209
+ This class provides an index into a set of k-dimensional points
210
+ which can be used to rapidly look up the nearest neighbors of any
211
+ point.
212
+
213
+ Parameters
214
+ ----------
215
+ data : array_like, shape (n,m)
216
+ The n data points of dimension m to be indexed. This array is
217
+ not copied unless this is necessary to produce a contiguous
218
+ array of doubles, and so modifying this data will result in
219
+ bogus results. The data are also copied if the kd-tree is built
220
+ with copy_data=True.
221
+ leafsize : positive int, optional
222
+ The number of points at which the algorithm switches over to
223
+ brute-force. Default: 10.
224
+ compact_nodes : bool, optional
225
+ If True, the kd-tree is built to shrink the hyperrectangles to
226
+ the actual data range. This usually gives a more compact tree that
227
+ is robust against degenerate input data and gives faster queries
228
+ at the expense of longer build time. Default: True.
229
+ copy_data : bool, optional
230
+ If True the data is always copied to protect the kd-tree against
231
+ data corruption. Default: False.
232
+ balanced_tree : bool, optional
233
+ If True, the median is used to split the hyperrectangles instead of
234
+ the midpoint. This usually gives a more compact tree and
235
+ faster queries at the expense of longer build time. Default: True.
236
+ boxsize : array_like or scalar, optional
237
+ Apply an m-d toroidal topology to the KDTree. The topology is generated
238
+ by :math:`x_i + n_i L_i` where :math:`n_i` are integers and :math:`L_i`
239
+ is the boxsize along i-th dimension. The input data shall be wrapped
240
+ into :math:`[0, L_i)`. A ValueError is raised if any of the data is
241
+ outside of this bound.
242
+
243
+ Notes
244
+ -----
245
+ The algorithm used is described in Maneewongvatana and Mount 1999.
246
+ The general idea is that the kd-tree is a binary tree, each of whose
247
+ nodes represents an axis-aligned hyperrectangle. Each node specifies
248
+ an axis and splits the set of points based on whether their coordinate
249
+ along that axis is greater than or less than a particular value.
250
+
251
+ During construction, the axis and splitting point are chosen by the
252
+ "sliding midpoint" rule, which ensures that the cells do not all
253
+ become long and thin.
254
+
255
+ The tree can be queried for the r closest neighbors of any given point
256
+ (optionally returning only those within some maximum distance of the
257
+ point). It can also be queried, with a substantial gain in efficiency,
258
+ for the r approximate closest neighbors.
259
+
260
+ For large dimensions (20 is already large) do not expect this to run
261
+ significantly faster than brute force. High-dimensional nearest-neighbor
262
+ queries are a substantial open problem in computer science.
263
+
264
+ Attributes
265
+ ----------
266
+ data : ndarray, shape (n,m)
267
+ The n data points of dimension m to be indexed. This array is
268
+ not copied unless this is necessary to produce a contiguous
269
+ array of doubles. The data are also copied if the kd-tree is built
270
+ with `copy_data=True`.
271
+ leafsize : positive int
272
+ The number of points at which the algorithm switches over to
273
+ brute-force.
274
+ m : int
275
+ The dimension of a single data-point.
276
+ n : int
277
+ The number of data points.
278
+ maxes : ndarray, shape (m,)
279
+ The maximum value in each dimension of the n data points.
280
+ mins : ndarray, shape (m,)
281
+ The minimum value in each dimension of the n data points.
282
+ size : int
283
+ The number of nodes in the tree.
284
+
285
+ """
286
+
287
+ class node:
288
+ @staticmethod
289
+ def _create(ckdtree_node=None):
290
+ """Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
291
+ if ckdtree_node is None:
292
+ return KDTree.node(ckdtree_node)
293
+ elif ckdtree_node.split_dim == -1:
294
+ return KDTree.leafnode(ckdtree_node)
295
+ else:
296
+ return KDTree.innernode(ckdtree_node)
297
+
298
+ def __init__(self, ckdtree_node=None):
299
+ if ckdtree_node is None:
300
+ ckdtree_node = cKDTreeNode()
301
+ self._node = ckdtree_node
302
+
303
+ def __lt__(self, other):
304
+ return id(self) < id(other)
305
+
306
+ def __gt__(self, other):
307
+ return id(self) > id(other)
308
+
309
+ def __le__(self, other):
310
+ return id(self) <= id(other)
311
+
312
+ def __ge__(self, other):
313
+ return id(self) >= id(other)
314
+
315
+ def __eq__(self, other):
316
+ return id(self) == id(other)
317
+
318
+ class leafnode(node):
319
+ @property
320
+ def idx(self):
321
+ return self._node.indices
322
+
323
+ @property
324
+ def children(self):
325
+ return self._node.children
326
+
327
+ class innernode(node):
328
+ def __init__(self, ckdtreenode):
329
+ assert isinstance(ckdtreenode, cKDTreeNode)
330
+ super().__init__(ckdtreenode)
331
+ self.less = KDTree.node._create(ckdtreenode.lesser)
332
+ self.greater = KDTree.node._create(ckdtreenode.greater)
333
+
334
+ @property
335
+ def split_dim(self):
336
+ return self._node.split_dim
337
+
338
+ @property
339
+ def split(self):
340
+ return self._node.split
341
+
342
+ @property
343
+ def children(self):
344
+ return self._node.children
345
+
346
+ @property
347
+ def tree(self):
348
+ if not hasattr(self, "_tree"):
349
+ self._tree = KDTree.node._create(super().tree)
350
+
351
+ return self._tree
352
+
353
+ def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
354
+ balanced_tree=True, boxsize=None):
355
+ data = np.asarray(data)
356
+ if data.dtype.kind == 'c':
357
+ raise TypeError("KDTree does not work with complex data")
358
+
359
+ # Note KDTree has different default leafsize from cKDTree
360
+ super().__init__(data, leafsize, compact_nodes, copy_data,
361
+ balanced_tree, boxsize)
362
+
363
+ def query(
364
+ self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
365
+ r"""Query the kd-tree for nearest neighbors.
366
+
367
+ Parameters
368
+ ----------
369
+ x : array_like, last dimension self.m
370
+ An array of points to query.
371
+ k : int or Sequence[int], optional
372
+ Either the number of nearest neighbors to return, or a list of the
373
+ k-th nearest neighbors to return, starting from 1.
374
+ eps : nonnegative float, optional
375
+ Return approximate nearest neighbors; the kth returned value
376
+ is guaranteed to be no further than (1+eps) times the
377
+ distance to the real kth nearest neighbor.
378
+ p : float, 1<=p<=infinity, optional
379
+ Which Minkowski p-norm to use.
380
+ 1 is the sum-of-absolute-values distance ("Manhattan" distance).
381
+ 2 is the usual Euclidean distance.
382
+ infinity is the maximum-coordinate-difference distance.
383
+ A large, finite p may cause a ValueError if overflow can occur.
384
+ distance_upper_bound : nonnegative float, optional
385
+ Return only neighbors within this distance. This is used to prune
386
+ tree searches, so if you are doing a series of nearest-neighbor
387
+ queries, it may help to supply the distance to the nearest neighbor
388
+ of the most recent point.
389
+ workers : int, optional
390
+ Number of workers to use for parallel processing. If -1 is given
391
+ all CPU threads are used. Default: 1.
392
+
393
+ .. versionadded:: 1.6.0
394
+
395
+ Returns
396
+ -------
397
+ d : float or array of floats
398
+ The distances to the nearest neighbors.
399
+ If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
400
+ ``tuple+(k,)``.
401
+ When k == 1, the last dimension of the output is squeezed.
402
+ Missing neighbors are indicated with infinite distances.
403
+ Hits are sorted by distance (nearest first).
404
+
405
+ .. versionchanged:: 1.9.0
406
+ Previously if ``k=None``, then `d` was an object array of
407
+ shape ``tuple``, containing lists of distances. This behavior
408
+ has been removed, use `query_ball_point` instead.
409
+
410
+ i : integer or array of integers
411
+ The index of each neighbor in ``self.data``.
412
+ ``i`` is the same shape as d.
413
+ Missing neighbors are indicated with ``self.n``.
414
+
415
+ Examples
416
+ --------
417
+
418
+ >>> import numpy as np
419
+ >>> from scipy.spatial import KDTree
420
+ >>> x, y = np.mgrid[0:5, 2:8]
421
+ >>> tree = KDTree(np.c_[x.ravel(), y.ravel()])
422
+
423
+ To query the nearest neighbours and return squeezed result, use
424
+
425
+ >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=1)
426
+ >>> print(dd, ii, sep='\n')
427
+ [2. 0.2236068]
428
+ [ 0 13]
429
+
430
+ To query the nearest neighbours and return unsqueezed result, use
431
+
432
+ >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1])
433
+ >>> print(dd, ii, sep='\n')
434
+ [[2. ]
435
+ [0.2236068]]
436
+ [[ 0]
437
+ [13]]
438
+
439
+ To query the second nearest neighbours and return unsqueezed result,
440
+ use
441
+
442
+ >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[2])
443
+ >>> print(dd, ii, sep='\n')
444
+ [[2.23606798]
445
+ [0.80622577]]
446
+ [[ 6]
447
+ [19]]
448
+
449
+ To query the first and second nearest neighbours, use
450
+
451
+ >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=2)
452
+ >>> print(dd, ii, sep='\n')
453
+ [[2. 2.23606798]
454
+ [0.2236068 0.80622577]]
455
+ [[ 0 6]
456
+ [13 19]]
457
+
458
+ or, be more specific
459
+
460
+ >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1, 2])
461
+ >>> print(dd, ii, sep='\n')
462
+ [[2. 2.23606798]
463
+ [0.2236068 0.80622577]]
464
+ [[ 0 6]
465
+ [13 19]]
466
+
467
+ """
468
+ x = np.asarray(x)
469
+ if x.dtype.kind == 'c':
470
+ raise TypeError("KDTree does not work with complex data")
471
+
472
+ if k is None:
473
+ raise ValueError("k must be an integer or a sequence of integers")
474
+
475
+ d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
476
+ if isinstance(i, int):
477
+ i = np.intp(i)
478
+ return d, i
479
+
480
+ def query_ball_point(self, x, r, p=2., eps=0, workers=1,
481
+ return_sorted=None, return_length=False):
482
+ """Find all points within distance r of point(s) x.
483
+
484
+ Parameters
485
+ ----------
486
+ x : array_like, shape tuple + (self.m,)
487
+ The point or points to search for neighbors of.
488
+ r : array_like, float
489
+ The radius of points to return, must broadcast to the length of x.
490
+ p : float, optional
491
+ Which Minkowski p-norm to use. Should be in the range [1, inf].
492
+ A finite large p may cause a ValueError if overflow can occur.
493
+ eps : nonnegative float, optional
494
+ Approximate search. Branches of the tree are not explored if their
495
+ nearest points are further than ``r / (1 + eps)``, and branches are
496
+ added in bulk if their furthest points are nearer than
497
+ ``r * (1 + eps)``.
498
+ workers : int, optional
499
+ Number of jobs to schedule for parallel processing. If -1 is given
500
+ all processors are used. Default: 1.
501
+
502
+ .. versionadded:: 1.6.0
503
+ return_sorted : bool, optional
504
+ Sorts returned indices if True and does not sort them if False. If
505
+ None, does not sort single point queries, but does sort
506
+ multi-point queries which was the behavior before this option
507
+ was added.
508
+
509
+ .. versionadded:: 1.6.0
510
+ return_length : bool, optional
511
+ Return the number of points inside the radius instead of a list
512
+ of the indices.
513
+
514
+ .. versionadded:: 1.6.0
515
+
516
+ Returns
517
+ -------
518
+ results : list or array of lists
519
+ If `x` is a single point, returns a list of the indices of the
520
+ neighbors of `x`. If `x` is an array of points, returns an object
521
+ array of shape tuple containing lists of neighbors.
522
+
523
+ Notes
524
+ -----
525
+ If you have many points whose neighbors you want to find, you may save
526
+ substantial amounts of time by putting them in a KDTree and using
527
+ query_ball_tree.
528
+
529
+ Examples
530
+ --------
531
+ >>> import numpy as np
532
+ >>> from scipy import spatial
533
+ >>> x, y = np.mgrid[0:5, 0:5]
534
+ >>> points = np.c_[x.ravel(), y.ravel()]
535
+ >>> tree = spatial.KDTree(points)
536
+ >>> sorted(tree.query_ball_point([2, 0], 1))
537
+ [5, 10, 11, 15]
538
+
539
+ Query multiple points and plot the results:
540
+
541
+ >>> import matplotlib.pyplot as plt
542
+ >>> points = np.asarray(points)
543
+ >>> plt.plot(points[:,0], points[:,1], '.')
544
+ >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
545
+ ... nearby_points = points[results]
546
+ ... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
547
+ >>> plt.margins(0.1, 0.1)
548
+ >>> plt.show()
549
+
550
+ """
551
+ x = np.asarray(x)
552
+ if x.dtype.kind == 'c':
553
+ raise TypeError("KDTree does not work with complex data")
554
+ return super().query_ball_point(
555
+ x, r, p, eps, workers, return_sorted, return_length)
556
+
557
+ def query_ball_tree(self, other, r, p=2., eps=0):
558
+ """
559
+ Find all pairs of points between `self` and `other` whose distance is
560
+ at most r.
561
+
562
+ Parameters
563
+ ----------
564
+ other : KDTree instance
565
+ The tree containing points to search against.
566
+ r : float
567
+ The maximum distance, has to be positive.
568
+ p : float, optional
569
+ Which Minkowski norm to use. `p` has to meet the condition
570
+ ``1 <= p <= infinity``.
571
+ eps : float, optional
572
+ Approximate search. Branches of the tree are not explored
573
+ if their nearest points are further than ``r/(1+eps)``, and
574
+ branches are added in bulk if their furthest points are nearer
575
+ than ``r * (1+eps)``. `eps` has to be non-negative.
576
+
577
+ Returns
578
+ -------
579
+ results : list of lists
580
+ For each element ``self.data[i]`` of this tree, ``results[i]`` is a
581
+ list of the indices of its neighbors in ``other.data``.
582
+
583
+ Examples
584
+ --------
585
+ You can search all pairs of points between two kd-trees within a distance:
586
+
587
+ >>> import matplotlib.pyplot as plt
588
+ >>> import numpy as np
589
+ >>> from scipy.spatial import KDTree
590
+ >>> rng = np.random.default_rng()
591
+ >>> points1 = rng.random((15, 2))
592
+ >>> points2 = rng.random((15, 2))
593
+ >>> plt.figure(figsize=(6, 6))
594
+ >>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
595
+ >>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
596
+ >>> kd_tree1 = KDTree(points1)
597
+ >>> kd_tree2 = KDTree(points2)
598
+ >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
599
+ >>> for i in range(len(indexes)):
600
+ ... for j in indexes[i]:
601
+ ... plt.plot([points1[i, 0], points2[j, 0]],
602
+ ... [points1[i, 1], points2[j, 1]], "-r")
603
+ >>> plt.show()
604
+
605
+ """
606
+ return super().query_ball_tree(other, r, p, eps)
607
+
608
+ def query_pairs(self, r, p=2., eps=0, output_type='set'):
609
+ """Find all pairs of points in `self` whose distance is at most r.
610
+
611
+ Parameters
612
+ ----------
613
+ r : positive float
614
+ The maximum distance.
615
+ p : float, optional
616
+ Which Minkowski norm to use. `p` has to meet the condition
617
+ ``1 <= p <= infinity``.
618
+ eps : float, optional
619
+ Approximate search. Branches of the tree are not explored
620
+ if their nearest points are further than ``r/(1+eps)``, and
621
+ branches are added in bulk if their furthest points are nearer
622
+ than ``r * (1+eps)``. `eps` has to be non-negative.
623
+ output_type : string, optional
624
+ Choose the output container, 'set' or 'ndarray'. Default: 'set'
625
+
626
+ .. versionadded:: 1.6.0
627
+
628
+ Returns
629
+ -------
630
+ results : set or ndarray
631
+ Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
632
+ positions are close. If output_type is 'ndarray', an ndarry is
633
+ returned instead of a set.
634
+
635
+ Examples
636
+ --------
637
+ You can search all pairs of points in a kd-tree within a distance:
638
+
639
+ >>> import matplotlib.pyplot as plt
640
+ >>> import numpy as np
641
+ >>> from scipy.spatial import KDTree
642
+ >>> rng = np.random.default_rng()
643
+ >>> points = rng.random((20, 2))
644
+ >>> plt.figure(figsize=(6, 6))
645
+ >>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
646
+ >>> kd_tree = KDTree(points)
647
+ >>> pairs = kd_tree.query_pairs(r=0.2)
648
+ >>> for (i, j) in pairs:
649
+ ... plt.plot([points[i, 0], points[j, 0]],
650
+ ... [points[i, 1], points[j, 1]], "-r")
651
+ >>> plt.show()
652
+
653
+ """
654
+ return super().query_pairs(r, p, eps, output_type)
655
+
656
    def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
        """Count how many nearby pairs can be formed.

        Count the number of pairs ``(x1,x2)`` that can be formed, with ``x1``
        drawn from ``self`` and ``x2`` drawn from ``other``, and where
        ``distance(x1, x2, p) <= r``.

        Data points on ``self`` and ``other`` are optionally weighted by the
        ``weights`` argument. (See below)

        This is adapted from the "two-point correlation" algorithm described by
        Gray and Moore [1]_. See notes for further discussion.

        Parameters
        ----------
        other : KDTree
            The other tree to draw points from, can be the same tree as self.
        r : float or one-dimensional array of floats
            The radius to produce a count for. Multiple radii are searched with
            a single tree traversal.
            If the count is non-cumulative(``cumulative=False``), ``r`` defines
            the edges of the bins, and must be non-decreasing.
        p : float, optional
            1<=p<=infinity.
            Which Minkowski p-norm to use.
            Default 2.0.
            A finite large p may cause a ValueError if overflow can occur.
        weights : tuple, array_like, or None, optional
            If None, the pair-counting is unweighted.
            If given as a tuple, weights[0] is the weights of points in
            ``self``, and weights[1] is the weights of points in ``other``;
            either can be None to indicate the points are unweighted.
            If given as an array_like, weights is the weights of points in
            ``self`` and ``other``. For this to make sense, ``self`` and
            ``other`` must be the same tree. If ``self`` and ``other`` are two
            different trees, a ``ValueError`` is raised.
            Default: None

            .. versionadded:: 1.6.0
        cumulative : bool, optional
            Whether the returned counts are cumulative. When cumulative is set
            to ``False`` the algorithm is optimized to work with a large number
            of bins (>10) specified by ``r``. When ``cumulative`` is set to
            True, the algorithm is optimized to work with a small number of
            ``r``. Default: True

            .. versionadded:: 1.6.0

        Returns
        -------
        result : scalar or 1-D array
            The number of pairs. For unweighted counts, the result is integer.
            For weighted counts, the result is float.
            If cumulative is False, ``result[i]`` contains the counts with
            ``(-inf if i == 0 else r[i-1]) < R <= r[i]``

        Notes
        -----
        Pair-counting is the basic operation used to calculate the two point
        correlation functions from a data set composed of position of objects.

        Two point correlation function measures the clustering of objects and
        is widely used in cosmology to quantify the large scale structure
        in our Universe, but it may be useful for data analysis in other fields
        where self-similar assembly of objects also occur.

        The Landy-Szalay estimator for the two point correlation function of
        ``D`` measures the clustering signal in ``D``. [2]_

        For example, given the position of two sets of objects,

        - objects ``D`` (data) contains the clustering signal, and

        - objects ``R`` (random) that contains no signal,

        .. math::

             \\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2<R, R>}{f^2<R, R>},

        where the brackets represents counting pairs between two data sets
        in a finite bin around ``r`` (distance), corresponding to setting
        `cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is the
        ratio between number of objects from data and random.

        The algorithm implemented here is loosely based on the dual-tree
        algorithm described in [1]_. We switch between two different
        pair-cumulation scheme depending on the setting of ``cumulative``.
        The computing time of the method we use when for
        ``cumulative == False`` does not scale with the total number of bins.
        The algorithm for ``cumulative == True`` scales linearly with the
        number of bins, though it is slightly faster when only
        1 or 2 bins are used. [5]_.

        As an extension to the naive pair-counting,
        weighted pair-counting counts the product of weights instead
        of number of pairs.
        Weighted pair-counting is used to estimate marked correlation functions
        ([3]_, section 2.2),
        or to properly calculate the average of data per distance bin
        (e.g. [4]_, section 2.1 on redshift).

        References
        ----------
        .. [1] Gray and Moore,
               "N-body problems in statistical learning",
               Mining the sky, 2000,
               https://arxiv.org/abs/astro-ph/0012333

        .. [2] Landy and Szalay,
               "Bias and variance of angular correlation functions",
               The Astrophysical Journal, 1993,
               http://adsabs.harvard.edu/abs/1993ApJ...412...64L

        .. [3] Sheth, Connolly and Skibba,
               "Marked correlations in galaxy formation models",
               Arxiv e-print, 2005,
               https://arxiv.org/abs/astro-ph/0511773

        .. [4] Hawkins, et al.,
               "The 2dF Galaxy Redshift Survey: correlation functions,
               peculiar velocities and the matter density of the Universe",
               Monthly Notices of the Royal Astronomical Society, 2002,
               http://adsabs.harvard.edu/abs/2003MNRAS.346...78H

        .. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926

        Examples
        --------
        You can count neighbors number between two kd-trees within a distance:

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points1 = rng.random((5, 2))
        >>> points2 = rng.random((5, 2))
        >>> kd_tree1 = KDTree(points1)
        >>> kd_tree2 = KDTree(points2)
        >>> kd_tree1.count_neighbors(kd_tree2, 0.2)
        1

        This number is same as the total pair number calculated by
        `query_ball_tree`:

        >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
        >>> sum([len(i) for i in indexes])
        1

        """
        # Thin wrapper: the traversal itself lives in the parent (Cython)
        # class; this override only carries the user-facing documentation.
        return super().count_neighbors(other, r, p, weights, cumulative)
803
+
804
    def sparse_distance_matrix(
            self, other, max_distance, p=2., output_type='dok_matrix'):
        """Compute a sparse distance matrix.

        Computes a distance matrix between two KDTrees, leaving as zero
        any distance greater than max_distance.

        Parameters
        ----------
        other : KDTree
            The other tree to compute distances against.
        max_distance : positive float
            Entries with a distance larger than this are omitted from the
            result (left at zero in the sparse containers).
        p : float, 1<=p<=infinity
            Which Minkowski p-norm to use.
            A finite large p may cause a ValueError if overflow can occur.
        output_type : string, optional
            Which container to use for output data. Options: 'dok_matrix',
            'coo_matrix', 'dict', or 'ndarray'. Default: 'dok_matrix'.

            .. versionadded:: 1.6.0

        Returns
        -------
        result : dok_matrix, coo_matrix, dict or ndarray
            Sparse matrix representing the results in "dictionary of keys"
            format. If a dict is returned the keys are (i,j) tuples of indices.
            If output_type is 'ndarray' a record array with fields 'i', 'j',
            and 'v' is returned.

        Examples
        --------
        You can compute a sparse distance matrix between two kd-trees:

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points1 = rng.random((5, 2))
        >>> points2 = rng.random((5, 2))
        >>> kd_tree1 = KDTree(points1)
        >>> kd_tree2 = KDTree(points2)
        >>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
        >>> sdm.toarray()
        array([[0.        , 0.        , 0.12295571, 0.        , 0.        ],
               [0.        , 0.        , 0.        , 0.        , 0.        ],
               [0.28942611, 0.        , 0.        , 0.2333084 , 0.        ],
               [0.        , 0.        , 0.        , 0.        , 0.        ],
               [0.24617575, 0.29571802, 0.26836782, 0.        , 0.        ]])

        You can check distances above the `max_distance` are zeros:

        >>> from scipy.spatial import distance_matrix
        >>> distance_matrix(points1, points2)
        array([[0.56906522, 0.39923701, 0.12295571, 0.8658745 , 0.79428925],
               [0.37327919, 0.7225693 , 0.87665969, 0.32580855, 0.75679479],
               [0.28942611, 0.30088013, 0.6395831 , 0.2333084 , 0.33630734],
               [0.31994999, 0.72658602, 0.71124834, 0.55396483, 0.90785663],
               [0.24617575, 0.29571802, 0.26836782, 0.57714465, 0.6473269 ]])

        """
        # Thin wrapper: the computation is implemented by the parent
        # (Cython) class; this override only carries the user-facing docs.
        return super().sparse_distance_matrix(
            other, max_distance, p, output_type)
867
+
868
+
869
def distance_matrix(x, y, p=2, threshold=1000000):
    """Compute the distance matrix.

    Returns the matrix of all pair-wise distances.

    Parameters
    ----------
    x : (M, K) array_like
        Matrix of M vectors in K dimensions.
    y : (N, K) array_like
        Matrix of N vectors in K dimensions.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.
    threshold : positive int
        If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
        of large temporary arrays.

    Returns
    -------
    result : (M, N) ndarray
        Matrix containing the distance from every vector in `x` to every
        vector in `y`.

    Raises
    ------
    ValueError
        If `x` and `y` do not have the same number of columns (dimensions).

    Examples
    --------
    >>> from scipy.spatial import distance_matrix
    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    m, k = x.shape
    n, kk = y.shape

    if k != kk:
        raise ValueError(f"x contains {k}-dimensional vectors but y contains "
                         f"{kk}-dimensional vectors")

    if m * n * k <= threshold:
        # Small enough: compute everything at once via broadcasting.
        return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)

    # Large problem: fill the result row-by-row (or column-by-column,
    # whichever axis is shorter) to bound the size of the temporaries.
    result = np.empty((m, n), dtype=float)  # FIXME: figure out the best dtype
    if m < n:
        for row in range(m):
            result[row, :] = minkowski_distance(x[row], y, p)
    else:
        for col in range(n):
            result[:, col] = minkowski_distance(x, y[col], p)
    return result
venv/lib/python3.10/site-packages/scipy/spatial/_plotutils.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy._lib.decorator import decorator as _decorator
3
+
4
+ __all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
5
+
6
+
7
@_decorator
def _held_figure(func, obj, ax=None, **kw):
    """Decorator for the 2-D plotting helpers below.

    Supplies a fresh figure's axes when ``ax`` is None, and otherwise
    preserves the axes' legacy matplotlib "hold" state around the call.
    """
    # Import lazily so scipy.spatial does not require matplotlib.
    import matplotlib.pyplot as plt

    if ax is None:
        # No axes supplied: draw onto a brand-new figure; no hold state
        # needs to be saved/restored in that case.
        fig = plt.figure()
        ax = fig.gca()
        return func(obj, ax=ax, **kw)

    # As of matplotlib 2.0, the "hold" mechanism is deprecated.
    # When matplotlib 1.x is no longer supported, this check can be removed.
    was_held = getattr(ax, 'ishold', lambda: True)()
    if was_held:
        return func(obj, ax=ax, **kw)
    try:
        # Temporarily enable hold so the helper's multiple draw calls
        # accumulate, then restore the caller's original setting.
        ax.hold(True)
        return func(obj, ax=ax, **kw)
    finally:
        ax.hold(was_held)
26
+
27
+
28
+ def _adjust_bounds(ax, points):
29
+ margin = 0.1 * np.ptp(points, axis=0)
30
+ xy_min = points.min(axis=0) - margin
31
+ xy_max = points.max(axis=0) + margin
32
+ ax.set_xlim(xy_min[0], xy_max[0])
33
+ ax.set_ylim(xy_min[1], xy_max[1])
34
+
35
+
36
@_held_figure
def delaunay_plot_2d(tri, ax=None):
    """
    Plot the given Delaunay triangulation in 2-D

    Parameters
    ----------
    tri : scipy.spatial.Delaunay instance
        Triangulation to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Delaunay
    matplotlib.pyplot.triplot

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Delaunay, delaunay_plot_2d

    The Delaunay triangulation of a set of random points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> tri = Delaunay(points)

    Plot it:

    >>> _ = delaunay_plot_2d(tri)
    >>> plt.show()

    """
    if tri.points.shape[1] != 2:
        raise ValueError("Delaunay triangulation is not 2-D")

    # Scatter the input sites, then overlay the triangulation's edges.
    xs, ys = tri.points.T
    ax.plot(xs, ys, 'o')
    ax.triplot(xs, ys, tri.simplices.copy())

    _adjust_bounds(ax, tri.points)

    return ax.figure
91
+
92
+
93
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
    """
    Plot the given convex hull diagram in 2-D

    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        Convex hull to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    ConvexHull

    Notes
    -----
    Requires Matplotlib.


    Examples
    --------

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d

    The convex hull of a random set of points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> hull = ConvexHull(points)

    Plot it:

    >>> _ = convex_hull_plot_2d(hull)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if hull.points.shape[1] != 2:
        raise ValueError("Convex hull is not 2-D")

    pts = hull.points
    # Scatter all input sites, then draw each hull facet as a segment.
    ax.plot(pts[:, 0], pts[:, 1], 'o')
    segments = [pts[simplex] for simplex in hull.simplices]
    collection = LineCollection(segments, colors='k', linestyle='solid')
    ax.add_collection(collection)
    _adjust_bounds(ax, pts)

    return ax.figure
151
+
152
+
153
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
    """
    Plot the given Voronoi diagram in 2-D

    Parameters
    ----------
    vor : scipy.spatial.Voronoi instance
        Diagram to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on
    show_points : bool, optional
        Add the Voronoi points to the plot.
    show_vertices : bool, optional
        Add the Voronoi vertices to the plot.
    line_colors : string, optional
        Specifies the line color for polygon boundaries
    line_width : float, optional
        Specifies the line width for polygon boundaries
    line_alpha : float, optional
        Specifies the line alpha for polygon boundaries
    point_size : float, optional
        Specifies the size of points

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Voronoi

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d

    Create a set of points for the example:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((10,2))

    Generate the Voronoi diagram for the points:

    >>> vor = Voronoi(points)

    Use `voronoi_plot_2d` to plot the diagram:

    >>> fig = voronoi_plot_2d(vor)

    Use `voronoi_plot_2d` to plot the diagram again, with some settings
    customized:

    >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
    ...                       line_width=2, line_alpha=0.6, point_size=2)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if vor.points.shape[1] != 2:
        raise ValueError("Voronoi diagram is not 2-D")

    # Optionally scatter the input sites and the Voronoi vertices.
    if kw.get('show_points', True):
        point_size = kw.get('point_size', None)
        ax.plot(vor.points[:, 0], vor.points[:, 1], '.', markersize=point_size)
    if kw.get('show_vertices', True):
        ax.plot(vor.vertices[:, 0], vor.vertices[:, 1], 'o')

    line_colors = kw.get('line_colors', 'k')
    line_width = kw.get('line_width', 1.0)
    line_alpha = kw.get('line_alpha', 1.0)

    center = vor.points.mean(axis=0)
    ptp_bound = np.ptp(vor.points, axis=0)

    # Each ridge is either a finite segment (both Voronoi vertices exist)
    # or a half-infinite ray (one vertex index is -1).
    finite_segments = []
    infinite_segments = []
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            finite_segments.append(vor.vertices[simplex])
        else:
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            # The ray is perpendicular to the line joining the two input
            # points that the ridge separates.
            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            # Orient the ray away from the centroid of all input points.
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            if (vor.furthest_site):
                # Furthest-site diagrams: the ray points the opposite way.
                direction = -direction
            # Extend far enough that the ray leaves the plotted bounds.
            aspect_factor = abs(ptp_bound.max() / ptp_bound.min())
            far_point = vor.vertices[i] + direction * ptp_bound.max() * aspect_factor

            infinite_segments.append([vor.vertices[i], far_point])

    # Finite ridges solid, infinite ridges dashed.
    ax.add_collection(LineCollection(finite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='solid'))
    ax.add_collection(LineCollection(infinite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='dashed'))

    _adjust_bounds(ax, vor.points)

    return ax.figure
venv/lib/python3.10/site-packages/scipy/spatial/_procrustes.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module provides functions to perform full Procrustes analysis.
3
+
4
+ This code was originally written by Justin Kuczynski and ported over from
5
+ scikit-bio by Yoshiki Vazquez-Baeza.
6
+ """
7
+
8
+ import numpy as np
9
+ from scipy.linalg import orthogonal_procrustes
10
+
11
+
12
+ __all__ = ['procrustes']
13
+
14
+
15
def procrustes(data1, data2):
    r"""Procrustes analysis, a similarity test for two data sets.

    Each input matrix is a set of points or vectors (the rows of the matrix).
    The dimension of the space is the number of columns of each matrix. Given
    two identically sized matrices, procrustes standardizes both such that:

    - :math:`tr(AA^{T}) = 1`.

    - Both sets of points are centered around the origin.

    Procrustes ([1]_, [2]_) then applies the optimal transform to the second
    matrix (including scaling/dilation, rotations, and reflections) to minimize
    :math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
    pointwise differences between the two input datasets.

    This function was not designed to handle datasets with different numbers of
    datapoints (rows). If two data sets have different dimensionality
    (different number of columns), simply add columns of zeros to the smaller
    of the two.

    Parameters
    ----------
    data1 : array_like
        Matrix, n rows represent points in k (columns) space `data1` is the
        reference data, after it is standardised, the data from `data2` will be
        transformed to fit the pattern in `data1` (must have >1 unique points).
    data2 : array_like
        n rows of data in k space to be fit to `data1`. Must be the same
        shape ``(numrows, numcols)`` as data1 (must have >1 unique points).

    Returns
    -------
    mtx1 : array_like
        A standardized version of `data1`.
    mtx2 : array_like
        The orientation of `data2` that best fits `data1`. Centered, but not
        necessarily :math:`tr(AA^{T}) = 1`.
    disparity : float
        :math:`M^{2}` as defined above.

    Raises
    ------
    ValueError
        If the input arrays are not two-dimensional.
        If the shape of the input arrays is different.
        If the input arrays have zero columns or zero rows.

    See Also
    --------
    scipy.linalg.orthogonal_procrustes
    scipy.spatial.distance.directed_hausdorff : Another similarity test
        for two data sets

    Notes
    -----
    - The disparity should not depend on the order of the input matrices, but
      the output matrices will, as only the first output matrix is guaranteed
      to be scaled such that :math:`tr(AA^{T}) = 1`.

    - Duplicate data points are generally ok, duplicating a data point will
      increase its effect on the procrustes fit.

    - The disparity scales as the number of points per input matrix.

    References
    ----------
    .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
    .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial import procrustes

    The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
    ``a`` here:

    >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
    >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
    >>> mtx1, mtx2, disparity = procrustes(a, b)
    >>> round(disparity)
    0.0

    """
    # Work on float copies so the inputs are never mutated.
    mtx1 = np.array(data1, dtype=np.float64, copy=True)
    mtx2 = np.array(data2, dtype=np.float64, copy=True)

    if mtx1.ndim != 2 or mtx2.ndim != 2:
        raise ValueError("Input matrices must be two-dimensional")
    if mtx1.shape != mtx2.shape:
        raise ValueError("Input matrices must be of same shape")
    if mtx1.size == 0:
        raise ValueError("Input matrices must be >0 rows and >0 cols")

    # Center both configurations at the origin.
    mtx1 -= mtx1.mean(axis=0)
    mtx2 -= mtx2.mean(axis=0)

    norm1 = np.linalg.norm(mtx1)
    norm2 = np.linalg.norm(mtx2)
    if norm1 == 0 or norm2 == 0:
        # A zero Frobenius norm after centering means all points coincide.
        raise ValueError("Input matrices must contain >1 unique points")

    # Rescale so that trace(mtx @ mtx.T) == 1 for each configuration.
    mtx1 /= norm1
    mtx2 /= norm2

    # Optimal orthogonal map (rotation/reflection) plus dilation that
    # carries mtx2 onto mtx1.
    R, s = orthogonal_procrustes(mtx1, mtx2)
    mtx2 = np.dot(mtx2, R.T) * s

    # Sum of squared pointwise differences after alignment.
    disparity = np.sum(np.square(mtx1 - mtx2))

    return mtx1, mtx2, disparity
132
+
venv/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Static type checking stub file for scipy/spatial/qhull.pyx
3
+ '''
4
+
5
+
6
+ import numpy as np
7
+ from numpy.typing import ArrayLike, NDArray
8
+ from typing_extensions import final
9
+
10
class QhullError(RuntimeError):
    """Exception declared by the qhull wrapper (subclass of RuntimeError)."""
    ...
12
+
13
@final
class _Qhull:
    """Type stub for the low-level Qhull wrapper; implementation in qhull.pyx."""
    # Read-only cython attribute that behaves, more or less, like a property
    @property
    def ndim(self) -> int: ...
    mode_option: bytes
    options: bytes
    furthest_site: bool

    def __init__(
        self,
        mode_option: bytes,
        points: NDArray[np.float64],
        options: None | bytes = ...,
        required_options: None | bytes = ...,
        furthest_site: bool = ...,
        incremental: bool = ...,
        interior_point: None | NDArray[np.float64] = ...,
    ) -> None: ...
    def check_active(self) -> None: ...
    def close(self) -> None: ...
    def get_points(self) -> NDArray[np.float64]: ...
    def add_points(
        self,
        points: ArrayLike,
        interior_point: ArrayLike = ...
    ) -> None: ...
    def get_paraboloid_shift_scale(self) -> tuple[float, float]: ...
    def volume_area(self) -> tuple[float, float]: ...
    def triangulate(self) -> None: ...
    def get_simplex_facet_array(self) -> tuple[
        NDArray[np.intc],
        NDArray[np.intc],
        NDArray[np.float64],
        NDArray[np.intc],
        NDArray[np.intc],
    ]: ...
    def get_hull_points(self) -> NDArray[np.float64]: ...
    def get_hull_facets(self) -> tuple[
        list[list[int]],
        NDArray[np.float64],
    ]: ...
    def get_voronoi_diagram(self) -> tuple[
        NDArray[np.float64],
        NDArray[np.intc],
        list[list[int]],
        list[list[int]],
        NDArray[np.intp],
    ]: ...
    def get_extremes_2d(self) -> NDArray[np.intc]: ...
63
+
64
# Type stub only; the implementation lives in the compiled qhull extension.
def _get_barycentric_transforms(
    points: NDArray[np.float64],
    simplices: NDArray[np.intc],
    eps: float
) -> NDArray[np.float64]: ...
69
+
70
class _QhullUser:
    """Type stub for the common base of the Qhull-backed public classes."""
    ndim: int
    npoints: int
    min_bound: NDArray[np.float64]
    max_bound: NDArray[np.float64]

    def __init__(self, qhull: _Qhull, incremental: bool = ...) -> None: ...
    def close(self) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def _add_points(
        self,
        points: ArrayLike,
        restart: bool = ...,
        interior_point: ArrayLike = ...
    ) -> None: ...
85
+
86
class Delaunay(_QhullUser):
    """Type stub for scipy.spatial.Delaunay; implementation in qhull.pyx."""
    furthest_site: bool
    paraboloid_scale: float
    paraboloid_shift: float
    simplices: NDArray[np.intc]
    neighbors: NDArray[np.intc]
    equations: NDArray[np.float64]
    coplanar: NDArray[np.intc]
    good: NDArray[np.intc]
    nsimplex: int
    vertices: NDArray[np.intc]

    def __init__(
        self,
        points: ArrayLike,
        furthest_site: bool = ...,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(
        self,
        points: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    @property
    def transform(self) -> NDArray[np.float64]: ...
    @property
    def vertex_to_simplex(self) -> NDArray[np.intc]: ...
    @property
    def vertex_neighbor_vertices(self) -> tuple[
        NDArray[np.intc],
        NDArray[np.intc],
    ]: ...
    @property
    def convex_hull(self) -> NDArray[np.intc]: ...
    def find_simplex(
        self,
        xi: ArrayLike,
        bruteforce: bool = ...,
        tol: float = ...
    ) -> NDArray[np.intc]: ...
    def plane_distance(self, xi: ArrayLike) -> NDArray[np.float64]: ...
    def lift_points(self, x: ArrayLike) -> NDArray[np.float64]: ...
132
+
133
# Module-level stubs; implementations live in scipy/spatial/qhull.pyx.
def tsearch(tri: Delaunay, xi: ArrayLike) -> NDArray[np.intc]: ...
def _copy_docstr(dst: object, src: object) -> None: ...
135
+
136
class ConvexHull(_QhullUser):
    """Type stub for scipy.spatial.ConvexHull; implementation in qhull.pyx."""
    simplices: NDArray[np.intc]
    neighbors: NDArray[np.intc]
    equations: NDArray[np.float64]
    coplanar: NDArray[np.intc]
    good: None | NDArray[np.bool_]
    volume: float
    area: float
    nsimplex: int

    def __init__(
        self,
        points: ArrayLike,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(self, points: ArrayLike,
                   restart: bool = ...) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    @property
    def vertices(self) -> NDArray[np.intc]: ...
159
+
160
class Voronoi(_QhullUser):
    """Type stub for scipy.spatial.Voronoi; implementation in qhull.pyx."""
    vertices: NDArray[np.float64]
    ridge_points: NDArray[np.intc]
    ridge_vertices: list[list[int]]
    regions: list[list[int]]
    point_region: NDArray[np.intp]
    furthest_site: bool

    def __init__(
        self,
        points: ArrayLike,
        furthest_site: bool = ...,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_points(
        self,
        points: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def points(self) -> NDArray[np.float64]: ...
    @property
    def ridge_dict(self) -> dict[tuple[int, int], list[int]]: ...
185
+
186
class HalfspaceIntersection(_QhullUser):
    """Type stub for scipy.spatial.HalfspaceIntersection (qhull.pyx)."""
    interior_point: NDArray[np.float64]
    dual_facets: list[list[int]]
    dual_equations: NDArray[np.float64]
    dual_points: NDArray[np.float64]
    dual_volume: float
    dual_area: float
    intersections: NDArray[np.float64]
    ndim: int
    nineq: int

    def __init__(
        self,
        halfspaces: ArrayLike,
        interior_point: ArrayLike,
        incremental: bool = ...,
        qhull_options: None | str = ...
    ) -> None: ...
    def _update(self, qhull: _Qhull) -> None: ...
    def add_halfspaces(
        self,
        halfspaces: ArrayLike,
        restart: bool = ...
    ) -> None: ...
    @property
    def halfspaces(self) -> NDArray[np.float64]: ...
    @property
    def dual_vertices(self) -> NDArray[np.integer]: ...
venv/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spherical Voronoi Code
3
+
4
+ .. versionadded:: 0.18.0
5
+
6
+ """
7
+ #
8
+ # Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
9
+ # Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
10
+ #
11
+ # Distributed under the same BSD license as SciPy.
12
+ #
13
+
14
+ import numpy as np
15
+ import scipy
16
+ from . import _voronoi
17
+ from scipy.spatial import cKDTree
18
+
19
+ __all__ = ['SphericalVoronoi']
20
+
21
+
22
def calculate_solid_angles(R):
    """Return the solid angles subtended by plane triangles.

    Implements the method of Van Oosterom and Strackee [VanOosterom]_ with
    some modifications: the triple product ``R1 . (R2 x R3)`` in the
    numerator is replaced by the determinant of ``[R1 R2 R3]``, which can be
    computed with better stability.  Assumes the input vertices have unit
    norm.
    """
    r0, r1, r2 = R[:, 0], R[:, 1], R[:, 2]
    # Sum of pairwise dot products between consecutive triangle vertices.
    dots = (np.einsum('ij,ij->i', r0, r1)
            + np.einsum('ij,ij->i', r1, r2)
            + np.einsum('ij,ij->i', r2, r0))
    # det([R1 R2 R3]) equals the triple product R1 . (R2 x R3).
    triple = np.linalg.det(R)
    return np.abs(2 * np.arctan2(triple, 1 + dots))
34
+
35
+
36
class SphericalVoronoi:
    """ Voronoi diagrams on the surface of a sphere.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    points : ndarray of floats, shape (npoints, ndim)
        Coordinates of points from which to construct a spherical
        Voronoi diagram.
    radius : float, optional
        Radius of the sphere (Default: 1)
    center : ndarray of floats, shape (ndim,)
        Center of sphere (Default: origin)
    threshold : float
        Threshold for detecting duplicate points and
        mismatches between points and sphere parameters.
        (Default: 1e-06)

    Attributes
    ----------
    points : double array of shape (npoints, ndim)
        the points in `ndim` dimensions to generate the Voronoi diagram from
    radius : double
        radius of the sphere
    center : double array of shape (ndim,)
        center of the sphere
    vertices : double array of shape (nvertices, ndim)
        Voronoi vertices corresponding to points
    regions : list of list of integers of shape (npoints, _ )
        the n-th entry is a list consisting of the indices
        of the vertices belonging to the n-th point in points

    Methods
    -------
    calculate_areas
        Calculates the areas of the Voronoi regions. For 2D point sets, the
        regions are circular arcs. The sum of the areas is `2 * pi * radius`.
        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

    Raises
    ------
    ValueError
        If there are duplicates in `points`.
        If the provided `radius` is not consistent with `points`.

    Notes
    -----
    The spherical Voronoi diagram algorithm proceeds as follows. The Convex
    Hull of the input points (generators) is calculated, and is equivalent to
    their Delaunay triangulation on the surface of the sphere [Caroli]_.
    The Convex Hull neighbour information is then used to
    order the Voronoi region vertices around each generator. The latter
    approach is substantially less sensitive to floating point issues than
    angle-based methods of Voronoi region vertex sorting.

    Empirical assessment of spherical Voronoi algorithm performance suggests
    quadratic time complexity (loglinear is optimal, but algorithms are more
    challenging to implement).

    References
    ----------
    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
                points on or close to a sphere. Research Report RR-7004, 2009.

    .. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
                     triangle. IEEE Transactions on Biomedical Engineering,
                     2, 1983, pp 125--126.

    See Also
    --------
    Voronoi : Conventional Voronoi diagrams in N dimensions.

    Examples
    --------
    Do some imports and take some points on a cube:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import SphericalVoronoi, geometric_slerp
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> # set input data
    >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
    ...                    [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])

    Calculate the spherical Voronoi diagram:

    >>> radius = 1
    >>> center = np.array([0, 0, 0])
    >>> sv = SphericalVoronoi(points, radius, center)

    Generate plot:

    >>> # sort vertices (optional, helpful for plotting)
    >>> sv.sort_vertices_of_regions()
    >>> t_vals = np.linspace(0, 1, 2000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> # plot the unit sphere for reference (optional)
    >>> u = np.linspace(0, 2 * np.pi, 100)
    >>> v = np.linspace(0, np.pi, 100)
    >>> x = np.outer(np.cos(u), np.sin(v))
    >>> y = np.outer(np.sin(u), np.sin(v))
    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
    >>> # plot generator points
    >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
    >>> # plot Voronoi vertices
    >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
    ...            c='g')
    >>> # indicate Voronoi regions (as Euclidean polygons)
    >>> for region in sv.regions:
    ...    n = len(region)
    ...    for i in range(n):
    ...        start = sv.vertices[region][i]
    ...        end = sv.vertices[region][(i + 1) % n]
    ...        result = geometric_slerp(start, end, t_vals)
    ...        ax.plot(result[..., 0],
    ...                result[..., 1],
    ...                result[..., 2],
    ...                c='k')
    >>> ax.azim = 10
    >>> ax.elev = 40
    >>> _ = ax.set_xticks([])
    >>> _ = ax.set_yticks([])
    >>> _ = ax.set_zticks([])
    >>> fig.set_size_inches(4, 4)
    >>> plt.show()

    """
    def __init__(self, points, radius=1, center=None, threshold=1e-06):

        if radius is None:
            raise ValueError('`radius` is `None`. '
                             'Please provide a floating point number '
                             '(i.e. `radius=1`).')

        self.radius = float(radius)
        self.points = np.array(points).astype(np.float64)
        self._dim = self.points.shape[1]
        if center is None:
            self.center = np.zeros(self._dim)
        else:
            self.center = np.array(center, dtype=float)

        # test degenerate input: the generators must span the full ambient
        # dimension, otherwise the convex hull (and hence the diagram) is
        # degenerate.
        self._rank = np.linalg.matrix_rank(self.points - self.points[0],
                                           tol=threshold * self.radius)
        if self._rank < self._dim:
            raise ValueError(f"Rank of input points must be at least {self._dim}")

        # any pair closer than threshold * radius counts as a duplicate
        if cKDTree(self.points).query_pairs(threshold * self.radius):
            raise ValueError("Duplicate generators present.")

        # all generators must lie on the sphere (within the same tolerance)
        radii = np.linalg.norm(self.points - self.center, axis=1)
        max_discrepancy = np.abs(radii - self.radius).max()
        if max_discrepancy >= threshold * self.radius:
            raise ValueError("Radius inconsistent with generators.")

        self._calc_vertices_regions()

    def _calc_vertices_regions(self):
        """
        Calculates the Voronoi vertices and regions of the generators stored
        in self.points. The vertices will be stored in self.vertices and the
        regions in self.regions.

        This algorithm was discussed at PyData London 2015 by
        Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
        """
        # get Convex Hull
        conv = scipy.spatial.ConvexHull(self.points)
        # get circumcenters of Convex Hull triangles from facet equations
        # for 3D input circumcenters will have shape: (2N-4, 3)
        self.vertices = self.radius * conv.equations[:, :-1] + self.center
        self._simplices = conv.simplices
        # calculate regions from triangulation
        # for 3D input simplex_indices will have shape: (2N-4,)
        simplex_indices = np.arange(len(self._simplices))
        # for 3D input tri_indices will have shape: (6N-12,)
        tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
        # for 3D input point_indices will have shape: (6N-12,)
        point_indices = self._simplices.ravel()
        # for 3D input indices will have shape: (6N-12,)
        indices = np.argsort(point_indices, kind='mergesort')
        # for 3D input flattened_groups will have shape: (6N-12,)
        flattened_groups = tri_indices[indices].astype(np.intp)
        # intervals will have shape: (N+1,)
        intervals = np.cumsum(np.bincount(point_indices + 1))
        # split flattened groups to get nested list of unsorted regions
        groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
                  for i in range(len(intervals) - 1)]
        self.regions = groups

    def sort_vertices_of_regions(self):
        """Sort indices of the vertices to be (counter-)clockwise ordered.

        Raises
        ------
        TypeError
            If the points are not three-dimensional.

        Notes
        -----
        For each region in regions, it sorts the indices of the Voronoi
        vertices such that the resulting points are in a clockwise or
        counterclockwise order around the generator point.

        This is done as follows: Recall that the n-th region in regions
        surrounds the n-th generator in points and that the k-th
        Voronoi vertex in vertices is the circumcenter of the k-th triangle
        in self._simplices. For each region n, we choose the first triangle
        (=Voronoi vertex) in self._simplices and a vertex of that triangle
        not equal to the center n. These determine a unique neighbor of that
        triangle, which is then chosen as the second triangle. The second
        triangle will have a unique vertex not equal to the current vertex or
        the center. This determines a unique neighbor of the second triangle,
        which is then chosen as the third triangle and so forth. We proceed
        through all the triangles (=Voronoi vertices) belonging to the
        generator in points and obtain a sorted version of the vertices
        of its surrounding region.
        """
        if self._dim != 3:
            raise TypeError("Only supported for three-dimensional point sets")
        # in-place sort implemented in the Cython extension
        _voronoi.sort_vertices_of_regions(self._simplices, self.regions)

    def _calculate_areas_3d(self):
        self.sort_vertices_of_regions()
        sizes = [len(region) for region in self.regions]
        csizes = np.cumsum(sizes)
        num_regions = csizes[-1]

        # We create a set of triangles consisting of one point and two Voronoi
        # vertices. The vertices of each triangle are adjacent in the sorted
        # regions list.
        point_indices = [i for i, size in enumerate(sizes)
                         for j in range(size)]

        nbrs1 = np.array([r for region in self.regions for r in region])

        # The calculation of nbrs2 is a vectorized version of:
        # np.array([r for region in self.regions for r in np.roll(region, 1)])
        nbrs2 = np.roll(nbrs1, 1)
        indices = np.roll(csizes, 1)
        indices[0] = 0
        nbrs2[indices] = nbrs1[csizes - 1]

        # Normalize points and vertices.
        pnormalized = (self.points - self.center) / self.radius
        vnormalized = (self.vertices - self.center) / self.radius

        # Create the complete set of triangles and calculate their solid angles
        triangles = np.hstack([pnormalized[point_indices],
                               vnormalized[nbrs1],
                               vnormalized[nbrs2]
                               ]).reshape((num_regions, 3, 3))
        triangle_solid_angles = calculate_solid_angles(triangles)

        # Sum the solid angles of the triangles in each region
        solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
        solid_angles[1:] -= solid_angles[:-1]

        # Get polygon areas using A = omega * r**2
        return solid_angles * self.radius**2

    def _calculate_areas_2d(self):
        # Find start and end points of arcs
        arcs = self.points[self._simplices] - self.center

        # Calculate the angle subtended by arcs
        d = np.sum((arcs[:, 1] - arcs[:, 0]) ** 2, axis=1)
        theta = np.arccos(1 - (d / (2 * (self.radius ** 2))))

        # Get areas using A = r * theta
        areas = self.radius * theta

        # Correct arcs which go the wrong way (single-hemisphere inputs)
        signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
                                  self.vertices - self.center))
        indices = np.where(signs < 0)
        areas[indices] = 2 * np.pi * self.radius - areas[indices]
        return areas

    def calculate_areas(self):
        """Calculates the areas of the Voronoi regions.

        For 2D point sets, the regions are circular arcs. The sum of the areas
        is `2 * pi * radius`.

        For 3D point sets, the regions are spherical polygons. The sum of the
        areas is `4 * pi * radius**2`.

        .. versionadded:: 1.5.0

        Returns
        -------
        areas : double array of shape (npoints,)
            The areas of the Voronoi regions.
        """
        if self._dim == 2:
            return self._calculate_areas_2d()
        elif self._dim == 3:
            return self._calculate_areas_3d()
        else:
            raise TypeError("Only supported for 2D and 3D point sets")
venv/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (241 kB). View file
 
venv/lib/python3.10/site-packages/scipy/spatial/_voronoi.pyi ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
import numpy as np

# Stub for the Cython extension: sorts each region's vertex indices in
# place into (counter-)clockwise order around its generator.
def sort_vertices_of_regions(simplices: np.ndarray, regions: list[list[int]]) -> None: ...  # noqa: E501
venv/lib/python3.10/site-packages/scipy/spatial/ckdtree.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.spatial` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Names historically importable from ``scipy.spatial.ckdtree``; kept so the
# deprecation shim below can report and forward them.
__all__ = [  # noqa: F822
    'cKDTree',
    'cKDTreeNode',
    'coo_entries',
    'operator',
    'ordered_pairs',
    'os',
    'scipy',
    'threading',
]


def __dir__():
    # Only advertise the deprecated public names through dir(module).
    return __all__


def __getattr__(name):
    # Module-level attribute hook (PEP 562): warns about the deprecation and
    # forwards the lookup to the private ``scipy.spatial._ckdtree`` module.
    return _sub_module_deprecation(sub_package="spatial", module="ckdtree",
                                   private_modules=["_ckdtree"], all=__all__,
                                   attribute=name)
venv/lib/python3.10/site-packages/scipy/spatial/distance.py ADDED
@@ -0,0 +1,2993 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Distance computations (:mod:`scipy.spatial.distance`)
3
+ =====================================================
4
+
5
+ .. sectionauthor:: Damian Eads
6
+
7
+ Function reference
8
+ ------------------
9
+
10
+ Distance matrix computation from a collection of raw observation vectors
11
+ stored in a rectangular array.
12
+
13
+ .. autosummary::
14
+ :toctree: generated/
15
+
16
+ pdist -- pairwise distances between observation vectors.
17
+ cdist -- distances between two collections of observation vectors
18
+ squareform -- convert distance matrix to a condensed one and vice versa
19
+ directed_hausdorff -- directed Hausdorff distance between arrays
20
+
21
+ Predicates for checking the validity of distance matrices, both
22
+ condensed and redundant. Also contained in this module are functions
23
+ for computing the number of observations in a distance matrix.
24
+
25
+ .. autosummary::
26
+ :toctree: generated/
27
+
28
+ is_valid_dm -- checks for a valid distance matrix
29
+ is_valid_y -- checks for a valid condensed distance matrix
30
+ num_obs_dm -- # of observations in a distance matrix
31
+ num_obs_y -- # of observations in a condensed distance matrix
32
+
33
+ Distance functions between two numeric vectors ``u`` and ``v``. Computing
34
+ distances over a large collection of vectors is inefficient for these
35
+ functions. Use ``pdist`` for this purpose.
36
+
37
+ .. autosummary::
38
+ :toctree: generated/
39
+
40
+ braycurtis -- the Bray-Curtis distance.
41
+ canberra -- the Canberra distance.
42
+ chebyshev -- the Chebyshev distance.
43
+ cityblock -- the Manhattan distance.
44
+ correlation -- the Correlation distance.
45
+ cosine -- the Cosine distance.
46
+ euclidean -- the Euclidean distance.
47
+ jensenshannon -- the Jensen-Shannon distance.
48
+ mahalanobis -- the Mahalanobis distance.
49
+ minkowski -- the Minkowski distance.
50
+ seuclidean -- the normalized Euclidean distance.
51
+ sqeuclidean -- the squared Euclidean distance.
52
+
53
+ Distance functions between two boolean vectors (representing sets) ``u`` and
54
+ ``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
55
+ computing the distances between all pairs.
56
+
57
+ .. autosummary::
58
+ :toctree: generated/
59
+
60
+ dice -- the Dice dissimilarity.
61
+ hamming -- the Hamming distance.
62
+ jaccard -- the Jaccard distance.
63
+ kulczynski1 -- the Kulczynski 1 distance.
64
+ rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
65
+ russellrao -- the Russell-Rao dissimilarity.
66
+ sokalmichener -- the Sokal-Michener dissimilarity.
67
+ sokalsneath -- the Sokal-Sneath dissimilarity.
68
+ yule -- the Yule dissimilarity.
69
+
70
+ :func:`hamming` also operates over discrete numerical vectors.
71
+ """
72
+
73
+ # Copyright (C) Damian Eads, 2007-2008. New BSD License.
74
+
75
+ __all__ = [
76
+ 'braycurtis',
77
+ 'canberra',
78
+ 'cdist',
79
+ 'chebyshev',
80
+ 'cityblock',
81
+ 'correlation',
82
+ 'cosine',
83
+ 'dice',
84
+ 'directed_hausdorff',
85
+ 'euclidean',
86
+ 'hamming',
87
+ 'is_valid_dm',
88
+ 'is_valid_y',
89
+ 'jaccard',
90
+ 'jensenshannon',
91
+ 'kulczynski1',
92
+ 'mahalanobis',
93
+ 'minkowski',
94
+ 'num_obs_dm',
95
+ 'num_obs_y',
96
+ 'pdist',
97
+ 'rogerstanimoto',
98
+ 'russellrao',
99
+ 'seuclidean',
100
+ 'sokalmichener',
101
+ 'sokalsneath',
102
+ 'sqeuclidean',
103
+ 'squareform',
104
+ 'yule'
105
+ ]
106
+
107
+
108
+ import math
109
+ import warnings
110
+ import numpy as np
111
+ import dataclasses
112
+
113
+ from typing import Optional, Callable
114
+
115
+ from functools import partial
116
+ from scipy._lib._util import _asarray_validated
117
+
118
+ from . import _distance_wrap
119
+ from . import _hausdorff
120
+ from ..linalg import norm
121
+ from ..special import rel_entr
122
+
123
+ from . import _distance_pybind
124
+
125
+
126
+ def _copy_array_if_base_present(a):
127
+ """Copy the array if its base points to a parent array."""
128
+ if a.base is not None:
129
+ return a.copy()
130
+ return a
131
+
132
+
133
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
    """Fill ``dm`` with pairwise correlation distances between XA and XB rows.

    Correlation distance is the cosine distance of the row-centered inputs,
    so each row's mean is removed and the cosine C kernel is reused.
    ``dm`` is written in place by the C wrapper.
    """
    XA = XA - XA.mean(axis=1, keepdims=True)
    XB = XB - XB.mean(axis=1, keepdims=True)
    _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)
137
+
138
+
139
def _correlation_pdist_wrap(X, dm, **kwargs):
    """Fill ``dm`` with pairwise correlation distances between rows of X.

    As with the cdist variant, row means are removed so the cosine C kernel
    computes correlation distance. ``dm`` is written in place.
    """
    X2 = X - X.mean(axis=1, keepdims=True)
    _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)
142
+
143
+
144
def _convert_to_type(X, out_type):
    """Return ``X`` as a C-contiguous array of dtype ``out_type``."""
    return np.ascontiguousarray(X, dtype=out_type)
146
+
147
+
148
+ def _nbool_correspond_all(u, v, w=None):
149
+ if u.dtype == v.dtype == bool and w is None:
150
+ not_u = ~u
151
+ not_v = ~v
152
+ nff = (not_u & not_v).sum()
153
+ nft = (not_u & v).sum()
154
+ ntf = (u & not_v).sum()
155
+ ntt = (u & v).sum()
156
+ else:
157
+ dtype = np.result_type(int, u.dtype, v.dtype)
158
+ u = u.astype(dtype)
159
+ v = v.astype(dtype)
160
+ not_u = 1.0 - u
161
+ not_v = 1.0 - v
162
+ if w is not None:
163
+ not_u = w * not_u
164
+ u = w * u
165
+ nff = (not_u * not_v).sum()
166
+ nft = (not_u * v).sum()
167
+ ntf = (u * not_v).sum()
168
+ ntt = (u * v).sum()
169
+ return (nff, nft, ntf, ntt)
170
+
171
+
172
+ def _nbool_correspond_ft_tf(u, v, w=None):
173
+ if u.dtype == v.dtype == bool and w is None:
174
+ not_u = ~u
175
+ not_v = ~v
176
+ nft = (not_u & v).sum()
177
+ ntf = (u & not_v).sum()
178
+ else:
179
+ dtype = np.result_type(int, u.dtype, v.dtype)
180
+ u = u.astype(dtype)
181
+ v = v.astype(dtype)
182
+ not_u = 1.0 - u
183
+ not_v = 1.0 - v
184
+ if w is not None:
185
+ not_u = w * not_u
186
+ u = w * u
187
+ nft = (not_u * v).sum()
188
+ ntf = (u * not_v).sum()
189
+ return (nft, ntf)
190
+
191
+
192
def _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):
    """Coerce cdist inputs to a supported dtype and validate metric kwargs.

    Returns ``(XA, XB, typ, kwargs)`` where ``typ`` is the chosen dtype and
    ``kwargs`` has been normalized by the metric's validator (if any).
    ``metric_info`` is a metric descriptor with ``types`` and ``validator``
    attributes (declared elsewhere in this module).
    """
    # get supported types
    types = metric_info.types
    # choose best type: keep the input dtype when supported, else the
    # metric's preferred (first-listed) dtype
    typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]
    # validate data
    XA = _convert_to_type(XA, out_type=typ)
    XB = _convert_to_type(XB, out_type=typ)

    # validate kwargs
    _validate_kwargs = metric_info.validator
    if _validate_kwargs:
        kwargs = _validate_kwargs((XA, XB), mA + mB, n, **kwargs)
    return XA, XB, typ, kwargs
206
+
207
+
208
def _validate_weight_with_size(X, m, n, **kwargs):
    """Validate an optional ``w`` kwarg against the vector length ``n``.

    Removes ``w`` from ``kwargs`` when absent/None; otherwise checks that it
    is a 1-D vector of length ``n`` and replaces it with the sanitized
    (non-negative, float) version. Returns the updated ``kwargs``.
    """
    w = kwargs.pop('w', None)
    if w is None:
        return kwargs

    if w.ndim != 1 or w.shape[0] != n:
        raise ValueError("Weights must have same size as input vector. "
                         f"{w.shape[0]} vs. {n}")

    kwargs['w'] = _validate_weights(w)
    return kwargs
219
+
220
+
221
def _validate_hamming_kwargs(X, m, n, **kwargs):
    """Validate kwargs for the hamming metric.

    Unlike the generic weight validator, hamming always supplies a weight
    vector: a uniform all-ones vector of length ``n`` when ``w`` is absent.
    Returns the updated ``kwargs``.
    """
    w = kwargs.get('w', np.ones((n,), dtype='double'))

    if w.ndim != 1 or w.shape[0] != n:
        raise ValueError(
            "Weights must have same size as input vector. %d vs. %d" % (w.shape[0], n)
        )

    kwargs['w'] = _validate_weights(w)
    return kwargs
231
+
232
+
233
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
    """Validate kwargs for the mahalanobis metric.

    When ``VI`` (the inverse covariance matrix) is not supplied, estimate it
    from the data; this requires strictly more observations ``m`` than
    dimensions ``n`` for the covariance matrix to be invertible.
    Returns the updated ``kwargs`` with a double-precision ``VI``.
    """
    VI = kwargs.pop('VI', None)
    if VI is None:
        if m <= n:
            # There are fewer observations than the dimension of
            # the observations.
            raise ValueError("The number of observations (%d) is too "
                             "small; the covariance matrix is "
                             "singular. For observations with %d "
                             "dimensions, at least %d observations "
                             "are required." % (m, n, n + 1))
        # cdist passes (XA, XB); estimate the covariance from both stacked
        if isinstance(X, tuple):
            X = np.vstack(X)
        CV = np.atleast_2d(np.cov(X.astype(np.float64, copy=False).T))
        VI = np.linalg.inv(CV).T.copy()
    kwargs["VI"] = _convert_to_double(VI)
    return kwargs
250
+
251
+
252
def _validate_minkowski_kwargs(X, m, n, **kwargs):
    """Validate kwargs for the minkowski metric.

    Checks the optional weight vector, defaults the order ``p`` to 2
    (Euclidean), and rejects non-positive ``p``. Returns the updated
    ``kwargs``.
    """
    kwargs = _validate_weight_with_size(X, m, n, **kwargs)
    if 'p' not in kwargs:
        kwargs['p'] = 2.
    else:
        if kwargs['p'] <= 0:
            raise ValueError("p must be greater than 0")

    return kwargs
261
+
262
+
263
def _validate_pdist_input(X, m, n, metric_info, **kwargs):
    """Coerce a pdist input to a supported dtype and validate metric kwargs.

    Mirrors ``_validate_cdist_input`` for the single-matrix case. Returns
    ``(X, typ, kwargs)``.
    """
    # get supported types
    types = metric_info.types
    # choose best type: keep the input dtype when supported, else the
    # metric's preferred (first-listed) dtype
    typ = types[types.index(X.dtype)] if X.dtype in types else types[0]
    # validate data
    X = _convert_to_type(X, out_type=typ)

    # validate kwargs
    _validate_kwargs = metric_info.validator
    if _validate_kwargs:
        kwargs = _validate_kwargs(X, m, n, **kwargs)
    return X, typ, kwargs
276
+
277
+
278
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
    """Validate kwargs for the seuclidean (standardized Euclidean) metric.

    When the variance vector ``V`` is absent, compute the per-dimension
    sample variance (ddof=1) from the data; otherwise check it is a 1-D
    vector of length ``n``. Returns the updated ``kwargs`` with ``V`` in
    double precision.
    """
    V = kwargs.pop('V', None)
    if V is None:
        # cdist passes (XA, XB); estimate the variance from both stacked
        if isinstance(X, tuple):
            X = np.vstack(X)
        V = np.var(X.astype(np.float64, copy=False), axis=0, ddof=1)
    else:
        V = np.asarray(V, order='c')
        if len(V.shape) != 1:
            raise ValueError('Variance vector V must '
                             'be one-dimensional.')
        if V.shape[0] != n:
            raise ValueError('Variance vector V must be of the same '
                             'dimension as the vectors on which the distances '
                             'are computed.')
    kwargs['V'] = _convert_to_double(V)
    return kwargs
295
+
296
+
297
+ def _validate_vector(u, dtype=None):
298
+ # XXX Is order='c' really necessary?
299
+ u = np.asarray(u, dtype=dtype, order='c')
300
+ if u.ndim == 1:
301
+ return u
302
+ raise ValueError("Input vector should be 1-D.")
303
+
304
+
305
def _validate_weights(w, dtype=np.float64):
    """Coerce ``w`` to a 1-D ``dtype`` vector and reject negative entries."""
    w = _validate_vector(w, dtype=dtype)
    if (w < 0).any():
        raise ValueError("Input weights should be all non-negative")
    return w
310
+
311
+
312
def directed_hausdorff(u, v, seed=0):
    """
    Compute the directed Hausdorff distance between two 2-D arrays.

    Distances between pairs are calculated using a Euclidean metric.

    Parameters
    ----------
    u : (M,N) array_like
        Input array with M points in N dimensions.
    v : (O,N) array_like
        Input array with O points in N dimensions.
    seed : int or None, optional
        Local `numpy.random.RandomState` seed. Default is 0, a random
        shuffling of u and v that guarantees reproducibility.

    Returns
    -------
    d : double
        The directed Hausdorff distance between arrays `u` and `v`,

    index_1 : int
        index of point contributing to Hausdorff pair in `u`

    index_2 : int
        index of point contributing to Hausdorff pair in `v`

    Raises
    ------
    ValueError
        An exception is thrown if `u` and `v` do not have
        the same number of columns.

    See Also
    --------
    scipy.spatial.procrustes : Another similarity test for two data sets

    Notes
    -----
    Uses the early break technique and the random sampling approach
    described by [1]_. Although worst-case performance is ``O(m * o)``
    (as with the brute force algorithm), this is unlikely in practice
    as the input data would have to require the algorithm to explore
    every single point interaction, and after the algorithm shuffles
    the input points at that. The best case performance is O(m), which
    is satisfied by selecting an inner loop distance that is less than
    cmax and leads to an early break as often as possible. The authors
    have formally shown that the average runtime is closer to O(m).

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
           calculating the exact Hausdorff distance." IEEE Transactions On
           Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
           2015.

    Examples
    --------
    Find the directed Hausdorff distance between two 2-D arrays of
    coordinates:

    >>> from scipy.spatial.distance import directed_hausdorff
    >>> import numpy as np
    >>> u = np.array([(1.0, 0.0),
    ...               (0.0, 1.0),
    ...               (-1.0, 0.0),
    ...               (0.0, -1.0)])
    >>> v = np.array([(2.0, 0.0),
    ...               (0.0, 2.0),
    ...               (-2.0, 0.0),
    ...               (0.0, -4.0)])

    >>> directed_hausdorff(u, v)[0]
    2.23606797749979
    >>> directed_hausdorff(v, u)[0]
    3.0

    Find the general (symmetric) Hausdorff distance between two 2-D
    arrays of coordinates:

    >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
    3.0

    Find the indices of the points that generate the Hausdorff distance
    (the Hausdorff pair):

    >>> directed_hausdorff(v, u)[1:]
    (3, 3)

    """
    # C-contiguous float64 buffers for the Cython kernel
    u = np.asarray(u, dtype=np.float64, order='c')
    v = np.asarray(v, dtype=np.float64, order='c')
    if u.shape[1] != v.shape[1]:
        raise ValueError('u and v need to have the same '
                         'number of columns')
    # the Cython extension returns the (d, index_1, index_2) tuple
    result = _hausdorff.directed_hausdorff(u, v, seed)
    return result
411
+
412
+
413
def minkowski(u, v, p=2, w=None):
    """
    Compute the Minkowski distance between two 1-D arrays.

    The (optionally weighted) Minkowski distance is

    .. math::

       \\left(\\sum_i{w_i |u_i - v_i|^p}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : scalar
        The order of the norm of the difference :math:`{\\|u-v\\|}_p`. Note
        that for :math:`0 < p < 1` the triangle inequality only holds with
        an additional multiplicative factor, i.e. it is only a quasi-metric.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    minkowski : double
        The Minkowski distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if p <= 0:
        raise ValueError("p must be greater than 0")
    diff = u - v
    if w is not None:
        w = _validate_weights(w)
        # Fold the weights into the difference as w**(1/p), so that the
        # plain p-norm of the scaled vector equals the weighted distance.
        if p == 1:
            w_root = w
        elif p == 2:
            # better precision and speed
            w_root = np.sqrt(w)
        elif p == np.inf:
            # For the max-norm only the support of the weights matters.
            w_root = (w != 0)
        else:
            w_root = np.power(w, 1 / p)
        diff = w_root * diff
    return norm(diff, ord=p)
482
+
483
+
484
def euclidean(u, v, w=None):
    """
    Computes the Euclidean distance between two 1-D arrays.

    The (optionally weighted) Euclidean distance is

    .. math::

       \\left(\\sum_i{w_i |u_i - v_i|^2}\\right)^{1/2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    euclidean : double
        The Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951
    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
    1.0

    """
    # Euclidean distance is the p=2 special case of Minkowski.
    return minkowski(u, v, p=2, w=w)
521
+
522
+
523
def sqeuclidean(u, v, w=None):
    """
    Compute the squared Euclidean distance between two 1-D arrays.

    Defined as

    .. math::

       \\sum_i{w_i |u_i - v_i|^2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
    1.0

    """
    # Keep inexact (float/complex) dtypes as-is; promote everything else
    # to np.float64 for numerical stability.
    utype = None
    vtype = None
    if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
        utype = np.float64
    if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
        vtype = np.float64

    u = _validate_vector(u, dtype=utype)
    v = _validate_vector(v, dtype=vtype)
    diff = u - v
    if w is None:
        weighted = diff  # apply the weights only once
    else:
        w = _validate_weights(w)
        weighted = w * diff
    return np.dot(diff, weighted)
573
+
574
+
575
def correlation(u, v, w=None, centered=True):
    """
    Compute the correlation distance between two 1-D arrays.

    Defined as

    .. math::

       1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                 {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

    where :math:`\\bar{u}` is the (weighted) mean of the elements of `u`
    and :math:`x \\cdot y` is the dot product.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0
    centered : bool, optional
        If True, `u` and `v` will be centered. Default is True.

    Returns
    -------
    correlation : double
        The correlation distance between 1-D array `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial.distance import correlation
    >>> correlation([1, 0, 1], [1, 1, 0])
    1.5
    >>> correlation([1, 0, 1], [1, 1, 0], w=[0.9, 0.1, 0.1])
    1.1
    >>> correlation([1, 0, 1], [1, 1, 0], centered=False)
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        w = w / w.sum()  # normalize so the weights form a probability vector
    if centered:
        # Weighted means reduce to plain means when w is uniform.
        umu = np.dot(u, w) if w is not None else np.mean(u)
        vmu = np.dot(v, w) if w is not None else np.mean(v)
        u = u - umu
        v = v - vmu
    uw = u * w if w is not None else u
    vw = v * w if w is not None else v
    uv = np.dot(u, vw)
    uu = np.dot(u, uw)
    vv = np.dot(v, vw)
    dist = 1.0 - uv / math.sqrt(uu * vv)
    # Clip the result to [0, 2] to absorb rounding error.
    return np.clip(dist, 0.0, 2.0)
650
+
651
+
652
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    Defined as

    .. math::

       1 - \\frac{u \\cdot v}{\\|u\\|_2 \\|v\\|_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([100, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254

    """
    # Cosine distance is the "uncentered" (a.k.a. reflective) correlation
    # distance, so delegate with centering disabled.
    return correlation(u, v, w=w, centered=False)
695
+
696
+
697
def hamming(u, v, w=None):
    """
    Compute the Hamming distance between two 1-D arrays.

    The Hamming distance is the (weighted) proportion of disagreeing
    components in `u` and `v`. For boolean vectors this is

    .. math::

       \\frac{c_{01} + c_{10}}{n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    hamming : double
        The Hamming distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0], [0, 1, 0])
    0.66666666666666663
    >>> distance.hamming([1, 0, 0], [1, 1, 0])
    0.33333333333333331

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.shape != v.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    mismatch = u != v
    if w is None:
        return np.mean(mismatch)
    w = _validate_weights(w)
    if w.shape != u.shape:
        raise ValueError("'w' should have the same length as 'u' and 'v'.")
    # Normalize the weights so the result is a weighted proportion.
    return np.dot(mismatch, w / w.sum())
753
+
754
+
755
def jaccard(u, v, w=None):
    """
    Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{c_{TF} + c_{FT}}
             {c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    jaccard : double
        The Jaccard distance between vectors `u` and `v`.

    Notes
    -----
    When both `u` and `v` lead to a ``0/0`` division — i.e. there is no
    overlap between the items in the vectors — the returned distance is 0.
    See the Wikipedia page on the Jaccard index [1]_, and this paper [2]_.

    .. versionchanged:: 1.2.0
       Previously, a ``0/0`` division returned NaN; it now returns 0.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Jaccard_index
    .. [2] S. Kosub, "A note on the triangle inequality for the Jaccard
       distance", 2016, :arxiv:`1612.02696`

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 0, 0], [1, 1, 0])
    0.5

    """
    u = _validate_vector(u)
    v = _validate_vector(v)

    nonzero = (u != 0) | (v != 0)
    unequal_nonzero = (u != v) & nonzero
    if w is not None:
        w = _validate_weights(w)
        nonzero = w * nonzero
        unequal_nonzero = w * unequal_nonzero
    numer = np.float64(unequal_nonzero.sum())
    denom = np.float64(nonzero.sum())
    # By convention 0/0 (no nonzero positions at all) is distance 0.
    return (numer / denom) if denom != 0 else 0
827
+
828
+
829
def kulczynski1(u, v, *, w=None):
    """
    Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.

    For boolean 1-D arrays `u` and `v` of length ``n`` it is defined as

    .. math::

       \\frac{c_{11}}{c_{01} + c_{10}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k \\in {0, 1, ..., n-1}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    kulczynski1 : float
        The Kulczynski 1 distance between vectors `u` and `v`.

    Notes
    -----
    This measure has a minimum value of 0 and no upper limit.
    It is un-defined when there are no non-matches.

    .. versionadded:: 1.8.0

    References
    ----------
    .. [1] Kulczynski S. et al. Bulletin International de l'Academie
           Polonaise des Sciences et des Lettres, Classe des Sciences
           Mathematiques et Naturelles, Serie B (Sciences Naturelles).
           1927; Supplement II: 57-203.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
    0.0
    >>> distance.kulczynski1([True, False, False], [True, True, False])
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    # Only the mismatch counts (nft, ntf) and the joint-true count matter.
    _, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)

    return ntt / (ntf + nft)
895
+
896
+
897
def seuclidean(u, v, V):
    """
    Return the standardized Euclidean distance between two 1-D arrays.

    Defined as

    .. math::

       \\sqrt{\\sum\\limits_i \\frac{1}{V_i} \\left(u_i-v_i \\right)^2}

    where ``V`` is the variance vector; ``V[i]`` is the variance computed
    over all the i-th components of the points.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    V : (N,) array_like
        `V` is an 1-D array of component variances. It is usually computed
        among a larger collection vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
    4.4721359549995796
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
    3.3166247903553998

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    V = _validate_vector(V, dtype=np.float64)
    # NOTE: raises TypeError (not ValueError) on mismatch — kept for
    # backward compatibility with existing callers.
    if not (V.shape[0] == u.shape[0] == v.shape[0]):
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    # Standardization is just a weighted Euclidean distance with w = 1/V.
    return euclidean(u, v, w=1/V)
943
+
944
+
945
def cityblock(u, v, w=None):
    """
    Compute the City Block (Manhattan) distance.

    Defined as

    .. math::

       \\sum_i {w_i \\left| u_i - v_i \\right|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cityblock : double
        The City Block (Manhattan) distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2
    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is not None:
        w = _validate_weights(w)
        abs_diff = w * abs_diff
    return abs_diff.sum()
989
+
990
+
991
def mahalanobis(u, v, VI):
    """
    Compute the Mahalanobis distance between two 1-D arrays.

    Defined as

    .. math::

       \\sqrt{ (u-v) V^{-1} (u-v)^T }

    where ``V`` is the covariance matrix. Note that the argument `VI`
    is the inverse of ``V``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    VI : array_like
        The inverse of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
    1.7320508075688772

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    VI = np.atleast_2d(VI)
    delta = u - v
    # Quadratic form delta . VI . delta^T, then take the square root.
    return np.sqrt(np.dot(np.dot(delta, VI), delta))
1036
+
1037
+
1038
def chebyshev(u, v, w=None):
    """
    Compute the Chebyshev distance.

    Defined as

    .. math::

       \\max_i {|u_i-v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input vector.
    v : (N,) array_like
        Input vector.
    w : (N,) array_like, optional
        Unused, as 'max' is a weightless operation. Here for API consistency.

    Returns
    -------
    chebyshev : double
        The Chebyshev distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
    1
    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        # Positions with zero weight are excluded from the max entirely.
        keep = w > 0
        if keep.sum() < w.size:
            u = u[keep]
            v = v[keep]
    return max(abs(u - v))
1081
+
1082
+
1083
def braycurtis(u, v, w=None):
    """
    Compute the Bray-Curtis distance between two 1-D arrays.

    Defined as

    .. math::

       \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}

    The Bray-Curtis distance is in the range [0, 1] if all coordinates are
    positive, and is undefined if the inputs are of length zero.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
    0.33333333333333331

    """
    u = _validate_vector(u)
    # Force float so integer inputs do not truncate the final division.
    v = _validate_vector(v, dtype=np.float64)
    numer = abs(u - v)
    denom = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numer = w * numer
        denom = w * denom
    return numer.sum() / denom.sum()
1129
+
1130
+
1131
def canberra(u, v, w=None):
    """
    Compute the Canberra distance between two 1-D arrays.

    Defined as

    .. math::

       d(u,v) = \\sum_i \\frac{|u_i-v_i|}{|u_i|+|v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    canberra : double
        The Canberra distance between vectors `u` and `v`.

    Notes
    -----
    When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
    used in the calculation.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.canberra([1, 1, 0], [0, 1, 0])
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    if w is not None:
        w = _validate_weights(w)
    # 0/0 terms produce NaN under the suppressed-invalid state and are
    # then dropped by nansum, implementing the 0/0 := 0 convention.
    with np.errstate(invalid='ignore'):
        terms = abs(u - v) / (abs(u) + abs(v))
        if w is not None:
            terms = w * terms
        total = np.nansum(terms)
    return total
1184
+
1185
+
1186
def jensenshannon(p, q, base=None, *, axis=0, keepdims=False):
    """
    Compute the Jensen-Shannon distance (metric) between two probability
    arrays — the square root of the Jensen-Shannon divergence:

    .. math::

       \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

    where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
    and :math:`D` is the Kullback-Leibler divergence.

    This routine will normalize `p` and `q` if they don't sum to 1.0.

    Parameters
    ----------
    p : (N,) array_like
        left probability vector
    q : (N,) array_like
        right probability vector
    base : double, optional
        the base of the logarithm used to compute the output
        if not given, then the routine uses the default base of
        scipy.stats.entropy.
    axis : int, optional
        Axis along which the Jensen-Shannon distances are computed. The
        default is 0.

        .. versionadded:: 1.7.0
    keepdims : bool, optional
        If this is set to `True`, the reduced axes are left in the
        result as dimensions with size one. Default is False.

        .. versionadded:: 1.7.0

    Returns
    -------
    js : double or ndarray
        The Jensen-Shannon distances between `p` and `q` along the `axis`.

    Notes
    -----

    .. versionadded:: 1.2.0

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> import numpy as np
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
    1.0
    >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
    0.46450140402245893
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    0.0

    """
    p = np.asarray(p)
    q = np.asarray(q)
    # Normalize each slice along `axis` into a probability distribution.
    p = p / np.sum(p, axis=axis, keepdims=True)
    q = q / np.sum(q, axis=axis, keepdims=True)
    m = (p + q) / 2.0
    left_sum = np.sum(rel_entr(p, m), axis=axis, keepdims=keepdims)
    right_sum = np.sum(rel_entr(q, m), axis=axis, keepdims=keepdims)
    js = left_sum + right_sum
    if base is not None:
        # rel_entr uses natural log; rescale to the requested base.
        js /= np.log(base)
    return np.sqrt(js / 2.0)
1272
+
1273
+
1274
def yule(u, v, w=None):
    """
    Compute the Yule dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.yule([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.yule([1, 1, 0], [0, 1, 0])
    0.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    half_R = ntf * nft
    # No cross-mismatches at all means the dissimilarity is exactly zero;
    # returning early also avoids a possible 0/0.
    if half_R == 0:
        return 0.0
    return float(2.0 * half_R / (ntt * nff + half_R))
1322
+
1323
+
1324
def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{c_{TF} + c_{FT}}
             {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input 1-D array.
    v : (N,) array_like, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Notes
    -----
    This function computes the Dice dissimilarity index. To compute the
    Dice similarity index, convert one to the other with similarity =
    1 - dissimilarity.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if w is None and u.dtype == v.dtype == bool:
        # Fast path: pure boolean inputs count joint-True directly.
        ntt = (u & v).sum()
    else:
        # Promote to a common integer-compatible dtype before multiplying.
        common = np.result_type(int, u.dtype, v.dtype)
        u = u.astype(common)
        v = v.astype(common)
        ntt = (u * v).sum() if w is None else (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
1387
+
1388
+
1389
def rogerstanimoto(u, v, w=None):
    """
    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{R}{c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
    0.5

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    R = 2.0 * (ntf + nft)  # doubled mismatch count
    return float(R) / float(ntt + nff + R)
1437
+
1438
+
1439
def russellrao(u, v, w=None):
    """
    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{n - c_{TT}}{n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.russellrao([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.russellrao([1, 0, 0], [1, 1, 0])
    0.6666666666666666

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        n = w.sum()  # weighted "length"
    elif u.dtype == v.dtype == bool:
        # Fast path for genuine boolean vectors.
        ntt = (u & v).sum()
        n = float(len(u))
    else:
        ntt = (u * v).sum()
        n = float(len(u))
    return float(n - ntt) / n
1494
+
1495
+
1496
def sokalmichener(u, v, w=None):
    """
    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
    -1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    # Joint (possibly weighted) contingency counts of the two vectors.
    counts = _nbool_correspond_all(u, v, w=w)
    nff, nft, ntf, ntt = counts
    mismatch = 2.0 * (ntf + nft)   # R in the formula above
    return float(mismatch) / float(ntt + nff + mismatch)
1545
+
1546
+
1547
def sokalsneath(u, v, w=None):
    """
    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
    0.66666666666666663
    >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
    0.0
    >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
    -2.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # c_TT: use fast bitwise AND on pure-bool input, products otherwise.
    if w is None:
        if u.dtype == v.dtype == bool:
            ntt = (u & v).sum()
        else:
            ntt = (u * v).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    mismatch = 2.0 * (ntf + nft)   # R in the formula above
    denominator = np.array(ntt + mismatch)
    if not denominator.any():
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(mismatch) / denominator
1605
+
1606
+
1607
# Partial converters that coerce an array to a fixed output dtype.
_convert_to_double = partial(_convert_to_type, out_type=np.float64)
_convert_to_bool = partial(_convert_to_type, out_type=bool)

# adding python-only wrappers to _distance_wrap module, so the correlation
# metric can be dispatched through the same getattr-based lookup that the
# C-implemented metrics use
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
1614
+
1615
@dataclasses.dataclass(frozen=True)
class CDistMetricWrapper:
    """Callable that dispatches ``cdist`` for one named metric.

    Unweighted calls go to the optimized wrapper in ``_distance_wrap``;
    calls that pass a weight vector ``w`` fall back to the pure-Python
    pairwise loop, since the C wrappers are unweighted.
    """
    metric_name: str

    def __call__(self, XA, XB, *, out=None, **kwargs):
        XA = np.ascontiguousarray(XA)
        XB = np.ascontiguousarray(XB)
        mA, n = XA.shape
        mB, _ = XB.shape
        info = _METRICS[self.metric_name]
        XA, XB, typ, kwargs = _validate_cdist_input(
            XA, XB, mA, mB, n, info, **kwargs)

        w = kwargs.pop('w', None)
        if w is not None:
            # Weighted case: no C fast path, evaluate pair-by-pair.
            return _cdist_callable(
                XA, XB, metric=info.dist_func, out=out, w=w, **kwargs)

        dm = _prepare_out_argument(out, np.float64, (mA, mB))
        # Look up the compiled cdist implementation for this metric/type.
        wrapper = getattr(_distance_wrap,
                          f'cdist_{self.metric_name}_{typ}_wrap')
        wrapper(XA, XB, dm, **kwargs)
        return dm
1640
+
1641
+
1642
@dataclasses.dataclass(frozen=True)
class PDistMetricWrapper:
    """Callable that dispatches ``pdist`` for one named metric.

    Unweighted calls go to the optimized wrapper in ``_distance_wrap``;
    calls that pass a weight vector ``w`` fall back to the pure-Python
    pairwise loop, since the C wrappers are unweighted.
    """
    metric_name: str

    def __call__(self, X, *, out=None, **kwargs):
        X = np.ascontiguousarray(X)
        m, n = X.shape
        info = _METRICS[self.metric_name]
        X, typ, kwargs = _validate_pdist_input(
            X, m, n, info, **kwargs)

        w = kwargs.pop('w', None)
        if w is not None:
            # Weighted case: no C fast path, evaluate pair-by-pair.
            return _pdist_callable(
                X, metric=info.dist_func, out=out, w=w, **kwargs)

        # Condensed output: one entry per unordered pair of rows.
        condensed_size = (m * (m - 1)) // 2
        dm = _prepare_out_argument(out, np.float64, (condensed_size,))
        # Look up the compiled pdist implementation for this metric/type.
        wrapper = getattr(_distance_wrap,
                          f'pdist_{self.metric_name}_{typ}_wrap')
        wrapper(X, dm, **kwargs)
        return dm
1665
+
1666
+
1667
@dataclasses.dataclass(frozen=True)
class MetricInfo:
    """Immutable registry record describing one distance metric."""
    # Name of python distance function
    canonical_name: str
    # All aliases, including canonical_name
    aka: set[str]
    # unvectorized distance function (u, v, **kwargs) -> scalar
    dist_func: Callable
    # Optimized cdist function
    cdist_func: Callable
    # Optimized pdist function
    pdist_func: Callable
    # function that checks kwargs and computes default values:
    # f(X, m, n, **kwargs)
    validator: Optional[Callable] = None
    # list of supported types:
    # X (pdist) and XA (cdist) are used to choose the type. if there is no
    # match the first type is used. Default double
    types: list[str] = dataclasses.field(default_factory=lambda: ['double'])
    # true if out array must be C-contiguous
    requires_contiguous_out: bool = True
1688
+
1689
+
1690
# Registry of implemented metrics. Metrics whose C implementation lives in
# _distance_pybind are referenced directly; the rest dispatch through the
# CDistMetricWrapper/PDistMetricWrapper callables defined above.
_METRIC_INFOS = [
    MetricInfo(
        canonical_name='braycurtis',
        aka={'braycurtis'},
        dist_func=braycurtis,
        cdist_func=_distance_pybind.cdist_braycurtis,
        pdist_func=_distance_pybind.pdist_braycurtis,
    ),
    MetricInfo(
        canonical_name='canberra',
        aka={'canberra'},
        dist_func=canberra,
        cdist_func=_distance_pybind.cdist_canberra,
        pdist_func=_distance_pybind.pdist_canberra,
    ),
    MetricInfo(
        canonical_name='chebyshev',
        aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'},
        dist_func=chebyshev,
        cdist_func=_distance_pybind.cdist_chebyshev,
        pdist_func=_distance_pybind.pdist_chebyshev,
    ),
    MetricInfo(
        canonical_name='cityblock',
        aka={'cityblock', 'cblock', 'cb', 'c'},
        dist_func=cityblock,
        cdist_func=_distance_pybind.cdist_cityblock,
        pdist_func=_distance_pybind.pdist_cityblock,
    ),
    MetricInfo(
        canonical_name='correlation',
        aka={'correlation', 'co'},
        dist_func=correlation,
        cdist_func=CDistMetricWrapper('correlation'),
        pdist_func=PDistMetricWrapper('correlation'),
    ),
    MetricInfo(
        canonical_name='cosine',
        aka={'cosine', 'cos'},
        dist_func=cosine,
        cdist_func=CDistMetricWrapper('cosine'),
        pdist_func=PDistMetricWrapper('cosine'),
    ),
    MetricInfo(
        canonical_name='dice',
        aka={'dice'},
        types=['bool'],
        dist_func=dice,
        cdist_func=_distance_pybind.cdist_dice,
        pdist_func=_distance_pybind.pdist_dice,
    ),
    MetricInfo(
        canonical_name='euclidean',
        aka={'euclidean', 'euclid', 'eu', 'e'},
        dist_func=euclidean,
        cdist_func=_distance_pybind.cdist_euclidean,
        pdist_func=_distance_pybind.pdist_euclidean,
    ),
    MetricInfo(
        canonical_name='hamming',
        aka={'matching', 'hamming', 'hamm', 'ha', 'h'},
        types=['double', 'bool'],
        validator=_validate_hamming_kwargs,
        dist_func=hamming,
        cdist_func=_distance_pybind.cdist_hamming,
        pdist_func=_distance_pybind.pdist_hamming,
    ),
    MetricInfo(
        canonical_name='jaccard',
        aka={'jaccard', 'jacc', 'ja', 'j'},
        types=['double', 'bool'],
        dist_func=jaccard,
        cdist_func=_distance_pybind.cdist_jaccard,
        pdist_func=_distance_pybind.pdist_jaccard,
    ),
    MetricInfo(
        canonical_name='jensenshannon',
        aka={'jensenshannon', 'js'},
        dist_func=jensenshannon,
        cdist_func=CDistMetricWrapper('jensenshannon'),
        pdist_func=PDistMetricWrapper('jensenshannon'),
    ),
    MetricInfo(
        canonical_name='kulczynski1',
        aka={'kulczynski1'},
        types=['bool'],
        dist_func=kulczynski1,
        cdist_func=_distance_pybind.cdist_kulczynski1,
        pdist_func=_distance_pybind.pdist_kulczynski1,
    ),
    MetricInfo(
        canonical_name='mahalanobis',
        aka={'mahalanobis', 'mahal', 'mah'},
        validator=_validate_mahalanobis_kwargs,
        dist_func=mahalanobis,
        cdist_func=CDistMetricWrapper('mahalanobis'),
        pdist_func=PDistMetricWrapper('mahalanobis'),
    ),
    MetricInfo(
        canonical_name='minkowski',
        aka={'minkowski', 'mi', 'm', 'pnorm'},
        validator=_validate_minkowski_kwargs,
        dist_func=minkowski,
        cdist_func=_distance_pybind.cdist_minkowski,
        pdist_func=_distance_pybind.pdist_minkowski,
    ),
    MetricInfo(
        canonical_name='rogerstanimoto',
        aka={'rogerstanimoto'},
        types=['bool'],
        dist_func=rogerstanimoto,
        cdist_func=_distance_pybind.cdist_rogerstanimoto,
        pdist_func=_distance_pybind.pdist_rogerstanimoto,
    ),
    MetricInfo(
        canonical_name='russellrao',
        aka={'russellrao'},
        types=['bool'],
        dist_func=russellrao,
        cdist_func=_distance_pybind.cdist_russellrao,
        pdist_func=_distance_pybind.pdist_russellrao,
    ),
    MetricInfo(
        canonical_name='seuclidean',
        aka={'seuclidean', 'se', 's'},
        validator=_validate_seuclidean_kwargs,
        dist_func=seuclidean,
        cdist_func=CDistMetricWrapper('seuclidean'),
        pdist_func=PDistMetricWrapper('seuclidean'),
    ),
    MetricInfo(
        canonical_name='sokalmichener',
        aka={'sokalmichener'},
        types=['bool'],
        dist_func=sokalmichener,
        cdist_func=_distance_pybind.cdist_sokalmichener,
        pdist_func=_distance_pybind.pdist_sokalmichener,
    ),
    MetricInfo(
        canonical_name='sokalsneath',
        aka={'sokalsneath'},
        types=['bool'],
        dist_func=sokalsneath,
        cdist_func=_distance_pybind.cdist_sokalsneath,
        pdist_func=_distance_pybind.pdist_sokalsneath,
    ),
    MetricInfo(
        canonical_name='sqeuclidean',
        aka={'sqeuclidean', 'sqe', 'sqeuclid'},
        dist_func=sqeuclidean,
        cdist_func=_distance_pybind.cdist_sqeuclidean,
        pdist_func=_distance_pybind.pdist_sqeuclidean,
    ),
    MetricInfo(
        canonical_name='yule',
        aka={'yule'},
        types=['bool'],
        dist_func=yule,
        cdist_func=_distance_pybind.cdist_yule,
        pdist_func=_distance_pybind.pdist_yule,
    ),
]
1853
+
1854
# Lookup tables derived from the registry above.
# Canonical name -> MetricInfo.
_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}
# Every accepted alias (e.g. 'eu', 'euclid') -> MetricInfo.
_METRIC_ALIAS = {alias: info
                 for info in _METRIC_INFOS
                 for alias in info.aka}

# Canonical metric names, in registry order.
_METRICS_NAMES = list(_METRICS.keys())

# 'test_<name>' -> MetricInfo; these dispatch to the succinct pure-Python
# implementations for verification (see the note at the top of pdist).
_TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS}
1862
+
1863
+
1864
def pdist(X, metric='euclidean', *, out=None, **kwargs):
    """
    Pairwise distances between observations in n-dimensional space.

    See Notes for common calling conventions.

    Parameters
    ----------
    X : array_like
        An m by n array of m original observations in an
        n-dimensional space.
    metric : str or function, optional
        The distance metric to use. The distance function can
        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'jensenshannon', 'kulczynski1',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
        'sqeuclidean', 'yule'.
    out : ndarray, optional
        The output array.
        If not None, condensed distance matrix Y is stored in this array.
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
        The p-norm to apply for Minkowski, weighted and unweighted.
        Default: 2.

        w : ndarray
        The weight vector for metrics that support weights (e.g., Minkowski).

        V : ndarray
        The variance vector for standardized Euclidean.
        Default: var(X, axis=0, ddof=1)

        VI : ndarray
        The inverse of the covariance matrix for Mahalanobis.
        Default: inv(cov(X.T)).T

    Returns
    -------
    Y : ndarray
        Returns a condensed distance matrix Y. For each :math:`i` and :math:`j`
        (where :math:`i<j<m`), where m is the number of original observations.
        The metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``m
        * i + j - ((i + 2) * (i + 1)) // 2``.

    See Also
    --------
    squareform : converts between condensed distance matrices and
                 square distance matrices.

    Notes
    -----
    See ``squareform`` for information on how to calculate the index of
    this entry or to convert the condensed distance matrix to a
    redundant square matrix.

    The following are common calling conventions.

    1. ``Y = pdist(X, 'euclidean')``

       Computes the distance between m points using Euclidean distance
       (2-norm) as the distance metric between the points. The points
       are arranged as m n-dimensional row vectors in the matrix X.

    2. ``Y = pdist(X, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
       that this is only a quasi-metric if :math:`0 < p < 1`).

    3. ``Y = pdist(X, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = pdist(X, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}


       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = pdist(X, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
       the vectors.

    6. ``Y = pdist(X, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{\\|u\\|}_2 {\\|v\\|}_2}

       where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of ``u`` and ``v``.

    7. ``Y = pdist(X, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    8. ``Y = pdist(X, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = pdist(X, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree.

    10. ``Y = pdist(X, 'jensenshannon')``

        Computes the Jensen-Shannon distance between two probability arrays.
        Given two probability vectors, :math:`p` and :math:`q`, the
        Jensen-Shannon distance is

        .. math::

           \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

        where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
        and :math:`D` is the Kullback-Leibler divergence.

    11. ``Y = pdist(X, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}

    12. ``Y = pdist(X, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                               {|u_i|+|v_i|}


    13. ``Y = pdist(X, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is


        .. math::

             d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                           {\\sum_i {|u_i+v_i|}}

    14. ``Y = pdist(X, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    15. ``Y = pdist(X, 'yule')``

        Computes the Yule distance between each pair of boolean
        vectors. (see yule function documentation)

    16. ``Y = pdist(X, 'matching')``

        Synonym for 'hamming'.

    17. ``Y = pdist(X, 'dice')``

        Computes the Dice distance between each pair of boolean
        vectors. (see dice function documentation)

    18. ``Y = pdist(X, 'kulczynski1')``

        Computes the kulczynski1 distance between each pair of
        boolean vectors. (see kulczynski1 function documentation)

    19. ``Y = pdist(X, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between each pair of
        boolean vectors. (see rogerstanimoto function documentation)

    20. ``Y = pdist(X, 'russellrao')``

        Computes the Russell-Rao distance between each pair of
        boolean vectors. (see russellrao function documentation)

    21. ``Y = pdist(X, 'sokalmichener')``

        Computes the Sokal-Michener distance between each pair of
        boolean vectors. (see sokalmichener function documentation)

    22. ``Y = pdist(X, 'sokalsneath')``

        Computes the Sokal-Sneath distance between each pair of
        boolean vectors. (see sokalsneath function documentation)

    23. ``Y = pdist(X, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = pdist(X, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function sokalsneath. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax.::

          dm = pdist(X, 'sokalsneath')

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist

    ``x`` is an array of five points in three-dimensional space.

    >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

    ``pdist(x)`` with no additional arguments computes the 10 pairwise
    Euclidean distances:

    >>> pdist(x)
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])

    The following computes the pairwise Minkowski distances with ``p = 3.5``:

    >>> pdist(x, metric='minkowski', p=3.5)
    array([2.04898923, 5.1154929 , 7.02700737, 2.43802731, 4.19042714,
           6.03956994, 1.        , 4.45128103, 4.10636143, 5.0619695 ])

    The pairwise city block or Manhattan distances:

    >>> pdist(x, metric='cityblock')
    array([ 3., 11., 10.,  4.,  8.,  9.,  1.,  9.,  7.,  8.])

    """
    # You can also call this as:
    #     Y = pdist(X, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in X using the distance metric 'abc' but
    # with a more succinct, verifiable, but less efficient implementation.

    X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
                           check_finite=False)

    s = X.shape
    if len(s) != 2:
        raise ValueError('A 2-dimensional array must be passed.')

    m, n = s

    if callable(metric):
        mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
        metric_info = _METRIC_ALIAS.get(mstr, None)

        # A callable whose __name__ matches a known metric still gets its
        # kwargs validated/defaulted through the registry validator.
        if metric_info is not None:
            X, typ, kwargs = _validate_pdist_input(
                X, m, n, metric_info, **kwargs)

        return _pdist_callable(X, metric=metric, out=out, **kwargs)
    elif isinstance(metric, str):
        mstr = metric.lower()
        metric_info = _METRIC_ALIAS.get(mstr, None)

        if metric_info is not None:
            # Fast path: dispatch to the optimized implementation.
            pdist_fn = metric_info.pdist_func
            return pdist_fn(X, out=out, **kwargs)
        elif mstr.startswith("test_"):
            # Slow, verifiable pure-Python path (see comment above).
            metric_info = _TEST_METRICS.get(mstr, None)
            if metric_info is None:
                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
            X, typ, kwargs = _validate_pdist_input(
                X, m, n, metric_info, **kwargs)
            return _pdist_callable(
                X, metric=metric_info.dist_func, out=out, **kwargs)
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
2194
+
2195
+
2196
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : array_like
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. ``v = squareform(X)``

       Given a square n-by-n symmetric distance matrix ``X``,
       ``v = squareform(X)`` returns a ``n * (n-1) / 2``
       (i.e. binomial coefficient n choose 2) sized vector `v`
       where :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       is the distance between distinct points ``i`` and ``j``.
       If ``X`` is non-square or asymmetric, an error is raised.

    2. ``X = squareform(v)``

       Given a ``n * (n-1) / 2`` sized vector ``v``
       for some integer ``n >= 1`` encoding distances as described,
       ``X = squareform(v)`` returns a n-by-n distance matrix ``X``.
       The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       and all diagonal elements are zero.

    In SciPy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist, squareform

    ``x`` is an array of five points in three-dimensional space.

    >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

    ``pdist(x)`` computes the Euclidean distances between each pair of
    points in ``x``. The distances are returned in a one-dimensional
    array with length ``5*(5 - 1)/2 = 10``.

    >>> distvec = pdist(x)
    >>> distvec
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])

    ``squareform(distvec)`` returns the 5x5 distance matrix.

    >>> m = squareform(distvec)
    >>> m
    array([[0.        , 2.23606798, 6.40312424, 7.34846923, 2.82842712],
           [2.23606798, 0.        , 4.89897949, 6.40312424, 1.        ],
           [6.40312424, 4.89897949, 0.        , 5.38516481, 4.58257569],
           [7.34846923, 6.40312424, 5.38516481, 0.        , 5.47722558],
           [2.82842712, 1.        , 4.58257569, 5.47722558, 0.        ]])

    When given a square distance matrix ``m``, ``squareform(m)`` returns
    the one-dimensional condensed distance vector associated with the
    matrix. In this case, we recover ``distvec``.

    >>> squareform(m)
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])
    """
    X = np.ascontiguousarray(X)

    s = X.shape

    # Validate the forced interpretation against the actual rank of X.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")

    # X = squareform(v)
    if len(s) == 1:
        if s[0] == 0:
            return np.zeros((1, 1), dtype=X.dtype)

        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))

        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')

        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)

        # Return the distance matrix.
        return M
    elif len(s) == 2:
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')

        # One-side of the dimensions is set here.
        d = s[0]

        if d <= 1:
            return np.array([], dtype=X.dtype)

        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
2348
+
2349
+
2350
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : array_like
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import is_valid_dm

    This matrix is a valid distance matrix.

    >>> d = np.array([[0.0, 1.1, 1.2, 1.3],
    ...               [1.1, 0.0, 1.0, 1.4],
    ...               [1.2, 1.0, 0.0, 1.5],
    ...               [1.3, 1.4, 1.5, 0.0]])
    >>> is_valid_dm(d)
    True

    In the following examples, the input is not a valid distance matrix.

    Not square:

    >>> is_valid_dm([[0, 2, 2], [2, 0, 2]])
    False

    Nonzero diagonal element:

    >>> is_valid_dm([[0, 1, 1], [1, 2, 3], [1, 3, 0]])
    False

    Not symmetric:

    >>> is_valid_dm([[0, 1, 3], [2, 0, 1], [3, 1, 0]])
    False

    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks: strict symmetry and an exactly-zero diagonal.
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerance-based checks: D and D.T may differ by up to `tol`,
            # and the diagonal entries may be up to `tol`.
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' must be '
                                     f'symmetric within tolerance {tol:5.5f}.')
                else:
                    raise ValueError('Distance matrix must be symmetric within '
                                     'tolerance %5.5f.' % tol)
            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' diagonal must be '
                                     f'close to zero within tolerance {tol:5.5f}.')
                else:
                    # Bug fix: this branch used a message with a name
                    # placeholder and called ``.format(*tol)``, which tried
                    # to unpack a float and raised TypeError instead of the
                    # intended ValueError when ``throw=True``.
                    raise ValueError('Distance matrix diagonal must be close '
                                     f'to zero within tolerance {tol:5.5f}.')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        valid = False
    return valid
2464
+
2465
+
2466
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Return True if the input array is a valid condensed distance matrix.

    Condensed distance matrices must be 1-dimensional numpy arrays.
    Their length must be a binomial coefficient :math:`{n \\choose 2}`
    for some positive integer n.

    Parameters
    ----------
    y : array_like
        The condensed distance matrix.
    warning : bool, optional
        Invokes a warning if the variable passed is not a valid
        condensed distance matrix. The warning message explains why
        the distance matrix is not valid. `name` is used when
        referencing the offending variable.
    throw : bool, optional
        Throws an exception if the variable passed is not a valid
        condensed distance matrix.
    name : bool, optional
        Used when referencing the offending variable in the
        warning or exception message.

    Returns
    -------
    bool
        True if the input array is a valid condensed distance matrix,
        False otherwise.

    Examples
    --------
    >>> from scipy.spatial.distance import is_valid_y

    This vector is a valid condensed distance matrix. The length is 6,
    which corresponds to ``n = 4``, since ``4*(4 - 1)/2`` is 6.

    >>> v = [1.0, 1.2, 1.0, 0.5, 1.3, 0.9]
    >>> is_valid_y(v)
    True

    An input vector with length, say, 7, is not a valid condensed distance
    matrix.

    >>> is_valid_y([1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7])
    False

    """
    y = np.asarray(y, order='c')
    valid = True
    try:
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).')
                                 % name)
            else:
                raise ValueError('Condensed distance matrix must have shape=1 '
                                 '(i.e. be one-dimensional).')
        n = y.shape[0]
        # Invert n = d*(d-1)/2; ceil(sqrt(2n)) recovers d exactly when n
        # is a valid binomial coefficient.
        d = int(np.ceil(np.sqrt(n * 2)))
        if (d * (d - 1) / 2) != n:
            if name:
                # Bug fix: the concatenated message previously read
                # "i.e.there" — a space was missing between the fragments.
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, '
                                  'i.e. there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        valid = False
    return valid
2544
+
2545
+
2546
def num_obs_dm(d):
    """
    Return the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : array_like
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.

    Examples
    --------
    Find the number of original observations corresponding
    to a square redundant distance matrix d.

    >>> from scipy.spatial.distance import num_obs_dm
    >>> d = [[0, 100, 200], [100, 0, 150], [200, 150, 0]]
    >>> num_obs_dm(d)
    3
    """
    dm = np.asarray(d, order='c')
    # Validate shape only: tol=inf effectively disables the symmetry and
    # zero-diagonal value checks, and throw=True surfaces any problem.
    is_valid_dm(dm, tol=np.inf, throw=True, name='d')
    return dm.shape[0]
2574
+
2575
+
2576
def num_obs_y(Y):
    """
    Return the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : array_like
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.

    Examples
    --------
    Find the number of original observations corresponding to a
    condensed distance matrix Y.

    >>> from scipy.spatial.distance import num_obs_y
    >>> Y = [1, 2, 3.5, 7, 10, 4]
    >>> num_obs_y(Y)
    4
    """
    arr = np.asarray(Y, order='c')
    is_valid_y(arr, throw=True, name='Y')
    length = arr.shape[0]
    if not length:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert length = n*(n-1)/2 for the number of observations n.
    n_obs = int(np.ceil(np.sqrt(length * 2)))
    if n_obs * (n_obs - 1) / 2 != length:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return n_obs
2612
+
2613
+
2614
+ def _prepare_out_argument(out, dtype, expected_shape):
2615
+ if out is None:
2616
+ return np.empty(expected_shape, dtype=dtype)
2617
+
2618
+ if out.shape != expected_shape:
2619
+ raise ValueError("Output array has incorrect shape.")
2620
+ if not out.flags.c_contiguous:
2621
+ raise ValueError("Output array must be C-contiguous.")
2622
+ if out.dtype != np.float64:
2623
+ raise ValueError("Output array must be double type.")
2624
+ return out
2625
+
2626
+
2627
def _pdist_callable(X, *, out, metric, **kwargs):
    """Evaluate ``metric`` on every unordered pair of rows of ``X``.

    Fills (and returns) a condensed distance vector of length
    ``n * (n - 1) / 2``, with pairs visited in row-major order.
    """
    n_rows = X.shape[0]
    dm = _prepare_out_argument(out, np.float64,
                               ((n_rows * (n_rows - 1)) // 2,))
    idx = 0
    for i in range(n_rows - 1):
        u = X[i]
        for j in range(i + 1, n_rows):
            dm[idx] = metric(u, X[j], **kwargs)
            idx += 1
    return dm
2637
+
2638
+
2639
def _cdist_callable(XA, XB, *, out, metric, **kwargs):
    """Evaluate ``metric`` for every (row of ``XA``, row of ``XB``) pair.

    Fills (and returns) an ``(mA, mB)`` distance matrix.
    """
    result_shape = (XA.shape[0], XB.shape[0])
    dm = _prepare_out_argument(out, np.float64, result_shape)
    for i, u in enumerate(XA):
        for j, v in enumerate(XB):
            dm[i, j] = metric(u, v, **kwargs)
    return dm
2647
+
2648
+
2649
def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):
    """
    Compute distance between each pair of the two collections of inputs.

    See Notes for common calling conventions.

    Parameters
    ----------
    XA : array_like
        An :math:`m_A` by :math:`n` array of :math:`m_A`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    XB : array_like
        An :math:`m_B` by :math:`n` array of :math:`m_B`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    metric : str or callable, optional
        The distance metric to use. If a string, the distance function can be
        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
        'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',
        'kulczynski1', 'mahalanobis', 'matching', 'minkowski',
        'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener',
        'sokalsneath', 'sqeuclidean', 'yule'.
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
        The p-norm to apply for Minkowski, weighted and unweighted.
        Default: 2.

        w : array_like
        The weight vector for metrics that support weights (e.g., Minkowski).

        V : array_like
        The variance vector for standardized Euclidean.
        Default: var(vstack([XA, XB]), axis=0, ddof=1)

        VI : array_like
        The inverse of the covariance matrix for Mahalanobis.
        Default: inv(cov(vstack([XA, XB].T))).T

        out : ndarray
        The output array
        If not None, the distance matrix Y is stored in this array.

    Returns
    -------
    Y : ndarray
        A :math:`m_A` by :math:`m_B` distance matrix is returned.
        For each :math:`i` and :math:`j`, the metric
        ``dist(u=XA[i], v=XB[j])`` is computed and stored in the
        :math:`ij` th entry.

    Raises
    ------
    ValueError
        An exception is thrown if `XA` and `XB` do not have
        the same number of columns.

    Notes
    -----
    The following are common calling conventions:

    1. ``Y = cdist(XA, XB, 'euclidean')``

       Computes the distance between :math:`m` points using
       Euclidean distance (2-norm) as the distance metric between the
       points. The points are arranged as :math:`m`
       :math:`n`-dimensional row vectors in the matrix X.

    2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
       that this is only a quasi-metric if :math:`0 < p < 1`).

    3. ``Y = cdist(XA, XB, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.

       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = cdist(XA, XB, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
       the vectors.

    6. ``Y = cdist(XA, XB, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{\\|u\\|}_2 {\\|v\\|}_2}

       where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    7. ``Y = cdist(XA, XB, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.


    8. ``Y = cdist(XA, XB, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = cdist(XA, XB, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree where at least one of them is non-zero.

    10. ``Y = cdist(XA, XB, 'jensenshannon')``

        Computes the Jensen-Shannon distance between two probability arrays.
        Given two probability vectors, :math:`p` and :math:`q`, the
        Jensen-Shannon distance is

        .. math::

           \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

        where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
        and :math:`D` is the Kullback-Leibler divergence.

    11. ``Y = cdist(XA, XB, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}.

    12. ``Y = cdist(XA, XB, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                               {|u_i|+|v_i|}.

    13. ``Y = cdist(XA, XB, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is


        .. math::

             d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
                           {\\sum_i (|u_i+v_i|)}

    14. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    15. ``Y = cdist(XA, XB, 'yule')``

        Computes the Yule distance between the boolean
        vectors. (see `yule` function documentation)

    16. ``Y = cdist(XA, XB, 'matching')``

        Synonym for 'hamming'.

    17. ``Y = cdist(XA, XB, 'dice')``

        Computes the Dice distance between the boolean vectors. (see
        `dice` function documentation)

    18. ``Y = cdist(XA, XB, 'kulczynski1')``

        Computes the kulczynski distance between the boolean
        vectors. (see `kulczynski1` function documentation)

    19. ``Y = cdist(XA, XB, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between the boolean
        vectors. (see `rogerstanimoto` function documentation)

    20. ``Y = cdist(XA, XB, 'russellrao')``

        Computes the Russell-Rao distance between the boolean
        vectors. (see `russellrao` function documentation)

    21. ``Y = cdist(XA, XB, 'sokalmichener')``

        Computes the Sokal-Michener distance between the boolean
        vectors. (see `sokalmichener` function documentation)

    22. ``Y = cdist(XA, XB, 'sokalsneath')``

        Computes the Sokal-Sneath distance between the vectors. (see
        `sokalsneath` function documentation)

    23. ``Y = cdist(XA, XB, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = cdist(XA, XB, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function `sokalsneath`. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax::

          dm = cdist(XA, XB, 'sokalsneath')

    Examples
    --------
    Find the Euclidean distances between four 2-D coordinates:

    >>> from scipy.spatial import distance
    >>> import numpy as np
    >>> coords = [(35.0456, -85.2672),
    ...           (35.1174, -89.9711),
    ...           (35.9728, -83.9422),
    ...           (36.1667, -86.7833)]
    >>> distance.cdist(coords, coords, 'euclidean')
    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
           [ 4.7044,  0.    ,  6.0893,  3.3561],
           [ 1.6172,  6.0893,  0.    ,  2.8477],
           [ 1.8856,  3.3561,  2.8477,  0.    ]])


    Find the Manhattan distance from a 3-D point to the corners of the unit
    cube:

    >>> a = np.array([[ 0, 0, 0],
    ...               [ 0, 0, 1],
    ...               [ 0, 1, 0],
    ...               [ 0, 1, 1],
    ...               [ 1, 0, 0],
    ...               [ 1, 0, 1],
    ...               [ 1, 1, 0],
    ...               [ 1, 1, 1]])
    >>> b = np.array([[ 0.1,  0.2,  0.4]])
    >>> distance.cdist(a, b, 'cityblock')
    array([[ 0.7],
           [ 0.9],
           [ 1.3],
           [ 1.5],
           [ 1.5],
           [ 1.7],
           [ 2.1],
           [ 2.3]])

    """
    # You can also call this as:
    #     Y = cdist(XA, XB, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in XA and XB using the distance metric 'abc'
    # but with a more succinct, verifiable, but less efficient implementation.

    XA = np.asarray(XA)
    XB = np.asarray(XB)

    s = XA.shape
    sB = XB.shape

    # Both inputs must be 2-D observation matrices with the same number
    # of columns (feature dimension).
    if len(s) != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if len(sB) != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    if s[1] != sB[1]:
        raise ValueError('XA and XB must have the same number of columns '
                         '(i.e. feature dimension.)')

    mA = s[0]
    mB = sB[0]
    n = s[1]

    if callable(metric):
        # User-supplied callable: if its __name__ matches a known metric,
        # run the same input validation/conversion as the string path.
        mstr = getattr(metric, '__name__', 'Unknown')
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            XA, XB, typ, kwargs = _validate_cdist_input(
                XA, XB, mA, mB, n, metric_info, **kwargs)
        return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)
    elif isinstance(metric, str):
        mstr = metric.lower()
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            # Known metric name: dispatch to the optimized implementation.
            cdist_fn = metric_info.cdist_func
            return cdist_fn(XA, XB, out=out, **kwargs)
        elif mstr.startswith("test_"):
            # 'test_<metric>' selects the succinct, verifiable (but slower)
            # pure-Python implementation of <metric>.
            metric_info = _TEST_METRICS.get(mstr, None)
            if metric_info is None:
                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
            XA, XB, typ, kwargs = _validate_cdist_input(
                XA, XB, mA, mB, n, metric_info, **kwargs)
            return _cdist_callable(
                XA, XB, metric=metric_info.dist_func, out=out, **kwargs)
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
venv/lib/python3.10/site-packages/scipy/spatial/distance.pyi ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for `scipy.spatial.distance`.
# Only signatures are declared here; the implementations live in the
# corresponding .py module.
from __future__ import annotations
from typing import (overload, Any, SupportsFloat, Literal, Protocol, SupportsIndex)

import numpy as np
from numpy.typing import ArrayLike, NDArray

# Anything that can be parsed by `np.float64.__init__` and is thus
# compatible with `ndarray.__setitem__` (for a float64 array)
_FloatValue = None | str | bytes | SupportsFloat | SupportsIndex

class _MetricCallback1(Protocol):
    # Callable metric taking exactly two vectors.
    def __call__(
        self, __XA: NDArray[Any], __XB: NDArray[Any]
    ) -> _FloatValue: ...

class _MetricCallback2(Protocol):
    # Callable metric taking two vectors plus extra keyword arguments.
    def __call__(
        self, __XA: NDArray[Any], __XB: NDArray[Any], **kwargs: Any
    ) -> _FloatValue: ...

# TODO: Use a single protocol with a parameter specification variable
# once available (PEP 612)
_MetricCallback = _MetricCallback1 | _MetricCallback2

# Accepted string metric names, including short aliases
# (e.g. 'eu' for 'euclidean'); 'chebychev' is a supported alias spelling.
_MetricKind = Literal[
    'braycurtis',
    'canberra',
    'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch',
    'cityblock', 'cblock', 'cb', 'c',
    'correlation', 'co',
    'cosine', 'cos',
    'dice',
    'euclidean', 'euclid', 'eu', 'e',
    'hamming', 'hamm', 'ha', 'h',
    'minkowski', 'mi', 'm', 'pnorm',
    'jaccard', 'jacc', 'ja', 'j',
    'jensenshannon', 'js',
    'kulczynski1',
    'mahalanobis', 'mahal', 'mah',
    'rogerstanimoto',
    'russellrao',
    'seuclidean', 'se', 's',
    'sokalmichener',
    'sokalsneath',
    'sqeuclidean', 'sqe', 'sqeuclid',
    'yule',
]

# Function annotations

def braycurtis(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def canberra(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

# TODO: Add `metric`-specific overloads
# Returns a float64 or float128 array, depending on the input dtype
@overload
def cdist(
    XA: ArrayLike,
    XB: ArrayLike,
    metric: _MetricKind = ...,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    p: float = ...,
    w: ArrayLike | None = ...,
    V: ArrayLike | None = ...,
    VI: ArrayLike | None = ...,
) -> NDArray[np.floating[Any]]: ...
@overload
def cdist(
    XA: ArrayLike,
    XB: ArrayLike,
    metric: _MetricCallback,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    **kwargs: Any,
) -> NDArray[np.floating[Any]]: ...

# TODO: Wait for dtype support; the return type is
# dependent on the input arrays dtype
def chebyshev(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> Any: ...

# TODO: Wait for dtype support; the return type is
# dependent on the input arrays dtype
def cityblock(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> Any: ...

def correlation(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ..., centered: bool = ...
) -> np.float64: ...

def cosine(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def dice(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def directed_hausdorff(
    u: ArrayLike, v: ArrayLike, seed: int | None = ...
) -> tuple[float, int, int]: ...

def euclidean(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def hamming(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def is_valid_dm(
    D: ArrayLike,
    tol: float = ...,
    throw: bool = ...,
    name: str | None = ...,
    warning: bool = ...,
) -> bool: ...

def is_valid_y(
    y: ArrayLike,
    warning: bool = ...,
    throw: bool = ...,
    name: str | None = ...,
) -> bool: ...

def jaccard(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def jensenshannon(
    p: ArrayLike, q: ArrayLike, base: float | None = ...
) -> np.float64: ...

def kulczynski1(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def mahalanobis(
    u: ArrayLike, v: ArrayLike, VI: ArrayLike
) -> np.float64: ...

def minkowski(
    u: ArrayLike, v: ArrayLike, p: float = ..., w: ArrayLike | None = ...
) -> float: ...

def num_obs_dm(d: ArrayLike) -> int: ...

def num_obs_y(Y: ArrayLike) -> int: ...

# TODO: Add `metric`-specific overloads
@overload
def pdist(
    X: ArrayLike,
    metric: _MetricKind = ...,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    p: float = ...,
    w: ArrayLike | None = ...,
    V: ArrayLike | None = ...,
    VI: ArrayLike | None = ...,
) -> NDArray[np.floating[Any]]: ...
@overload
def pdist(
    X: ArrayLike,
    metric: _MetricCallback,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    **kwargs: Any,
) -> NDArray[np.floating[Any]]: ...

def seuclidean(
    u: ArrayLike, v: ArrayLike, V: ArrayLike
) -> float: ...

def sokalmichener(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def sokalsneath(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def sqeuclidean(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def squareform(
    X: ArrayLike,
    force: Literal["no", "tomatrix", "tovector"] = ...,
    checks: bool = ...,
) -> NDArray[Any]: ...

def rogerstanimoto(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def russellrao(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def yule(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...
venv/lib/python3.10/site-packages/scipy/spatial/kdtree.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.spatial` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Deprecated public names previously exposed under scipy.spatial.kdtree.
__all__ = [  # noqa: F822
    'KDTree',
    'Rectangle',
    'cKDTree',
    'cKDTreeNode',
    'distance_matrix',
    'minkowski_distance',
    'minkowski_distance_p',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private `_kdtree` module via the
    # project's deprecation helper (presumably emits a DeprecationWarning
    # — behavior defined in scipy._lib.deprecation).
    return _sub_module_deprecation(sub_package="spatial", module="kdtree",
                                   private_modules=["_kdtree"], all=__all__,
                                   attribute=name)
venv/lib/python3.10/site-packages/scipy/spatial/qhull.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.spatial` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Deprecated public names previously exposed under scipy.spatial.qhull.
__all__ = [  # noqa: F822
    'ConvexHull',
    'Delaunay',
    'HalfspaceIntersection',
    'QhullError',
    'Voronoi',
    'tsearch',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private `_qhull` module via the
    # project's deprecation helper (presumably emits a DeprecationWarning
    # — behavior defined in scipy._lib.deprecation).
    return _sub_module_deprecation(sub_package="spatial", module="qhull",
                                   private_modules=["_qhull"], all=__all__,
                                   attribute=name)
venv/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Qhull, Copyright (c) 1993-2019
2
+
3
+ C.B. Barber
4
+ Arlington, MA
5
+
6
+ and
7
+
8
+ The National Science and Technology Research Center for
9
+ Computation and Visualization of Geometric Structures
10
+ (The Geometry Center)
11
+ University of Minnesota
12
+
13
+ email: qhull@qhull.org
14
+
15
+ This software includes Qhull from C.B. Barber and The Geometry Center.
16
+ Qhull is copyrighted as noted above. Qhull is free software and may
17
+ be obtained via http from www.qhull.org. It may be freely copied, modified,
18
+ and redistributed under the following conditions:
19
+
20
+ 1. All copyright notices must remain intact in all files.
21
+
22
+ 2. A copy of this text file must be distributed along with any copies
23
+ of Qhull that you redistribute; this includes copies that you have
24
+ modified, or copies of programs or other software products that
25
+ include Qhull.
26
+
27
+ 3. If you modify Qhull, you must include a notice giving the
28
+ name of the person performing the modification, the date of
29
+ modification, and the reason for such modification.
30
+
31
+ 4. When distributing modified versions of Qhull, or other software
32
+ products that include Qhull, you must provide notice that the original
33
+ source code may be obtained as noted above.
34
+
35
+ 5. There is no warranty or other guarantee of fitness for Qhull, it is
36
+ provided solely "as is". Bug reports or fixes may be sent to
37
+ qhull_bug@qhull.org; the authors may or may not act on them as
38
+ they desire.
venv/lib/python3.10/site-packages/scipy/spatial/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file