applied-ai-018 committed on
Commit
e2b465d
·
verified ·
1 Parent(s): 50f14f0

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/spatial/__init__.py +129 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/_kdtree.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/kdtree.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/qhull.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi +214 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py +240 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_kdtree.py +920 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_plotutils.py +270 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_procrustes.py +132 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi +213 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py +341 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/spatial/_voronoi.pyi +4 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/spatial/ckdtree.py +27 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/spatial/distance.py +2993 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/spatial/distance.pyi +211 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/spatial/kdtree.py +26 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/spatial/qhull.py +25 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt +38 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__init__.py +29 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/_rotation_groups.py +140 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__init__.py +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/utils.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/cosine_cdf.py +17 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/expn_asy.py +54 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_asy.py +116 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_data.py +124 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/lambertw.py +68 -0
env-llmeval/lib/python3.10/site-packages/scipy/spatial/__init__.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============================================================
3
+ Spatial algorithms and data structures (:mod:`scipy.spatial`)
4
+ =============================================================
5
+
6
+ .. currentmodule:: scipy.spatial
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ spatial.distance
12
+
13
+ Spatial transformations
14
+ =======================
15
+
16
+ These are contained in the `scipy.spatial.transform` submodule.
17
+
18
+ Nearest-neighbor queries
19
+ ========================
20
+ .. autosummary::
21
+ :toctree: generated/
22
+
23
+ KDTree -- class for efficient nearest-neighbor queries
24
+ cKDTree -- class for efficient nearest-neighbor queries (faster implementation)
25
+ Rectangle
26
+
27
+ Distance metrics
28
+ ================
29
+
30
+ Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule.
31
+
32
+ Delaunay triangulation, convex hulls, and Voronoi diagrams
33
+ ==========================================================
34
+
35
+ .. autosummary::
36
+ :toctree: generated/
37
+
38
+ Delaunay -- compute Delaunay triangulation of input points
39
+ ConvexHull -- compute a convex hull for input points
40
+ Voronoi -- compute a Voronoi diagram hull from input points
41
+ SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere
42
+ HalfspaceIntersection -- compute the intersection points of input halfspaces
43
+
44
+ Plotting helpers
45
+ ================
46
+
47
+ .. autosummary::
48
+ :toctree: generated/
49
+
50
+ delaunay_plot_2d -- plot 2-D triangulation
51
+ convex_hull_plot_2d -- plot 2-D convex hull
52
+ voronoi_plot_2d -- plot 2-D Voronoi diagram
53
+
54
+ .. seealso:: :ref:`Tutorial <qhulltutorial>`
55
+
56
+
57
+ Simplex representation
58
+ ======================
59
+ The simplices (triangles, tetrahedra, etc.) appearing in the Delaunay
60
+ tessellation (N-D simplices), convex hull facets, and Voronoi ridges
61
+ (N-1-D simplices) are represented in the following scheme::
62
+
63
+ tess = Delaunay(points)
64
+ hull = ConvexHull(points)
65
+ voro = Voronoi(points)
66
+
67
+ # coordinates of the jth vertex of the ith simplex
68
+ tess.points[tess.simplices[i, j], :] # tessellation element
69
+ hull.points[hull.simplices[i, j], :] # convex hull facet
70
+ voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
71
+
72
+ For Delaunay triangulations and convex hulls, the neighborhood
73
+ structure of the simplices satisfies the condition:
74
+ ``tess.neighbors[i,j]`` is the neighboring simplex of the ith
75
+ simplex, opposite to the ``j``-vertex. It is -1 in case of no neighbor.
76
+
77
+ Convex hull facets also define a hyperplane equation::
78
+
79
+ (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
80
+
81
+ Similar hyperplane equations for the Delaunay triangulation correspond
82
+ to the convex hull facets on the corresponding N+1-D
83
+ paraboloid.
84
+
85
+ The Delaunay triangulation objects offer a method for locating the
86
+ simplex containing a given point, and barycentric coordinate
87
+ computations.
88
+
89
+ Functions
90
+ ---------
91
+
92
+ .. autosummary::
93
+ :toctree: generated/
94
+
95
+ tsearch
96
+ distance_matrix
97
+ minkowski_distance
98
+ minkowski_distance_p
99
+ procrustes
100
+ geometric_slerp
101
+
102
+ Warnings / Errors used in :mod:`scipy.spatial`
103
+ ----------------------------------------------
104
+ .. autosummary::
105
+ :toctree: generated/
106
+
107
+ QhullError
108
+ """ # noqa: E501
109
+
110
+ from ._kdtree import *
111
+ from ._ckdtree import *
112
+ from ._qhull import *
113
+ from ._spherical_voronoi import SphericalVoronoi
114
+ from ._plotutils import *
115
+ from ._procrustes import procrustes
116
+ from ._geometric_slerp import geometric_slerp
117
+
118
+ # Deprecated namespaces, to be removed in v2.0.0
119
+ from . import ckdtree, kdtree, qhull
120
+
121
+ __all__ = [s for s in dir() if not s.startswith('_')]
122
+
123
+ from . import distance, transform
124
+
125
+ __all__ += ['distance', 'transform']
126
+
127
+ from scipy._lib._testutils import PytestTester
128
+ test = PytestTester(__name__)
129
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc ADDED
Binary file (7.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/_kdtree.cpython-310.pyc ADDED
Binary file (34.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/kdtree.cpython-310.pyc ADDED
Binary file (691 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/__pycache__/qhull.cpython-310.pyc ADDED
Binary file (663 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_ckdtree.pyi ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import (
3
+ Any,
4
+ Generic,
5
+ overload,
6
+ TypeVar,
7
+ )
8
+
9
+ import numpy as np
10
+ import numpy.typing as npt
11
+ from scipy.sparse import coo_matrix, dok_matrix
12
+
13
+ from typing import Literal
14
+
15
+ # TODO: Replace `ndarray` with a 1D float64 array when possible
16
+ _BoxType = TypeVar("_BoxType", None, npt.NDArray[np.float64])
17
+
18
+ # Copied from `numpy.typing._scalar_like._ScalarLike`
19
+ # TODO: Expand with 0D arrays once we have shape support
20
+ _ArrayLike0D = bool | int | float | complex | str | bytes | np.generic
21
+
22
+ _WeightType = npt.ArrayLike | tuple[npt.ArrayLike | None, npt.ArrayLike | None]
23
+
24
class cKDTreeNode:
    """Type stub describing one read-only node of a built ``cKDTree``."""
    @property
    def data_points(self) -> npt.NDArray[np.float64]: ...
    @property
    def indices(self) -> npt.NDArray[np.intp]: ...

    # These are read-only attributes in cython, which behave like properties
    @property
    def level(self) -> int: ...
    @property
    def split_dim(self) -> int: ...
    @property
    def children(self) -> int: ...
    @property
    def start_idx(self) -> int: ...
    @property
    def end_idx(self) -> int: ...
    @property
    def split(self) -> float: ...
    @property
    def lesser(self) -> cKDTreeNode | None: ...
    @property
    def greater(self) -> cKDTreeNode | None: ...
47
+
48
class cKDTree(Generic[_BoxType]):
    """Type stub for the compiled k-d tree; generic over the ``boxsize``
    attribute type (``None`` or a float64 array for periodic boxes)."""
    @property
    def n(self) -> int: ...
    @property
    def m(self) -> int: ...
    @property
    def leafsize(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def tree(self) -> cKDTreeNode: ...

    # These are read-only attributes in cython, which behave like properties
    @property
    def data(self) -> npt.NDArray[np.float64]: ...
    @property
    def maxes(self) -> npt.NDArray[np.float64]: ...
    @property
    def mins(self) -> npt.NDArray[np.float64]: ...
    @property
    def indices(self) -> npt.NDArray[np.float64]: ...
    @property
    def boxsize(self) -> _BoxType: ...

    # NOTE: In practice `__init__` is used as constructor, not `__new__`.
    # The latter gives us more flexibility in setting the generic parameter
    # though.
    @overload
    def __new__(  # type: ignore[misc]
        cls,
        data: npt.ArrayLike,
        leafsize: int = ...,
        compact_nodes: bool = ...,
        copy_data: bool = ...,
        balanced_tree: bool = ...,
        boxsize: None = ...,
    ) -> cKDTree[None]: ...
    @overload
    def __new__(
        cls,
        data: npt.ArrayLike,
        leafsize: int = ...,
        compact_nodes: bool = ...,
        copy_data: bool = ...,
        balanced_tree: bool = ...,
        boxsize: npt.ArrayLike = ...,
    ) -> cKDTree[npt.NDArray[np.float64]]: ...

    # TODO: returns a 2-tuple of scalars if `x.ndim == 1` and `k == 1`,
    # returns a 2-tuple of arrays otherwise
    def query(
        self,
        x: npt.ArrayLike,
        k: npt.ArrayLike = ...,
        eps: float = ...,
        p: float = ...,
        distance_upper_bound: float = ...,
        workers: int | None = ...,
    ) -> tuple[Any, Any]: ...

    # TODO: returns a list scalars if `x.ndim <= 1`,
    # returns an object array of lists otherwise
    def query_ball_point(
        self,
        x: npt.ArrayLike,
        r: npt.ArrayLike,
        p: float,
        eps: float = ...,
        workers: int | None = ...,
        return_sorted: bool | None = ...,
        return_length: bool = ...
    ) -> Any: ...

    def query_ball_tree(
        self,
        other: cKDTree,
        r: float,
        p: float,
        eps: float = ...,
    ) -> list[list[int]]: ...

    # Overloaded on `output_type`: set of index pairs vs. (n, 2) int array.
    @overload
    def query_pairs(  # type: ignore[misc]
        self,
        r: float,
        p: float = ...,
        eps: float = ...,
        output_type: Literal["set"] = ...,
    ) -> set[tuple[int, int]]: ...
    @overload
    def query_pairs(
        self,
        r: float,
        p: float = ...,
        eps: float = ...,
        output_type: Literal["ndarray"] = ...,
    ) -> npt.NDArray[np.intp]: ...

    # Overloaded on the scalar-ness of `r` and on whether `weights` is given:
    # scalar r -> scalar result; array r -> array result; weighted -> float.
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: _ArrayLike0D,
        p: float = ...,
        weights: None | tuple[None, None] = ...,
        cumulative: bool = ...,
    ) -> int: ...
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: _ArrayLike0D,
        p: float = ...,
        weights: _WeightType = ...,
        cumulative: bool = ...,
    ) -> np.float64: ...
    @overload
    def count_neighbors(  # type: ignore[misc]
        self,
        other: cKDTree,
        r: npt.ArrayLike,
        p: float = ...,
        weights: None | tuple[None, None] = ...,
        cumulative: bool = ...,
    ) -> npt.NDArray[np.intp]: ...
    @overload
    def count_neighbors(
        self,
        other: cKDTree,
        r: npt.ArrayLike,
        p: float = ...,
        weights: _WeightType = ...,
        cumulative: bool = ...,
    ) -> npt.NDArray[np.float64]: ...

    # Overloaded on `output_type`: sparse matrix, dict, or record array.
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["dok_matrix"] = ...,
    ) -> dok_matrix: ...
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["coo_matrix"] = ...,
    ) -> coo_matrix: ...
    @overload
    def sparse_distance_matrix(  # type: ignore[misc]
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["dict"] = ...,
    ) -> dict[tuple[int, int], float]: ...
    @overload
    def sparse_distance_matrix(
        self,
        other: cKDTree,
        max_distance: float,
        p: float = ...,
        output_type: Literal["ndarray"] = ...,
    ) -> npt.NDArray[np.void]: ...
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (641 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (113 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_geometric_slerp.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ __all__ = ['geometric_slerp']
4
+
5
+ import warnings
6
+ from typing import TYPE_CHECKING
7
+
8
+ import numpy as np
9
+ from scipy.spatial.distance import euclidean
10
+
11
+ if TYPE_CHECKING:
12
+ import numpy.typing as npt
13
+
14
+
15
+ def _geometric_slerp(start, end, t):
16
+ # create an orthogonal basis using QR decomposition
17
+ basis = np.vstack([start, end])
18
+ Q, R = np.linalg.qr(basis.T)
19
+ signs = 2 * (np.diag(R) >= 0) - 1
20
+ Q = Q.T * signs.T[:, np.newaxis]
21
+ R = R.T * signs.T[:, np.newaxis]
22
+
23
+ # calculate the angle between `start` and `end`
24
+ c = np.dot(start, end)
25
+ s = np.linalg.det(R)
26
+ omega = np.arctan2(s, c)
27
+
28
+ # interpolate
29
+ start, end = Q
30
+ s = np.sin(t * omega)
31
+ c = np.cos(t * omega)
32
+ return start * c[:, np.newaxis] + end * s[:, np.newaxis]
33
+
34
+
35
def geometric_slerp(
    start: npt.ArrayLike,
    end: npt.ArrayLike,
    t: npt.ArrayLike,
    tol: float = 1e-7,
) -> np.ndarray:
    """
    Geometric spherical linear interpolation.

    Interpolates along the unit-radius great circle arc joining
    ``start`` and ``end`` in arbitrary dimensional space.

    Parameters
    ----------
    start : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    end : (n_dimensions, ) array-like
        Single n-dimensional input coordinate in a 1-D array-like
        object. `n` must be greater than 1.
    t : float or (n_points,) 1D array-like
        Interpolation parameter(s), each required to lie in the
        inclusive interval between 0 and 1. Ascending, descending,
        and scrambled orders are permitted; a common choice is
        ``np.linspace(0, 1, n_pts)``.
    tol : float
        The absolute tolerance for determining if the start and end
        coordinates are antipodes.

    Returns
    -------
    result : (t.size, D)
        Array of doubles tracing the interpolated spherical path,
        including `start` and `end` when 0 and 1 appear in `t`, in the
        same sort order as `t`. The result is 1-dimensional if ``t``
        is a float.

    Raises
    ------
    ValueError
        If ``start`` and ``end`` are antipodes, not on the
        unit n-sphere, or for a variety of degenerate conditions.

    See Also
    --------
    scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions

    Notes
    -----
    The implementation follows the geometric slerp formula in [1]_; the
    first known presentation of the algorithm is credited to Glenn Davis
    in a footnote of Ken Shoemake's quaternion slerp publication [2]_.

    .. versionadded:: 1.5.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
    .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
           ACM SIGGRAPH Computer Graphics, 19(3): 245-254.

    Examples
    --------
    Interpolate four linearly-spaced values on a 90 degree arc of the
    unit circle:

    >>> import numpy as np
    >>> from scipy.spatial import geometric_slerp
    >>> start = np.array([1, 0])
    >>> end = np.array([0, 1])
    >>> t_vals = np.linspace(0, 1, 4)
    >>> result = geometric_slerp(start, end, t_vals)
    """
    start = np.asarray(start, dtype=np.float64)
    end = np.asarray(end, dtype=np.float64)
    t = np.asarray(t)

    # Guard clauses for degenerate inputs, checked in a fixed order.
    if t.ndim > 1:
        raise ValueError("The interpolation parameter "
                         "value must be one dimensional.")

    if start.ndim != 1 or end.ndim != 1:
        raise ValueError("Start and end coordinates "
                         "must be one-dimensional")

    if start.size != end.size:
        raise ValueError("The dimensions of start and "
                         "end must match (have same size)")

    if start.size < 2 or end.size < 2:
        raise ValueError("The start and end coordinates must "
                         "both be in at least two-dimensional "
                         "space")

    # Identical endpoints: the path degenerates to a repeated point.
    if np.array_equal(start, end):
        return np.linspace(start, start, t.size)

    # Both endpoints must satisfy the unit n-sphere equation.
    for endpoint in (start, end):
        if not np.allclose(np.linalg.norm(endpoint), 1.0,
                           rtol=1e-9,
                           atol=0):
            raise ValueError("start and end are not"
                             " on a unit n-sphere")

    if not isinstance(tol, float):
        raise ValueError("tol must be a float")
    tol = np.fabs(tol)

    # A chord length of 2 (within tolerance) means antipodes, which is a
    # problem for all unit n-spheres: the slerp path is ambiguous.
    chord = euclidean(start, end)
    if np.allclose(chord, 2.0, rtol=0, atol=tol):
        warnings.warn("start and end are antipodes "
                      "using the specified tolerance; "
                      "this may cause ambiguous slerp paths",
                      stacklevel=2)

    t = np.asarray(t, dtype=np.float64)

    if t.size == 0:
        return np.empty((0, start.size))

    if t.min() < 0 or t.max() > 1:
        raise ValueError("interpolation parameter must be in [0, 1]")

    if t.ndim == 0:
        # Scalar parameter: interpolate one point and flatten to 1-D.
        return _geometric_slerp(start, end, np.atleast_1d(t)).ravel()
    return _geometric_slerp(start, end, t)
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_kdtree.py ADDED
@@ -0,0 +1,920 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Anne M. Archibald 2008
2
+ # Released under the scipy license
3
+ import numpy as np
4
+ from ._ckdtree import cKDTree, cKDTreeNode
5
+
6
+ __all__ = ['minkowski_distance_p', 'minkowski_distance',
7
+ 'distance_matrix',
8
+ 'Rectangle', 'KDTree']
9
+
10
+
11
def minkowski_distance_p(x, y, p=2):
    """Compute the pth power of the L**p distance between two arrays.

    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If `p` is 1 or infinity, this is equal to
    the actual L**p distance.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        pth power of the distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance_p
    >>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([2, 1])

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Promote both operands to a common dtype that is at least float64
    # (the function's return type) without discarding complex input —
    # addresses scipy gh-10262.
    common = np.promote_types(np.promote_types(x.dtype, y.dtype),
                              'float64')
    x = x.astype(common)
    y = y.astype(common)

    diff = np.abs(y - x)
    if p == np.inf:
        return np.amax(diff, axis=-1)
    if p == 1:
        return np.sum(diff, axis=-1)
    return np.sum(diff**p, axis=-1)
61
+
62
+
63
def minkowski_distance(x, y, p=2):
    """Compute the L**p distance between two arrays.

    The last dimensions of `x` and `y` must be the same length. Any
    other dimensions must be compatible for broadcasting.

    Parameters
    ----------
    x : (..., K) array_like
        Input array.
    y : (..., K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Returns
    -------
    dist : ndarray
        Distance between the input arrays.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance
    >>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]])
    array([ 1.41421356, 1.        ])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    # For p = 1 and p = inf the pth-power form already equals the true
    # distance; otherwise take the pth root.
    if p == 1 or p == np.inf:
        return minkowski_distance_p(x, y, p)
    return minkowski_distance_p(x, y, p)**(1./p)
96
+
97
+
98
class Rectangle:
    """Hyperrectangle class.

    Represents a Cartesian product of intervals.
    """

    def __init__(self, maxes, mins):
        """Construct a hyperrectangle from two opposite corners."""
        # Normalize per axis so self.maxes >= self.mins regardless of the
        # order the corners were supplied in.
        self.maxes = np.maximum(maxes, mins).astype(float)
        self.mins = np.minimum(maxes, mins).astype(float)
        # Dimensionality of the rectangle.
        self.m, = self.maxes.shape

    def __repr__(self):
        return "<Rectangle %s>" % list(zip(self.mins, self.maxes))

    def volume(self):
        """Total volume."""
        return np.prod(self.maxes - self.mins)

    def split(self, d, split):
        """Produce two hyperrectangles by splitting.

        In general, if you need to compute maximum and minimum
        distances to the children, it can be done more efficiently
        by updating the maximum and minimum distances to the parent.

        Parameters
        ----------
        d : int
            Axis to split hyperrectangle along.
        split : float
            Position along axis `d` to split at.

        """
        upper = np.copy(self.maxes)
        upper[d] = split
        below = Rectangle(self.mins, upper)
        lower = np.copy(self.mins)
        lower[d] = split
        above = Rectangle(lower, self.maxes)
        return below, above

    def min_distance_point(self, x, p=2.):
        """
        Return the minimum distance between input and points in the
        hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input.
        p : float, optional
            Input.

        """
        # Per-axis gap between the point and the rectangle; zero when the
        # point's coordinate lies inside the interval.
        gaps = np.maximum(0, np.maximum(self.mins - x, x - self.maxes))
        return minkowski_distance(0, gaps, p)

    def max_distance_point(self, x, p=2.):
        """
        Return the maximum distance between input and points in the hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input array.
        p : float, optional
            Input.

        """
        # The farthest corner is reached by taking, per axis, the larger
        # of the two distances to the interval endpoints.
        spans = np.maximum(self.maxes - x, x - self.mins)
        return minkowski_distance(0, spans, p)

    def min_distance_rectangle(self, other, p=2.):
        """
        Compute the minimum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float
            Input.

        """
        gaps = np.maximum(0, np.maximum(self.mins - other.maxes,
                                        other.mins - self.maxes))
        return minkowski_distance(0, gaps, p)

    def max_distance_rectangle(self, other, p=2.):
        """
        Compute the maximum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float, optional
            Input.

        """
        spans = np.maximum(self.maxes - other.mins, other.maxes - self.mins)
        return minkowski_distance(0, spans, p)
204
+
205
+
206
class KDTree(cKDTree):
    """kd-tree for quick nearest-neighbor lookup.

    This class provides an index into a set of k-dimensional points
    which can be used to rapidly look up the nearest neighbors of any
    point.

    Parameters
    ----------
    data : array_like, shape (n,m)
        The n data points of dimension m to be indexed. This array is
        not copied unless this is necessary to produce a contiguous
        array of doubles, and so modifying this data will result in
        bogus results. The data are also copied if the kd-tree is built
        with copy_data=True.
    leafsize : positive int, optional
        The number of points at which the algorithm switches over to
        brute-force. Default: 10.
    compact_nodes : bool, optional
        If True, the kd-tree is built to shrink the hyperrectangles to
        the actual data range. This usually gives a more compact tree that
        is robust against degenerated input data and gives faster queries
        at the expense of longer build time. Default: True.
    copy_data : bool, optional
        If True the data is always copied to protect the kd-tree against
        data corruption. Default: False.
    balanced_tree : bool, optional
        If True, the median is used to split the hyperrectangles instead of
        the midpoint. This usually gives a more compact tree and
        faster queries at the expense of longer build time. Default: True.
    boxsize : array_like or scalar, optional
        Apply a m-d toroidal topology to the KDTree. The topology is generated
        by :math:`x_i + n_i L_i` where :math:`n_i` are integers and :math:`L_i`
        is the boxsize along i-th dimension. The input data shall be wrapped
        into :math:`[0, L_i)`. A ValueError is raised if any of the data is
        outside of this bound.

    Notes
    -----
    The algorithm used is described in Maneewongvatana and Mount 1999.
    The general idea is that the kd-tree is a binary tree, each of whose
    nodes represents an axis-aligned hyperrectangle. Each node specifies
    an axis and splits the set of points based on whether their coordinate
    along that axis is greater than or less than a particular value.

    During construction, the axis and splitting point are chosen by the
    "sliding midpoint" rule, which ensures that the cells do not all
    become long and thin.

    The tree can be queried for the r closest neighbors of any given point
    (optionally returning only those within some maximum distance of the
    point). It can also be queried, with a substantial gain in efficiency,
    for the r approximate closest neighbors.

    For large dimensions (20 is already large) do not expect this to run
    significantly faster than brute force. High-dimensional nearest-neighbor
    queries are a substantial open problem in computer science.

    Attributes
    ----------
    data : ndarray, shape (n,m)
        The n data points of dimension m to be indexed. This array is
        not copied unless this is necessary to produce a contiguous
        array of doubles. The data are also copied if the kd-tree is built
        with `copy_data=True`.
    leafsize : positive int
        The number of points at which the algorithm switches over to
        brute-force.
    m : int
        The dimension of a single data-point.
    n : int
        The number of data points.
    maxes : ndarray, shape (m,)
        The maximum value in each dimension of the n data points.
    mins : ndarray, shape (m,)
        The minimum value in each dimension of the n data points.
    size : int
        The number of nodes in the tree.

    """

    class node:
        """Python-level wrapper around a ``cKDTreeNode``."""

        @staticmethod
        def _create(ckdtree_node=None):
            """Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
            if ckdtree_node is None:
                return KDTree.node(ckdtree_node)
            elif ckdtree_node.split_dim == -1:
                # A split_dim of -1 marks a leaf in the underlying cKDTree.
                return KDTree.leafnode(ckdtree_node)
            else:
                return KDTree.innernode(ckdtree_node)

        def __init__(self, ckdtree_node=None):
            if ckdtree_node is None:
                ckdtree_node = cKDTreeNode()
            self._node = ckdtree_node

        # Nodes order and compare by object identity only; this gives a
        # consistent (if arbitrary) total order between distinct wrappers.
        def __lt__(self, other):
            return id(self) < id(other)

        def __gt__(self, other):
            return id(self) > id(other)

        def __le__(self, other):
            return id(self) <= id(other)

        def __ge__(self, other):
            return id(self) >= id(other)

        def __eq__(self, other):
            return id(self) == id(other)

    class leafnode(node):
        """Wrapper for a leaf node, exposing the indices it stores."""

        @property
        def idx(self):
            return self._node.indices

        @property
        def children(self):
            return self._node.children

    class innernode(node):
        """Wrapper for an internal node, exposing its split and subtrees."""

        def __init__(self, ckdtreenode):
            assert isinstance(ckdtreenode, cKDTreeNode)
            super().__init__(ckdtreenode)
            # Recursively wrap both subtrees eagerly.
            self.less = KDTree.node._create(ckdtreenode.lesser)
            self.greater = KDTree.node._create(ckdtreenode.greater)

        @property
        def split_dim(self):
            return self._node.split_dim

        @property
        def split(self):
            return self._node.split

        @property
        def children(self):
            return self._node.children

    @property
    def tree(self):
        # Lazily build and cache the Python wrapper tree around the
        # cKDTree root the first time it is requested.
        if not hasattr(self, "_tree"):
            self._tree = KDTree.node._create(super().tree)

        return self._tree

    def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
                 balanced_tree=True, boxsize=None):
        data = np.asarray(data)
        if data.dtype.kind == 'c':
            raise TypeError("KDTree does not work with complex data")

        # Note KDTree has different default leafsize from cKDTree
        super().__init__(data, leafsize, compact_nodes, copy_data,
                         balanced_tree, boxsize)

    def query(
            self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
        r"""Query the kd-tree for nearest neighbors.

        Parameters
        ----------
        x : array_like, last dimension self.m
            An array of points to query.
        k : int or Sequence[int], optional
            Either the number of nearest neighbors to return, or a list of the
            k-th nearest neighbors to return, starting from 1.
        eps : nonnegative float, optional
            Return approximate nearest neighbors; the kth returned value
            is guaranteed to be no further than (1+eps) times the
            distance to the real kth nearest neighbor.
        p : float, 1<=p<=infinity, optional
            Which Minkowski p-norm to use.
            1 is the sum-of-absolute-values distance ("Manhattan" distance).
            2 is the usual Euclidean distance.
            infinity is the maximum-coordinate-difference distance.
            A large, finite p may cause a ValueError if overflow can occur.
        distance_upper_bound : nonnegative float, optional
            Return only neighbors within this distance. This is used to prune
            tree searches, so if you are doing a series of nearest-neighbor
            queries, it may help to supply the distance to the nearest neighbor
            of the most recent point.
        workers : int, optional
            Number of workers to use for parallel processing. If -1 is given
            all CPU threads are used. Default: 1.

            .. versionadded:: 1.6.0

        Returns
        -------
        d : float or array of floats
            The distances to the nearest neighbors.
            If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
            ``tuple+(k,)``.
            When k == 1, the last dimension of the output is squeezed.
            Missing neighbors are indicated with infinite distances.
            Hits are sorted by distance (nearest first).

            .. versionchanged:: 1.9.0
               Previously if ``k=None``, then `d` was an object array of
               shape ``tuple``, containing lists of distances. This behavior
               has been removed, use `query_ball_point` instead.

        i : integer or array of integers
            The index of each neighbor in ``self.data``.
            ``i`` is the same shape as d.
            Missing neighbors are indicated with ``self.n``.

        Examples
        --------

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> x, y = np.mgrid[0:5, 2:8]
        >>> tree = KDTree(np.c_[x.ravel(), y.ravel()])

        To query the nearest neighbours and return squeezed result, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=1)
        >>> print(dd, ii, sep='\n')
        [2.         0.2236068]
        [ 0 13]

        To query the nearest neighbours and return unsqueezed result, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1])
        >>> print(dd, ii, sep='\n')
        [[2.       ]
         [0.2236068]]
        [[ 0]
         [13]]

        To query the second nearest neighbours and return unsqueezed result,
        use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[2])
        >>> print(dd, ii, sep='\n')
        [[2.23606798]
         [0.80622577]]
        [[ 6]
         [19]]

        To query the first and second nearest neighbours, use

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=2)
        >>> print(dd, ii, sep='\n')
        [[2.         2.23606798]
         [0.2236068  0.80622577]]
        [[ 0  6]
         [13 19]]

        or, be more specific

        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1, 2])
        >>> print(dd, ii, sep='\n')
        [[2.         2.23606798]
         [0.2236068  0.80622577]]
        [[ 0  6]
         [13 19]]

        """
        x = np.asarray(x)
        if x.dtype.kind == 'c':
            raise TypeError("KDTree does not work with complex data")

        if k is None:
            raise ValueError("k must be an integer or a sequence of integers")

        d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
        if isinstance(i, int):
            # Normalize a scalar result from a bare Python int to np.intp so
            # the scalar return matches the integer dtype of the array case.
            i = np.intp(i)
        return d, i

    def query_ball_point(self, x, r, p=2., eps=0, workers=1,
                         return_sorted=None, return_length=False):
        """Find all points within distance r of point(s) x.

        Parameters
        ----------
        x : array_like, shape tuple + (self.m,)
            The point or points to search for neighbors of.
        r : array_like, float
            The radius of points to return, must broadcast to the length of x.
        p : float, optional
            Which Minkowski p-norm to use. Should be in the range [1, inf].
            A finite large p may cause a ValueError if overflow can occur.
        eps : nonnegative float, optional
            Approximate search. Branches of the tree are not explored if their
            nearest points are further than ``r / (1 + eps)``, and branches are
            added in bulk if their furthest points are nearer than
            ``r * (1 + eps)``.
        workers : int, optional
            Number of jobs to schedule for parallel processing. If -1 is given
            all processors are used. Default: 1.

            .. versionadded:: 1.6.0
        return_sorted : bool, optional
            Sorts returned indices if True and does not sort them if False. If
            None, does not sort single point queries, but does sort
            multi-point queries which was the behavior before this option
            was added.

            .. versionadded:: 1.6.0
        return_length : bool, optional
            Return the number of points inside the radius instead of a list
            of the indices.

            .. versionadded:: 1.6.0

        Returns
        -------
        results : list or array of lists
            If `x` is a single point, returns a list of the indices of the
            neighbors of `x`. If `x` is an array of points, returns an object
            array of shape tuple containing lists of neighbors.

        Notes
        -----
        If you have many points whose neighbors you want to find, you may save
        substantial amounts of time by putting them in a KDTree and using
        query_ball_tree.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import spatial
        >>> x, y = np.mgrid[0:5, 0:5]
        >>> points = np.c_[x.ravel(), y.ravel()]
        >>> tree = spatial.KDTree(points)
        >>> sorted(tree.query_ball_point([2, 0], 1))
        [5, 10, 11, 15]

        Query multiple points and plot the results:

        >>> import matplotlib.pyplot as plt
        >>> points = np.asarray(points)
        >>> plt.plot(points[:,0], points[:,1], '.')
        >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
        ...     nearby_points = points[results]
        ...     plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
        >>> plt.margins(0.1, 0.1)
        >>> plt.show()

        """
        x = np.asarray(x)
        if x.dtype.kind == 'c':
            raise TypeError("KDTree does not work with complex data")
        return super().query_ball_point(
            x, r, p, eps, workers, return_sorted, return_length)

    def query_ball_tree(self, other, r, p=2., eps=0):
        """
        Find all pairs of points between `self` and `other` whose distance is
        at most r.

        Parameters
        ----------
        other : KDTree instance
            The tree containing points to search against.
        r : float
            The maximum distance, has to be positive.
        p : float, optional
            Which Minkowski norm to use. `p` has to meet the condition
            ``1 <= p <= infinity``.
        eps : float, optional
            Approximate search. Branches of the tree are not explored
            if their nearest points are further than ``r/(1+eps)``, and
            branches are added in bulk if their furthest points are nearer
            than ``r * (1+eps)``. `eps` has to be non-negative.

        Returns
        -------
        results : list of lists
            For each element ``self.data[i]`` of this tree, ``results[i]`` is a
            list of the indices of its neighbors in ``other.data``.

        Examples
        --------
        You can search all pairs of points between two kd-trees within a distance:

        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points1 = rng.random((15, 2))
        >>> points2 = rng.random((15, 2))
        >>> plt.figure(figsize=(6, 6))
        >>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
        >>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
        >>> kd_tree1 = KDTree(points1)
        >>> kd_tree2 = KDTree(points2)
        >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
        >>> for i in range(len(indexes)):
        ...     for j in indexes[i]:
        ...         plt.plot([points1[i, 0], points2[j, 0]],
        ...             [points1[i, 1], points2[j, 1]], "-r")
        >>> plt.show()

        """
        return super().query_ball_tree(other, r, p, eps)

    def query_pairs(self, r, p=2., eps=0, output_type='set'):
        """Find all pairs of points in `self` whose distance is at most r.

        Parameters
        ----------
        r : positive float
            The maximum distance.
        p : float, optional
            Which Minkowski norm to use. `p` has to meet the condition
            ``1 <= p <= infinity``.
        eps : float, optional
            Approximate search. Branches of the tree are not explored
            if their nearest points are further than ``r/(1+eps)``, and
            branches are added in bulk if their furthest points are nearer
            than ``r * (1+eps)``. `eps` has to be non-negative.
        output_type : string, optional
            Choose the output container, 'set' or 'ndarray'. Default: 'set'

            .. versionadded:: 1.6.0

        Returns
        -------
        results : set or ndarray
            Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
            positions are close. If output_type is 'ndarray', an ndarry is
            returned instead of a set.

        Examples
        --------
        You can search all pairs of points in a kd-tree within a distance:

        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points = rng.random((20, 2))
        >>> plt.figure(figsize=(6, 6))
        >>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
        >>> kd_tree = KDTree(points)
        >>> pairs = kd_tree.query_pairs(r=0.2)
        >>> for (i, j) in pairs:
        ...     plt.plot([points[i, 0], points[j, 0]],
        ...             [points[i, 1], points[j, 1]], "-r")
        >>> plt.show()

        """
        return super().query_pairs(r, p, eps, output_type)

    def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
        """Count how many nearby pairs can be formed.

        Count the number of pairs ``(x1,x2)`` can be formed, with ``x1`` drawn
        from ``self`` and ``x2`` drawn from ``other``, and where
        ``distance(x1, x2, p) <= r``.

        Data points on ``self`` and ``other`` are optionally weighted by the
        ``weights`` argument. (See below)

        This is adapted from the "two-point correlation" algorithm described by
        Gray and Moore [1]_.  See notes for further discussion.

        Parameters
        ----------
        other : KDTree
            The other tree to draw points from, can be the same tree as self.
        r : float or one-dimensional array of floats
            The radius to produce a count for. Multiple radii are searched with
            a single tree traversal.
            If the count is non-cumulative(``cumulative=False``), ``r`` defines
            the edges of the bins, and must be non-decreasing.
        p : float, optional
            1<=p<=infinity.
            Which Minkowski p-norm to use.
            Default 2.0.
            A finite large p may cause a ValueError if overflow can occur.
        weights : tuple, array_like, or None, optional
            If None, the pair-counting is unweighted.
            If given as a tuple, weights[0] is the weights of points in
            ``self``, and weights[1] is the weights of points in ``other``;
            either can be None to indicate the points are unweighted.
            If given as an array_like, weights is the weights of points in
            ``self`` and ``other``. For this to make sense, ``self`` and
            ``other`` must be the same tree. If ``self`` and ``other`` are two
            different trees, a ``ValueError`` is raised.
            Default: None

            .. versionadded:: 1.6.0
        cumulative : bool, optional
            Whether the returned counts are cumulative. When cumulative is set
            to ``False`` the algorithm is optimized to work with a large number
            of bins (>10) specified by ``r``. When ``cumulative`` is set to
            True, the algorithm is optimized to work with a small number of
            ``r``. Default: True

            .. versionadded:: 1.6.0

        Returns
        -------
        result : scalar or 1-D array
            The number of pairs. For unweighted counts, the result is integer.
            For weighted counts, the result is float.
            If cumulative is False, ``result[i]`` contains the counts with
            ``(-inf if i == 0 else r[i-1]) < R <= r[i]``

        Notes
        -----
        Pair-counting is the basic operation used to calculate the two point
        correlation functions from a data set composed of position of objects.

        Two point correlation function measures the clustering of objects and
        is widely used in cosmology to quantify the large scale structure
        in our Universe, but it may be useful for data analysis in other fields
        where self-similar assembly of objects also occur.

        The Landy-Szalay estimator for the two point correlation function of
        ``D`` measures the clustering signal in ``D``. [2]_

        For example, given the position of two sets of objects,

        - objects ``D`` (data) contains the clustering signal, and

        - objects ``R`` (random) that contains no signal,

        .. math::

             \\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2<R, R>}{f^2<R, R>},

        where the brackets represents counting pairs between two data sets
        in a finite bin around ``r`` (distance), corresponding to setting
        `cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is the
        ratio between number of objects from data and random.

        The algorithm implemented here is loosely based on the dual-tree
        algorithm described in [1]_. We switch between two different
        pair-cumulation scheme depending on the setting of ``cumulative``.
        The computing time of the method we use when for
        ``cumulative == False`` does not scale with the total number of bins.
        The algorithm for ``cumulative == True`` scales linearly with the
        number of bins, though it is slightly faster when only
        1 or 2 bins are used. [5]_.

        As an extension to the naive pair-counting,
        weighted pair-counting counts the product of weights instead
        of number of pairs.
        Weighted pair-counting is used to estimate marked correlation functions
        ([3]_, section 2.2),
        or to properly calculate the average of data per distance bin
        (e.g. [4]_, section 2.1 on redshift).

        .. [1] Gray and Moore,
               "N-body problems in statistical learning",
               Mining the sky, 2000,
               https://arxiv.org/abs/astro-ph/0012333

        .. [2] Landy and Szalay,
               "Bias and variance of angular correlation functions",
               The Astrophysical Journal, 1993,
               http://adsabs.harvard.edu/abs/1993ApJ...412...64L

        .. [3] Sheth, Connolly and Skibba,
               "Marked correlations in galaxy formation models",
               Arxiv e-print, 2005,
               https://arxiv.org/abs/astro-ph/0511773

        .. [4] Hawkins, et al.,
               "The 2dF Galaxy Redshift Survey: correlation functions,
               peculiar velocities and the matter density of the Universe",
               Monthly Notices of the Royal Astronomical Society, 2002,
               http://adsabs.harvard.edu/abs/2003MNRAS.346...78H

        .. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926

        Examples
        --------
        You can count neighbors number between two kd-trees within a distance:

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points1 = rng.random((5, 2))
        >>> points2 = rng.random((5, 2))
        >>> kd_tree1 = KDTree(points1)
        >>> kd_tree2 = KDTree(points2)
        >>> kd_tree1.count_neighbors(kd_tree2, 0.2)
        1

        This number is same as the total pair number calculated by
        `query_ball_tree`:

        >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
        >>> sum([len(i) for i in indexes])
        1

        """
        return super().count_neighbors(other, r, p, weights, cumulative)

    def sparse_distance_matrix(
            self, other, max_distance, p=2., output_type='dok_matrix'):
        """Compute a sparse distance matrix.

        Computes a distance matrix between two KDTrees, leaving as zero
        any distance greater than max_distance.

        Parameters
        ----------
        other : KDTree

        max_distance : positive float

        p : float, 1<=p<=infinity
            Which Minkowski p-norm to use.
            A finite large p may cause a ValueError if overflow can occur.

        output_type : string, optional
            Which container to use for output data. Options: 'dok_matrix',
            'coo_matrix', 'dict', or 'ndarray'. Default: 'dok_matrix'.

            .. versionadded:: 1.6.0

        Returns
        -------
        result : dok_matrix, coo_matrix, dict or ndarray
            Sparse matrix representing the results in "dictionary of keys"
            format. If a dict is returned the keys are (i,j) tuples of indices.
            If output_type is 'ndarray' a record array with fields 'i', 'j',
            and 'v' is returned,

        Examples
        --------
        You can compute a sparse distance matrix between two kd-trees:

        >>> import numpy as np
        >>> from scipy.spatial import KDTree
        >>> rng = np.random.default_rng()
        >>> points1 = rng.random((5, 2))
        >>> points2 = rng.random((5, 2))
        >>> kd_tree1 = KDTree(points1)
        >>> kd_tree2 = KDTree(points2)
        >>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
        >>> sdm.toarray()
        array([[0.        , 0.        , 0.12295571, 0.        , 0.        ],
               [0.        , 0.        , 0.        , 0.        , 0.        ],
               [0.28942611, 0.        , 0.        , 0.2333084 , 0.        ],
               [0.        , 0.        , 0.        , 0.        , 0.        ],
               [0.24617575, 0.29571802, 0.26836782, 0.        , 0.        ]])

        You can check distances above the `max_distance` are zeros:

        >>> from scipy.spatial import distance_matrix
        >>> distance_matrix(points1, points2)
        array([[0.56906522, 0.39923701, 0.12295571, 0.8658745 , 0.79428925],
               [0.37327919, 0.7225693 , 0.87665969, 0.32580855, 0.75679479],
               [0.28942611, 0.30088013, 0.6395831 , 0.2333084 , 0.33630734],
               [0.31994999, 0.72658602, 0.71124834, 0.55396483, 0.90785663],
               [0.24617575, 0.29571802, 0.26836782, 0.57714465, 0.6473269 ]])

        """
        return super().sparse_distance_matrix(
            other, max_distance, p, output_type)
867
+
868
+
869
def distance_matrix(x, y, p=2, threshold=1000000):
    """Compute the matrix of all pairwise distances between two point sets.

    Parameters
    ----------
    x : (M, K) array_like
        Matrix of M vectors in K dimensions.
    y : (N, K) array_like
        Matrix of N vectors in K dimensions.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.
    threshold : positive int
        When ``M * N * K`` exceeds `threshold`, a Python loop over the
        shorter axis is used instead of one large broadcast temporary.

    Returns
    -------
    result : (M, N) ndarray
        ``result[i, j]`` is the distance from ``x[i]`` to ``y[j]``.

    Examples
    --------
    >>> from scipy.spatial import distance_matrix
    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])

    """
    x = np.asarray(x)
    y = np.asarray(y)
    n_x, dim_x = x.shape
    n_y, dim_y = y.shape

    if dim_x != dim_y:
        raise ValueError(f"x contains {dim_x}-dimensional vectors but y contains "
                         f"{dim_y}-dimensional vectors")

    if n_x * n_y * dim_x <= threshold:
        # Small enough problem: a single fully vectorized broadcast over an
        # (M, N, K) intermediate array.
        return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)

    # Too large for one temporary: fill the output one row (or column) at a
    # time, looping over whichever side is shorter.
    out = np.empty((n_x, n_y), dtype=float)  # FIXME: figure out the best dtype
    if n_x < n_y:
        for row in range(n_x):
            out[row, :] = minkowski_distance(x[row], y, p)
    else:
        for col in range(n_y):
            out[:, col] = minkowski_distance(x, y[col], p)
    return out
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_plotutils.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy._lib.decorator import decorator as _decorator
3
+
4
+ __all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
5
+
6
+
7
@_decorator
def _held_figure(func, obj, ax=None, **kw):
    """Invoke *func* with a valid axes, creating a fresh figure when needed.

    Also preserves the axes' (pre-matplotlib-2.0) "hold" state around the
    call when the axes object still exposes that machinery.
    """
    import matplotlib.pyplot as plt

    if ax is None:
        ax = plt.figure().gca()
        return func(obj, ax=ax, **kw)

    # As of matplotlib 2.0, the "hold" mechanism is deprecated; axes without
    # an ``ishold`` attribute are treated as always holding.
    # When matplotlib 1.x is no longer supported, this check can be removed.
    previously_held = getattr(ax, 'ishold', lambda: True)()
    if previously_held:
        return func(obj, ax=ax, **kw)
    try:
        ax.hold(True)
        return func(obj, ax=ax, **kw)
    finally:
        ax.hold(previously_held)
26
+
27
+
28
def _adjust_bounds(ax, points):
    """Set the axes limits to the point cloud's bounds plus 10% padding."""
    pad = 0.1 * np.ptp(points, axis=0)
    lower = points.min(axis=0) - pad
    upper = points.max(axis=0) + pad
    ax.set_xlim(lower[0], upper[0])
    ax.set_ylim(lower[1], upper[1])
34
+
35
+
36
@_held_figure
def delaunay_plot_2d(tri, ax=None):
    """
    Plot a 2-D Delaunay triangulation: its points plus all triangle edges.

    Parameters
    ----------
    tri : scipy.spatial.Delaunay instance
        The triangulation to draw.
    ax : matplotlib.axes.Axes instance, optional
        Axes to draw into; a new figure is created when omitted.

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        The figure containing the plot.

    See Also
    --------
    Delaunay
    matplotlib.pyplot.triplot

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Delaunay, delaunay_plot_2d

    The Delaunay triangulation of a set of random points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> tri = Delaunay(points)

    Plot it:

    >>> _ = delaunay_plot_2d(tri)
    >>> plt.show()

    """
    if tri.points.shape[1] != 2:
        raise ValueError("Delaunay triangulation is not 2-D")

    px, py = tri.points.T
    # Vertices as dots, then every triangle edge via triplot.
    ax.plot(px, py, 'o')
    ax.triplot(px, py, tri.simplices.copy())

    _adjust_bounds(ax, tri.points)
    return ax.figure
91
+
92
+
93
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
    """
    Plot a 2-D convex hull: the input points plus the hull boundary edges.

    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        The hull to draw.
    ax : matplotlib.axes.Axes instance, optional
        Axes to draw into; a new figure is created when omitted.

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        The figure containing the plot.

    See Also
    --------
    ConvexHull

    Notes
    -----
    Requires Matplotlib.


    Examples
    --------

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d

    The convex hull of a random set of points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> hull = ConvexHull(points)

    Plot it:

    >>> _ = convex_hull_plot_2d(hull)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if hull.points.shape[1] != 2:
        raise ValueError("Convex hull is not 2-D")

    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')
    # Each hull simplex is one boundary edge; draw them all in one collection.
    edges = [hull.points[simplex] for simplex in hull.simplices]
    ax.add_collection(LineCollection(edges,
                                     colors='k',
                                     linestyle='solid'))
    _adjust_bounds(ax, hull.points)
    return ax.figure
151
+
152
+
153
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
    """
    Plot the given Voronoi diagram in 2-D

    Parameters
    ----------
    vor : scipy.spatial.Voronoi instance
        Diagram to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on
    show_points : bool, optional
        Add the Voronoi points to the plot.
    show_vertices : bool, optional
        Add the Voronoi vertices to the plot.
    line_colors : string, optional
        Specifies the line color for polygon boundaries
    line_width : float, optional
        Specifies the line width for polygon boundaries
    line_alpha : float, optional
        Specifies the line alpha for polygon boundaries
    point_size : float, optional
        Specifies the size of points

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Voronoi

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d

    Create a set of points for the example:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((10,2))

    Generate the Voronoi diagram for the points:

    >>> vor = Voronoi(points)

    Use `voronoi_plot_2d` to plot the diagram:

    >>> fig = voronoi_plot_2d(vor)

    Use `voronoi_plot_2d` to plot the diagram again, with some settings
    customized:

    >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
    ...                       line_width=2, line_alpha=0.6, point_size=2)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if vor.points.shape[1] != 2:
        raise ValueError("Voronoi diagram is not 2-D")

    if kw.get('show_points', True):
        point_size = kw.get('point_size', None)
        ax.plot(vor.points[:, 0], vor.points[:, 1], '.', markersize=point_size)
    if kw.get('show_vertices', True):
        ax.plot(vor.vertices[:, 0], vor.vertices[:, 1], 'o')

    line_colors = kw.get('line_colors', 'k')
    line_width = kw.get('line_width', 1.0)
    line_alpha = kw.get('line_alpha', 1.0)

    # Centroid of the input points and their per-axis extent; both are used
    # below to orient and scale the rays drawn for unbounded ridges.
    center = vor.points.mean(axis=0)
    ptp_bound = np.ptp(vor.points, axis=0)

    finite_segments = []
    infinite_segments = []
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            finite_segments.append(vor.vertices[simplex])
        else:
            # Unbounded ridge: a -1 entry marks the missing vertex, so draw a
            # ray from the finite vertex out past the plotted region.
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            # Point the ray away from the centroid of the input points
            # (toward it for a furthest-site diagram).
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            if (vor.furthest_site):
                direction = -direction
            # Scale by the larger extent times the aspect ratio so the ray
            # always leaves the padded plot area.
            aspect_factor = abs(ptp_bound.max() / ptp_bound.min())
            far_point = vor.vertices[i] + direction * ptp_bound.max() * aspect_factor

            infinite_segments.append([vor.vertices[i], far_point])

    ax.add_collection(LineCollection(finite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='solid'))
    ax.add_collection(LineCollection(infinite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='dashed'))

    _adjust_bounds(ax, vor.points)

    return ax.figure
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_procrustes.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module provides functions to perform full Procrustes analysis.
3
+
4
+ This code was originally written by Justin Kuczynski and ported over from
5
+ scikit-bio by Yoshiki Vazquez-Baeza.
6
+ """
7
+
8
+ import numpy as np
9
+ from scipy.linalg import orthogonal_procrustes
10
+
11
+
12
+ __all__ = ['procrustes']
13
+
14
+
15
+ def procrustes(data1, data2):
16
+ r"""Procrustes analysis, a similarity test for two data sets.
17
+
18
+ Each input matrix is a set of points or vectors (the rows of the matrix).
19
+ The dimension of the space is the number of columns of each matrix. Given
20
+ two identically sized matrices, procrustes standardizes both such that:
21
+
22
+ - :math:`tr(AA^{T}) = 1`.
23
+
24
+ - Both sets of points are centered around the origin.
25
+
26
+ Procrustes ([1]_, [2]_) then applies the optimal transform to the second
27
+ matrix (including scaling/dilation, rotations, and reflections) to minimize
28
+ :math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
29
+ pointwise differences between the two input datasets.
30
+
31
+ This function was not designed to handle datasets with different numbers of
32
+ datapoints (rows). If two data sets have different dimensionality
33
+ (different number of columns), simply add columns of zeros to the smaller
34
+ of the two.
35
+
36
+ Parameters
37
+ ----------
38
+ data1 : array_like
39
+ Matrix, n rows represent points in k (columns) space `data1` is the
40
+ reference data, after it is standardised, the data from `data2` will be
41
+ transformed to fit the pattern in `data1` (must have >1 unique points).
42
+ data2 : array_like
43
+ n rows of data in k space to be fit to `data1`. Must be the same
44
+ shape ``(numrows, numcols)`` as data1 (must have >1 unique points).
45
+
46
+ Returns
47
+ -------
48
+ mtx1 : array_like
49
+ A standardized version of `data1`.
50
+ mtx2 : array_like
51
+ The orientation of `data2` that best fits `data1`. Centered, but not
52
+ necessarily :math:`tr(AA^{T}) = 1`.
53
+ disparity : float
54
+ :math:`M^{2}` as defined above.
55
+
56
+ Raises
57
+ ------
58
+ ValueError
59
+ If the input arrays are not two-dimensional.
60
+ If the shape of the input arrays is different.
61
+ If the input arrays have zero columns or zero rows.
62
+
63
+ See Also
64
+ --------
65
+ scipy.linalg.orthogonal_procrustes
66
+ scipy.spatial.distance.directed_hausdorff : Another similarity test
67
+ for two data sets
68
+
69
+ Notes
70
+ -----
71
+ - The disparity should not depend on the order of the input matrices, but
72
+ the output matrices will, as only the first output matrix is guaranteed
73
+ to be scaled such that :math:`tr(AA^{T}) = 1`.
74
+
75
+ - Duplicate data points are generally ok, duplicating a data point will
76
+ increase its effect on the procrustes fit.
77
+
78
+ - The disparity scales as the number of points per input matrix.
79
+
80
+ References
81
+ ----------
82
+ .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
83
+ .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
84
+
85
+ Examples
86
+ --------
87
+ >>> import numpy as np
88
+ >>> from scipy.spatial import procrustes
89
+
90
+ The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
91
+ ``a`` here:
92
+
93
+ >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
94
+ >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
95
+ >>> mtx1, mtx2, disparity = procrustes(a, b)
96
+ >>> round(disparity)
97
+ 0.0
98
+
99
+ """
100
+ mtx1 = np.array(data1, dtype=np.float64, copy=True)
101
+ mtx2 = np.array(data2, dtype=np.float64, copy=True)
102
+
103
+ if mtx1.ndim != 2 or mtx2.ndim != 2:
104
+ raise ValueError("Input matrices must be two-dimensional")
105
+ if mtx1.shape != mtx2.shape:
106
+ raise ValueError("Input matrices must be of same shape")
107
+ if mtx1.size == 0:
108
+ raise ValueError("Input matrices must be >0 rows and >0 cols")
109
+
110
+ # translate all the data to the origin
111
+ mtx1 -= np.mean(mtx1, 0)
112
+ mtx2 -= np.mean(mtx2, 0)
113
+
114
+ norm1 = np.linalg.norm(mtx1)
115
+ norm2 = np.linalg.norm(mtx2)
116
+
117
+ if norm1 == 0 or norm2 == 0:
118
+ raise ValueError("Input matrices must contain >1 unique points")
119
+
120
+ # change scaling of data (in rows) such that trace(mtx*mtx') = 1
121
+ mtx1 /= norm1
122
+ mtx2 /= norm2
123
+
124
+ # transform mtx2 to minimize disparity
125
+ R, s = orthogonal_procrustes(mtx1, mtx2)
126
+ mtx2 = np.dot(mtx2, R.T) * s
127
+
128
+ # measure the dissimilarity between the two datasets
129
+ disparity = np.sum(np.square(mtx1 - mtx2))
130
+
131
+ return mtx1, mtx2, disparity
132
+
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.pyi ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ Static type checking stub file for scipy/spatial/qhull.pyx
3
+ '''
4
+
5
+
6
+ import numpy as np
7
+ from numpy.typing import ArrayLike, NDArray
8
+ from typing_extensions import final
9
+
10
+ class QhullError(RuntimeError):
11
+ ...
12
+
13
+ @final
14
+ class _Qhull:
15
+ # Read-only cython attribute that behaves, more or less, like a property
16
+ @property
17
+ def ndim(self) -> int: ...
18
+ mode_option: bytes
19
+ options: bytes
20
+ furthest_site: bool
21
+
22
+ def __init__(
23
+ self,
24
+ mode_option: bytes,
25
+ points: NDArray[np.float64],
26
+ options: None | bytes = ...,
27
+ required_options: None | bytes = ...,
28
+ furthest_site: bool = ...,
29
+ incremental: bool = ...,
30
+ interior_point: None | NDArray[np.float64] = ...,
31
+ ) -> None: ...
32
+ def check_active(self) -> None: ...
33
+ def close(self) -> None: ...
34
+ def get_points(self) -> NDArray[np.float64]: ...
35
+ def add_points(
36
+ self,
37
+ points: ArrayLike,
38
+ interior_point: ArrayLike = ...
39
+ ) -> None: ...
40
+ def get_paraboloid_shift_scale(self) -> tuple[float, float]: ...
41
+ def volume_area(self) -> tuple[float, float]: ...
42
+ def triangulate(self) -> None: ...
43
+ def get_simplex_facet_array(self) -> tuple[
44
+ NDArray[np.intc],
45
+ NDArray[np.intc],
46
+ NDArray[np.float64],
47
+ NDArray[np.intc],
48
+ NDArray[np.intc],
49
+ ]: ...
50
+ def get_hull_points(self) -> NDArray[np.float64]: ...
51
+ def get_hull_facets(self) -> tuple[
52
+ list[list[int]],
53
+ NDArray[np.float64],
54
+ ]: ...
55
+ def get_voronoi_diagram(self) -> tuple[
56
+ NDArray[np.float64],
57
+ NDArray[np.intc],
58
+ list[list[int]],
59
+ list[list[int]],
60
+ NDArray[np.intp],
61
+ ]: ...
62
+ def get_extremes_2d(self) -> NDArray[np.intc]: ...
63
+
64
+ def _get_barycentric_transforms(
65
+ points: NDArray[np.float64],
66
+ simplices: NDArray[np.intc],
67
+ eps: float
68
+ ) -> NDArray[np.float64]: ...
69
+
70
+ class _QhullUser:
71
+ ndim: int
72
+ npoints: int
73
+ min_bound: NDArray[np.float64]
74
+ max_bound: NDArray[np.float64]
75
+
76
+ def __init__(self, qhull: _Qhull, incremental: bool = ...) -> None: ...
77
+ def close(self) -> None: ...
78
+ def _update(self, qhull: _Qhull) -> None: ...
79
+ def _add_points(
80
+ self,
81
+ points: ArrayLike,
82
+ restart: bool = ...,
83
+ interior_point: ArrayLike = ...
84
+ ) -> None: ...
85
+
86
+ class Delaunay(_QhullUser):
87
+ furthest_site: bool
88
+ paraboloid_scale: float
89
+ paraboloid_shift: float
90
+ simplices: NDArray[np.intc]
91
+ neighbors: NDArray[np.intc]
92
+ equations: NDArray[np.float64]
93
+ coplanar: NDArray[np.intc]
94
+ good: NDArray[np.intc]
95
+ nsimplex: int
96
+ vertices: NDArray[np.intc]
97
+
98
+ def __init__(
99
+ self,
100
+ points: ArrayLike,
101
+ furthest_site: bool = ...,
102
+ incremental: bool = ...,
103
+ qhull_options: None | str = ...
104
+ ) -> None: ...
105
+ def _update(self, qhull: _Qhull) -> None: ...
106
+ def add_points(
107
+ self,
108
+ points: ArrayLike,
109
+ restart: bool = ...
110
+ ) -> None: ...
111
+ @property
112
+ def points(self) -> NDArray[np.float64]: ...
113
+ @property
114
+ def transform(self) -> NDArray[np.float64]: ...
115
+ @property
116
+ def vertex_to_simplex(self) -> NDArray[np.intc]: ...
117
+ @property
118
+ def vertex_neighbor_vertices(self) -> tuple[
119
+ NDArray[np.intc],
120
+ NDArray[np.intc],
121
+ ]: ...
122
+ @property
123
+ def convex_hull(self) -> NDArray[np.intc]: ...
124
+ def find_simplex(
125
+ self,
126
+ xi: ArrayLike,
127
+ bruteforce: bool = ...,
128
+ tol: float = ...
129
+ ) -> NDArray[np.intc]: ...
130
+ def plane_distance(self, xi: ArrayLike) -> NDArray[np.float64]: ...
131
+ def lift_points(self, x: ArrayLike) -> NDArray[np.float64]: ...
132
+
133
+ def tsearch(tri: Delaunay, xi: ArrayLike) -> NDArray[np.intc]: ...
134
+ def _copy_docstr(dst: object, src: object) -> None: ...
135
+
136
+ class ConvexHull(_QhullUser):
137
+ simplices: NDArray[np.intc]
138
+ neighbors: NDArray[np.intc]
139
+ equations: NDArray[np.float64]
140
+ coplanar: NDArray[np.intc]
141
+ good: None | NDArray[np.bool_]
142
+ volume: float
143
+ area: float
144
+ nsimplex: int
145
+
146
+ def __init__(
147
+ self,
148
+ points: ArrayLike,
149
+ incremental: bool = ...,
150
+ qhull_options: None | str = ...
151
+ ) -> None: ...
152
+ def _update(self, qhull: _Qhull) -> None: ...
153
+ def add_points(self, points: ArrayLike,
154
+ restart: bool = ...) -> None: ...
155
+ @property
156
+ def points(self) -> NDArray[np.float64]: ...
157
+ @property
158
+ def vertices(self) -> NDArray[np.intc]: ...
159
+
160
+ class Voronoi(_QhullUser):
161
+ vertices: NDArray[np.float64]
162
+ ridge_points: NDArray[np.intc]
163
+ ridge_vertices: list[list[int]]
164
+ regions: list[list[int]]
165
+ point_region: NDArray[np.intp]
166
+ furthest_site: bool
167
+
168
+ def __init__(
169
+ self,
170
+ points: ArrayLike,
171
+ furthest_site: bool = ...,
172
+ incremental: bool = ...,
173
+ qhull_options: None | str = ...
174
+ ) -> None: ...
175
+ def _update(self, qhull: _Qhull) -> None: ...
176
+ def add_points(
177
+ self,
178
+ points: ArrayLike,
179
+ restart: bool = ...
180
+ ) -> None: ...
181
+ @property
182
+ def points(self) -> NDArray[np.float64]: ...
183
+ @property
184
+ def ridge_dict(self) -> dict[tuple[int, int], list[int]]: ...
185
+
186
+ class HalfspaceIntersection(_QhullUser):
187
+ interior_point: NDArray[np.float64]
188
+ dual_facets: list[list[int]]
189
+ dual_equations: NDArray[np.float64]
190
+ dual_points: NDArray[np.float64]
191
+ dual_volume: float
192
+ dual_area: float
193
+ intersections: NDArray[np.float64]
194
+ ndim: int
195
+ nineq: int
196
+
197
+ def __init__(
198
+ self,
199
+ halfspaces: ArrayLike,
200
+ interior_point: ArrayLike,
201
+ incremental: bool = ...,
202
+ qhull_options: None | str = ...
203
+ ) -> None: ...
204
+ def _update(self, qhull: _Qhull) -> None: ...
205
+ def add_halfspaces(
206
+ self,
207
+ halfspaces: ArrayLike,
208
+ restart: bool = ...
209
+ ) -> None: ...
210
+ @property
211
+ def halfspaces(self) -> NDArray[np.float64]: ...
212
+ @property
213
+ def dual_vertices(self) -> NDArray[np.integer]: ...
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_spherical_voronoi.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spherical Voronoi Code
3
+
4
+ .. versionadded:: 0.18.0
5
+
6
+ """
7
+ #
8
+ # Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
9
+ # Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
10
+ #
11
+ # Distributed under the same BSD license as SciPy.
12
+ #
13
+
14
+ import numpy as np
15
+ import scipy
16
+ from . import _voronoi
17
+ from scipy.spatial import cKDTree
18
+
19
+ __all__ = ['SphericalVoronoi']
20
+
21
+
22
+ def calculate_solid_angles(R):
23
+ """Calculates the solid angles of plane triangles. Implements the method of
24
+ Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
25
+ that input points have unit norm."""
26
+ # Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
27
+ # This is equal to the determinant of the matrix [R1 R2 R3], which can be
28
+ # computed with better stability.
29
+ numerator = np.linalg.det(R)
30
+ denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
31
+ np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
32
+ np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
33
+ return np.abs(2 * np.arctan2(numerator, denominator))
34
+
35
+
36
+ class SphericalVoronoi:
37
+ """ Voronoi diagrams on the surface of a sphere.
38
+
39
+ .. versionadded:: 0.18.0
40
+
41
+ Parameters
42
+ ----------
43
+ points : ndarray of floats, shape (npoints, ndim)
44
+ Coordinates of points from which to construct a spherical
45
+ Voronoi diagram.
46
+ radius : float, optional
47
+ Radius of the sphere (Default: 1)
48
+ center : ndarray of floats, shape (ndim,)
49
+ Center of sphere (Default: origin)
50
+ threshold : float
51
+ Threshold for detecting duplicate points and
52
+ mismatches between points and sphere parameters.
53
+ (Default: 1e-06)
54
+
55
+ Attributes
56
+ ----------
57
+ points : double array of shape (npoints, ndim)
58
+ the points in `ndim` dimensions to generate the Voronoi diagram from
59
+ radius : double
60
+ radius of the sphere
61
+ center : double array of shape (ndim,)
62
+ center of the sphere
63
+ vertices : double array of shape (nvertices, ndim)
64
+ Voronoi vertices corresponding to points
65
+ regions : list of list of integers of shape (npoints, _ )
66
+ the n-th entry is a list consisting of the indices
67
+ of the vertices belonging to the n-th point in points
68
+
69
+ Methods
70
+ -------
71
+ calculate_areas
72
+ Calculates the areas of the Voronoi regions. For 2D point sets, the
73
+ regions are circular arcs. The sum of the areas is `2 * pi * radius`.
74
+ For 3D point sets, the regions are spherical polygons. The sum of the
75
+ areas is `4 * pi * radius**2`.
76
+
77
+ Raises
78
+ ------
79
+ ValueError
80
+ If there are duplicates in `points`.
81
+ If the provided `radius` is not consistent with `points`.
82
+
83
+ Notes
84
+ -----
85
+ The spherical Voronoi diagram algorithm proceeds as follows. The Convex
86
+ Hull of the input points (generators) is calculated, and is equivalent to
87
+ their Delaunay triangulation on the surface of the sphere [Caroli]_.
88
+ The Convex Hull neighbour information is then used to
89
+ order the Voronoi region vertices around each generator. The latter
90
+ approach is substantially less sensitive to floating point issues than
91
+ angle-based methods of Voronoi region vertex sorting.
92
+
93
+ Empirical assessment of spherical Voronoi algorithm performance suggests
94
+ quadratic time complexity (loglinear is optimal, but algorithms are more
95
+ challenging to implement).
96
+
97
+ References
98
+ ----------
99
+ .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
100
+ points on or close to a sphere. Research Report RR-7004, 2009.
101
+
102
+ .. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
103
+ triangle. IEEE Transactions on Biomedical Engineering,
104
+ 2, 1983, pp 125--126.
105
+
106
+ See Also
107
+ --------
108
+ Voronoi : Conventional Voronoi diagrams in N dimensions.
109
+
110
+ Examples
111
+ --------
112
+ Do some imports and take some points on a cube:
113
+
114
+ >>> import numpy as np
115
+ >>> import matplotlib.pyplot as plt
116
+ >>> from scipy.spatial import SphericalVoronoi, geometric_slerp
117
+ >>> from mpl_toolkits.mplot3d import proj3d
118
+ >>> # set input data
119
+ >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
120
+ ... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
121
+
122
+ Calculate the spherical Voronoi diagram:
123
+
124
+ >>> radius = 1
125
+ >>> center = np.array([0, 0, 0])
126
+ >>> sv = SphericalVoronoi(points, radius, center)
127
+
128
+ Generate plot:
129
+
130
+ >>> # sort vertices (optional, helpful for plotting)
131
+ >>> sv.sort_vertices_of_regions()
132
+ >>> t_vals = np.linspace(0, 1, 2000)
133
+ >>> fig = plt.figure()
134
+ >>> ax = fig.add_subplot(111, projection='3d')
135
+ >>> # plot the unit sphere for reference (optional)
136
+ >>> u = np.linspace(0, 2 * np.pi, 100)
137
+ >>> v = np.linspace(0, np.pi, 100)
138
+ >>> x = np.outer(np.cos(u), np.sin(v))
139
+ >>> y = np.outer(np.sin(u), np.sin(v))
140
+ >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
141
+ >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
142
+ >>> # plot generator points
143
+ >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
144
+ >>> # plot Voronoi vertices
145
+ >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
146
+ ... c='g')
147
+ >>> # indicate Voronoi regions (as Euclidean polygons)
148
+ >>> for region in sv.regions:
149
+ ... n = len(region)
150
+ ... for i in range(n):
151
+ ... start = sv.vertices[region][i]
152
+ ... end = sv.vertices[region][(i + 1) % n]
153
+ ... result = geometric_slerp(start, end, t_vals)
154
+ ... ax.plot(result[..., 0],
155
+ ... result[..., 1],
156
+ ... result[..., 2],
157
+ ... c='k')
158
+ >>> ax.azim = 10
159
+ >>> ax.elev = 40
160
+ >>> _ = ax.set_xticks([])
161
+ >>> _ = ax.set_yticks([])
162
+ >>> _ = ax.set_zticks([])
163
+ >>> fig.set_size_inches(4, 4)
164
+ >>> plt.show()
165
+
166
+ """
167
+ def __init__(self, points, radius=1, center=None, threshold=1e-06):
168
+
169
+ if radius is None:
170
+ raise ValueError('`radius` is `None`. '
171
+ 'Please provide a floating point number '
172
+ '(i.e. `radius=1`).')
173
+
174
+ self.radius = float(radius)
175
+ self.points = np.array(points).astype(np.float64)
176
+ self._dim = self.points.shape[1]
177
+ if center is None:
178
+ self.center = np.zeros(self._dim)
179
+ else:
180
+ self.center = np.array(center, dtype=float)
181
+
182
+ # test degenerate input
183
+ self._rank = np.linalg.matrix_rank(self.points - self.points[0],
184
+ tol=threshold * self.radius)
185
+ if self._rank < self._dim:
186
+ raise ValueError(f"Rank of input points must be at least {self._dim}")
187
+
188
+ if cKDTree(self.points).query_pairs(threshold * self.radius):
189
+ raise ValueError("Duplicate generators present.")
190
+
191
+ radii = np.linalg.norm(self.points - self.center, axis=1)
192
+ max_discrepancy = np.abs(radii - self.radius).max()
193
+ if max_discrepancy >= threshold * self.radius:
194
+ raise ValueError("Radius inconsistent with generators.")
195
+
196
+ self._calc_vertices_regions()
197
+
198
+ def _calc_vertices_regions(self):
199
+ """
200
+ Calculates the Voronoi vertices and regions of the generators stored
201
+ in self.points. The vertices will be stored in self.vertices and the
202
+ regions in self.regions.
203
+
204
+ This algorithm was discussed at PyData London 2015 by
205
+ Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
206
+ """
207
+ # get Convex Hull
208
+ conv = scipy.spatial.ConvexHull(self.points)
209
+ # get circumcenters of Convex Hull triangles from facet equations
210
+ # for 3D input circumcenters will have shape: (2N-4, 3)
211
+ self.vertices = self.radius * conv.equations[:, :-1] + self.center
212
+ self._simplices = conv.simplices
213
+ # calculate regions from triangulation
214
+ # for 3D input simplex_indices will have shape: (2N-4,)
215
+ simplex_indices = np.arange(len(self._simplices))
216
+ # for 3D input tri_indices will have shape: (6N-12,)
217
+ tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
218
+ # for 3D input point_indices will have shape: (6N-12,)
219
+ point_indices = self._simplices.ravel()
220
+ # for 3D input indices will have shape: (6N-12,)
221
+ indices = np.argsort(point_indices, kind='mergesort')
222
+ # for 3D input flattened_groups will have shape: (6N-12,)
223
+ flattened_groups = tri_indices[indices].astype(np.intp)
224
+ # intervals will have shape: (N+1,)
225
+ intervals = np.cumsum(np.bincount(point_indices + 1))
226
+ # split flattened groups to get nested list of unsorted regions
227
+ groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
228
+ for i in range(len(intervals) - 1)]
229
+ self.regions = groups
230
+
231
+ def sort_vertices_of_regions(self):
232
+ """Sort indices of the vertices to be (counter-)clockwise ordered.
233
+
234
+ Raises
235
+ ------
236
+ TypeError
237
+ If the points are not three-dimensional.
238
+
239
+ Notes
240
+ -----
241
+ For each region in regions, it sorts the indices of the Voronoi
242
+ vertices such that the resulting points are in a clockwise or
243
+ counterclockwise order around the generator point.
244
+
245
+ This is done as follows: Recall that the n-th region in regions
246
+ surrounds the n-th generator in points and that the k-th
247
+ Voronoi vertex in vertices is the circumcenter of the k-th triangle
248
+ in self._simplices. For each region n, we choose the first triangle
249
+ (=Voronoi vertex) in self._simplices and a vertex of that triangle
250
+ not equal to the center n. These determine a unique neighbor of that
251
+ triangle, which is then chosen as the second triangle. The second
252
+ triangle will have a unique vertex not equal to the current vertex or
253
+ the center. This determines a unique neighbor of the second triangle,
254
+ which is then chosen as the third triangle and so forth. We proceed
255
+ through all the triangles (=Voronoi vertices) belonging to the
256
+ generator in points and obtain a sorted version of the vertices
257
+ of its surrounding region.
258
+ """
259
+ if self._dim != 3:
260
+ raise TypeError("Only supported for three-dimensional point sets")
261
+ _voronoi.sort_vertices_of_regions(self._simplices, self.regions)
262
+
263
+ def _calculate_areas_3d(self):
264
+ self.sort_vertices_of_regions()
265
+ sizes = [len(region) for region in self.regions]
266
+ csizes = np.cumsum(sizes)
267
+ num_regions = csizes[-1]
268
+
269
+ # We create a set of triangles consisting of one point and two Voronoi
270
+ # vertices. The vertices of each triangle are adjacent in the sorted
271
+ # regions list.
272
+ point_indices = [i for i, size in enumerate(sizes)
273
+ for j in range(size)]
274
+
275
+ nbrs1 = np.array([r for region in self.regions for r in region])
276
+
277
+ # The calculation of nbrs2 is a vectorized version of:
278
+ # np.array([r for region in self.regions for r in np.roll(region, 1)])
279
+ nbrs2 = np.roll(nbrs1, 1)
280
+ indices = np.roll(csizes, 1)
281
+ indices[0] = 0
282
+ nbrs2[indices] = nbrs1[csizes - 1]
283
+
284
+ # Normalize points and vertices.
285
+ pnormalized = (self.points - self.center) / self.radius
286
+ vnormalized = (self.vertices - self.center) / self.radius
287
+
288
+ # Create the complete set of triangles and calculate their solid angles
289
+ triangles = np.hstack([pnormalized[point_indices],
290
+ vnormalized[nbrs1],
291
+ vnormalized[nbrs2]
292
+ ]).reshape((num_regions, 3, 3))
293
+ triangle_solid_angles = calculate_solid_angles(triangles)
294
+
295
+ # Sum the solid angles of the triangles in each region
296
+ solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
297
+ solid_angles[1:] -= solid_angles[:-1]
298
+
299
+ # Get polygon areas using A = omega * r**2
300
+ return solid_angles * self.radius**2
301
+
302
+ def _calculate_areas_2d(self):
303
+ # Find start and end points of arcs
304
+ arcs = self.points[self._simplices] - self.center
305
+
306
+ # Calculate the angle subtended by arcs
307
+ d = np.sum((arcs[:, 1] - arcs[:, 0]) ** 2, axis=1)
308
+ theta = np.arccos(1 - (d / (2 * (self.radius ** 2))))
309
+
310
+ # Get areas using A = r * theta
311
+ areas = self.radius * theta
312
+
313
+ # Correct arcs which go the wrong way (single-hemisphere inputs)
314
+ signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
315
+ self.vertices - self.center))
316
+ indices = np.where(signs < 0)
317
+ areas[indices] = 2 * np.pi * self.radius - areas[indices]
318
+ return areas
319
+
320
+ def calculate_areas(self):
321
+ """Calculates the areas of the Voronoi regions.
322
+
323
+ For 2D point sets, the regions are circular arcs. The sum of the areas
324
+ is `2 * pi * radius`.
325
+
326
+ For 3D point sets, the regions are spherical polygons. The sum of the
327
+ areas is `4 * pi * radius**2`.
328
+
329
+ .. versionadded:: 1.5.0
330
+
331
+ Returns
332
+ -------
333
+ areas : double array of shape (npoints,)
334
+ The areas of the Voronoi regions.
335
+ """
336
+ if self._dim == 2:
337
+ return self._calculate_areas_2d()
338
+ elif self._dim == 3:
339
+ return self._calculate_areas_3d()
340
+ else:
341
+ raise TypeError("Only supported for 2D and 3D point sets")
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (241 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/_voronoi.pyi ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
+ def sort_vertices_of_regions(simplices: np.ndarray, regions: list[list[int]]) -> None: ... # noqa: E501
env-llmeval/lib/python3.10/site-packages/scipy/spatial/ckdtree.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.spatial` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'cKDTree',
10
+ 'cKDTreeNode',
11
+ 'coo_entries',
12
+ 'operator',
13
+ 'ordered_pairs',
14
+ 'os',
15
+ 'scipy',
16
+ 'threading',
17
+ ]
18
+
19
+
20
+ def __dir__():
21
+ return __all__
22
+
23
+
24
+ def __getattr__(name):
25
+ return _sub_module_deprecation(sub_package="spatial", module="ckdtree",
26
+ private_modules=["_ckdtree"], all=__all__,
27
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/spatial/distance.py ADDED
@@ -0,0 +1,2993 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Distance computations (:mod:`scipy.spatial.distance`)
3
+ =====================================================
4
+
5
+ .. sectionauthor:: Damian Eads
6
+
7
+ Function reference
8
+ ------------------
9
+
10
+ Distance matrix computation from a collection of raw observation vectors
11
+ stored in a rectangular array.
12
+
13
+ .. autosummary::
14
+ :toctree: generated/
15
+
16
+ pdist -- pairwise distances between observation vectors.
17
+ cdist -- distances between two collections of observation vectors
18
+ squareform -- convert distance matrix to a condensed one and vice versa
19
+ directed_hausdorff -- directed Hausdorff distance between arrays
20
+
21
+ Predicates for checking the validity of distance matrices, both
22
+ condensed and redundant. Also contained in this module are functions
23
+ for computing the number of observations in a distance matrix.
24
+
25
+ .. autosummary::
26
+ :toctree: generated/
27
+
28
+ is_valid_dm -- checks for a valid distance matrix
29
+ is_valid_y -- checks for a valid condensed distance matrix
30
+ num_obs_dm -- # of observations in a distance matrix
31
+ num_obs_y -- # of observations in a condensed distance matrix
32
+
33
+ Distance functions between two numeric vectors ``u`` and ``v``. Computing
34
+ distances over a large collection of vectors is inefficient for these
35
+ functions. Use ``pdist`` for this purpose.
36
+
37
+ .. autosummary::
38
+ :toctree: generated/
39
+
40
+ braycurtis -- the Bray-Curtis distance.
41
+ canberra -- the Canberra distance.
42
+ chebyshev -- the Chebyshev distance.
43
+ cityblock -- the Manhattan distance.
44
+ correlation -- the Correlation distance.
45
+ cosine -- the Cosine distance.
46
+ euclidean -- the Euclidean distance.
47
+ jensenshannon -- the Jensen-Shannon distance.
48
+ mahalanobis -- the Mahalanobis distance.
49
+ minkowski -- the Minkowski distance.
50
+ seuclidean -- the normalized Euclidean distance.
51
+ sqeuclidean -- the squared Euclidean distance.
52
+
53
+ Distance functions between two boolean vectors (representing sets) ``u`` and
54
+ ``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
55
+ computing the distances between all pairs.
56
+
57
+ .. autosummary::
58
+ :toctree: generated/
59
+
60
+ dice -- the Dice dissimilarity.
61
+ hamming -- the Hamming distance.
62
+ jaccard -- the Jaccard distance.
63
+ kulczynski1 -- the Kulczynski 1 distance.
64
+ rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
65
+ russellrao -- the Russell-Rao dissimilarity.
66
+ sokalmichener -- the Sokal-Michener dissimilarity.
67
+ sokalsneath -- the Sokal-Sneath dissimilarity.
68
+ yule -- the Yule dissimilarity.
69
+
70
+ :func:`hamming` also operates over discrete numerical vectors.
71
+ """
72
+
73
+ # Copyright (C) Damian Eads, 2007-2008. New BSD License.
74
+
75
+ __all__ = [
76
+ 'braycurtis',
77
+ 'canberra',
78
+ 'cdist',
79
+ 'chebyshev',
80
+ 'cityblock',
81
+ 'correlation',
82
+ 'cosine',
83
+ 'dice',
84
+ 'directed_hausdorff',
85
+ 'euclidean',
86
+ 'hamming',
87
+ 'is_valid_dm',
88
+ 'is_valid_y',
89
+ 'jaccard',
90
+ 'jensenshannon',
91
+ 'kulczynski1',
92
+ 'mahalanobis',
93
+ 'minkowski',
94
+ 'num_obs_dm',
95
+ 'num_obs_y',
96
+ 'pdist',
97
+ 'rogerstanimoto',
98
+ 'russellrao',
99
+ 'seuclidean',
100
+ 'sokalmichener',
101
+ 'sokalsneath',
102
+ 'sqeuclidean',
103
+ 'squareform',
104
+ 'yule'
105
+ ]
106
+
107
+
108
+ import math
109
+ import warnings
110
+ import numpy as np
111
+ import dataclasses
112
+
113
+ from typing import Optional, Callable
114
+
115
+ from functools import partial
116
+ from scipy._lib._util import _asarray_validated
117
+
118
+ from . import _distance_wrap
119
+ from . import _hausdorff
120
+ from ..linalg import norm
121
+ from ..special import rel_entr
122
+
123
+ from . import _distance_pybind
124
+
125
+
126
+ def _copy_array_if_base_present(a):
127
+ """Copy the array if its base points to a parent array."""
128
+ if a.base is not None:
129
+ return a.copy()
130
+ return a
131
+
132
+
133
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
    """Fill ``dm`` with correlation distances between rows of XA and XB.

    Correlation distance equals cosine distance on mean-centered rows,
    so each observation is centered and the cosine C kernel is reused.
    """
    centered_a = XA - XA.mean(axis=1, keepdims=True)
    centered_b = XB - XB.mean(axis=1, keepdims=True)
    _distance_wrap.cdist_cosine_double_wrap(centered_a, centered_b, dm, **kwargs)
137
+
138
+
139
def _correlation_pdist_wrap(X, dm, **kwargs):
    """Fill ``dm`` with pairwise correlation distances between rows of X.

    Centers each row, then delegates to the cosine C kernel (correlation
    distance is cosine distance on centered data).
    """
    centered = X - X.mean(axis=1, keepdims=True)
    _distance_wrap.pdist_cosine_double_wrap(centered, dm, **kwargs)
142
+
143
+
144
+ def _convert_to_type(X, out_type):
145
+ return np.ascontiguousarray(X, dtype=out_type)
146
+
147
+
148
+ def _nbool_correspond_all(u, v, w=None):
149
+ if u.dtype == v.dtype == bool and w is None:
150
+ not_u = ~u
151
+ not_v = ~v
152
+ nff = (not_u & not_v).sum()
153
+ nft = (not_u & v).sum()
154
+ ntf = (u & not_v).sum()
155
+ ntt = (u & v).sum()
156
+ else:
157
+ dtype = np.result_type(int, u.dtype, v.dtype)
158
+ u = u.astype(dtype)
159
+ v = v.astype(dtype)
160
+ not_u = 1.0 - u
161
+ not_v = 1.0 - v
162
+ if w is not None:
163
+ not_u = w * not_u
164
+ u = w * u
165
+ nff = (not_u * not_v).sum()
166
+ nft = (not_u * v).sum()
167
+ ntf = (u * not_v).sum()
168
+ ntt = (u * v).sum()
169
+ return (nff, nft, ntf, ntt)
170
+
171
+
172
+ def _nbool_correspond_ft_tf(u, v, w=None):
173
+ if u.dtype == v.dtype == bool and w is None:
174
+ not_u = ~u
175
+ not_v = ~v
176
+ nft = (not_u & v).sum()
177
+ ntf = (u & not_v).sum()
178
+ else:
179
+ dtype = np.result_type(int, u.dtype, v.dtype)
180
+ u = u.astype(dtype)
181
+ v = v.astype(dtype)
182
+ not_u = 1.0 - u
183
+ not_v = 1.0 - v
184
+ if w is not None:
185
+ not_u = w * not_u
186
+ u = w * u
187
+ nft = (not_u * v).sum()
188
+ ntf = (u * not_v).sum()
189
+ return (nft, ntf)
190
+
191
+
192
def _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):
    """Coerce XA/XB to a dtype supported by *metric_info* and check kwargs.

    Returns ``(XA, XB, typ, kwargs)`` where ``typ`` is the chosen dtype.
    """
    supported = metric_info.types
    # Keep the caller's dtype when the metric supports it; otherwise fall
    # back to the metric's preferred (first-listed) type.
    typ = supported[supported.index(XA.dtype)] if XA.dtype in supported else supported[0]
    XA = _convert_to_type(XA, out_type=typ)
    XB = _convert_to_type(XB, out_type=typ)

    validator = metric_info.validator
    if validator:
        kwargs = validator((XA, XB), mA + mB, n, **kwargs)
    return XA, XB, typ, kwargs
206
+
207
+
208
def _validate_weight_with_size(X, m, n, **kwargs):
    """Pop an optional weight vector 'w' from kwargs and validate its size.

    Parameters
    ----------
    X : ndarray or tuple of ndarray
        Observation data (unused here; kept for validator signature parity).
    m : int
        Number of observations (unused here).
    n : int
        Length the weight vector must match.
    **kwargs : dict
        May contain 'w', a 1-D array-like of non-negative weights.

    Returns
    -------
    kwargs : dict
        With 'w' (when present) replaced by a validated float64 vector.

    Raises
    ------
    ValueError
        If 'w' is not 1-D of length ``n`` or has negative entries.
    """
    w = kwargs.pop('w', None)
    if w is None:
        return kwargs

    # Coerce to an ndarray first so array-likes (lists, tuples) get the
    # intended ValueError below instead of AttributeError on `.ndim`.
    w = np.asarray(w)
    if w.ndim != 1 or w.shape[0] != n:
        # `w.size` is safe for any rank, unlike `w.shape[0]` on 0-d input.
        raise ValueError("Weights must have same size as input vector. "
                         f"{w.size} vs. {n}")

    kwargs['w'] = _validate_weights(w)
    return kwargs
219
+
220
+
221
def _validate_hamming_kwargs(X, m, n, **kwargs):
    """Ensure the hamming metric's weight vector 'w' is present and valid.

    A missing 'w' defaults to uniform unit weights of length ``n``.

    Raises
    ------
    ValueError
        If 'w' is not 1-D of length ``n`` or has negative entries.
    """
    w = kwargs.get('w', np.ones((n,), dtype='double'))

    # Coerce to an ndarray first so array-likes (lists, tuples) get the
    # intended ValueError below instead of AttributeError on `.ndim`.
    w = np.asarray(w)
    if w.ndim != 1 or w.shape[0] != n:
        raise ValueError(
            "Weights must have same size as input vector. %d vs. %d" % (w.size, n)
        )

    kwargs['w'] = _validate_weights(w)
    return kwargs
231
+
232
+
233
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
    """Ensure an inverse covariance matrix 'VI' is available in kwargs.

    When 'VI' is absent it is estimated from the observations in X, which
    requires more observations than dimensions (otherwise the covariance
    matrix is singular).
    """
    VI = kwargs.pop('VI', None)
    if VI is None:
        if m <= n:
            # Covariance estimated from fewer points than dimensions is
            # singular and cannot be inverted.
            raise ValueError("The number of observations (%d) is too "
                             "small; the covariance matrix is "
                             "singular. For observations with %d "
                             "dimensions, at least %d observations "
                             "are required." % (m, n, n + 1))
        obs = np.vstack(X) if isinstance(X, tuple) else X
        cov = np.atleast_2d(np.cov(obs.astype(np.float64, copy=False).T))
        VI = np.linalg.inv(cov).T.copy()
    kwargs["VI"] = _convert_to_double(VI)
    return kwargs
250
+
251
+
252
def _validate_minkowski_kwargs(X, m, n, **kwargs):
    """Validate the minkowski metric's 'w' and 'p' keyword arguments."""
    kwargs = _validate_weight_with_size(X, m, n, **kwargs)
    # Default order is 2 (Euclidean); the default trivially passes the
    # positivity check below.
    p = kwargs.setdefault('p', 2.)
    if p <= 0:
        raise ValueError("p must be greater than 0")
    return kwargs
261
+
262
+
263
def _validate_pdist_input(X, m, n, metric_info, **kwargs):
    """Coerce X to a dtype supported by *metric_info* and check kwargs.

    Returns ``(X, typ, kwargs)`` where ``typ`` is the chosen dtype.
    """
    supported = metric_info.types
    # Keep the caller's dtype when supported, else the metric's preferred one.
    typ = supported[supported.index(X.dtype)] if X.dtype in supported else supported[0]
    X = _convert_to_type(X, out_type=typ)

    validator = metric_info.validator
    if validator:
        kwargs = validator(X, m, n, **kwargs)
    return X, typ, kwargs
276
+
277
+
278
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
    """Ensure a valid variance vector 'V' for the seuclidean metric.

    When 'V' is absent it is computed as the per-component sample variance
    (ddof=1) of the observations in X.
    """
    V = kwargs.pop('V', None)
    if V is None:
        obs = np.vstack(X) if isinstance(X, tuple) else X
        V = np.var(obs.astype(np.float64, copy=False), axis=0, ddof=1)
    else:
        V = np.asarray(V, order='c')
        if V.ndim != 1:
            raise ValueError('Variance vector V must '
                             'be one-dimensional.')
        if V.shape[0] != n:
            raise ValueError('Variance vector V must be of the same '
                             'dimension as the vectors on which the distances '
                             'are computed.')
    kwargs['V'] = _convert_to_double(V)
    return kwargs
295
+
296
+
297
+ def _validate_vector(u, dtype=None):
298
+ # XXX Is order='c' really necessary?
299
+ u = np.asarray(u, dtype=dtype, order='c')
300
+ if u.ndim == 1:
301
+ return u
302
+ raise ValueError("Input vector should be 1-D.")
303
+
304
+
305
def _validate_weights(w, dtype=np.float64):
    """Return *w* as a 1-D non-negative weight vector of the given dtype."""
    w = _validate_vector(w, dtype=dtype)
    if (w < 0).any():
        raise ValueError("Input weights should be all non-negative")
    return w
310
+
311
+
312
def directed_hausdorff(u, v, seed=0):
    """
    Compute the directed Hausdorff distance between two 2-D arrays.

    Distances between pairs of points are Euclidean.

    Parameters
    ----------
    u : (M,N) array_like
        Input array with M points in N dimensions.
    v : (O,N) array_like
        Input array with O points in N dimensions.
    seed : int or None, optional
        Local `numpy.random.RandomState` seed. Default is 0, a random
        shuffling of u and v that guarantees reproducibility.

    Returns
    -------
    d : double
        The directed Hausdorff distance between arrays `u` and `v`.
    index_1 : int
        Index of the point in `u` contributing to the Hausdorff pair.
    index_2 : int
        Index of the point in `v` contributing to the Hausdorff pair.

    Raises
    ------
    ValueError
        If `u` and `v` do not have the same number of columns.

    See Also
    --------
    scipy.spatial.procrustes : Another similarity test for two data sets

    Notes
    -----
    Uses the early-break and random-sampling algorithm of Taha and Hanbury
    [1]_. Worst-case performance is ``O(m * o)`` as with brute force, but
    the authors show the average runtime is closer to ``O(m)``.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
           calculating the exact Hausdorff distance." IEEE Transactions On
           Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
           2015.

    Examples
    --------
    >>> from scipy.spatial.distance import directed_hausdorff
    >>> import numpy as np
    >>> u = np.array([(1.0, 0.0), (0.0, 1.0), (-1.0, 0.0), (0.0, -1.0)])
    >>> v = np.array([(2.0, 0.0), (0.0, 2.0), (-2.0, 0.0), (0.0, -4.0)])
    >>> directed_hausdorff(u, v)[0]
    2.23606797749979
    >>> directed_hausdorff(v, u)[0]
    3.0
    >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
    3.0
    """
    u = np.asarray(u, dtype=np.float64, order='c')
    v = np.asarray(v, dtype=np.float64, order='c')
    if u.shape[1] != v.shape[1]:
        raise ValueError('u and v need to have the same '
                         'number of columns')
    # The heavy lifting lives in the compiled _hausdorff extension.
    return _hausdorff.directed_hausdorff(u, v, seed)
411
+
412
+
413
def minkowski(u, v, p=2, w=None):
    """
    Compute the Minkowski distance between two 1-D arrays.

    The (optionally weighted) Minkowski distance between `u` and `v` is

    .. math::

       \\left(\\sum{w_i |u_i - v_i|^p}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : scalar
        The order of the norm of the difference :math:`{\\|u-v\\|}_p`. Note
        that for :math:`0 < p < 1`, the triangle inequality only holds with
        an additional multiplicative factor, i.e. it is only a quasi-metric.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    minkowski : double
        The Minkowski distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
    1.2599210498948732
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if p <= 0:
        raise ValueError("p must be greater than 0")
    diff = u - v
    if w is not None:
        w = _validate_weights(w)
        # Fold the weights into the difference as w**(1/p), so the p-norm
        # of the scaled difference equals the weighted distance.
        if p == 1:
            scale = w
        elif p == 2:
            # sqrt is faster and more accurate than a general power.
            scale = np.sqrt(w)
        elif p == np.inf:
            # For the max-norm only zero/non-zero membership matters.
            scale = (w != 0)
        else:
            scale = np.power(w, 1 / p)
        diff = scale * diff
    return norm(diff, ord=p)
482
+
483
+
484
def euclidean(u, v, w=None):
    """
    Computes the Euclidean distance between two 1-D arrays.

    The (optionally weighted) Euclidean distance is

    .. math::

       \\left(\\sum{(w_i |u_i - v_i|^2)}\\right)^{1/2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    euclidean : double
        The Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951
    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    # The Euclidean distance is exactly the p=2 Minkowski distance.
    return minkowski(u, v, p=2, w=w)
521
+
522
+
523
def sqeuclidean(u, v, w=None):
    """
    Compute the squared Euclidean distance between two 1-D arrays.

    Defined as

    .. math::

       \\sum_i{w_i |u_i - v_i|^2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    # Keep inexact (float/complex) dtypes as-is; promote everything else
    # to float64 for numerical stability.
    utype = None
    vtype = None
    if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
        utype = np.float64
    if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
        vtype = np.float64

    u = _validate_vector(u, dtype=utype)
    v = _validate_vector(v, dtype=vtype)
    diff = u - v
    if w is None:
        return np.dot(diff, diff)
    w = _validate_weights(w)
    # Apply the weights to only one factor so each is counted once.
    return np.dot(diff, w * diff)
573
+
574
+
575
def correlation(u, v, w=None, centered=True):
    """
    Compute the correlation distance between two 1-D arrays.

    The correlation distance between `u` and `v` is

    .. math::

       1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                 {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}

    where :math:`\\bar{u}` is the mean of the elements of `u`
    and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0
    centered : bool, optional
        If True, `u` and `v` will be centered. Default is True.

    Returns
    -------
    correlation : double
        The correlation distance between 1-D array `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial.distance import correlation
    >>> correlation([1, 0, 1], [1, 1, 0])
    1.5
    >>> correlation([1, 0, 1], [1, 1, 0], w=[0.9, 0.1, 0.1])
    1.1
    >>> correlation([1, 0, 1], [1, 1, 0], centered=False)
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        # Normalize so the weights form a probability vector; means and
        # norms below then become weighted expectations.
        w = w / w.sum()
    if centered:
        if w is None:
            u = u - np.mean(u)
            v = v - np.mean(v)
        else:
            u = u - np.dot(u, w)
            v = v - np.dot(v, w)
    if w is None:
        uw, vw = u, v
    else:
        uw = u * w
        vw = v * w
    uv = np.dot(u, vw)
    uu = np.dot(u, uw)
    vv = np.dot(v, vw)
    dist = 1.0 - uv / math.sqrt(uu * vv)
    # Clip the result to [0, 2] to absorb floating-point rounding.
    return np.clip(dist, 0.0, 2.0)
650
+
651
+
652
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    The Cosine distance between `u` and `v` is

    .. math::

       1 - \\frac{u \\cdot v}{\\|u\\|_2 \\|v\\|_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254
    """
    # Cosine distance is "uncentered" (a.k.a. reflective) correlation, so
    # it is correlation distance with centering disabled.
    return correlation(u, v, w=w, centered=False)
695
+
696
+
697
def hamming(u, v, w=None):
    """
    Compute the Hamming distance between two 1-D arrays.

    The Hamming distance is the (optionally weighted) proportion of
    disagreeing components in `u` and `v`. For boolean vectors it is

    .. math::

       \\frac{c_{01} + c_{10}}{n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`. It also operates on discrete numerical vectors.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    hamming : double
        The Hamming distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0], [0, 1, 0])
    0.66666666666666663
    >>> distance.hamming([1, 0, 0], [1, 1, 0])
    0.33333333333333331
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.shape != v.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    mismatches = u != v
    if w is None:
        # Unweighted case: fraction of differing positions.
        return np.mean(mismatches)
    w = _validate_weights(w)
    if w.shape != u.shape:
        raise ValueError("'w' should have the same length as 'u' and 'v'.")
    # Normalized weights turn the dot product into a weighted proportion.
    return np.dot(mismatches, w / w.sum())
753
+
754
+
755
def jaccard(u, v, w=None):
    """
    Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{c_{TF} + c_{FT}}{c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    jaccard : double
        The Jaccard distance between vectors `u` and `v`.

    Notes
    -----
    When both `u` and `v` lead to a `0/0` division (no overlap between
    items), the returned distance is 0. See [1]_ and [2]_.

    .. versionchanged:: 1.2.0
       Previously a `0/0` division returned NaN; it now returns 0.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Jaccard_index
    .. [2] S. Kosub, "A note on the triangle inequality for the Jaccard
       distance", 2016, :arxiv:`1612.02696`

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 0, 0], [1, 1, 0])
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)

    # "Union" support: positions where at least one vector is non-zero.
    either_nonzero = np.bitwise_or(u != 0, v != 0)
    # Disagreements restricted to the union support.
    mismatch = np.bitwise_and(u != v, either_nonzero)
    if w is not None:
        w = _validate_weights(w)
        either_nonzero = w * either_nonzero
        mismatch = w * mismatch
    numer = np.float64(mismatch.sum())
    denom = np.float64(either_nonzero.sum())
    # Empty union => define the distance as 0 rather than NaN.
    return (numer / denom) if denom != 0 else 0
827
+
828
+
829
def kulczynski1(u, v, *, w=None):
    """
    Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.

    Defined as

    .. math::

       \\frac{c_{11}}{c_{01} + c_{10}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k \\in {0, 1, ..., n-1}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    kulczynski1 : float
        The Kulczynski 1 distance between vectors `u` and `v`.

    Notes
    -----
    This measure has a minimum value of 0 and no upper limit.
    It is un-defined when there are no non-matches.

    .. versionadded:: 1.8.0

    References
    ----------
    .. [1] Kulczynski S. et al. Bulletin International de l'Academie
       Polonaise des Sciences et des Lettres, Classe des Sciences
       Mathematiques et Naturelles, Serie B (Sciences Naturelles). 1927;
       Supplement II: 57-203.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
    0.0
    >>> distance.kulczynski1([True, False, False], [True, True, False])
    1.0
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    # Only the disagreement counts and the both-true count are needed.
    _, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)

    return ntt / (ntf + nft)
895
+
896
+
897
def seuclidean(u, v, V):
    """
    Return the standardized Euclidean distance between two 1-D arrays.

    Defined as

    .. math::

       \\sqrt{\\sum\\limits_i \\frac{1}{V_i} \\left(u_i-v_i \\right)^2}

    where ``V`` is a variance vector: ``V[i]`` is the variance over all
    the i-th components of the points.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    V : (N,) array_like
        `V` is an 1-D array of component variances. It is usually computed
        among a larger collection vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
    4.4721359549995796
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    V = _validate_vector(V, dtype=np.float64)
    if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    # Standardization is a weighted Euclidean distance with weights 1/V.
    return euclidean(u, v, w=1 / V)
943
+
944
+
945
def cityblock(u, v, w=None):
    """
    Compute the City Block (Manhattan) distance.

    Defined as

    .. math::

       \\sum_i {\\left| u_i - v_i \\right|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cityblock : double
        The City Block (Manhattan) distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2
    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
    1
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is not None:
        w = _validate_weights(w)
        abs_diff = w * abs_diff
    return abs_diff.sum()
989
+
990
+
991
def mahalanobis(u, v, VI):
    """
    Compute the Mahalanobis distance between two 1-D arrays.

    Defined as

    .. math::

       \\sqrt{ (u-v) V^{-1} (u-v)^T }

    where ``V`` is the covariance matrix. Note that the argument `VI`
    is the inverse of ``V``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    VI : array_like
        The inverse of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
    1.7320508075688772
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    VI = np.atleast_2d(VI)
    delta = u - v
    # Quadratic form delta . VI . delta (evaluated left-to-right).
    dist_sq = np.dot(np.dot(delta, VI), delta)
    return np.sqrt(dist_sq)
1036
+
1037
+
1038
def chebyshev(u, v, w=None):
    """
    Compute the Chebyshev distance.

    Defined as

    .. math::

       \\max_i {|u_i-v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input vector.
    v : (N,) array_like
        Input vector.
    w : (N,) array_like, optional
        Unused, as 'max' is a weightless operation. Here for API consistency.

    Returns
    -------
    chebyshev : double
        The Chebyshev distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
    1
    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
    1
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        # Zero-weighted components are excluded from the max entirely.
        nonzero = w > 0
        if nonzero.sum() < w.size:
            u = u[nonzero]
            v = v[nonzero]
    return max(abs(u - v))
1081
+
1082
+
1083
def braycurtis(u, v, w=None):
    """
    Compute the Bray-Curtis distance between two 1-D arrays.

    Bray-Curtis distance is defined as

    .. math::

        \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}

    The result lies in [0, 1] if all coordinates are positive, and is
    undefined for zero-length input.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between 1-D arrays `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    numerator = abs(u - v)
    denominator = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numerator = w * numerator
        denominator = w * denominator
    return numerator.sum() / denominator.sum()
1129
+
1130
+
1131
def canberra(u, v, w=None):
    """
    Compute the Canberra distance between two 1-D arrays.

    The Canberra distance is defined as

    .. math::

        d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                              {|u_i|+|v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    canberra : double
        The Canberra distance between vectors `u` and `v`.

    Notes
    -----
    When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
    used in the calculation.

    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    if w is not None:
        w = _validate_weights(w)
    with np.errstate(invalid='ignore'):
        # 0/0 coordinates produce NaN here; nansum below treats them as 0.
        ratio = abs(u - v) / (abs(u) + abs(v))
        if w is not None:
            ratio = w * ratio
        total = np.nansum(ratio)
    return total
1184
+
1185
+
1186
def jensenshannon(p, q, base=None, *, axis=0, keepdims=False):
    """
    Compute the Jensen-Shannon distance (metric) between two probability
    arrays — the square root of the Jensen-Shannon divergence:

    .. math::

       \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

    where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
    and :math:`D` is the Kullback-Leibler divergence.

    This routine will normalize `p` and `q` if they don't sum to 1.0.

    Parameters
    ----------
    p : (N,) array_like
        left probability vector
    q : (N,) array_like
        right probability vector
    base : double, optional
        the base of the logarithm used to compute the output
        if not given, then the routine uses the default base of
        scipy.stats.entropy.
    axis : int, optional
        Axis along which the Jensen-Shannon distances are computed. The
        default is 0.

        .. versionadded:: 1.7.0
    keepdims : bool, optional
        If this is set to `True`, the reduced axes are left in the
        result as dimensions with size one. Default is False.

        .. versionadded:: 1.7.0

    Returns
    -------
    js : double or ndarray
        The Jensen-Shannon distances between `p` and `q` along the `axis`.

    Notes
    -----

    .. versionadded:: 1.2.0

    """
    p = np.asarray(p)
    q = np.asarray(q)
    # Normalize both inputs into probability distributions along `axis`.
    p = p / np.sum(p, axis=axis, keepdims=True)
    q = q / np.sum(q, axis=axis, keepdims=True)
    mid = (p + q) / 2.0
    divergence = (np.sum(rel_entr(p, mid), axis=axis, keepdims=keepdims)
                  + np.sum(rel_entr(q, mid), axis=axis, keepdims=keepdims))
    if base is not None:
        # rel_entr works in nats; rescale to the requested log base.
        divergence /= np.log(base)
    return np.sqrt(divergence / 2.0)
1272
+
1273
+
1274
def yule(u, v, w=None):
    """
    Compute the Yule dissimilarity between two boolean 1-D arrays.

    The Yule dissimilarity is defined as

    .. math::

         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    half_R = ntf * nft
    # No mixed (T,F)/(F,T) pairs at all means zero dissimilarity.
    if half_R == 0:
        return 0.0
    return float(2.0 * half_R / (ntt * nff + half_R))
1322
+
1323
+
1324
def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input 1-D array.
    v : (N,) array_like, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Notes
    -----
    This function computes the Dice *dissimilarity* index; the similarity
    index is 1 - dissimilarity.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        # Fast path: boolean AND counts the (T,T) matches directly.
        ntt = (u & v).sum()
    else:
        common = np.result_type(int, u.dtype, v.dtype)
        u = u.astype(common)
        v = v.astype(common)
        ntt = (u * v).sum() if w is None else (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
1387
+
1388
+
1389
def rogerstanimoto(u, v, w=None):
    """
    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v`, is defined as

    .. math::
       \\frac{R}
            {c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    # R = doubled count of disagreeing coordinates.
    R = 2.0 * (ntf + nft)
    return float(R) / float(ntt + nff + R)
1437
+
1438
+
1439
def russellrao(u, v, w=None):
    """
    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.

    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
    `v`, is defined as

    .. math::

      \\frac{n - c_{TT}}
           {n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        # Weighted case: count of (T,T) matches and the total are both
        # weight sums rather than element counts.
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        n = w.sum()
    elif u.dtype == v.dtype == bool:
        ntt = (u & v).sum()
        n = float(len(u))
    else:
        ntt = (u * v).sum()
        n = float(len(u))
    return float(n - ntt) / n
1494
+
1495
+
1496
def sokalmichener(u, v, w=None):
    """
    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    mismatch = 2.0 * (ntf + nft)
    return float(mismatch) / float(ntt + nff + mismatch)
1545
+
1546
+
1547
def sokalsneath(u, v, w=None):
    """
    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If both vectors contain no truthy entries (the measure is
        undefined there).

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
    elif u.dtype == v.dtype == bool:
        ntt = (u & v).sum()
    else:
        ntt = (u * v).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    denom = np.array(ntt + 2.0 * (ntf + nft))
    if not denom.any():
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(2.0 * (ntf + nft)) / denom
1605
+
1606
+
1607
# Converters used by the metric machinery to coerce inputs to the dtype a
# given C implementation expects.
_convert_to_double = partial(_convert_to_type, out_type=np.float64)
_convert_to_bool = partial(_convert_to_type, out_type=bool)

# adding python-only wrappers to _distance_wrap module
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
1613
+
1614
+
1615
@dataclasses.dataclass(frozen=True)
class CDistMetricWrapper:
    """Callable that dispatches `cdist` for `metric_name` to the optimized
    C wrapper in `_distance_wrap`, falling back to the generic Python
    implementation when a weight vector is supplied."""
    metric_name: str

    def __call__(self, XA, XB, *, out=None, **kwargs):
        XA = np.ascontiguousarray(XA)
        XB = np.ascontiguousarray(XB)
        mA, n = XA.shape
        mB, _ = XB.shape
        metric_info = _METRICS[self.metric_name]
        XA, XB, typ, kwargs = _validate_cdist_input(
            XA, XB, mA, mB, n, metric_info, **kwargs)

        w = kwargs.pop('w', None)
        if w is not None:
            # The C wrappers have no weighted variant; use the generic loop.
            return _cdist_callable(
                XA, XB, metric=metric_info.dist_func, out=out, w=w, **kwargs)

        dm = _prepare_out_argument(out, np.float64, (mA, mB))
        # Resolve the C implementation by name, e.g. cdist_cosine_double_wrap.
        wrap_fn = getattr(_distance_wrap,
                          f'cdist_{self.metric_name}_{typ}_wrap')
        wrap_fn(XA, XB, dm, **kwargs)
        return dm
1640
+
1641
+
1642
@dataclasses.dataclass(frozen=True)
class PDistMetricWrapper:
    """Callable that dispatches `pdist` for `metric_name` to the optimized
    C wrapper in `_distance_wrap`, falling back to the generic Python
    implementation when a weight vector is supplied."""
    metric_name: str

    def __call__(self, X, *, out=None, **kwargs):
        X = np.ascontiguousarray(X)
        m, n = X.shape
        metric_info = _METRICS[self.metric_name]
        X, typ, kwargs = _validate_pdist_input(
            X, m, n, metric_info, **kwargs)
        # Length of the condensed distance vector: m choose 2.
        out_size = (m * (m - 1)) // 2
        w = kwargs.pop('w', None)
        if w is not None:
            # The C wrappers have no weighted variant; use the generic loop.
            return _pdist_callable(
                X, metric=metric_info.dist_func, out=out, w=w, **kwargs)

        dm = _prepare_out_argument(out, np.float64, (out_size,))
        # Resolve the C implementation by name, e.g. pdist_cosine_double_wrap.
        wrap_fn = getattr(_distance_wrap,
                          f'pdist_{self.metric_name}_{typ}_wrap')
        wrap_fn(X, dm, **kwargs)
        return dm
1665
+
1666
+
1667
@dataclasses.dataclass(frozen=True)
class MetricInfo:
    """Immutable registry record describing one distance metric: its names,
    the reference Python implementation, the optimized cdist/pdist
    implementations, and input-validation metadata."""
    # Name of python distance function
    canonical_name: str
    # All aliases, including canonical_name
    aka: set[str]
    # unvectorized distance function
    dist_func: Callable
    # Optimized cdist function
    cdist_func: Callable
    # Optimized pdist function
    pdist_func: Callable
    # function that checks kwargs and computes default values:
    # f(X, m, n, **kwargs)
    validator: Optional[Callable] = None
    # list of supported types:
    # X (pdist) and XA (cdist) are used to choose the type. if there is no
    # match the first type is used. Default double
    types: list[str] = dataclasses.field(default_factory=lambda: ['double'])
    # true if out array must be C-contiguous
    requires_contiguous_out: bool = True
1688
+
1689
+
1690
# Registry of implemented metrics:
# Each entry pairs a metric's reference Python function with its optimized
# cdist/pdist implementations (either _distance_pybind kernels or the
# name-dispatching CDist/PDist wrapper classes above).
_METRIC_INFOS = [
    MetricInfo(
        canonical_name='braycurtis',
        aka={'braycurtis'},
        dist_func=braycurtis,
        cdist_func=_distance_pybind.cdist_braycurtis,
        pdist_func=_distance_pybind.pdist_braycurtis,
    ),
    MetricInfo(
        canonical_name='canberra',
        aka={'canberra'},
        dist_func=canberra,
        cdist_func=_distance_pybind.cdist_canberra,
        pdist_func=_distance_pybind.pdist_canberra,
    ),
    MetricInfo(
        canonical_name='chebyshev',
        aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'},
        dist_func=chebyshev,
        cdist_func=_distance_pybind.cdist_chebyshev,
        pdist_func=_distance_pybind.pdist_chebyshev,
    ),
    MetricInfo(
        canonical_name='cityblock',
        aka={'cityblock', 'cblock', 'cb', 'c'},
        dist_func=cityblock,
        cdist_func=_distance_pybind.cdist_cityblock,
        pdist_func=_distance_pybind.pdist_cityblock,
    ),
    MetricInfo(
        canonical_name='correlation',
        aka={'correlation', 'co'},
        dist_func=correlation,
        cdist_func=CDistMetricWrapper('correlation'),
        pdist_func=PDistMetricWrapper('correlation'),
    ),
    MetricInfo(
        canonical_name='cosine',
        aka={'cosine', 'cos'},
        dist_func=cosine,
        cdist_func=CDistMetricWrapper('cosine'),
        pdist_func=PDistMetricWrapper('cosine'),
    ),
    MetricInfo(
        canonical_name='dice',
        aka={'dice'},
        types=['bool'],
        dist_func=dice,
        cdist_func=_distance_pybind.cdist_dice,
        pdist_func=_distance_pybind.pdist_dice,
    ),
    MetricInfo(
        canonical_name='euclidean',
        aka={'euclidean', 'euclid', 'eu', 'e'},
        dist_func=euclidean,
        cdist_func=_distance_pybind.cdist_euclidean,
        pdist_func=_distance_pybind.pdist_euclidean,
    ),
    MetricInfo(
        canonical_name='hamming',
        aka={'matching', 'hamming', 'hamm', 'ha', 'h'},
        types=['double', 'bool'],
        validator=_validate_hamming_kwargs,
        dist_func=hamming,
        cdist_func=_distance_pybind.cdist_hamming,
        pdist_func=_distance_pybind.pdist_hamming,
    ),
    MetricInfo(
        canonical_name='jaccard',
        aka={'jaccard', 'jacc', 'ja', 'j'},
        types=['double', 'bool'],
        dist_func=jaccard,
        cdist_func=_distance_pybind.cdist_jaccard,
        pdist_func=_distance_pybind.pdist_jaccard,
    ),
    MetricInfo(
        canonical_name='jensenshannon',
        aka={'jensenshannon', 'js'},
        dist_func=jensenshannon,
        cdist_func=CDistMetricWrapper('jensenshannon'),
        pdist_func=PDistMetricWrapper('jensenshannon'),
    ),
    MetricInfo(
        canonical_name='kulczynski1',
        aka={'kulczynski1'},
        types=['bool'],
        dist_func=kulczynski1,
        cdist_func=_distance_pybind.cdist_kulczynski1,
        pdist_func=_distance_pybind.pdist_kulczynski1,
    ),
    MetricInfo(
        canonical_name='mahalanobis',
        aka={'mahalanobis', 'mahal', 'mah'},
        validator=_validate_mahalanobis_kwargs,
        dist_func=mahalanobis,
        cdist_func=CDistMetricWrapper('mahalanobis'),
        pdist_func=PDistMetricWrapper('mahalanobis'),
    ),
    MetricInfo(
        canonical_name='minkowski',
        aka={'minkowski', 'mi', 'm', 'pnorm'},
        validator=_validate_minkowski_kwargs,
        dist_func=minkowski,
        cdist_func=_distance_pybind.cdist_minkowski,
        pdist_func=_distance_pybind.pdist_minkowski,
    ),
    MetricInfo(
        canonical_name='rogerstanimoto',
        aka={'rogerstanimoto'},
        types=['bool'],
        dist_func=rogerstanimoto,
        cdist_func=_distance_pybind.cdist_rogerstanimoto,
        pdist_func=_distance_pybind.pdist_rogerstanimoto,
    ),
    MetricInfo(
        canonical_name='russellrao',
        aka={'russellrao'},
        types=['bool'],
        dist_func=russellrao,
        cdist_func=_distance_pybind.cdist_russellrao,
        pdist_func=_distance_pybind.pdist_russellrao,
    ),
    MetricInfo(
        canonical_name='seuclidean',
        aka={'seuclidean', 'se', 's'},
        validator=_validate_seuclidean_kwargs,
        dist_func=seuclidean,
        cdist_func=CDistMetricWrapper('seuclidean'),
        pdist_func=PDistMetricWrapper('seuclidean'),
    ),
    MetricInfo(
        canonical_name='sokalmichener',
        aka={'sokalmichener'},
        types=['bool'],
        dist_func=sokalmichener,
        cdist_func=_distance_pybind.cdist_sokalmichener,
        pdist_func=_distance_pybind.pdist_sokalmichener,
    ),
    MetricInfo(
        canonical_name='sokalsneath',
        aka={'sokalsneath'},
        types=['bool'],
        dist_func=sokalsneath,
        cdist_func=_distance_pybind.cdist_sokalsneath,
        pdist_func=_distance_pybind.pdist_sokalsneath,
    ),
    MetricInfo(
        canonical_name='sqeuclidean',
        aka={'sqeuclidean', 'sqe', 'sqeuclid'},
        dist_func=sqeuclidean,
        cdist_func=_distance_pybind.cdist_sqeuclidean,
        pdist_func=_distance_pybind.pdist_sqeuclidean,
    ),
    MetricInfo(
        canonical_name='yule',
        aka={'yule'},
        types=['bool'],
        dist_func=yule,
        cdist_func=_distance_pybind.cdist_yule,
        pdist_func=_distance_pybind.pdist_yule,
    ),
]

# Lookup tables derived from the registry:
# canonical name -> MetricInfo
_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}
# any alias (including the canonical name) -> MetricInfo
_METRIC_ALIAS = {alias: info
                 for info in _METRIC_INFOS
                 for alias in info.aka}

_METRICS_NAMES = list(_METRICS.keys())

# 'test_<name>' aliases route through the Python reference implementations.
_TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS}
1862
+
1863
+
1864
+ def pdist(X, metric='euclidean', *, out=None, **kwargs):
1865
+ """
1866
+ Pairwise distances between observations in n-dimensional space.
1867
+
1868
+ See Notes for common calling conventions.
1869
+
1870
+ Parameters
1871
+ ----------
1872
+ X : array_like
1873
+ An m by n array of m original observations in an
1874
+ n-dimensional space.
1875
+ metric : str or function, optional
1876
+ The distance metric to use. The distance function can
1877
+ be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
1878
+ 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
1879
+ 'jaccard', 'jensenshannon', 'kulczynski1',
1880
+ 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
1881
+ 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
1882
+ 'sqeuclidean', 'yule'.
1883
+ out : ndarray, optional
1884
+ The output array.
1885
+ If not None, condensed distance matrix Y is stored in this array.
1886
+ **kwargs : dict, optional
1887
+ Extra arguments to `metric`: refer to each metric documentation for a
1888
+ list of all possible arguments.
1889
+
1890
+ Some possible arguments:
1891
+
1892
+ p : scalar
1893
+ The p-norm to apply for Minkowski, weighted and unweighted.
1894
+ Default: 2.
1895
+
1896
+ w : ndarray
1897
+ The weight vector for metrics that support weights (e.g., Minkowski).
1898
+
1899
+ V : ndarray
1900
+ The variance vector for standardized Euclidean.
1901
+ Default: var(X, axis=0, ddof=1)
1902
+
1903
+ VI : ndarray
1904
+ The inverse of the covariance matrix for Mahalanobis.
1905
+ Default: inv(cov(X.T)).T
1906
+
1907
+ Returns
1908
+ -------
1909
+ Y : ndarray
1910
+ Returns a condensed distance matrix Y. For each :math:`i` and :math:`j`
1911
+ (where :math:`i<j<m`),where m is the number of original observations.
1912
+ The metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``m
1913
+ * i + j - ((i + 2) * (i + 1)) // 2``.
1914
+
1915
+ See Also
1916
+ --------
1917
+ squareform : converts between condensed distance matrices and
1918
+ square distance matrices.
1919
+
1920
+ Notes
1921
+ -----
1922
+ See ``squareform`` for information on how to calculate the index of
1923
+ this entry or to convert the condensed distance matrix to a
1924
+ redundant square matrix.
1925
+
1926
+ The following are common calling conventions.
1927
+
1928
+ 1. ``Y = pdist(X, 'euclidean')``
1929
+
1930
+ Computes the distance between m points using Euclidean distance
1931
+ (2-norm) as the distance metric between the points. The points
1932
+ are arranged as m n-dimensional row vectors in the matrix X.
1933
+
1934
+ 2. ``Y = pdist(X, 'minkowski', p=2.)``
1935
+
1936
+ Computes the distances using the Minkowski distance
1937
+ :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
1938
+ that this is only a quasi-metric if :math:`0 < p < 1`).
1939
+
1940
+ 3. ``Y = pdist(X, 'cityblock')``
1941
+
1942
+ Computes the city block or Manhattan distance between the
1943
+ points.
1944
+
1945
+ 4. ``Y = pdist(X, 'seuclidean', V=None)``
1946
+
1947
+ Computes the standardized Euclidean distance. The standardized
1948
+ Euclidean distance between two n-vectors ``u`` and ``v`` is
1949
+
1950
+ .. math::
1951
+
1952
+ \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
1953
+
1954
+
1955
+ V is the variance vector; V[i] is the variance computed over all
1956
+ the i'th components of the points. If not passed, it is
1957
+ automatically computed.
1958
+
1959
+ 5. ``Y = pdist(X, 'sqeuclidean')``
1960
+
1961
+ Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
1962
+ the vectors.
1963
+
1964
+ 6. ``Y = pdist(X, 'cosine')``
1965
+
1966
+ Computes the cosine distance between vectors u and v,
1967
+
1968
+ .. math::
1969
+
1970
+ 1 - \\frac{u \\cdot v}
1971
+ {{\\|u\\|}_2 {\\|v\\|}_2}
1972
+
1973
+ where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
1974
+ :math:`u \\cdot v` is the dot product of ``u`` and ``v``.
1975
+
1976
+ 7. ``Y = pdist(X, 'correlation')``
1977
+
1978
+ Computes the correlation distance between vectors u and v. This is
1979
+
1980
+ .. math::
1981
+
1982
+ 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
1983
+ {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}
1984
+
1985
+ where :math:`\\bar{v}` is the mean of the elements of vector v,
1986
+ and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
1987
+
1988
+ 8. ``Y = pdist(X, 'hamming')``
1989
+
1990
+ Computes the normalized Hamming distance, or the proportion of
1991
+ those vector elements between two n-vectors ``u`` and ``v``
1992
+ which disagree. To save memory, the matrix ``X`` can be of type
1993
+ boolean.
1994
+
1995
+ 9. ``Y = pdist(X, 'jaccard')``
1996
+
1997
+ Computes the Jaccard distance between the points. Given two
1998
+ vectors, ``u`` and ``v``, the Jaccard distance is the
1999
+ proportion of those elements ``u[i]`` and ``v[i]`` that
2000
+ disagree.
2001
+
2002
+ 10. ``Y = pdist(X, 'jensenshannon')``
2003
+
2004
+ Computes the Jensen-Shannon distance between two probability arrays.
2005
+ Given two probability vectors, :math:`p` and :math:`q`, the
2006
+ Jensen-Shannon distance is
2007
+
2008
+ .. math::
2009
+
2010
+ \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
2011
+
2012
+ where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
2013
+ and :math:`D` is the Kullback-Leibler divergence.
2014
+
2015
+ 11. ``Y = pdist(X, 'chebyshev')``
2016
+
2017
+ Computes the Chebyshev distance between the points. The
2018
+ Chebyshev distance between two n-vectors ``u`` and ``v`` is the
2019
+ maximum norm-1 distance between their respective elements. More
2020
+ precisely, the distance is given by
2021
+
2022
+ .. math::
2023
+
2024
+ d(u,v) = \\max_i {|u_i-v_i|}
2025
+
2026
+ 12. ``Y = pdist(X, 'canberra')``
2027
+
2028
+ Computes the Canberra distance between the points. The
2029
+ Canberra distance between two points ``u`` and ``v`` is
2030
+
2031
+ .. math::
2032
+
2033
+ d(u,v) = \\sum_i \\frac{|u_i-v_i|}
2034
+ {|u_i|+|v_i|}
2035
+
2036
+
2037
+ 13. ``Y = pdist(X, 'braycurtis')``
2038
+
2039
+ Computes the Bray-Curtis distance between the points. The
2040
+ Bray-Curtis distance between two points ``u`` and ``v`` is
2041
+
2042
+
2043
+ .. math::
2044
+
2045
+ d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
2046
+ {\\sum_i {|u_i+v_i|}}
2047
+
2048
+ 14. ``Y = pdist(X, 'mahalanobis', VI=None)``
2049
+
2050
+ Computes the Mahalanobis distance between the points. The
2051
+ Mahalanobis distance between two points ``u`` and ``v`` is
2052
+ :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
2053
+ variable) is the inverse covariance. If ``VI`` is not None,
2054
+ ``VI`` will be used as the inverse covariance matrix.
2055
+
2056
+ 15. ``Y = pdist(X, 'yule')``
2057
+
2058
+ Computes the Yule distance between each pair of boolean
2059
+ vectors. (see yule function documentation)
2060
+
2061
+ 16. ``Y = pdist(X, 'matching')``
2062
+
2063
+ Synonym for 'hamming'.
2064
+
2065
+ 17. ``Y = pdist(X, 'dice')``
2066
+
2067
+ Computes the Dice distance between each pair of boolean
2068
+ vectors. (see dice function documentation)
2069
+
2070
+ 18. ``Y = pdist(X, 'kulczynski1')``
2071
+
2072
+ Computes the kulczynski1 distance between each pair of
2073
+ boolean vectors. (see kulczynski1 function documentation)
2074
+
2075
+ 19. ``Y = pdist(X, 'rogerstanimoto')``
2076
+
2077
+ Computes the Rogers-Tanimoto distance between each pair of
2078
+ boolean vectors. (see rogerstanimoto function documentation)
2079
+
2080
+ 20. ``Y = pdist(X, 'russellrao')``
2081
+
2082
+ Computes the Russell-Rao distance between each pair of
2083
+ boolean vectors. (see russellrao function documentation)
2084
+
2085
+ 21. ``Y = pdist(X, 'sokalmichener')``
2086
+
2087
+ Computes the Sokal-Michener distance between each pair of
2088
+ boolean vectors. (see sokalmichener function documentation)
2089
+
2090
+ 22. ``Y = pdist(X, 'sokalsneath')``
2091
+
2092
+ Computes the Sokal-Sneath distance between each pair of
2093
+ boolean vectors. (see sokalsneath function documentation)
2094
+
2095
+ 23. ``Y = pdist(X, 'kulczynski1')``
2096
+
2097
+ Computes the Kulczynski 1 distance between each pair of
2098
+ boolean vectors. (see kulczynski1 function documentation)
2099
+
2100
+ 24. ``Y = pdist(X, f)``
2101
+
2102
+ Computes the distance between all pairs of vectors in X
2103
+ using the user supplied 2-arity function f. For example,
2104
+ Euclidean distance between the vectors could be computed
2105
+ as follows::
2106
+
2107
+ dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
2108
+
2109
+ Note that you should avoid passing a reference to one of
2110
+ the distance functions defined in this library. For example,::
2111
+
2112
+ dm = pdist(X, sokalsneath)
2113
+
2114
+ would calculate the pair-wise distances between the vectors in
2115
+ X using the Python function sokalsneath. This would result in
2116
+ sokalsneath being called :math:`{n \\choose 2}` times, which
2117
+ is inefficient. Instead, the optimized C version is more
2118
+ efficient, and we call it using the following syntax.::
2119
+
2120
+ dm = pdist(X, 'sokalsneath')
2121
+
2122
+ Examples
2123
+ --------
2124
+ >>> import numpy as np
2125
+ >>> from scipy.spatial.distance import pdist
2126
+
2127
+ ``x`` is an array of five points in three-dimensional space.
2128
+
2129
+ >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])
2130
+
2131
+ ``pdist(x)`` with no additional arguments computes the 10 pairwise
2132
+ Euclidean distances:
2133
+
2134
+ >>> pdist(x)
2135
+ array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
2136
+ 6.40312424, 1. , 5.38516481, 4.58257569, 5.47722558])
2137
+
2138
+ The following computes the pairwise Minkowski distances with ``p = 3.5``:
2139
+
2140
+ >>> pdist(x, metric='minkowski', p=3.5)
2141
+ array([2.04898923, 5.1154929 , 7.02700737, 2.43802731, 4.19042714,
2142
+ 6.03956994, 1. , 4.45128103, 4.10636143, 5.0619695 ])
2143
+
2144
+ The pairwise city block or Manhattan distances:
2145
+
2146
+ >>> pdist(x, metric='cityblock')
2147
+ array([ 3., 11., 10., 4., 8., 9., 1., 9., 7., 8.])
2148
+
2149
+ """
2150
+ # You can also call this as:
2151
+ # Y = pdist(X, 'test_abc')
2152
+ # where 'abc' is the metric being tested. This computes the distance
2153
+ # between all pairs of vectors in X using the distance metric 'abc' but
2154
+ # with a more succinct, verifiable, but less efficient implementation.
2155
+
2156
+ X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
2157
+ check_finite=False)
2158
+
2159
+ s = X.shape
2160
+ if len(s) != 2:
2161
+ raise ValueError('A 2-dimensional array must be passed.')
2162
+
2163
+ m, n = s
2164
+
2165
+ if callable(metric):
2166
+ mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
2167
+ metric_info = _METRIC_ALIAS.get(mstr, None)
2168
+
2169
+ if metric_info is not None:
2170
+ X, typ, kwargs = _validate_pdist_input(
2171
+ X, m, n, metric_info, **kwargs)
2172
+
2173
+ return _pdist_callable(X, metric=metric, out=out, **kwargs)
2174
+ elif isinstance(metric, str):
2175
+ mstr = metric.lower()
2176
+ metric_info = _METRIC_ALIAS.get(mstr, None)
2177
+
2178
+ if metric_info is not None:
2179
+ pdist_fn = metric_info.pdist_func
2180
+ return pdist_fn(X, out=out, **kwargs)
2181
+ elif mstr.startswith("test_"):
2182
+ metric_info = _TEST_METRICS.get(mstr, None)
2183
+ if metric_info is None:
2184
+ raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
2185
+ X, typ, kwargs = _validate_pdist_input(
2186
+ X, m, n, metric_info, **kwargs)
2187
+ return _pdist_callable(
2188
+ X, metric=metric_info.dist_func, out=out, **kwargs)
2189
+ else:
2190
+ raise ValueError('Unknown Distance Metric: %s' % mstr)
2191
+ else:
2192
+ raise TypeError('2nd argument metric must be a string identifier '
2193
+ 'or a function.')
2194
+
2195
+
2196
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : array_like
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. ``v = squareform(X)``

       Given a square n-by-n symmetric distance matrix ``X``,
       ``v = squareform(X)`` returns a ``n * (n-1) / 2``
       (i.e. binomial coefficient n choose 2) sized vector `v`
       where :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       is the distance between distinct points ``i`` and ``j``.
       If ``X`` is non-square or asymmetric, an error is raised.

    2. ``X = squareform(v)``

       Given a ``n * (n-1) / 2`` sized vector ``v``
       for some integer ``n >= 1`` encoding distances as described,
       ``X = squareform(v)`` returns a n-by-n distance matrix ``X``.
       The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
       and all diagonal elements are zero.

    In SciPy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist, squareform

    ``x`` is an array of five points in three-dimensional space.

    >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

    ``pdist(x)`` computes the Euclidean distances between each pair of
    points in ``x``.  The distances are returned in a one-dimensional
    array with length ``5*(5 - 1)/2 = 10``.

    >>> distvec = pdist(x)
    >>> distvec
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])

    ``squareform(distvec)`` returns the 5x5 distance matrix.

    >>> m = squareform(distvec)
    >>> m
    array([[0.        , 2.23606798, 6.40312424, 7.34846923, 2.82842712],
           [2.23606798, 0.        , 4.89897949, 6.40312424, 1.        ],
           [6.40312424, 4.89897949, 0.        , 5.38516481, 4.58257569],
           [7.34846923, 6.40312424, 5.38516481, 0.        , 5.47722558],
           [2.82842712, 1.        , 4.58257569, 5.47722558, 0.        ]])

    When given a square distance matrix ``m``, ``squareform(m)`` returns
    the one-dimensional condensed distance vector associated with the
    matrix.  In this case, we recover ``distvec``.

    >>> squareform(m)
    array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949,
           6.40312424, 1.        , 5.38516481, 4.58257569, 5.47722558])
    """
    X = np.ascontiguousarray(X)

    s = X.shape

    # Honour explicit direction requests by rejecting mismatched ranks early.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")

    # X = squareform(v): condensed vector -> square matrix
    if len(s) == 1:
        if s[0] == 0:
            # An empty condensed vector corresponds to a single observation.
            return np.zeros((1, 1), dtype=X.dtype)

        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))

        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')

        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)

        # Return the distance matrix.
        return M
    elif len(s) == 2:
        # X = squareform(M): square matrix -> condensed vector
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')

        # One-side of the dimensions is set here.
        d = s[0]

        if d <= 1:
            # 0x0 or 1x1 matrices have no off-diagonal entries.
            return np.array([], dtype=X.dtype)

        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
2348
+
2349
+
2350
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : array_like
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.spatial.distance import is_valid_dm

    This matrix is a valid distance matrix.

    >>> d = np.array([[0.0, 1.1, 1.2, 1.3],
    ...               [1.1, 0.0, 1.0, 1.4],
    ...               [1.2, 1.0, 0.0, 1.5],
    ...               [1.3, 1.4, 1.5, 0.0]])
    >>> is_valid_dm(d)
    True

    In the following examples, the input is not a valid distance matrix.

    Not square:

    >>> is_valid_dm([[0, 2, 2], [2, 0, 2]])
    False

    Nonzero diagonal element:

    >>> is_valid_dm([[0, 1, 1], [1, 2, 3], [1, 3, 0]])
    False

    Not symmetric:

    >>> is_valid_dm([[0, 1, 3], [2, 0, 1], [3, 1, 0]])
    False

    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks: strict symmetry and an exactly-zero diagonal.
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerant checks: symmetry and diagonal only to within `tol`.
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' must be '
                                     f'symmetric within tolerance {tol:5.5f}.')
                else:
                    raise ValueError('Distance matrix must be symmetric within '
                                     'tolerance %5.5f.' % tol)
            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(f'Distance matrix \'{name}\' diagonal must be '
                                     f'close to zero within tolerance {tol:5.5f}.')
                else:
                    # BUG FIX: this branch previously did
                    # ``('...{}...{:5.5f}.').format(*tol)``, which raised
                    # TypeError (star-unpacking a scalar float) and also left
                    # a name placeholder in the nameless message.
                    raise ValueError('Distance matrix diagonal must be close '
                                     'to zero within tolerance %5.5f.' % tol)
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        valid = False
    return valid
2464
+
2465
+
2466
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Return True if the input array is a valid condensed distance matrix.

    A condensed distance matrix is a one-dimensional numpy array whose
    length is a binomial coefficient :math:`{n \\choose 2}` for some
    positive integer n.

    Parameters
    ----------
    y : array_like
        The condensed distance matrix.
    warning : bool, optional
        If True, emit a warning when `y` is not a valid condensed
        distance matrix; the message explains why, referring to the
        offending variable by `name`.
    throw : bool, optional
        If True, raise an exception when `y` is not a valid condensed
        distance matrix.
    name : bool, optional
        Name used for the offending variable in the warning or
        exception message.

    Returns
    -------
    bool
        True if the input array is a valid condensed distance matrix,
        False otherwise.

    Examples
    --------
    >>> from scipy.spatial.distance import is_valid_y

    This vector is a valid condensed distance matrix.  The length is 6,
    which corresponds to ``n = 4``, since ``4*(4 - 1)/2`` is 6.

    >>> v = [1.0, 1.2, 1.0, 0.5, 1.3, 0.9]
    >>> is_valid_y(v)
    True

    An input vector with length, say, 7, is not a valid condensed distance
    matrix.

    >>> is_valid_y([1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7])
    False

    """
    y = np.asarray(y, order='c')
    try:
        # Must be one-dimensional.
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).')
                                 % name)
            raise ValueError('Condensed distance matrix must have shape=1 '
                             '(i.e. be one-dimensional).')
        # The length must equal k*(k-1)/2 for some integer k; recover the
        # candidate k from the length and verify it round-trips exactly.
        num_entries = y.shape[0]
        k = int(np.ceil(np.sqrt(num_entries * 2)))
        if (k * (k - 1) / 2) != num_entries:
            if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, i.e.'
                                  'there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            raise ValueError('Length n of condensed distance matrix must '
                             'be a binomial coefficient, i.e. there must '
                             'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        return False
    return True
2544
+
2545
+
2546
def num_obs_dm(d):
    """
    Return the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : array_like
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.

    Examples
    --------
    Find the number of original observations corresponding
    to a square redundant distance matrix d.

    >>> from scipy.spatial.distance import num_obs_dm
    >>> d = [[0, 100, 200], [100, 0, 150], [200, 150, 0]]
    >>> num_obs_dm(d)
    3
    """
    d = np.asarray(d, order='c')
    # tol=inf: only the 2-D square shape is enforced here; symmetry and
    # diagonal values are irrelevant to the observation count.
    is_valid_dm(d, tol=np.inf, throw=True, name='d')
    return d.shape[0]
2574
+
2575
+
2576
def num_obs_y(Y):
    """
    Return the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : array_like
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.

    Examples
    --------
    Find the number of original observations corresponding to a
    condensed distance matrix Y.

    >>> from scipy.spatial.distance import num_obs_y
    >>> Y = [1, 2, 3.5, 7, 10, 4]
    >>> num_obs_y(Y)
    4
    """
    Y = np.asarray(Y, order='c')
    is_valid_y(Y, throw=True, name='Y')
    num_entries = Y.shape[0]
    if num_entries == 0:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert k = n*(n-1)/2: the candidate n is the ceiling of sqrt(2k),
    # and it must reproduce the entry count exactly.
    n_obs = int(np.ceil(np.sqrt(num_entries * 2)))
    if (n_obs * (n_obs - 1) / 2) != num_entries:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return n_obs
2612
+
2613
+
2614
+ def _prepare_out_argument(out, dtype, expected_shape):
2615
+ if out is None:
2616
+ return np.empty(expected_shape, dtype=dtype)
2617
+
2618
+ if out.shape != expected_shape:
2619
+ raise ValueError("Output array has incorrect shape.")
2620
+ if not out.flags.c_contiguous:
2621
+ raise ValueError("Output array must be C-contiguous.")
2622
+ if out.dtype != np.float64:
2623
+ raise ValueError("Output array must be double type.")
2624
+ return out
2625
+
2626
+
2627
def _pdist_callable(X, *, out, metric, **kwargs):
    # Evaluate `metric` on every unordered pair of rows of X, filling a
    # condensed (1-D) distance vector in row-major pair order.
    n_rows = X.shape[0]
    n_pairs = (n_rows * (n_rows - 1)) // 2
    dm = _prepare_out_argument(out, np.float64, (n_pairs,))
    pos = 0
    for i in range(n_rows - 1):
        left = X[i]
        for j in range(i + 1, n_rows):
            dm[pos] = metric(left, X[j], **kwargs)
            pos += 1
    return dm
2637
+
2638
+
2639
def _cdist_callable(XA, XB, *, out, metric, **kwargs):
    # Evaluate `metric` on every (row of XA, row of XB) pair, filling a
    # dense (mA, mB) distance matrix.
    num_a = XA.shape[0]
    num_b = XB.shape[0]
    dm = _prepare_out_argument(out, np.float64, (num_a, num_b))
    for i, row_a in enumerate(XA):
        for j, row_b in enumerate(XB):
            dm[i, j] = metric(row_a, row_b, **kwargs)
    return dm
2647
+
2648
+
2649
def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):
    """
    Compute distance between each pair of the two collections of inputs.

    Parameters
    ----------
    XA : array_like
        An :math:`m_A` by :math:`n` array of :math:`m_A`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    XB : array_like
        An :math:`m_B` by :math:`n` array of :math:`m_B`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    metric : str or callable, optional
        The distance metric to use.  If a string, the distance function
        can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'jensenshannon', 'kulczynski1', 'mahalanobis',
        'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
        'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
        'yule'.  If a callable, it is invoked as ``metric(u, v)`` on each
        pair of rows.
    **kwargs : dict, optional
        Extra arguments to `metric`; refer to each metric's
        documentation for a list of all possible arguments.  Common ones:

        p : scalar
            The p-norm to apply for Minkowski, weighted and unweighted.
            Default: 2.
        w : array_like
            The weight vector for metrics that support weights
            (e.g., Minkowski).
        V : array_like
            The variance vector for standardized Euclidean.
            Default: var(vstack([XA, XB]), axis=0, ddof=1)
        VI : array_like
            The inverse of the covariance matrix for Mahalanobis.
            Default: inv(cov(vstack([XA, XB].T))).T
        out : ndarray
            The output array.  If not None, the distance matrix Y is
            stored in this array.

    Returns
    -------
    Y : ndarray
        A :math:`m_A` by :math:`m_B` distance matrix.  For each
        :math:`i` and :math:`j`, the metric ``dist(u=XA[i], v=XB[j])``
        is computed and stored in the :math:`ij` th entry.

    Raises
    ------
    ValueError
        If `XA` or `XB` is not two-dimensional, or if they do not have
        the same number of columns.
    TypeError
        If `metric` is neither a string identifier nor a callable.

    Notes
    -----
    Prefer a string metric name over passing one of this module's
    distance functions as a callable: the string form dispatches to the
    optimized C implementation (e.g. ``cdist(XA, XB, 'sokalsneath')``),
    whereas a callable such as ``cdist(XA, XB, sokalsneath)`` is invoked
    once per pair of rows in Python, which is inefficient.

    A metric name of the form ``'test_<name>'`` dispatches to a succinct
    but slower reference implementation of metric ``<name>``.

    Examples
    --------
    Find the Euclidean distances between four 2-D coordinates:

    >>> from scipy.spatial import distance
    >>> import numpy as np
    >>> coords = [(35.0456, -85.2672),
    ...           (35.1174, -89.9711),
    ...           (35.9728, -83.9422),
    ...           (36.1667, -86.7833)]
    >>> distance.cdist(coords, coords, 'euclidean')
    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
           [ 4.7044,  0.    ,  6.0893,  3.3561],
           [ 1.6172,  6.0893,  0.    ,  2.8477],
           [ 1.8856,  3.3561,  2.8477,  0.    ]])

    Find the Manhattan distance from a 3-D point to the corners of the
    unit cube:

    >>> a = np.array([[0, 0, 0],
    ...               [0, 0, 1],
    ...               [0, 1, 0],
    ...               [0, 1, 1],
    ...               [1, 0, 0],
    ...               [1, 0, 1],
    ...               [1, 1, 0],
    ...               [1, 1, 1]])
    >>> b = np.array([[ 0.1,  0.2,  0.4]])
    >>> distance.cdist(a, b, 'cityblock')
    array([[ 0.7],
           [ 0.9],
           [ 1.3],
           [ 1.5],
           [ 1.5],
           [ 1.7],
           [ 2.1],
           [ 2.3]])

    """
    XA = np.asarray(XA)
    XB = np.asarray(XB)

    # Both inputs must be 2-D with matching feature dimension.
    if XA.ndim != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if XB.ndim != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    mA, n = XA.shape
    mB, n_cols_B = XB.shape
    if n_cols_B != n:
        raise ValueError('XA and XB must have the same number of columns '
                         '(i.e. feature dimension.)')

    if callable(metric):
        # If the callable is one of this module's known metrics, run the
        # same input validation/conversion the string form would use.
        mstr = getattr(metric, '__name__', 'Unknown')
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            XA, XB, typ, kwargs = _validate_cdist_input(
                XA, XB, mA, mB, n, metric_info, **kwargs)
        return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)

    if isinstance(metric, str):
        mstr = metric.lower()
        metric_info = _METRIC_ALIAS.get(mstr, None)
        if metric_info is not None:
            # Fast path: dispatch to the optimized implementation.
            return metric_info.cdist_func(XA, XB, out=out, **kwargs)
        if mstr.startswith("test_"):
            # 'test_<abc>' computes metric <abc> with the succinct,
            # verifiable, but less efficient reference implementation.
            metric_info = _TEST_METRICS.get(mstr, None)
            if metric_info is None:
                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
            XA, XB, typ, kwargs = _validate_cdist_input(
                XA, XB, mA, mB, n, metric_info, **kwargs)
            return _cdist_callable(
                XA, XB, metric=metric_info.dist_func, out=out, **kwargs)
        raise ValueError('Unknown Distance Metric: %s' % mstr)

    raise TypeError('2nd argument metric must be a string identifier '
                    'or a function.')
env-llmeval/lib/python3.10/site-packages/scipy/spatial/distance.pyi ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for scipy.spatial.distance; declarations only, no runtime behavior.
from __future__ import annotations
from typing import (overload, Any, SupportsFloat, Literal, Protocol, SupportsIndex)

import numpy as np
from numpy.typing import ArrayLike, NDArray

# Anything that can be parsed by `np.float64.__init__` and is thus
# compatible with `ndarray.__setitem__` (for a float64 array)
_FloatValue = None | str | bytes | SupportsFloat | SupportsIndex

class _MetricCallback1(Protocol):
    def __call__(
        self, __XA: NDArray[Any], __XB: NDArray[Any]
    ) -> _FloatValue: ...

class _MetricCallback2(Protocol):
    def __call__(
        self, __XA: NDArray[Any], __XB: NDArray[Any], **kwargs: Any
    ) -> _FloatValue: ...

# TODO: Use a single protocol with a parameter specification variable
# once available (PEP 612)
_MetricCallback = _MetricCallback1 | _MetricCallback2

# Accepted string spellings (including aliases) for the `metric` argument.
_MetricKind = Literal[
    'braycurtis',
    'canberra',
    'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch',
    'cityblock', 'cblock', 'cb', 'c',
    'correlation', 'co',
    'cosine', 'cos',
    'dice',
    'euclidean', 'euclid', 'eu', 'e',
    'hamming', 'hamm', 'ha', 'h',
    'minkowski', 'mi', 'm', 'pnorm',
    'jaccard', 'jacc', 'ja', 'j',
    'jensenshannon', 'js',
    'kulczynski1',
    'mahalanobis', 'mahal', 'mah',
    'rogerstanimoto',
    'russellrao',
    'seuclidean', 'se', 's',
    'sokalmichener',
    'sokalsneath',
    'sqeuclidean', 'sqe', 'sqeuclid',
    'yule',
]

# Function annotations

def braycurtis(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def canberra(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

# TODO: Add `metric`-specific overloads
# Returns a float64 or float128 array, depending on the input dtype
@overload
def cdist(
    XA: ArrayLike,
    XB: ArrayLike,
    metric: _MetricKind = ...,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    p: float = ...,
    w: ArrayLike | None = ...,
    V: ArrayLike | None = ...,
    VI: ArrayLike | None = ...,
) -> NDArray[np.floating[Any]]: ...
@overload
def cdist(
    XA: ArrayLike,
    XB: ArrayLike,
    metric: _MetricCallback,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    **kwargs: Any,
) -> NDArray[np.floating[Any]]: ...

# TODO: Wait for dtype support; the return type is
# dependent on the input arrays dtype
def chebyshev(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> Any: ...

# TODO: Wait for dtype support; the return type is
# dependent on the input arrays dtype
def cityblock(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> Any: ...

def correlation(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ..., centered: bool = ...
) -> np.float64: ...

def cosine(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def dice(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def directed_hausdorff(
    u: ArrayLike, v: ArrayLike, seed: int | None = ...
) -> tuple[float, int, int]: ...

def euclidean(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def hamming(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def is_valid_dm(
    D: ArrayLike,
    tol: float = ...,
    throw: bool = ...,
    name: str | None = ...,
    warning: bool = ...,
) -> bool: ...

def is_valid_y(
    y: ArrayLike,
    warning: bool = ...,
    throw: bool = ...,
    name: str | None = ...,
) -> bool: ...

def jaccard(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def jensenshannon(
    p: ArrayLike, q: ArrayLike, base: float | None = ...
) -> np.float64: ...

def kulczynski1(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def mahalanobis(
    u: ArrayLike, v: ArrayLike, VI: ArrayLike
) -> np.float64: ...

def minkowski(
    u: ArrayLike, v: ArrayLike, p: float = ..., w: ArrayLike | None = ...
) -> float: ...

def num_obs_dm(d: ArrayLike) -> int: ...

def num_obs_y(Y: ArrayLike) -> int: ...

# TODO: Add `metric`-specific overloads
@overload
def pdist(
    X: ArrayLike,
    metric: _MetricKind = ...,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    p: float = ...,
    w: ArrayLike | None = ...,
    V: ArrayLike | None = ...,
    VI: ArrayLike | None = ...,
) -> NDArray[np.floating[Any]]: ...
@overload
def pdist(
    X: ArrayLike,
    metric: _MetricCallback,
    *,
    out: None | NDArray[np.floating[Any]] = ...,
    **kwargs: Any,
) -> NDArray[np.floating[Any]]: ...

def seuclidean(
    u: ArrayLike, v: ArrayLike, V: ArrayLike
) -> float: ...

def sokalmichener(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def sokalsneath(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def sqeuclidean(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> np.float64: ...

def squareform(
    X: ArrayLike,
    force: Literal["no", "tomatrix", "tovector"] = ...,
    checks: bool = ...,
) -> NDArray[Any]: ...

def rogerstanimoto(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def russellrao(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...

def yule(
    u: ArrayLike, v: ArrayLike, w: ArrayLike | None = ...
) -> float: ...
env-llmeval/lib/python3.10/site-packages/scipy/spatial/kdtree.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.spatial` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
# Names re-exported (with a deprecation warning) from the private module.
__all__ = [  # noqa: F822
    "KDTree", "Rectangle", "cKDTree", "cKDTreeNode",
    "distance_matrix", "minkowski_distance", "minkowski_distance_p",
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Delegate attribute access to the private `_kdtree` module, emitting
    # the standard SciPy sub-module deprecation warning on the way.
    deprecated_attr = _sub_module_deprecation(
        sub_package="spatial",
        module="kdtree",
        private_modules=["_kdtree"],
        all=__all__,
        attribute=name,
    )
    return deprecated_attr
env-llmeval/lib/python3.10/site-packages/scipy/spatial/qhull.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.spatial` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
# Names re-exported (with a deprecation warning) from the private module.
__all__ = [  # noqa: F822
    "ConvexHull", "Delaunay", "HalfspaceIntersection",
    "QhullError", "Voronoi", "tsearch",
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Delegate attribute access to the private `_qhull` module, emitting
    # the standard SciPy sub-module deprecation warning on the way.
    deprecated_attr = _sub_module_deprecation(
        sub_package="spatial",
        module="qhull",
        private_modules=["_qhull"],
        all=__all__,
        attribute=name,
    )
    return deprecated_attr
env-llmeval/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Qhull, Copyright (c) 1993-2019
2
+
3
+ C.B. Barber
4
+ Arlington, MA
5
+
6
+ and
7
+
8
+ The National Science and Technology Research Center for
9
+ Computation and Visualization of Geometric Structures
10
+ (The Geometry Center)
11
+ University of Minnesota
12
+
13
14
+
15
+ This software includes Qhull from C.B. Barber and The Geometry Center.
16
+ Qhull is copyrighted as noted above. Qhull is free software and may
17
+ be obtained via http from www.qhull.org. It may be freely copied, modified,
18
+ and redistributed under the following conditions:
19
+
20
+ 1. All copyright notices must remain intact in all files.
21
+
22
+ 2. A copy of this text file must be distributed along with any copies
23
+ of Qhull that you redistribute; this includes copies that you have
24
+ modified, or copies of programs or other software products that
25
+ include Qhull.
26
+
27
+ 3. If you modify Qhull, you must include a notice giving the
28
+ name of the person performing the modification, the date of
29
+ modification, and the reason for such modification.
30
+
31
+ 4. When distributing modified versions of Qhull, or other software
32
+ products that include Qhull, you must provide notice that the original
33
+ source code may be obtained as noted above.
34
+
35
+ 5. There is no warranty or other guarantee of fitness for Qhull, it is
36
+ provided solely "as is". Bug reports or fixes may be sent to
37
+ [email protected]; the authors may or may not act on them as
38
+ they desire.
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spatial Transformations (:mod:`scipy.spatial.transform`)
3
+ ========================================================
4
+
5
+ .. currentmodule:: scipy.spatial.transform
6
+
7
+ This package implements various spatial transformations. For now,
8
+ only rotations are supported.
9
+
10
+ Rotations in 3 dimensions
11
+ -------------------------
12
+ .. autosummary::
13
+ :toctree: generated/
14
+
15
+ Rotation
16
+ Slerp
17
+ RotationSpline
18
+ """
19
+ from ._rotation import Rotation, Slerp
20
+ from ._rotation_spline import RotationSpline
21
+
22
+ # Deprecated namespaces, to be removed in v2.0.0
23
+ from . import rotation
24
+
25
+ __all__ = ['Rotation', 'Slerp', 'RotationSpline']
26
+
27
+ from scipy._lib._testutils import PytestTester
28
+ test = PytestTester(__name__)
29
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (873 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc ADDED
Binary file (3.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc ADDED
Binary file (672 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/_rotation_groups.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.constants import golden as phi
3
+
4
+
5
def icosahedral(cls):
    """Return the icosahedral rotation group as `cls` instances.

    Built as the 12 tetrahedral rotations (``g1``) plus 48 additional
    quaternions (``g2``), i.e. 60 elements in total.  ``cls`` must
    provide ``from_quat`` (e.g. ``Rotation``).
    """
    g1 = tetrahedral(cls).as_quat()
    # Quaternion component magnitudes: 1/2, 1/(2*phi) and phi/2,
    # where phi is the golden ratio.
    a = 0.5
    b = 0.5 / phi
    c = phi / 2
    g2 = np.array([[+a, +b, +c, 0],
                   [+a, +b, -c, 0],
                   [+a, +c, 0, +b],
                   [+a, +c, 0, -b],
                   [+a, -b, +c, 0],
                   [+a, -b, -c, 0],
                   [+a, -c, 0, +b],
                   [+a, -c, 0, -b],
                   [+a, 0, +b, +c],
                   [+a, 0, +b, -c],
                   [+a, 0, -b, +c],
                   [+a, 0, -b, -c],
                   [+b, +a, 0, +c],
                   [+b, +a, 0, -c],
                   [+b, +c, +a, 0],
                   [+b, +c, -a, 0],
                   [+b, -a, 0, +c],
                   [+b, -a, 0, -c],
                   [+b, -c, +a, 0],
                   [+b, -c, -a, 0],
                   [+b, 0, +c, +a],
                   [+b, 0, +c, -a],
                   [+b, 0, -c, +a],
                   [+b, 0, -c, -a],
                   [+c, +a, +b, 0],
                   [+c, +a, -b, 0],
                   [+c, +b, 0, +a],
                   [+c, +b, 0, -a],
                   [+c, -a, +b, 0],
                   [+c, -a, -b, 0],
                   [+c, -b, 0, +a],
                   [+c, -b, 0, -a],
                   [+c, 0, +a, +b],
                   [+c, 0, +a, -b],
                   [+c, 0, -a, +b],
                   [+c, 0, -a, -b],
                   [0, +a, +c, +b],
                   [0, +a, +c, -b],
                   [0, +a, -c, +b],
                   [0, +a, -c, -b],
                   [0, +b, +a, +c],
                   [0, +b, +a, -c],
                   [0, +b, -a, +c],
                   [0, +b, -a, -c],
                   [0, +c, +b, +a],
                   [0, +c, +b, -a],
                   [0, +c, -b, +a],
                   [0, +c, -b, -a]])
    return cls.from_quat(np.concatenate((g1, g2)))
59
+
60
+
61
def octahedral(cls):
    """Return the octahedral rotation group as `cls` instances.

    Built as the 12 tetrahedral rotations (``g1``) plus 12 additional
    quaternions with components 0 or +/- sqrt(2)/2 (``g2``), i.e. 24
    elements in total.  ``cls`` must provide ``from_quat``.
    """
    g1 = tetrahedral(cls).as_quat()
    c = np.sqrt(2) / 2
    g2 = np.array([[+c, 0, 0, +c],
                   [0, +c, 0, +c],
                   [0, 0, +c, +c],
                   [0, 0, -c, +c],
                   [0, -c, 0, +c],
                   [-c, 0, 0, +c],
                   [0, +c, +c, 0],
                   [0, -c, +c, 0],
                   [+c, 0, +c, 0],
                   [-c, 0, +c, 0],
                   [+c, +c, 0, 0],
                   [-c, +c, 0, 0]])
    return cls.from_quat(np.concatenate((g1, g2)))
77
+
78
+
79
def tetrahedral(cls):
    """Return the 12 rotations of the tetrahedral group as `cls` instances.

    The first four quaternions are the rows of the 4x4 identity matrix;
    the remaining eight have every component equal to +/- 1/2.  ``cls``
    must provide ``from_quat`` (e.g. ``Rotation``).
    """
    identity_rows = np.eye(4)
    h = 0.5
    half_rows = np.array([[h, -h, -h, +h],
                          [h, -h, +h, +h],
                          [h, +h, -h, +h],
                          [h, +h, +h, +h],
                          [h, -h, -h, -h],
                          [h, -h, +h, -h],
                          [h, +h, -h, -h],
                          [h, +h, +h, -h]])
    return cls.from_quat(np.vstack((identity_rows, half_rows)))
91
+
92
+
93
def dicyclic(cls, n, axis=2):
    """Return the dicyclic group of order 2n as `cls` instances.

    Consists of the n cyclic rotations about ``axis`` plus n half-turns
    (rotation vectors of magnitude pi) about evenly spaced directions in
    the plane perpendicular to ``axis``.
    """
    g1 = cyclic(cls, n, axis).as_rotvec()

    half_turn_angles = np.linspace(0, np.pi, n, endpoint=False)
    g2 = np.pi * np.column_stack([np.zeros(n),
                                  np.cos(half_turn_angles),
                                  np.sin(half_turn_angles)])
    # Rotate the columns so the fixed axis of g1 matches `axis`.
    g2 = np.roll(g2, axis, axis=1)
    return cls.from_rotvec(np.concatenate((g1, g2)))
100
+
101
+
102
def cyclic(cls, n, axis=2):
    """Return the cyclic group of order n as `cls` instances.

    The n rotations are by angles 2*pi*k/n (k = 0..n-1) about the
    coordinate axis selected by ``axis`` (0=x, 1=y, 2=z).
    """
    angles = np.linspace(0, 2 * np.pi, n, endpoint=False)
    rotvecs = np.column_stack([angles, np.zeros(n), np.zeros(n)])
    # Shift the non-zero column into the requested axis position.
    return cls.from_rotvec(np.roll(rotvecs, axis, axis=1))
106
+
107
+
108
def create_group(cls, group, axis='Z'):
    """Create the 3D rotation group named by ``group``.

    ``group`` is 'I', 'O', 'T', or 'Cn'/'Dn' with a positive integer n;
    ``axis`` (one of x/y/z, either case) selects the principal axis for
    the cyclic and dicyclic groups.  Raises ``ValueError`` on any
    malformed argument.
    """
    if not isinstance(group, str):
        raise ValueError("`group` argument must be a string")

    permitted_axes = ['x', 'y', 'z', 'X', 'Y', 'Z']
    if axis not in permitted_axes:
        raise ValueError("`axis` must be one of " + ", ".join(permitted_axes))

    if group in ['I', 'O', 'T']:
        symbol, order = group, 1
    elif group[:1] in ['C', 'D'] and group[1:].isdigit():
        symbol, order = group[:1], int(group[1:])
    else:
        raise ValueError("`group` must be one of 'I', 'O', 'T', 'Dn', 'Cn'")

    if order < 1:
        raise ValueError("Group order must be positive")

    axis_index = 'xyz'.index(axis.lower())
    if symbol == 'D':
        return dicyclic(cls, order, axis=axis_index)
    if symbol == 'C':
        return cyclic(cls, order, axis=axis_index)
    # The polyhedral groups take no axis argument.
    return {'I': icosahedral, 'O': octahedral, 'T': tetrahedral}[symbol](cls)
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc ADDED
Binary file (50.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc ADDED
Binary file (4.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc ADDED
Binary file (719 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc ADDED
Binary file (1.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc ADDED
Binary file (3.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc ADDED
Binary file (4.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/cosine_cdf.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import mpmath


def f(x):
    """CDF of the cosine distribution on [-pi, pi]:
    F(x) = (pi + x + sin(x)) / (2*pi)."""
    return (mpmath.pi + x + mpmath.sin(x)) / (2*mpmath.pi)


# Note: 40 digits might be overkill; a few more digits than the default
# might be sufficient.
mpmath.mp.dps = 40
# Taylor-expand the CDF about the left endpoint x = -pi, then form a
# degree (9, 10) Pade approximant from those coefficients.
ts = mpmath.taylor(f, -mpmath.pi, 20)
p, q = mpmath.pade(ts, 9, 10)

# Convert the arbitrary-precision coefficients to doubles for printing.
p = [float(c) for c in p]
q = [float(c) for c in q]
print('p =', p)
print('q =', q)
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/expn_asy.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Precompute the polynomials for the asymptotic expansion of the
2
+ generalized exponential integral.
3
+
4
+ Sources
5
+ -------
6
+ [1] NIST, Digital Library of Mathematical Functions,
7
+ https://dlmf.nist.gov/8.20#ii
8
+
9
+ """
10
+ import os
11
+
12
+ try:
13
+ import sympy
14
+ from sympy import Poly
15
+ x = sympy.symbols('x')
16
+ except ImportError:
17
+ pass
18
+
19
+
20
+ def generate_A(K):
21
+ A = [Poly(1, x)]
22
+ for k in range(K):
23
+ A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
24
+ return A
25
+
26
+
27
+ WARNING = """\
28
+ /* This file was automatically generated by _precompute/expn_asy.py.
29
+ * Do not edit it manually!
30
+ */
31
+ """
32
+
33
+
34
+ def main():
35
+ print(__doc__)
36
+ fn = os.path.join('..', 'cephes', 'expn.h')
37
+
38
+ K = 12
39
+ A = generate_A(K)
40
+ with open(fn + '.new', 'w') as f:
41
+ f.write(WARNING)
42
+ f.write(f"#define nA {len(A)}\n")
43
+ for k, Ak in enumerate(A):
44
+ ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
45
+ f.write(f"static const double A{k}[] = {{tmp}};\n")
46
+ ", ".join([f"A{k}" for k in range(K + 1)])
47
+ f.write("static const double *A[] = {{tmp}};\n")
48
+ ", ".join([str(Ak.degree()) for Ak in A])
49
+ f.write("static const int Adegs[] = {{tmp}};\n")
50
+ os.rename(fn + '.new', fn)
51
+
52
+
53
+ if __name__ == "__main__":
54
+ main()
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_asy.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Precompute coefficients of Temme's asymptotic expansion for gammainc.
3
+
4
+ This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram.
5
+
6
+ Sources:
7
+ [1] NIST, "Digital Library of Mathematical Functions",
8
+ https://dlmf.nist.gov/
9
+
10
+ """
11
+ import os
12
+ from scipy.special._precompute.utils import lagrange_inversion
13
+
14
+ try:
15
+ import mpmath as mp
16
+ except ImportError:
17
+ pass
18
+
19
+
20
def compute_a(n):
    """a_k from DLMF 5.11.6.

    Returns the first *n* coefficients as mpmath values, computed by the
    recurrence implied by the defining relation (each a_k is a convolution
    of earlier terms, normalized by a_0 = sqrt(2)/2).
    """
    a = [mp.sqrt(2)/2]
    for k in range(1, n):
        ak = a[-1]/k
        # Subtract the convolution of previously computed coefficients.
        for j in range(1, len(a)):
            ak -= a[j]*a[-j]/(j + 1)
        ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
        a.append(ak)
    return a
30
+
31
+
32
def compute_g(n):
    """g_k from DLMF 5.11.3/5.11.5.

    Only the even-indexed a_k enter, so 2*n of them are generated to
    produce n values of g.
    """
    a = compute_a(2*n)
    g = [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
    return g
37
+
38
+
39
def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0."""
    # eta carries the sign of lam; for lam > -1 the quantity
    # lam - log(lam + 1) is >= 0, so the square root is real.
    if lam > 0:
        return mp.sqrt(2*(lam - mp.log(lam + 1)))
    elif lam < 0:
        return -mp.sqrt(2*(lam - mp.log(lam + 1)))
    else:
        return 0
47
+
48
+
49
def compute_alpha(n):
    """alpha_n from DLMF 8.12.13."""
    # Taylor coefficients of eta at 0, then series reversion to get the
    # coefficients of the inverse function.
    coeffs = mp.taylor(eta, 0, n - 1)
    return lagrange_inversion(coeffs)
53
+
54
+
55
def compute_d(K, N):
    """d_{k, n} from DLMF 8.12.12.

    Returns a list of K rows of N mpmath coefficients each.
    """
    # Each step of the recurrence in k consumes two n-indices from the
    # previous row, so M = N + 2*K working terms are needed to end up
    # with N usable coefficients per row.
    M = N + 2*K
    d0 = [-mp.mpf(1)/3]
    alpha = compute_alpha(M + 2)
    for n in range(1, M):
        d0.append((n + 2)*alpha[n+2])
    d = [d0]
    g = compute_g(K)
    for k in range(1, K):
        dk = []
        for n in range(M - 2*k):
            dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
        d.append(dk)
    # Trim every row to the N requested coefficients.
    for k in range(K):
        d[k] = d[k][:N]
    return d
72
+
73
+
74
# C-header template; the two `{}` placeholders are filled with K and N
# by main() via str.format.  The trailing `{{` is a literal `{` opening
# the coefficient table.
# NOTE(review): the banner still says '_precomp/gammainc.py' although this
# script lives at _precompute/gammainc_asy.py — stale path, confirm intent.
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
 * Do not edit it manually!
 */

#ifndef IGAM_H
#define IGAM_H

#define K {}
#define N {}

static const double d[K][N] =
{{"""

footer = \
r"""
#endif
"""
92
+
93
+
94
def main():
    """Compute d[K][N] at 50 digits and write cephes/igam.h."""
    print(__doc__)
    K = 25
    N = 25
    with mp.workdps(50):
        d = compute_d(K, N)
    fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
    # Write to a '.new' file first; rename into place only on success.
    with open(fn + '.new', 'w') as f:
        f.write(header.format(K, N))
        for k, row in enumerate(d):
            # 17 significant digits round-trips a double exactly.
            row = [mp.nstr(x, 17, min_fixed=0, max_fixed=0) for x in row]
            f.write('{')
            f.write(", ".join(row))
            if k < K - 1:
                f.write('},\n')
            else:
                # Last row also closes the outer array initializer.
                f.write('}};\n')
        f.write(footer)
    os.rename(fn + '.new', fn)


if __name__ == "__main__":
    main()
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/gammainc_data.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute gammainc and gammaincc for large arguments and parameters
2
+ and save the values to data files for use in tests. We can't just
3
+ compare to mpmath's gammainc in test_mpmath.TestSystematic because it
4
+ would take too long.
5
+
6
+ Note that mpmath's gammainc is computed using hypercomb, but since it
7
+ doesn't allow the user to increase the maximum number of terms used in
8
+ the series it doesn't converge for many arguments. To get around this
9
+ we copy the mpmath implementation but use more terms.
10
+
11
+ This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB
12
+ ram.
13
+
14
+ Sources:
15
+ [1] Fredrik Johansson and others. mpmath: a Python library for
16
+ arbitrary-precision floating-point arithmetic (version 0.19),
17
+ December 2013. http://mpmath.org/.
18
+
19
+ """
20
+ import os
21
+ from time import time
22
+ import numpy as np
23
+ from numpy import pi
24
+
25
+ from scipy.special._mptestutils import mpf2float
26
+
27
+ try:
28
+ import mpmath as mp
29
+ except ImportError:
30
+ pass
31
+
32
+
33
+ def gammainc(a, x, dps=50, maxterms=10**8):
34
+ """Compute gammainc exactly like mpmath does but allow for more
35
+ summands in hypercomb. See
36
+
37
+ mpmath/functions/expintegrals.py#L134
38
+
39
+ in the mpmath github repository.
40
+
41
+ """
42
+ with mp.workdps(dps):
43
+ z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
44
+ G = [z]
45
+ negb = mp.fneg(b, exact=True)
46
+
47
+ def h(z):
48
+ T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
49
+ return (T1,)
50
+
51
+ res = mp.hypercomb(h, [z], maxterms=maxterms)
52
+ return mpf2float(res)
53
+
54
+
55
+ def gammaincc(a, x, dps=50, maxterms=10**8):
56
+ """Compute gammaincc exactly like mpmath does but allow for more
57
+ terms in hypercomb. See
58
+
59
+ mpmath/functions/expintegrals.py#L187
60
+
61
+ in the mpmath github repository.
62
+
63
+ """
64
+ with mp.workdps(dps):
65
+ z, a = a, x
66
+
67
+ if mp.isint(z):
68
+ try:
69
+ # mpmath has a fast integer path
70
+ return mpf2float(mp.gammainc(z, a=a, regularized=True))
71
+ except mp.libmp.NoConvergence:
72
+ pass
73
+ nega = mp.fneg(a, exact=True)
74
+ G = [z]
75
+ # Use 2F0 series when possible; fall back to lower gamma representation
76
+ try:
77
+ def h(z):
78
+ r = z-1
79
+ return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
80
+ return mpf2float(mp.hypercomb(h, [z], force_series=True))
81
+ except mp.libmp.NoConvergence:
82
+ def h(z):
83
+ T1 = [], [1, z-1], [z], G, [], [], 0
84
+ T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
85
+ return T1, T2
86
+ return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
87
+
88
+
89
+ def main():
90
+ t0 = time()
91
+ # It would be nice to have data for larger values, but either this
92
+ # requires prohibitively large precision (dps > 800) or mpmath has
93
+ # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
94
+ # value around 0.03, while the true value should be close to 0.5
95
+ # (DLMF 8.12.15).
96
+ print(__doc__)
97
+ pwd = os.path.dirname(__file__)
98
+ r = np.logspace(4, 14, 30)
99
+ ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
100
+ utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)
101
+
102
+ regimes = [(gammainc, ltheta), (gammaincc, utheta)]
103
+ for func, theta in regimes:
104
+ rg, thetag = np.meshgrid(r, theta)
105
+ a, x = rg*np.cos(thetag), rg*np.sin(thetag)
106
+ a, x = a.flatten(), x.flatten()
107
+ dataset = []
108
+ for i, (a0, x0) in enumerate(zip(a, x)):
109
+ if func == gammaincc:
110
+ # Exploit the fast integer path in gammaincc whenever
111
+ # possible so that the computation doesn't take too
112
+ # long
113
+ a0, x0 = np.floor(a0), np.floor(x0)
114
+ dataset.append((a0, x0, func(a0, x0)))
115
+ dataset = np.array(dataset)
116
+ filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
117
+ f'{func.__name__}.txt')
118
+ np.savetxt(filename, dataset)
119
+
120
+ print(f"{(time() - t0)/60} minutes elapsed")
121
+
122
+
123
+ if __name__ == "__main__":
124
+ main()
env-llmeval/lib/python3.10/site-packages/scipy/special/_precompute/lambertw.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute a Pade approximation for the principal branch of the
2
+ Lambert W function around 0 and compare it to various other
3
+ approximations.
4
+
5
+ """
6
+ import numpy as np
7
+
8
+ try:
9
+ import mpmath
10
+ import matplotlib.pyplot as plt
11
+ except ImportError:
12
+ pass
13
+
14
+
15
def lambertw_pade():
    """Return (numerator, denominator) coefficients of a degree-(3, 2)
    Pade approximant for the principal branch of lambertw around 0."""
    # NOTE(review): mpmath.pade documents its input as Taylor series
    # coefficients, but raw derivative values (no division by n!) are
    # passed here — confirm this is intended.
    derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
    p, q = mpmath.pade(derivs, 3, 2)
    return p, q
19
+
20
+
21
def main():
    """Print the Pade coefficients and plot error maps comparing the Pade
    approximation, the asymptotic series, and the branch-point series
    against mpmath's lambertw over a square in the complex plane."""
    print(__doc__)
    with mpmath.workdps(50):
        p, q = lambertw_pade()
        # Reverse to descending order for np.polyval.
        p, q = p[::-1], q[::-1]
        print(f"p = {p}")
        print(f"q = {q}")

    x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
    x, y = np.meshgrid(x, y)
    z = x + 1j*y
    # Reference values from mpmath, one point at a time.
    lambertw_std = []
    for z0 in z.flatten():
        lambertw_std.append(complex(mpmath.lambertw(z0)))
    lambertw_std = np.array(lambertw_std).reshape(x.shape)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    # Compare Pade approximation to true result
    p = np.array([float(p0) for p0 in p])
    q = np.array([float(q0) for q0 in q])
    pade_approx = np.polyval(p, z)/np.polyval(q, z)
    pade_err = abs(pade_approx - lambertw_std)
    axes[0].pcolormesh(x, y, pade_err)
    # Compare two terms of asymptotic series to true result
    asy_approx = np.log(z) - np.log(np.log(z))
    asy_err = abs(asy_approx - lambertw_std)
    axes[1].pcolormesh(x, y, asy_err)
    # Compare two terms of the series around the branch point to the
    # true result
    p = np.sqrt(2*(np.exp(1)*z + 1))
    series_approx = -1 + p - p**2/3
    series_err = abs(series_approx - lambertw_std)
    im = axes[2].pcolormesh(x, y, series_err)

    fig.colorbar(im, ax=axes.ravel().tolist())
    plt.show()

    # Second figure: where is the Pade approximation the better choice?
    fig, ax = plt.subplots(nrows=1, ncols=1)
    pade_better = pade_err < asy_err
    im = ax.pcolormesh(x, y, pade_better)
    t = np.linspace(-0.3, 0.3)
    ax.plot(-2.5*abs(t) - 0.2, t, 'r')
    fig.colorbar(im, ax=ax)
    plt.show()


if __name__ == '__main__':
    main()