applied-ai-018 committed on
Commit
cac1c29
·
verified ·
1 Parent(s): d3774b3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/7.attention.dense.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/scipy/_lib/__init__.py +14 -0
  5. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/scipy/_lib/_array_api.py +356 -0
  24. venv/lib/python3.10/site-packages/scipy/_lib/_bunch.py +225 -0
  25. venv/lib/python3.10/site-packages/scipy/_lib/_ccallback.py +251 -0
  26. venv/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so +0 -0
  27. venv/lib/python3.10/site-packages/scipy/_lib/_disjoint_set.py +254 -0
  28. venv/lib/python3.10/site-packages/scipy/_lib/_docscrape.py +679 -0
  29. venv/lib/python3.10/site-packages/scipy/_lib/_elementwise_iterative_method.py +320 -0
  30. venv/lib/python3.10/site-packages/scipy/_lib/_finite_differences.py +145 -0
  31. venv/lib/python3.10/site-packages/scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so +0 -0
  32. venv/lib/python3.10/site-packages/scipy/_lib/_gcutils.py +105 -0
  33. venv/lib/python3.10/site-packages/scipy/_lib/_pep440.py +487 -0
  34. venv/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so +0 -0
  35. venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so +0 -0
  37. venv/lib/python3.10/site-packages/scipy/_lib/_testutils.py +253 -0
  38. venv/lib/python3.10/site-packages/scipy/_lib/_threadsafety.py +58 -0
  39. venv/lib/python3.10/site-packages/scipy/_lib/_tmpdirs.py +86 -0
  40. venv/lib/python3.10/site-packages/scipy/_lib/_util.py +948 -0
  41. venv/lib/python3.10/site-packages/scipy/_lib/decorator.py +399 -0
  42. venv/lib/python3.10/site-packages/scipy/_lib/deprecation.py +239 -0
  43. venv/lib/python3.10/site-packages/scipy/_lib/doccer.py +275 -0
  44. venv/lib/python3.10/site-packages/scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so +0 -0
  45. venv/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py +0 -0
  46. venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afde3cede4adc1334c7a9c55de1b1f51c46a626bd30ef08b5c2f72ff3aea40e2
3
+ size 50332828
ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0912f982ddaacb1da8501500b695615fe6baea1b6f3af90a819b1ab533c20c92
3
+ size 50332749
ckpts/universal/global_step40/zero/7.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c79525b81c8f0eebbae5022d5f47341e1febbc30438bfb99d81b1746926328e
3
+ size 16778317
venv/lib/python3.10/site-packages/scipy/_lib/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module containing private utility functions
3
+ ===========================================
4
+
5
+ The ``scipy._lib`` namespace is empty (for now). Tests for all
6
+ utilities in submodules of ``_lib`` can be run with::
7
+
8
+ from scipy import _lib
9
+ _lib.test()
10
+
11
+ """
12
+ from scipy._lib._testutils import PytestTester
13
+ test = PytestTester(__name__)
14
+ del PytestTester
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (535 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_array_api.cpython-310.pyc ADDED
Binary file (9.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc ADDED
Binary file (6.91 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc ADDED
Binary file (6.43 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_docscrape.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_elementwise_iterative_method.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc ADDED
Binary file (4.05 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_gcutils.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_pep440.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_testutils.cpython-310.pyc ADDED
Binary file (7.85 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/_util.cpython-310.pyc ADDED
Binary file (30.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/decorator.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/deprecation.cpython-310.pyc ADDED
Binary file (7.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc ADDED
Binary file (7.79 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/__pycache__/uarray.cpython-310.pyc ADDED
Binary file (795 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_array_api.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility functions to use Python Array API compatible libraries.
2
+
3
+ For the context about the Array API see:
4
+ https://data-apis.org/array-api/latest/purpose_and_scope.html
5
+
6
+ The SciPy use case of the Array API is described on the following page:
7
+ https://data-apis.org/array-api/latest/use_cases.html#use-case-scipy
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import os
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from scipy._lib import array_api_compat
17
+ from scipy._lib.array_api_compat import (
18
+ is_array_api_obj,
19
+ size,
20
+ numpy as np_compat,
21
+ )
22
+
23
+ __all__ = ['array_namespace', '_asarray', 'size']
24
+
25
+
26
+ # To enable array API and strict array-like input validation
27
+ SCIPY_ARRAY_API: str | bool = os.environ.get("SCIPY_ARRAY_API", False)
28
+ # To control the default device - for use in the test suite only
29
+ SCIPY_DEVICE = os.environ.get("SCIPY_DEVICE", "cpu")
30
+
31
+ _GLOBAL_CONFIG = {
32
+ "SCIPY_ARRAY_API": SCIPY_ARRAY_API,
33
+ "SCIPY_DEVICE": SCIPY_DEVICE,
34
+ }
35
+
36
+
37
def compliance_scipy(arrays):
    """Raise exceptions on known-bad subclasses.

    The following subclasses are not supported and raise an error:
    - `numpy.ma.MaskedArray`
    - `numpy.matrix`
    - NumPy arrays which do not have a boolean or numerical dtype
    - Any array-like which is neither array API compatible nor coercible by NumPy
    - Any array-like which is coerced by NumPy to an unsupported dtype
    """
    def _supported_dtype(dtype):
        # Only boolean and numerical dtypes pass the compliance check.
        return np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.bool_)

    for idx, candidate in enumerate(arrays):
        if isinstance(candidate, np.ma.MaskedArray):
            raise TypeError("Inputs of type `numpy.ma.MaskedArray` are not supported.")
        elif isinstance(candidate, np.matrix):
            raise TypeError("Inputs of type `numpy.matrix` are not supported.")
        if isinstance(candidate, (np.ndarray, np.generic)):
            if not _supported_dtype(candidate.dtype):
                raise TypeError(f"An argument has dtype `{candidate.dtype!r}`; "
                                "only boolean and numerical dtypes are supported.")
        elif not is_array_api_obj(candidate):
            # Not a known array type: let NumPy try to coerce it, then
            # re-check the resulting dtype.
            try:
                candidate = np.asanyarray(candidate)
            except TypeError:
                raise TypeError("An argument is neither array API compatible nor "
                                "coercible by NumPy.")
            if not _supported_dtype(candidate.dtype):
                message = (
                    f"An argument was coerced to an unsupported dtype `{candidate.dtype!r}`; "
                    "only boolean and numerical dtypes are supported."
                )
                raise TypeError(message)
            # Store the coerced array back so callers receive real arrays.
            arrays[idx] = candidate
    return arrays
73
+
74
+
75
+ def _check_finite(array, xp):
76
+ """Check for NaNs or Infs."""
77
+ msg = "array must not contain infs or NaNs"
78
+ try:
79
+ if not xp.all(xp.isfinite(array)):
80
+ raise ValueError(msg)
81
+ except TypeError:
82
+ raise ValueError(msg)
83
+
84
+
85
def array_namespace(*arrays):
    """Get the array API compatible namespace for the arrays xs.

    Parameters
    ----------
    *arrays : sequence of array_like
        Arrays used to infer the common namespace.

    Returns
    -------
    namespace : module
        Common namespace.

    Notes
    -----
    Thin wrapper around `array_api_compat.array_namespace`.

    1. Check for the global switch: SCIPY_ARRAY_API. This can also be accessed
       dynamically through ``_GLOBAL_CONFIG['SCIPY_ARRAY_API']``.
    2. `compliance_scipy` raise exceptions on known-bad subclasses. See
       its definition for more details.

    When the global switch is False, it defaults to the `numpy` namespace.
    In that case, there is no compliance check. This is a convenience to
    ease the adoption. Otherwise, arrays must comply with the new rules.
    """
    if not _GLOBAL_CONFIG["SCIPY_ARRAY_API"]:
        # Array API dispatch is switched off: always use the compatible
        # NumPy namespace, skipping compliance checks entirely.
        return np_compat

    # `None` arguments are allowed and simply ignored for inference.
    candidates = [entry for entry in arrays if entry is not None]
    candidates = compliance_scipy(candidates)

    return array_api_compat.array_namespace(*candidates)
120
+
121
+
122
+ def _asarray(
123
+ array, dtype=None, order=None, copy=None, *, xp=None, check_finite=False
124
+ ):
125
+ """SciPy-specific replacement for `np.asarray` with `order` and `check_finite`.
126
+
127
+ Memory layout parameter `order` is not exposed in the Array API standard.
128
+ `order` is only enforced if the input array implementation
129
+ is NumPy based, otherwise `order` is just silently ignored.
130
+
131
+ `check_finite` is also not a keyword in the array API standard; included
132
+ here for convenience rather than that having to be a separate function
133
+ call inside SciPy functions.
134
+ """
135
+ if xp is None:
136
+ xp = array_namespace(array)
137
+ if xp.__name__ in {"numpy", "scipy._lib.array_api_compat.numpy"}:
138
+ # Use NumPy API to support order
139
+ if copy is True:
140
+ array = np.array(array, order=order, dtype=dtype)
141
+ else:
142
+ array = np.asarray(array, order=order, dtype=dtype)
143
+
144
+ # At this point array is a NumPy ndarray. We convert it to an array
145
+ # container that is consistent with the input's namespace.
146
+ array = xp.asarray(array)
147
+ else:
148
+ try:
149
+ array = xp.asarray(array, dtype=dtype, copy=copy)
150
+ except TypeError:
151
+ coerced_xp = array_namespace(xp.asarray(3))
152
+ array = coerced_xp.asarray(array, dtype=dtype, copy=copy)
153
+
154
+ if check_finite:
155
+ _check_finite(array, xp)
156
+
157
+ return array
158
+
159
+
160
def atleast_nd(x, *, ndim, xp=None):
    """Prepend length-1 axes until `x` has at least `ndim` dimensions."""
    if xp is None:
        xp = array_namespace(x)
    result = xp.asarray(x)
    # Iterative equivalent of the recursive expansion: each pass adds one
    # leading axis until the requested rank is reached.
    while result.ndim < ndim:
        result = xp.expand_dims(result, axis=0)
    return result
169
+
170
+
171
def copy(x, *, xp=None):
    """
    Copies an array.

    Parameters
    ----------
    x : array
        Array to copy.
    xp : array_namespace, optional
        Namespace of `x`; inferred when omitted.

    Returns
    -------
    copy : array
        Copied array

    Notes
    -----
    This copy function does not offer all the semantics of `np.copy`, i.e. the
    `subok` and `order` keywords are not used.
    """
    # Note: xp.asarray fails if xp is numpy, hence delegation to _asarray.
    namespace = array_namespace(x) if xp is None else xp
    return _asarray(x, copy=True, xp=namespace)
196
+
197
+
198
def is_numpy(xp):
    """Return True when `xp` is NumPy or its array-api-compat wrapper."""
    return xp.__name__ in {'numpy', 'scipy._lib.array_api_compat.numpy'}
200
+
201
+
202
def is_cupy(xp):
    """Return True when `xp` is CuPy or its array-api-compat wrapper."""
    return xp.__name__ in {'cupy', 'scipy._lib.array_api_compat.cupy'}
204
+
205
+
206
def is_torch(xp):
    """Return True when `xp` is PyTorch or its array-api-compat wrapper."""
    return xp.__name__ in {'torch', 'scipy._lib.array_api_compat.torch'}
208
+
209
+
210
+ def _strict_check(actual, desired, xp,
211
+ check_namespace=True, check_dtype=True, check_shape=True):
212
+ __tracebackhide__ = True # Hide traceback for py.test
213
+ if check_namespace:
214
+ _assert_matching_namespace(actual, desired)
215
+
216
+ desired = xp.asarray(desired)
217
+
218
+ if check_dtype:
219
+ _msg = "dtypes do not match.\nActual: {actual.dtype}\nDesired: {desired.dtype}"
220
+ assert actual.dtype == desired.dtype, _msg
221
+
222
+ if check_shape:
223
+ _msg = "Shapes do not match.\nActual: {actual.shape}\nDesired: {desired.shape}"
224
+ assert actual.shape == desired.shape, _msg
225
+ _check_scalar(actual, desired, xp)
226
+
227
+ desired = xp.broadcast_to(desired, actual.shape)
228
+ return desired
229
+
230
+
231
def _assert_matching_namespace(actual, desired):
    """Assert every array in `actual` shares the namespace of `desired`."""
    __tracebackhide__ = True  # Hide traceback for py.test
    if not isinstance(actual, tuple):
        actual = (actual,)
    desired_space = array_namespace(desired)
    for arr in actual:
        arr_space = array_namespace(arr)
        _msg = (f"Namespaces do not match.\n"
                f"Actual: {arr_space.__name__}\n"
                f"Desired: {desired_space.__name__}")
        assert arr_space == desired_space, _msg
241
+
242
+
243
def _check_scalar(actual, desired, xp):
    """Assert `actual` and `desired` agree on scalar-vs-array (NumPy only)."""
    __tracebackhide__ = True  # Hide traceback for py.test
    # Shape check alone is sufficient unless desired.shape == (). Also,
    # only NumPy distinguishes between scalars and arrays.
    if desired.shape != () or not is_numpy(xp):
        return
    # We want to follow the conventions of the `xp` library. NumPy tends to
    # return a scalar where a 0-d array might be more appropriate (e.g.
    # np.mean([1, 2, 3]), np.asarray(0)*2, np.sin(np.asarray(0)) are all
    # scalars), whereas libraries like CuPy return 0-d arrays in those
    # scenarios. Therefore, regardless of whether the developer provides a
    # scalar or a 0-d array for `desired`, we typically want the type of
    # `actual` to be the type of `desired[()]`. Developers can override this
    # behavior with `check_shape=False`.
    desired = desired[()]
    _msg = f"Types do not match:\n Actual: {type(actual)}\n Desired: {type(desired)}"
    actual_is_scalar = xp.isscalar(actual)
    desired_is_scalar = xp.isscalar(desired)
    assert actual_is_scalar == desired_is_scalar, _msg
266
+
267
+
268
def xp_assert_equal(actual, desired, check_namespace=True, check_dtype=True,
                    check_shape=True, err_msg='', xp=None):
    """Assert exact elementwise equality, dispatching on the array namespace."""
    __tracebackhide__ = True  # Hide traceback for py.test
    xp = array_namespace(actual) if xp is None else xp
    desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
                            check_dtype=check_dtype, check_shape=check_shape)
    if is_cupy(xp):
        return xp.testing.assert_array_equal(actual, desired, err_msg=err_msg)
    if is_torch(xp):
        # PyTorch recommends using `rtol=0, atol=0` like this
        # to test for exact equality.
        msg = None if err_msg == '' else err_msg
        return xp.testing.assert_close(actual, desired, rtol=0, atol=0,
                                       equal_nan=True, check_dtype=False, msg=msg)
    # Default: plain NumPy comparison.
    return np.testing.assert_array_equal(actual, desired, err_msg=err_msg)
284
+
285
+
286
def xp_assert_close(actual, desired, rtol=1e-07, atol=0, check_namespace=True,
                    check_dtype=True, check_shape=True, err_msg='', xp=None):
    """Assert elementwise closeness within tolerances, dispatching on namespace."""
    __tracebackhide__ = True  # Hide traceback for py.test
    xp = array_namespace(actual) if xp is None else xp
    desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
                            check_dtype=check_dtype, check_shape=check_shape)
    if is_cupy(xp):
        return xp.testing.assert_allclose(actual, desired, rtol=rtol,
                                          atol=atol, err_msg=err_msg)
    if is_torch(xp):
        msg = None if err_msg == '' else err_msg
        return xp.testing.assert_close(actual, desired, rtol=rtol, atol=atol,
                                       equal_nan=True, check_dtype=False, msg=msg)
    # Default: plain NumPy comparison.
    return np.testing.assert_allclose(actual, desired, rtol=rtol,
                                      atol=atol, err_msg=err_msg)
302
+
303
+
304
def xp_assert_less(actual, desired, check_namespace=True, check_dtype=True,
                   check_shape=True, err_msg='', verbose=True, xp=None):
    """Assert `actual` is strictly less than `desired` elementwise."""
    __tracebackhide__ = True  # Hide traceback for py.test
    xp = array_namespace(actual) if xp is None else xp
    desired = _strict_check(actual, desired, xp, check_namespace=check_namespace,
                            check_dtype=check_dtype, check_shape=check_shape)
    if is_cupy(xp):
        return xp.testing.assert_array_less(actual, desired,
                                            err_msg=err_msg, verbose=verbose)
    if is_torch(xp):
        # np.testing only handles CPU tensors; move off-device data first.
        if actual.device.type != 'cpu':
            actual = actual.cpu()
        if desired.device.type != 'cpu':
            desired = desired.cpu()
    return np.testing.assert_array_less(actual, desired,
                                        err_msg=err_msg, verbose=verbose)
321
+
322
+
323
def cov(x, *, xp=None):
    """Estimate the covariance matrix of `x` (rows are variables, columns
    are observations), normalized by N-1, in the given array namespace."""
    if xp is None:
        xp = array_namespace(x)

    # Work on a float (or complex) copy so subtraction below cannot
    # mutate the caller's data.
    data = copy(x, xp=xp)
    dtype = xp.result_type(data, xp.float64)
    data = xp.asarray(atleast_nd(data, ndim=2, xp=xp), dtype=dtype)

    mean = xp.mean(data, axis=1)
    dof = data.shape[1] - 1

    if dof <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice",
                      RuntimeWarning, stacklevel=2)
        dof = 0.0

    # Center, then form X @ X^H / (N - 1).
    data -= mean[:, None]
    transposed = data.T
    if xp.isdtype(transposed.dtype, 'complex floating'):
        transposed = xp.conj(transposed)
    result = data @ transposed
    result /= dof
    # Drop all length-1 axes, mirroring np.cov's squeeze of the output.
    squeeze_axes = tuple(ax for ax, n in enumerate(result.shape) if n == 1)
    return xp.squeeze(result, axis=squeeze_axes)
349
+
350
+
351
def xp_unsupported_param_msg(param):
    """Build the error message for a parameter only supported with NumPy."""
    return 'Providing {!r} is only supported for numpy arrays.'.format(param)
353
+
354
+
355
def is_complex(x, xp):
    """Return True when `x` has a complex floating dtype in namespace `xp`."""
    dtype = x.dtype
    return xp.isdtype(dtype, 'complex floating')
venv/lib/python3.10/site-packages/scipy/_lib/_bunch.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys as _sys
2
+ from keyword import iskeyword as _iskeyword
3
+
4
+
5
+ def _validate_names(typename, field_names, extra_field_names):
6
+ """
7
+ Ensure that all the given names are valid Python identifiers that
8
+ do not start with '_'. Also check that there are no duplicates
9
+ among field_names + extra_field_names.
10
+ """
11
+ for name in [typename] + field_names + extra_field_names:
12
+ if not isinstance(name, str):
13
+ raise TypeError('typename and all field names must be strings')
14
+ if not name.isidentifier():
15
+ raise ValueError('typename and all field names must be valid '
16
+ f'identifiers: {name!r}')
17
+ if _iskeyword(name):
18
+ raise ValueError('typename and all field names cannot be a '
19
+ f'keyword: {name!r}')
20
+
21
+ seen = set()
22
+ for name in field_names + extra_field_names:
23
+ if name.startswith('_'):
24
+ raise ValueError('Field names cannot start with an underscore: '
25
+ f'{name!r}')
26
+ if name in seen:
27
+ raise ValueError(f'Duplicate field name: {name!r}')
28
+ seen.add(name)
29
+
30
+
31
+ # Note: This code is adapted from CPython:Lib/collections/__init__.py
32
def _make_tuple_bunch(typename, field_names, extra_field_names=None,
                      module=None):
    """
    Create a namedtuple-like class with additional attributes.

    This function creates a subclass of tuple that acts like a namedtuple
    and that has additional attributes.

    The additional attributes are listed in `extra_field_names`. The
    values assigned to these attributes are not part of the tuple.

    The reason this function exists is to allow functions in SciPy
    that currently return a tuple or a namedtuple to return objects
    that have additional attributes, while maintaining backwards
    compatibility.

    This should only be used to enhance *existing* functions in SciPy.
    New functions are free to create objects as return values without
    having to maintain backwards compatibility with an old tuple or
    namedtuple return value.

    Parameters
    ----------
    typename : str
        The name of the type.
    field_names : list of str
        List of names of the values to be stored in the tuple. These names
        will also be attributes of instances, so the values in the tuple
        can be accessed by indexing or as attributes. At least one name
        is required. See the Notes for additional restrictions.
    extra_field_names : list of str, optional
        List of names of values that will be stored as attributes of the
        object. See the notes for additional restrictions.
    module : str, optional
        Module name for the ``__module__`` attribute of the returned class;
        by default the caller's module is looked up via ``sys._getframe``.

    Returns
    -------
    cls : type
        The new class.

    Notes
    -----
    There are restrictions on the names that may be used in `field_names`
    and `extra_field_names`:

    * The names must be unique--no duplicates allowed.
    * The names must be valid Python identifiers, and must not begin with
      an underscore.
    * The names must not be Python keywords (e.g. 'def', 'and', etc., are
      not allowed).

    Examples
    --------
    >>> from scipy._lib._bunch import _make_tuple_bunch

    Create a class that acts like a namedtuple with length 2 (with field
    names `x` and `y`) that will also have the attributes `w` and `beta`:

    >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta'])

    `Result` is the new class. We call it with keyword arguments to create
    a new instance with given values.

    >>> result1 = Result(x=1, y=2, w=99, beta=0.5)
    >>> result1
    Result(x=1, y=2, w=99, beta=0.5)

    `result1` acts like a tuple of length 2:

    >>> len(result1)
    2
    >>> result1[:]
    (1, 2)

    The values assigned when the instance was created are available as
    attributes:

    >>> result1.y
    2
    >>> result1.beta
    0.5
    """
    if len(field_names) == 0:
        raise ValueError('field_names must contain at least one name')

    if extra_field_names is None:
        extra_field_names = []
    _validate_names(typename, field_names, extra_field_names)

    # Intern the names so repeated attribute lookups hit the string
    # identity fast path (same as collections.namedtuple).
    typename = _sys.intern(str(typename))
    field_names = tuple(map(_sys.intern, field_names))
    extra_field_names = tuple(map(_sys.intern, extra_field_names))

    all_names = field_names + extra_field_names
    arg_list = ', '.join(field_names)
    full_list = ', '.join(all_names)
    # %-format template for __repr__, e.g. "(x=%(x)r, y=%(y)r)".
    repr_fmt = ''.join(('(',
                        ', '.join(f'{name}=%({name})r' for name in all_names),
                        ')'))
    tuple_new = tuple.__new__
    _dict, _tuple, _zip = dict, tuple, zip

    # Create all the named tuple methods to be added to the class namespace

    # __new__/__init__/__setattr__ are compiled with exec so the field
    # names appear as real positional parameters in the signatures.
    s = f"""\
def __new__(_cls, {arg_list}, **extra_fields):
    return _tuple_new(_cls, ({arg_list},))

def __init__(self, {arg_list}, **extra_fields):
    for key in self._extra_fields:
        if key not in extra_fields:
            raise TypeError("missing keyword argument '%s'" % (key,))
    for key, val in extra_fields.items():
        if key not in self._extra_fields:
            raise TypeError("unexpected keyword argument '%s'" % (key,))
        self.__dict__[key] = val

def __setattr__(self, key, val):
    if key in {repr(field_names)}:
        raise AttributeError("can't set attribute %r of class %r"
                             % (key, self.__class__.__name__))
    else:
        self.__dict__[key] = val
"""
    del arg_list
    # Minimal globals for the exec'd code; only the names it references.
    namespace = {'_tuple_new': tuple_new,
                 '__builtins__': dict(TypeError=TypeError,
                                      AttributeError=AttributeError),
                 '__name__': f'namedtuple_{typename}'}
    exec(s, namespace)
    __new__ = namespace['__new__']
    __new__.__doc__ = f'Create new instance of {typename}({full_list})'
    __init__ = namespace['__init__']
    __init__.__doc__ = f'Instantiate instance of {typename}({full_list})'
    __setattr__ = namespace['__setattr__']

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self._asdict()

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        out = _dict(_zip(self._fields, self))
        out.update(self.__dict__)
        return out

    def __getnewargs_ex__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return _tuple(self), self.__dict__

    # Modify function metadata to help with introspection and debugging
    for method in (__new__, __repr__, _asdict, __getnewargs_ex__):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({full_list})',
        '_fields': field_names,
        '__new__': __new__,
        '__init__': __init__,
        '__repr__': __repr__,
        '__setattr__': __setattr__,
        '_asdict': _asdict,
        '_extra_fields': extra_field_names,
        '__getnewargs_ex__': __getnewargs_ex__,
    }
    # Tuple fields are read through indexing; `index=index` binds the loop
    # variable eagerly (avoids the late-binding closure pitfall).
    for index, name in enumerate(field_names):

        def _get(self, index=index):
            return self[index]
        class_namespace[name] = property(_get)
    # Extra fields live in the instance __dict__; again bind eagerly.
    for name in extra_field_names:

        def _get(self, name=name):
            return self.__dict__[name]
        class_namespace[name] = property(_get)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the named tuple is created. Bypass this step in environments
    # where sys._getframe is not defined (Jython for example) or sys._getframe
    # is not defined for arguments greater than 0 (IronPython), or where the
    # user has specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module
        __new__.__module__ = module

    return result
venv/lib/python3.10/site-packages/scipy/_lib/_ccallback.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import _ccallback_c
2
+
3
+ import ctypes
4
+
5
+ PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
6
+
7
+ ffi = None
8
+
9
+ class CData:
10
+ pass
11
+
12
+ def _import_cffi():
13
+ global ffi, CData
14
+
15
+ if ffi is not None:
16
+ return
17
+
18
+ try:
19
+ import cffi
20
+ ffi = cffi.FFI()
21
+ CData = ffi.CData
22
+ except ImportError:
23
+ ffi = False
24
+
25
+
26
class LowLevelCallable(tuple):
    """
    Low-level callback function.

    Some functions in SciPy take as arguments callback functions, which
    can either be python callables or low-level compiled functions. Using
    compiled callback functions can improve performance somewhat by
    avoiding wrapping data in Python objects.

    Such low-level functions in SciPy are wrapped in `LowLevelCallable`
    objects, which can be constructed from function pointers obtained from
    ctypes, cffi, Cython, or contained in Python `PyCapsule` objects.

    .. seealso::

       Functions accepting low-level callables:

       `scipy.integrate.quad`, `scipy.ndimage.generic_filter`,
       `scipy.ndimage.generic_filter1d`, `scipy.ndimage.geometric_transform`

       Usage examples:

       :ref:`ndimage-ccallbacks`, :ref:`quad-callbacks`

    Parameters
    ----------
    function : {PyCapsule, ctypes function pointer, cffi function pointer}
        Low-level callback function.
    user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
        User data to pass on to the callback function.
    signature : str, optional
        Signature of the function. If omitted, determined from *function*,
        if possible.

    Attributes
    ----------
    function
        Callback function given.
    user_data
        User data given.
    signature
        Signature of the function.

    Methods
    -------
    from_cython
        Class method for constructing callables from Cython C-exported
        functions.

    Notes
    -----
    The argument ``function`` can be one of:

    - PyCapsule, whose name contains the C function signature
    - ctypes function pointer
    - cffi function pointer

    The signature of the low-level callback must match one of those expected
    by the routine it is passed to.

    If constructing low-level functions from a PyCapsule, the name of the
    capsule must be the corresponding signature, in the format::

        return_type (arg1_type, arg2_type, ...)

    For example::

        "void (double)"
        "double (double, int *, void *)"

    The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
    if an explicit value for ``user_data`` was not given.

    """

    # Make the class immutable
    __slots__ = ()

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope.  Item 0 is the normalized
        # PyCapsule produced by _parse_callback; items 1 and 2 keep the
        # original objects alive for the lifetime of this tuple.
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return f"LowLevelCallable({self.function!r}, {self.user_data!r})"

    @property
    def function(self):
        # The original callback object as supplied by the user.
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        # The original user-data object as supplied by the user.
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        # Signature string stored as the name of the underlying capsule.
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        # Indexing is deliberately disabled: the tuple layout is an
        # implementation detail consumed by C code only.
        raise ValueError()

    @classmethod
    def from_cython(cls, module, name, user_data=None, signature=None):
        """
        Create a low-level callback function from an exported Cython function.

        Parameters
        ----------
        module : module
            Cython module where the exported function resides
        name : str
            Name of the exported function
        user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
            User data to pass on to the callback function.
        signature : str, optional
            Signature of the function. If omitted, determined from *function*.

        """
        try:
            # Cython exposes `api`-declared functions as capsules here.
            function = module.__pyx_capi__[name]
        except AttributeError as e:
            message = "Given module is not a Cython module with __pyx_capi__ attribute"
            raise ValueError(message) from e
        except KeyError as e:
            message = f"No function {name!r} found in __pyx_capi__ of the module"
            raise ValueError(message) from e
        return cls(function, user_data, signature)

    @classmethod
    def _parse_callback(cls, obj, user_data=None, signature=None):
        """Normalize (callback, user_data) into a single raw PyCapsule.

        Accepts an existing LowLevelCallable, a ctypes or cffi function
        pointer, or a PyCapsule, and wraps the resulting raw function
        pointer, signature, and context via the C helper module.
        """
        _import_cffi()

        if isinstance(obj, LowLevelCallable):
            # Reuse the already-normalized capsule.
            func = tuple.__getitem__(obj, 0)
        elif isinstance(obj, PyCFuncPtr):
            func, signature = _get_ctypes_func(obj, signature)
        elif isinstance(obj, CData):
            func, signature = _get_cffi_func(obj, signature)
        elif _ccallback_c.check_capsule(obj):
            func = obj
        else:
            raise ValueError("Given input is not a callable or a "
                             "low-level callable (pycapsule/ctypes/cffi)")

        if isinstance(user_data, ctypes.c_void_p):
            context = _get_ctypes_data(user_data)
        elif isinstance(user_data, CData):
            context = _get_cffi_data(user_data)
        elif user_data is None:
            context = 0
        elif _ccallback_c.check_capsule(user_data):
            context = user_data
        else:
            raise ValueError("Given user data is not a valid "
                             "low-level void* pointer (pycapsule/ctypes/cffi)")

        return _ccallback_c.get_raw_capsule(func, signature, context)
184
+
185
+
186
+ #
187
+ # ctypes helpers
188
+ #
189
+
190
+ def _get_ctypes_func(func, signature=None):
191
+ # Get function pointer
192
+ func_ptr = ctypes.cast(func, ctypes.c_void_p).value
193
+
194
+ # Construct function signature
195
+ if signature is None:
196
+ signature = _typename_from_ctypes(func.restype) + " ("
197
+ for j, arg in enumerate(func.argtypes):
198
+ if j == 0:
199
+ signature += _typename_from_ctypes(arg)
200
+ else:
201
+ signature += ", " + _typename_from_ctypes(arg)
202
+ signature += ")"
203
+
204
+ return func_ptr, signature
205
+
206
+
207
+ def _typename_from_ctypes(item):
208
+ if item is None:
209
+ return "void"
210
+ elif item is ctypes.c_void_p:
211
+ return "void *"
212
+
213
+ name = item.__name__
214
+
215
+ pointer_level = 0
216
+ while name.startswith("LP_"):
217
+ pointer_level += 1
218
+ name = name[3:]
219
+
220
+ if name.startswith('c_'):
221
+ name = name[2:]
222
+
223
+ if pointer_level > 0:
224
+ name += " " + "*"*pointer_level
225
+
226
+ return name
227
+
228
+
229
+ def _get_ctypes_data(data):
230
+ # Get voidp pointer
231
+ return ctypes.cast(data, ctypes.c_void_p).value
232
+
233
+
234
+ #
235
+ # CFFI helpers
236
+ #
237
+
238
def _get_cffi_func(func, signature=None):
    # Return (address, signature string) for a cffi function pointer.
    # Requires the module-global ``ffi`` to be a live cffi.FFI instance
    # (set up by _import_cffi before this is reached).

    # Get function pointer as an integer-valued cdata.
    func_ptr = ffi.cast('uintptr_t', func)

    # Get signature: cffi renders e.g. "double(*)(double, void *)";
    # dropping the "(*)" yields the "restype (args)" capsule format.
    if signature is None:
        signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')

    return func_ptr, signature
247
+
248
+
249
def _get_cffi_data(data):
    # Get the raw pointer value of a cffi cdata object as an integer-like
    # 'uintptr_t' cast (consumed by the C-level capsule builder).
    return ffi.cast('uintptr_t', data)
venv/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (110 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_disjoint_set.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Disjoint set data structure
3
+ """
4
+
5
+
6
class DisjointSet:
    """ Disjoint set data structure for incremental connectivity queries.

    .. versionadded:: 1.6.0

    Attributes
    ----------
    n_subsets : int
        The number of subsets.

    Methods
    -------
    add
    merge
    connected
    subset
    subset_size
    subsets
    __getitem__

    Notes
    -----
    This class implements the disjoint set [1]_, also known as the *union-find*
    or *merge-find* data structure. The *find* operation (implemented in
    `__getitem__`) implements the *path halving* variant. The *merge* method
    implements the *merge by size* variant.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure

    Examples
    --------
    >>> from scipy.cluster.hierarchy import DisjointSet

    Initialize a disjoint set:

    >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])

    Merge some subsets:

    >>> disjoint_set.merge(1, 2)
    True
    >>> disjoint_set.merge(3, 'a')
    True
    >>> disjoint_set.merge('a', 'b')
    True
    >>> disjoint_set.merge('b', 'b')
    False

    Find root elements:

    >>> disjoint_set[2]
    1
    >>> disjoint_set['b']
    3

    Test connectivity:

    >>> disjoint_set.connected(1, 2)
    True
    >>> disjoint_set.connected(1, 'b')
    False

    List elements in disjoint set:

    >>> list(disjoint_set)
    [1, 2, 3, 'a', 'b']

    Get the subset containing 'a':

    >>> disjoint_set.subset('a')
    {'a', 3, 'b'}

    Get the size of the subset containing 'a' (without actually instantiating
    the subset):

    >>> disjoint_set.subset_size('a')
    3

    Get all subsets in the disjoint set:

    >>> disjoint_set.subsets()
    [{1, 2}, {'a', 3, 'b'}]
    """
    def __init__(self, elements=None):
        self.n_subsets = 0
        self._sizes = {}
        self._parents = {}
        # _nbrs is a circular linked list which links connected elements.
        self._nbrs = {}
        # _indices tracks the element insertion order in `__iter__`.
        self._indices = {}
        for element in (elements if elements is not None else ()):
            self.add(element)

    def __iter__(self):
        """Returns an iterator of the elements in the disjoint set.

        Elements are ordered by insertion order.
        """
        return iter(self._indices)

    def __len__(self):
        return len(self._indices)

    def __contains__(self, x):
        return x in self._indices

    def __getitem__(self, x):
        """Find the root element of `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        root : hashable object
            Root element of `x`.
        """
        if x not in self._indices:
            raise KeyError(x)

        # Path halving: each visited node is re-pointed at its grandparent,
        # flattening the tree as a side effect of the lookup.  Comparisons
        # go through the insertion indices so that arbitrary hashable
        # elements (even ones with unusual __eq__) are supported.
        parent = self._parents
        index = self._indices
        while index[x] != index[parent[x]]:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    def add(self, x):
        """Add element `x` to disjoint set
        """
        if x in self._indices:
            return

        # A fresh element forms its own singleton subset.
        self._sizes[x] = 1
        self._parents[x] = x
        self._nbrs[x] = x
        self._indices[x] = len(self._indices)
        self.n_subsets += 1

    def merge(self, x, y):
        """Merge the subsets of `x` and `y`.

        The smaller subset (the child) is merged into the larger subset (the
        parent). If the subsets are of equal size, the root element which was
        first inserted into the disjoint set is selected as the parent.

        Parameters
        ----------
        x, y : hashable object
            Elements to merge.

        Returns
        -------
        merged : bool
            True if `x` and `y` were in disjoint sets, False otherwise.
        """
        xr = self[x]
        yr = self[y]
        index = self._indices
        if index[xr] == index[yr]:
            return False

        # Merge by size; equal sizes break the tie in favour of the root
        # that was inserted first (smaller index).
        size = self._sizes
        if (size[xr], index[yr]) < (size[yr], index[xr]):
            xr, yr = yr, xr
        self._parents[yr] = xr
        size[xr] += size[yr]
        # Splice the two circular neighbour lists into one.
        self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr]
        self.n_subsets -= 1
        return True

    def connected(self, x, y):
        """Test whether `x` and `y` are in the same subset.

        Parameters
        ----------
        x, y : hashable object
            Elements to test.

        Returns
        -------
        result : bool
            True if `x` and `y` are in the same set, False otherwise.
        """
        xr, yr = self[x], self[y]
        return self._indices[xr] == self._indices[yr]

    def subset(self, x):
        """Get the subset containing `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        result : set
            Subset containing `x`.
        """
        if x not in self._indices:
            raise KeyError(x)

        # Walk the circular neighbour list until we come back to x.
        members = [x]
        cursor = self._nbrs[x]
        while self._indices[cursor] != self._indices[x]:
            members.append(cursor)
            cursor = self._nbrs[cursor]
        return set(members)

    def subset_size(self, x):
        """Get the size of the subset containing `x`.

        Note that this method is faster than ``len(self.subset(x))`` because
        the size is directly read off an internal field, without the need to
        instantiate the full subset.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        result : int
            Size of the subset containing `x`.
        """
        return self._sizes[self[x]]

    def subsets(self):
        """Get all the subsets in the disjoint set.

        Returns
        -------
        result : list
            Subsets in the disjoint set.
        """
        result = []
        seen = set()
        for element in self:
            if element not in seen:
                members = self.subset(element)
                seen.update(members)
                result.append(members)
        return result
venv/lib/python3.10/site-packages/scipy/_lib/_docscrape.py ADDED
@@ -0,0 +1,679 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Extract reference documentation from the NumPy source tree.
2
+
3
+ """
4
+ # copied from numpydoc/docscrape.py
5
+ import inspect
6
+ import textwrap
7
+ import re
8
+ import pydoc
9
+ from warnings import warn
10
+ from collections import namedtuple
11
+ from collections.abc import Callable, Mapping
12
+ import copy
13
+ import sys
14
+
15
+
16
def strip_blank_lines(l):
    """Remove leading and trailing blank lines from a list of lines (in place)."""
    while l and not l[0].strip():
        l.pop(0)
    while l and not l[-1].strip():
        l.pop()
    return l
23
+
24
+
25
class Reader:
    """A line-based string reader.

    Wraps a string (or a pre-split list of lines) and exposes a
    cursor-based interface: read one line at a time, or read forward
    until a condition holds.  The cursor ``_l`` is the index of the
    next unread line.
    """
    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
            String with lines separated by '\\n'.

        """
        if isinstance(data, list):
            self._str = data
        else:
            self._str = data.split('\n')  # store string as list of lines

        self.reset()

    def __getitem__(self, n):
        # Direct (cursor-independent) access to line n, or a slice of lines.
        return self._str[n]

    def reset(self):
        self._l = 0  # current line nr

    def read(self):
        # Return the current line and advance the cursor; '' once past
        # the end (the caller cannot distinguish EOF from an empty line).
        if not self.eof():
            out = self[self._l]
            self._l += 1
            return out
        else:
            return ''

    def seek_next_non_empty_line(self):
        # Advance the cursor past any purely-whitespace lines.
        for l in self[self._l:]:
            if l.strip():
                break
            else:
                self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        # Consume lines until condition_func(line) is true and return the
        # consumed chunk (not including the matching line).  If the end of
        # input is reached first, the chunk up to the end is returned.
        start = self._l
        for line in self[start:]:
            if condition_func(line):
                return self[start:self._l]
            self._l += 1
            if self.eof():
                return self[start:self._l+1]
        return []

    def read_to_next_empty_line(self):
        # Skip leading blank lines, then consume up to the next blank line.
        self.seek_next_non_empty_line()

        def is_empty(line):
            return not line.strip()

        return self.read_to_condition(is_empty)

    def read_to_next_unindented_line(self):
        # Consume up to the next non-blank line that starts at column 0.
        def is_unindented(line):
            return (line.strip() and (len(line.lstrip()) == len(line)))
        return self.read_to_condition(is_unindented)

    def peek(self, n=0):
        # Look at the line n positions ahead of the cursor without
        # consuming anything; '' when out of range (n may be negative).
        if self._l + n < len(self._str):
            return self[self._l + n]
        else:
            return ''

    def is_empty(self):
        # True when the whole input contains no non-whitespace text.
        return not ''.join(self._str).strip()
99
+
100
+
101
class ParseError(Exception):
    """Raised when a docstring cannot be parsed.

    A ``docstring`` attribute may be attached after construction to
    record the offending docstring; it is then included in ``str(exc)``.
    """
    def __str__(self):
        try:
            return f"{self.args[0]} in {self.docstring!r}"
        except AttributeError:
            return self.args[0]
107
+
108
+
109
# Record for one documented item: its name, its type string, and its
# description as a list of lines.  Used for all parameter-like sections.
Parameter = namedtuple('Parameter', ['name', 'type', 'desc'])
110
+
111
+
112
class NumpyDocString(Mapping):
    """Parses a numpydoc string to an abstract representation

    Instances define a mapping from section title to structured data.

    """

    # Recognized section titles and the empty value each one starts from;
    # deep-copied per instance in __init__.
    sections = {
        'Signature': '',
        'Summary': [''],
        'Extended Summary': [],
        'Parameters': [],
        'Returns': [],
        'Yields': [],
        'Receives': [],
        'Raises': [],
        'Warns': [],
        'Other Parameters': [],
        'Attributes': [],
        'Methods': [],
        'See Also': [],
        'Notes': [],
        'Warnings': [],
        'References': '',
        'Examples': '',
        'index': {}
    }

    def __init__(self, docstring, config={}):
        # NOTE(review): mutable default `config` is only read, never
        # mutated, so it is harmless here (numpydoc upstream convention).
        orig_docstring = docstring
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        self._parsed_data = copy.deepcopy(self.sections)

        try:
            self._parse()
        except ParseError as e:
            # Attach the original text so the error message can show it.
            e.docstring = orig_docstring
            raise

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # Unknown section names warn rather than raise (error=False).
        if key not in self._parsed_data:
            self._error_location("Unknown section %s" % key, error=False)
        else:
            self._parsed_data[key] = val

    def __iter__(self):
        return iter(self._parsed_data)

    def __len__(self):
        return len(self._parsed_data)

    def _is_at_section(self):
        # True when the cursor sits on a section header: either an
        # ``.. index::`` directive or a title underlined with - or =.
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))

    def _strip(self, doc):
        # Drop leading and trailing blank lines from a list of lines.
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc)-j]

    def _read_to_next_section(self):
        # Consume lines up to (but not including) the next section header,
        # preserving internal blank lines between paragraphs.
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        # Yield (section_name, content_lines) pairs for the whole docstring.
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # NOTE(review): yields the StopIteration class itself as an
                # item (upstream numpydoc legacy); downstream unpacking
                # would fail on it -- confirm against numpydoc behavior.
                yield StopIteration
            else:
                # data[1] is the underline; skip it.
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content, single_element_is_type=False):
        # Parse "name : type" headers each followed by an indented
        # description, producing a list of Parameter records.
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                # A bare header is a type (e.g. in Returns) or a name,
                # depending on the section being parsed.
                if single_element_is_type:
                    arg_name, arg_type = '', header
                else:
                    arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)
            desc = strip_blank_lines(desc)

            params.append(Parameter(arg_name, arg_type, desc))

        return params

    # See also supports the following formats.
    #
    # <FUNCNAME>
    # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*

    # <FUNCNAME> is one of
    #   <PLAIN_FUNCNAME>
    #   COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
    # where
    #   <PLAIN_FUNCNAME> is a legal function name, and
    #   <ROLE> is any nonempty sequence of word characters.
    # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
    # <DESC> is a string describing the function.

    _role = r":(?P<role>\w+):"
    _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
    _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
    _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
    # Duplicate the pattern with renamed groups so it can appear twice
    # in one regex without group-name collisions.
    _funcnamenext = _funcname.replace('role', 'rolenext')
    _funcnamenext = _funcnamenext.replace('name', 'namenext')
    _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
    _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
    _line_rgx = re.compile(
        r"^\s*" +
        r"(?P<allfuncs>" +  # group for all function names
        _funcname +
        r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" +
        r")" +  # end of "allfuncs"
        # Some function lists have a trailing comma (or period) '\s*'
        r"(?P<trailing>[,\.])?" +
        _description)

    # Empty <DESC> elements are replaced with '..'
    empty_description = '..'

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """

        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'."""
            m = self._func_rgx.match(text)
            if not m:
                raise ParseError("%s is not a item name" % text)
            role = m.group('role')
            name = m.group('name') if role else m.group('name2')
            return name, role, m.end()

        rest = []
        for line in content:
            if not line.strip():
                continue

            line_match = self._line_rgx.match(line)
            description = None
            if line_match:
                description = line_match.group('desc')
                if line_match.group('trailing') and description:
                    self._error_location(
                        'Unexpected comma or period after function list at '
                        'index %d of line "%s"' % (line_match.end('trailing'),
                                                   line),
                        error=False)
            if not description and line.startswith(' '):
                # Indented continuation of the previous item's description.
                rest.append(line.strip())
            elif line_match:
                # One or more comma-separated function references, with an
                # optional ": description" suffix.
                funcs = []
                text = line_match.group('allfuncs')
                while True:
                    if not text.strip():
                        break
                    name, role, match_end = parse_item_name(text)
                    funcs.append((name, role))
                    text = text[match_end:].strip()
                    if text and text[0] == ',':
                        text = text[1:].strip()
                rest = list(filter(None, [description]))
                items.append((funcs, rest))
            else:
                raise ParseError("%s is not a item name" % line)
        return items

    def _parse_index(self, section, content):
        """
        .. index:: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        # If several signatures present, take the last one
        while True:
            summary = self._doc.read_to_next_empty_line()
            summary_str = " ".join([s.strip() for s in summary]).strip()
            compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
            if compiled.match(summary_str):
                self['Signature'] = summary_str
                if not self._is_at_section():
                    continue
            break

        if summary is not None:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        # Main driver: read summary, then dispatch each section's content
        # to the appropriate parser.
        self._doc.reset()
        self._parse_summary()

        sections = list(self._read_sections())
        section_names = {section for section, content in sections}

        has_returns = 'Returns' in section_names
        has_yields = 'Yields' in section_names
        # We could do more tests, but we are not. Arbitrarily.
        if has_returns and has_yields:
            msg = 'Docstring contains both a Returns and Yields section.'
            raise ValueError(msg)
        if not has_yields and 'Receives' in section_names:
            msg = 'Docstring contains a Receives section but not Yields.'
            raise ValueError(msg)

        for (section, content) in sections:
            if not section.startswith('..'):
                # Normalize title case ("other parameters" -> "Other Parameters").
                section = (s.capitalize() for s in section.split(' '))
                section = ' '.join(section)
                if self.get(section):
                    self._error_location("The section %s appears twice"
                                         % section)

            if section in ('Parameters', 'Other Parameters', 'Attributes',
                           'Methods'):
                self[section] = self._parse_param_list(content)
            elif section in ('Returns', 'Yields', 'Raises', 'Warns',
                             'Receives'):
                self[section] = self._parse_param_list(
                    content, single_element_is_type=True)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    def _error_location(self, msg, error=True):
        # Report a parse problem; raise when error=True, warn otherwise.
        if hasattr(self, '_obj'):
            # we know where the docs came from:
            try:
                filename = inspect.getsourcefile(self._obj)
            except TypeError:
                filename = None
            # NOTE(review): `filename` is computed but unused; upstream
            # numpydoc interpolates it into the message where this string
            # has "(unknown)" -- confirm against numpydoc's docscrape.
            msg = msg + (f" in the docstring of {self._obj} in (unknown).")
        if error:
            raise ValueError(msg)
        else:
            warn(msg, stacklevel=3)

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name)*symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            # Escape '*' so reST does not treat it as emphasis markup.
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        # Render a Parameter-list section back to numpydoc text.
        out = []
        if self[name]:
            out += self._str_header(name)
            for param in self[name]:
                parts = []
                if param.name:
                    parts.append(param.name)
                if param.type:
                    parts.append(param.type)
                out += [' : '.join(parts)]
                if param.desc and ''.join(param.desc).strip():
                    out += self._str_indent(param.desc)
                out += ['']
        return out

    def _str_section(self, name):
        # Render a free-text section (Notes, Examples, ...) back to text.
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        # Render the See Also section as reST cross-references.
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        out += ['']
        last_had_desc = True
        for funcs, desc in self['See Also']:
            assert isinstance(funcs, list)
            links = []
            for func, role in funcs:
                if role:
                    link = f':{role}:`{func}`'
                elif func_role:
                    link = f':{func_role}:`{func}`'
                else:
                    link = "`%s`_" % func
                links.append(link)
            link = ', '.join(links)
            out += [link]
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
                out += self._str_indent([self.empty_description])

        if last_had_desc:
            out += ['']
        out += ['']
        return out

    def _str_index(self):
        # Render the index section back to an ``.. index::`` directive.
        idx = self['index']
        out = []
        output_index = False
        default_index = idx.get('default', '')
        if default_index:
            output_index = True
        out += ['.. index:: %s' % default_index]
        for section, references in idx.items():
            if section == 'default':
                continue
            output_index = True
            out += ['   :{}: {}'.format(section, ', '.join(references))]
        if output_index:
            return out
        else:
            return ''

    def __str__(self, func_role=''):
        # Reassemble the full docstring in canonical section order.
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',
                           'Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)
542
+
543
+
544
def indent(str, indent=4):
    """Indent every line of *str* by *indent* spaces.

    A ``None`` input yields just the indent string itself.
    """
    indent_str = ' ' * indent
    if str is None:
        return indent_str
    return '\n'.join(indent_str + line for line in str.split('\n'))
550
+
551
+
552
def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
555
+
556
+
557
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    underline = style * len(text)
    return f"{text}\n{underline}\n"
559
+
560
+
561
class FunctionDoc(NumpyDocString):
    # Parses the docstring of a function (or of a class's __call__/__init__)
    # and can render it back as reST with the appropriate Sphinx role.
    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc, config)

    def get_func(self):
        # Resolve the underlying callable and its display name; for a
        # class, prefer __call__ and fall back to __init__.
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''

        func, func_name = self.get_func()

        # Map short role names to the Sphinx directive names used in the
        # rendered output.
        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. {}:: {}\n \n\n'.format(roles.get(self._role, ''),
                                              func_name)

        out += super().__str__(func_role=self._role)
        return out
596
+
597
+
598
class ClassDoc(NumpyDocString):
    # Parses a class docstring and, when the Methods/Attributes sections are
    # empty, auto-populates them from the class's public members.

    # Dunder methods that are still documented as public API.
    extra_public_methods = ['__call__']

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        # NOTE(review): mutable default `config` is only read, never
        # mutated, so it is harmless here.
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        # Sentinel meaning "document all members"; reuse Sphinx's own ALL
        # object when Sphinx is loaded so comparisons with autodoc config
        # values work.
        if 'sphinx' in sys.modules:
            from sphinx.ext.autodoc import ALL
        else:
            ALL = object()

        self.show_inherited_members = config.get(
                    'show_inherited_class_members', True)

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        _members = config.get('members', [])
        if _members is ALL:
            _members = None
        _exclude = config.get('exclude-members', [])

        if config.get('show_class_members', True) and _exclude is not ALL:
            def splitlines_x(s):
                # Like str.splitlines but maps falsy input to [].
                if not s:
                    return []
                else:
                    return s.splitlines()
            for field, items in [('Methods', self.methods),
                                 ('Attributes', self.properties)]:
                # Only auto-fill a section the docstring left empty.
                if not self[field]:
                    doc_list = []
                    for name in sorted(items):
                        if (name in _exclude or
                                (_members and name not in _members)):
                            continue
                        try:
                            doc_item = pydoc.getdoc(getattr(self._cls, name))
                            doc_list.append(
                                Parameter(name, '', splitlines_x(doc_item)))
                        except AttributeError:
                            pass  # method doesn't exist
                    self[field] = doc_list

    @property
    def methods(self):
        # Public callable members (plus extra_public_methods), subject to
        # the inherited-member filter.
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_')
                     or name in self.extra_public_methods)
                    and isinstance(func, Callable)
                    and self._is_show_member(name))]

    @property
    def properties(self):
        # Public data descriptors / properties / plain attributes,
        # subject to the inherited-member filter.
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if (not name.startswith('_') and
                    (func is None or isinstance(func, property) or
                     inspect.isdatadescriptor(func))
                    and self._is_show_member(name))]

    def _is_show_member(self, name):
        # Decide whether a member should be documented, based on the
        # show_inherited_class_members configuration.
        if self.show_inherited_members:
            return True  # show all class members
        if name not in self._cls.__dict__:
            return False  # class member is inherited, we do not show it
        return True
venv/lib/python3.10/site-packages/scipy/_lib/_elementwise_iterative_method.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # `_elementwise_iterative_method.py` includes tools for writing functions that
2
+ # - are vectorized to work elementwise on arrays,
3
+ # - implement non-trivial, iterative algorithms with a callback interface, and
4
+ # - return rich objects with iteration count, termination status, etc.
5
+ #
6
+ # Examples include:
7
+ # `scipy.optimize._chandrupatla._chandrupatla for scalar rootfinding,
8
+ # `scipy.optimize._chandrupatla._chandrupatla_minimize for scalar minimization,
9
+ # `scipy.optimize._differentiate._differentiate for numerical differentiation,
10
+ # `scipy.optimize._bracket._bracket_root for finding rootfinding brackets,
11
+ # `scipy.optimize._bracket._bracket_minimize for finding minimization brackets,
12
+ # `scipy.integrate._tanhsinh._tanhsinh` for numerical quadrature.
13
+
14
+ import numpy as np
15
+ from ._util import _RichResult, _call_callback_maybe_halt
16
+
17
# Termination status codes shared by the elementwise iterative solvers:
# negative values are error conditions, 0 is success, 1 means the element
# is still being iterated on (`_loop` sets `success = status == 0`).
_ESIGNERR = -1    # sign-condition error (set by callers; unused in this module)
_ECONVERR = -2    # did not converge within `maxiter` iterations
_EVALUEERR = -3   # invalid value encountered (set by callers; unused here)
_ECALLBACK = -4   # user callback requested termination
_ECONVERGED = 0   # element terminated successfully
_EINPROGRESS = 1  # initial status: element still being iterated
23
+
24
def _initialize(func, xs, args, complex_ok=False, preserve_shape=None):
    """Initialize abscissa, function, and args arrays for elementwise function

    Parameters
    ----------
    func : callable
        An elementwise function with signature

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with ``x``.
    xs : tuple of arrays
        Finite real abscissa arrays. Must be broadcastable.
    args : tuple, optional
        Additional positional arguments to be passed to `func`.
    complex_ok : bool, default: False
        Whether a complex result dtype is permitted. When False, a
        non-floating result dtype raises ``ValueError``.
    preserve_shape : bool, default: False
        When ``preserve_shape=False`` (default), `func` may be passed
        arguments of any shape; `_loop` is permitted
        to reshape and compress arguments at will. When
        ``preserve_shape=True``, arguments passed to `func` must have shape
        `shape` or ``shape + (n,)``, where ``n`` is any integer.

    Returns
    -------
    xs, fs, args : tuple of arrays
        Broadcasted, writeable, 1D abscissa and function value arrays (or
        NumPy floats, if appropriate). The dtypes of the `xs` and `fs` are
        `xfat`; the dtype of the `args` are unchanged.
    shape : tuple of ints
        Original shape of broadcasted arrays.
    xfat : NumPy dtype
        Result dtype of abscissae, function values, and args determined using
        `np.result_type`, except integer types are promoted to `np.float64`.

    Raises
    ------
    ValueError
        If the result dtype is not that of a real scalar (unless
        ``complex_ok``), or if `func` returns an array of the wrong shape.

    Notes
    -----
    Useful for initializing the input of SciPy functions that accept
    an elementwise callable, abscissae, and arguments; e.g.
    `scipy.optimize._chandrupatla`.
    """
    nx = len(xs)

    # Try to preserve `dtype`, but we need to ensure that the arguments are at
    # least floats before passing them into the function; integers can overflow
    # and cause failure.
    # There might be benefit to combining the `xs` into a single array and
    # calling `func` once on the combined array. For now, keep them separate.
    xas = np.broadcast_arrays(*xs, *args)  # broadcast and rename
    xat = np.result_type(*[xa.dtype for xa in xas])
    # Promote integer dtypes to float64 to avoid integer overflow in `func`.
    xat = np.float64 if np.issubdtype(xat, np.integer) else xat
    xs, args = xas[:nx], xas[nx:]
    # `[()]` converts 0-d arrays to NumPy scalars.
    xs = [x.astype(xat, copy=False)[()] for x in xs]
    fs = [np.asarray(func(x, *args)) for x in xs]
    shape = xs[0].shape
    fshape = fs[0].shape

    if preserve_shape:
        # bind original shape/func now to avoid late-binding gotcha
        def func(x, *args, shape=shape, func=func, **kwargs):
            # Index away the leading dimensions `func`'s output added
            # beyond the abscissa shape before delegating.
            i = (0,)*(len(fshape) - len(shape))
            return func(x[i], *args, **kwargs)
        shape = np.broadcast_shapes(fshape, shape)
        xs = [np.broadcast_to(x, shape) for x in xs]
        args = [np.broadcast_to(arg, shape) for arg in args]

    message = ("The shape of the array returned by `func` must be the same as "
               "the broadcasted shape of `x` and all other `args`.")
    if preserve_shape is not None:  # only in tanhsinh for now
        message = f"When `preserve_shape=False`, {message.lower()}"
    shapes_equal = [f.shape == shape for f in fs]
    if not np.all(shapes_equal):
        raise ValueError(message)

    # These algorithms tend to mix the dtypes of the abscissae and function
    # values, so figure out what the result will be and convert them all to
    # that type from the outset.
    xfat = np.result_type(*([f.dtype for f in fs] + [xat]))
    if not complex_ok and not np.issubdtype(xfat, np.floating):
        raise ValueError("Abscissae and function output must be real numbers.")
    # copy=True: callers may write into these arrays.
    xs = [x.astype(xfat, copy=True)[()] for x in xs]
    fs = [f.astype(xfat, copy=True)[()] for f in fs]

    # To ensure that we can do indexing, we'll work with at least 1d arrays,
    # but remember the appropriate shape of the output.
    xs = [x.ravel() for x in xs]
    fs = [f.ravel() for f in fs]
    args = [arg.flatten() for arg in args]
    return func, xs, fs, args, shape, xfat
119
+
120
+
121
def _loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval,
          post_func_eval, check_termination, post_termination_check,
          customize_result, res_work_pairs, preserve_shape=False):
    """Main loop of a vectorized scalar optimization algorithm

    Parameters
    ----------
    work : _RichResult
        All variables that need to be retained between iterations. Must
        contain attributes `nit`, `nfev`, and `success`
    callback : callable
        User-specified callback function
    shape : tuple of ints
        The shape of all output arrays
    maxiter : int
        Maximum number of iterations of the algorithm
    func : callable
        The user-specified callable that is being optimized or solved
    args : tuple
        Additional positional arguments to be passed to `func`.
    dtype : NumPy dtype
        The common dtype of all abscissae and function values
    pre_func_eval : callable
        A function that accepts `work` and returns `x`, the active elements
        of `x` at which `func` will be evaluated. May modify attributes
        of `work` with any algorithmic steps that need to happen
        at the beginning of an iteration, before `func` is evaluated,
    post_func_eval : callable
        A function that accepts `x`, `func(x)`, and `work`. May modify
        attributes of `work` with any algorithmic steps that need to happen
        in the middle of an iteration, after `func` is evaluated but before
        the termination check.
    check_termination : callable
        A function that accepts `work` and returns `stop`, a boolean array
        indicating which of the active elements have met a termination
        condition.
    post_termination_check : callable
        A function that accepts `work`. May modify `work` with any algorithmic
        steps that need to happen after the termination check and before the
        end of the iteration.
    customize_result : callable
        A function that accepts `res` and `shape` and returns `shape`. May
        modify `res` (in-place) according to preferences (e.g. rearrange
        elements between attributes) and modify `shape` if needed.
    res_work_pairs : list of (str, str)
        Identifies correspondence between attributes of `res` and attributes
        of `work`; i.e., attributes of active elements of `work` will be
        copied to the appropriate indices of `res` when appropriate. The order
        determines the order in which _RichResult attributes will be
        pretty-printed.
    preserve_shape : bool, default: False
        When True, `work` arrays are not compressed as elements converge,
        and `x` is reshaped to ``shape + (-1,)`` before each call to `func`.

    Returns
    -------
    res : _RichResult
        The final result object

    Notes
    -----
    Besides providing structure, this framework provides several important
    services for a vectorized optimization algorithm.

    - It handles common tasks involving iteration count, function evaluation
      count, a user-specified callback, and associated termination conditions.
    - It compresses the attributes of `work` to eliminate unnecessary
      computation on elements that have already converged.

    """
    cb_terminate = False

    # Initialize the result object and active element index array
    n_elements = int(np.prod(shape))
    active = np.arange(n_elements)  # in-progress element indices
    res_dict = {i: np.zeros(n_elements, dtype=dtype) for i, j in res_work_pairs}
    res_dict['success'] = np.zeros(n_elements, dtype=bool)
    res_dict['status'] = np.full(n_elements, _EINPROGRESS)
    res_dict['nit'] = np.zeros(n_elements, dtype=int)
    res_dict['nfev'] = np.zeros(n_elements, dtype=int)
    res = _RichResult(res_dict)
    work.args = args

    # Some elements may already satisfy a termination condition before the
    # first iteration (e.g. an exact root in the initial bracket).
    active = _check_termination(work, res, res_work_pairs, active,
                                check_termination, preserve_shape)

    if callback is not None:
        temp = _prepare_result(work, res, res_work_pairs, active, shape,
                               customize_result, preserve_shape)
        if _call_callback_maybe_halt(callback, temp):
            cb_terminate = True

    while work.nit < maxiter and active.size and not cb_terminate and n_elements:
        x = pre_func_eval(work)

        if work.args and work.args[0].ndim != x.ndim:
            # `x` always starts as 1D. If the SciPy function that uses
            # _loop added dimensions to `x`, we need to
            # add them to the elements of `args`.
            dims = np.arange(x.ndim, dtype=np.int64)
            work.args = [np.expand_dims(arg, tuple(dims[arg.ndim:]))
                         for arg in work.args]

        x_shape = x.shape
        if preserve_shape:
            # Present `x` to `func` in its original shape (plus a trailing
            # axis) and restore the working shape afterwards.
            x = x.reshape(shape + (-1,))
        f = func(x, *work.args)
        f = np.asarray(f, dtype=dtype)
        if preserve_shape:
            x = x.reshape(x_shape)
            f = f.reshape(x_shape)
        # Each column of a 2-D `x` is a separate evaluation point per element.
        work.nfev += 1 if x.ndim == 1 else x.shape[-1]

        post_func_eval(x, f, work)

        work.nit += 1
        active = _check_termination(work, res, res_work_pairs, active,
                                    check_termination, preserve_shape)

        if callback is not None:
            temp = _prepare_result(work, res, res_work_pairs, active, shape,
                                   customize_result, preserve_shape)
            if _call_callback_maybe_halt(callback, temp):
                cb_terminate = True
                break
        if active.size == 0:
            break

        post_termination_check(work)

    # Elements still in `work` at this point did not converge; converged
    # elements were already copied into `res` by `_check_termination`.
    work.status[:] = _ECALLBACK if cb_terminate else _ECONVERR
    return _prepare_result(work, res, res_work_pairs, active, shape,
                           customize_result, preserve_shape)
251
+
252
+
253
def _check_termination(work, res, res_work_pairs, active, check_termination,
                       preserve_shape):
    # Checks termination conditions, updates elements of `res` with
    # corresponding elements of `work`, and compresses `work`.
    # Returns the (possibly reduced) array of still-active element indices.

    stop = check_termination(work)

    if np.any(stop):
        # update the active elements of the result object with the active
        # elements for which a termination condition has been met
        _update_active(work, res, res_work_pairs, active, stop, preserve_shape)

        if preserve_shape:
            # `stop` is full-size here; reduce it to the active elements so
            # it lines up with `active` below.
            stop = stop[active]

        proceed = ~stop
        active = active[proceed]

        if not preserve_shape:
            # compress the arrays to avoid unnecessary computation
            for key, val in work.items():
                work[key] = val[proceed] if isinstance(val, np.ndarray) else val
            work.args = [arg[proceed] for arg in work.args]

    return active
278
+
279
+
280
def _update_active(work, res, res_work_pairs, active, mask, preserve_shape):
    # Update `active` indices of the arrays in result object `res` with the
    # contents of the scalars and arrays in `update_dict`. When provided,
    # `mask` is a boolean array applied both to the arrays in `update_dict`
    # that are to be used and to the arrays in `res` that are to be updated.
    update_dict = {key1: work[key2] for key1, key2 in res_work_pairs}
    # A zero status code means the element terminated successfully.
    update_dict['success'] = work.status == 0

    if mask is not None:
        if preserve_shape:
            # `work` arrays are full-size: build a full-size boolean mask
            # that is True only where the element is active AND selected.
            active_mask = np.zeros_like(mask)
            active_mask[active] = 1
            active_mask = active_mask & mask
            for key, val in update_dict.items():
                # `np.size(val) > 1` distinguishes arrays from scalars that
                # broadcast to all selected elements.
                res[key][active_mask] = (val[active_mask] if np.size(val) > 1
                                         else val)
        else:
            # `work` arrays are compressed to active elements; `mask` selects
            # within them and `active[mask]` gives the destination indices.
            active_mask = active[mask]
            for key, val in update_dict.items():
                res[key][active_mask] = val[mask] if np.size(val) > 1 else val
    else:
        # No mask: copy all active elements.
        for key, val in update_dict.items():
            if preserve_shape and not np.isscalar(val):
                val = val[active]
            res[key][active] = val
305
+
306
+
307
def _prepare_result(work, res, res_work_pairs, active, shape, customize_result,
                    preserve_shape):
    # Prepare the result object `res` by creating a copy, copying the latest
    # data from work, running the provided result customization function,
    # and reshaping the data to the original shapes.
    res = res.copy()
    _update_active(work, res, res_work_pairs, active, None, preserve_shape)

    # `customize_result` may rearrange `res` in place and change the shape.
    shape = customize_result(res, shape)

    for key, val in res.items():
        # `[()]` converts 0-d arrays into NumPy scalars.
        res[key] = np.reshape(val, shape)[()]
    # `_order_keys` controls the attribute order used by _RichResult's
    # pretty-printing.
    res['_order_keys'] = ['success'] + [i for i, j in res_work_pairs]
    return _RichResult(**res)
venv/lib/python3.10/site-packages/scipy/_lib/_finite_differences.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy import arange, newaxis, hstack, prod, array
2
+
3
+
4
+ def _central_diff_weights(Np, ndiv=1):
5
+ """
6
+ Return weights for an Np-point central derivative.
7
+
8
+ Assumes equally-spaced function points.
9
+
10
+ If weights are in the vector w, then
11
+ derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
12
+
13
+ Parameters
14
+ ----------
15
+ Np : int
16
+ Number of points for the central derivative.
17
+ ndiv : int, optional
18
+ Number of divisions. Default is 1.
19
+
20
+ Returns
21
+ -------
22
+ w : ndarray
23
+ Weights for an Np-point central derivative. Its size is `Np`.
24
+
25
+ Notes
26
+ -----
27
+ Can be inaccurate for a large number of points.
28
+
29
+ Examples
30
+ --------
31
+ We can calculate a derivative value of a function.
32
+
33
+ >>> def f(x):
34
+ ... return 2 * x**2 + 3
35
+ >>> x = 3.0 # derivative point
36
+ >>> h = 0.1 # differential step
37
+ >>> Np = 3 # point number for central derivative
38
+ >>> weights = _central_diff_weights(Np) # weights for first derivative
39
+ >>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
40
+ >>> sum(w * v for (w, v) in zip(weights, vals))/h
41
+ 11.79999999999998
42
+
43
+ This value is close to the analytical solution:
44
+ f'(x) = 4x, so f'(3) = 12
45
+
46
+ References
47
+ ----------
48
+ .. [1] https://en.wikipedia.org/wiki/Finite_difference
49
+
50
+ """
51
+ if Np < ndiv + 1:
52
+ raise ValueError(
53
+ "Number of points must be at least the derivative order + 1."
54
+ )
55
+ if Np % 2 == 0:
56
+ raise ValueError("The number of points must be odd.")
57
+ from scipy import linalg
58
+
59
+ ho = Np >> 1
60
+ x = arange(-ho, ho + 1.0)
61
+ x = x[:, newaxis]
62
+ X = x**0.0
63
+ for k in range(1, Np):
64
+ X = hstack([X, x**k])
65
+ w = prod(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv]
66
+ return w
67
+
68
+
69
+ def _derivative(func, x0, dx=1.0, n=1, args=(), order=3):
70
+ """
71
+ Find the nth derivative of a function at a point.
72
+
73
+ Given a function, use a central difference formula with spacing `dx` to
74
+ compute the nth derivative at `x0`.
75
+
76
+ Parameters
77
+ ----------
78
+ func : function
79
+ Input function.
80
+ x0 : float
81
+ The point at which the nth derivative is found.
82
+ dx : float, optional
83
+ Spacing.
84
+ n : int, optional
85
+ Order of the derivative. Default is 1.
86
+ args : tuple, optional
87
+ Arguments
88
+ order : int, optional
89
+ Number of points to use, must be odd.
90
+
91
+ Notes
92
+ -----
93
+ Decreasing the step size too small can result in round-off error.
94
+
95
+ Examples
96
+ --------
97
+ >>> def f(x):
98
+ ... return x**3 + x**2
99
+ >>> _derivative(f, 1.0, dx=1e-6)
100
+ 4.9999999999217337
101
+
102
+ """
103
+ if order < n + 1:
104
+ raise ValueError(
105
+ "'order' (the number of points used to compute the derivative), "
106
+ "must be at least the derivative order 'n' + 1."
107
+ )
108
+ if order % 2 == 0:
109
+ raise ValueError(
110
+ "'order' (the number of points used to compute the derivative) "
111
+ "must be odd."
112
+ )
113
+ # pre-computed for n=1 and 2 and low-order for speed.
114
+ if n == 1:
115
+ if order == 3:
116
+ weights = array([-1, 0, 1]) / 2.0
117
+ elif order == 5:
118
+ weights = array([1, -8, 0, 8, -1]) / 12.0
119
+ elif order == 7:
120
+ weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
121
+ elif order == 9:
122
+ weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
123
+ else:
124
+ weights = _central_diff_weights(order, 1)
125
+ elif n == 2:
126
+ if order == 3:
127
+ weights = array([1, -2.0, 1])
128
+ elif order == 5:
129
+ weights = array([-1, 16, -30, 16, -1]) / 12.0
130
+ elif order == 7:
131
+ weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
132
+ elif order == 9:
133
+ weights = (
134
+ array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9])
135
+ / 5040.0
136
+ )
137
+ else:
138
+ weights = _central_diff_weights(order, 2)
139
+ else:
140
+ weights = _central_diff_weights(order, n)
141
+ val = 0.0
142
+ ho = order >> 1
143
+ for k in range(order):
144
+ val += weights[k] * func(x0 + (k - ho) * dx, *args)
145
+ return val / prod((dx,) * n, axis=0)
venv/lib/python3.10/site-packages/scipy/_lib/_fpumode.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_gcutils.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module for testing automatic garbage collection of objects
3
+
4
+ .. autosummary::
5
+ :toctree: generated/
6
+
7
+ set_gc_state - enable or disable garbage collection
8
+ gc_state - context manager for given state of garbage collector
9
+ assert_deallocated - context manager to check for circular references on object
10
+
11
+ """
12
+ import weakref
13
+ import gc
14
+
15
+ from contextlib import contextmanager
16
+ from platform import python_implementation
17
+
18
+ __all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
19
+
20
+
21
+ IS_PYPY = python_implementation() == 'PyPy'
22
+
23
+
24
class ReferenceError(AssertionError):
    """Raised when an object unexpectedly still has live references.

    Deliberately shadows the builtin ``ReferenceError`` inside this module.
    Deriving from ``AssertionError`` lets test frameworks treat the leak as
    an assertion failure.
    """
26
+
27
+
28
def set_gc_state(state):
    """Enable (``state=True``) or disable (``state=False``) the collector.

    A no-op when the garbage collector is already in the requested state.
    """
    if gc.isenabled() == state:
        return
    (gc.enable if state else gc.disable)()
36
+
37
+
38
@contextmanager
def gc_state(state):
    """ Context manager to set state of garbage collector to `state`

    Parameters
    ----------
    state : bool
        True for gc enabled, False for disabled

    Examples
    --------
    >>> with gc_state(False):
    ...     assert not gc.isenabled()
    >>> with gc_state(True):
    ...     assert gc.isenabled()
    """
    previous = gc.isenabled()
    if previous != state:
        (gc.enable if state else gc.disable)()
    yield
    # Restore the original state; note this is intentionally not in a
    # `finally`, matching the long-standing behavior of this helper.
    if gc.isenabled() != previous:
        (gc.enable if previous else gc.disable)()
58
+
59
+
60
@contextmanager
def assert_deallocated(func, *args, **kwargs):
    """Context manager to check that object is deallocated

    This is useful for checking that an object can be freed directly by
    reference counting, without requiring gc to break reference cycles.
    GC is disabled inside the context manager.

    This check is not available on PyPy.

    Parameters
    ----------
    func : callable
        Callable to create object to check
    \\*args : sequence
        positional arguments to `func` in order to create object to check
    \\*\\*kwargs : dict
        keyword arguments to `func` in order to create object to check

    Raises
    ------
    RuntimeError
        On PyPy, where refcount-based deallocation cannot be checked.
    ReferenceError
        When references to the object remain after the managed block.
    """
    if python_implementation() == 'PyPy':
        raise RuntimeError("assert_deallocated is unavailable on PyPy")

    # Disable GC so that only reference counting can free the object.
    was_enabled = gc.isenabled()
    if was_enabled:
        gc.disable()
    target = func(*args, **kwargs)
    ref = weakref.ref(target)
    yield target
    del target
    if ref() is not None:
        # The weakref is still live: something is keeping the object alive.
        # (GC state is deliberately left as-is on failure.)
        raise ReferenceError("Remaining reference(s) to object")
    if gc.isenabled() != was_enabled:
        (gc.enable if was_enabled else gc.disable)()
venv/lib/python3.10/site-packages/scipy/_lib/_pep440.py ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility to compare pep440 compatible version strings.
2
+
3
+ The LooseVersion and StrictVersion classes that distutils provides don't
4
+ work; they don't recognize anything like alpha/beta/rc/dev versions.
5
+ """
6
+
7
+ # Copyright (c) Donald Stufft and individual contributors.
8
+ # All rights reserved.
9
+
10
+ # Redistribution and use in source and binary forms, with or without
11
+ # modification, are permitted provided that the following conditions are met:
12
+
13
+ # 1. Redistributions of source code must retain the above copyright notice,
14
+ # this list of conditions and the following disclaimer.
15
+
16
+ # 2. Redistributions in binary form must reproduce the above copyright
17
+ # notice, this list of conditions and the following disclaimer in the
18
+ # documentation and/or other materials provided with the distribution.
19
+
20
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24
+ # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25
+ # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26
+ # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28
+ # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30
+ # POSSIBILITY OF SUCH DAMAGE.
31
+
32
+ import collections
33
+ import itertools
34
+ import re
35
+
36
+
37
+ __all__ = [
38
+ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
39
+ ]
40
+
41
+
42
+ # BEGIN packaging/_structures.py
43
+
44
+
45
class Infinity:
    """Singleton sentinel that compares greater than every other value.

    The class name is rebound to its lone instance below, mirroring
    packaging/_structures.py.
    """

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity


# Replace the class with its single instance.
Infinity = Infinity()
75
+
76
+
77
class NegativeInfinity:
    """Singleton sentinel that compares less than every other value.

    The class name is rebound to its lone instance below, mirroring
    packaging/_structures.py.
    """

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return True

    def __le__(self, other):
        return True

    def __gt__(self, other):
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity


# BEGIN packaging/version.py


# Replace the class with its single instance.
NegativeInfinity = NegativeInfinity()
110
+
111
# Lightweight container for the parsed components of a PEP 440 version:
# epoch (int), release (tuple of ints), and the dev/pre/post/local segments
# (None when absent).
_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)
115
+
116
+
117
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        parsed = Version(version)
    except InvalidVersion:
        # Not PEP 440 compliant; fall back to the legacy ordering scheme.
        parsed = LegacyVersion(version)
    return parsed
127
+
128
+
129
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """
133
+
134
+
135
+ class _BaseVersion:
136
+
137
+ def __hash__(self):
138
+ return hash(self._key)
139
+
140
+ def __lt__(self, other):
141
+ return self._compare(other, lambda s, o: s < o)
142
+
143
+ def __le__(self, other):
144
+ return self._compare(other, lambda s, o: s <= o)
145
+
146
+ def __eq__(self, other):
147
+ return self._compare(other, lambda s, o: s == o)
148
+
149
+ def __ge__(self, other):
150
+ return self._compare(other, lambda s, o: s >= o)
151
+
152
+ def __gt__(self, other):
153
+ return self._compare(other, lambda s, o: s > o)
154
+
155
+ def __ne__(self, other):
156
+ return self._compare(other, lambda s, o: s != o)
157
+
158
+ def _compare(self, other, method):
159
+ if not isinstance(other, _BaseVersion):
160
+ return NotImplemented
161
+
162
+ return method(self._key, other._key)
163
+
164
+
165
class LegacyVersion(_BaseVersion):
    """A version string that is not PEP 440 compliant.

    Ordered before every PEP 440 :class:`Version` via `_legacy_cmpkey`.
    """

    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __repr__(self):
        return f"<LegacyVersion({repr(str(self))})>"

    def __str__(self):
        return self._version

    @property
    def public(self):
        # Legacy versions have no local segment: the whole string is public.
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        # Local segments only exist for PEP 440 versions.
        return None

    @property
    def is_prerelease(self):
        return False

    @property
    def is_postrelease(self):
        return False
+
197
+
198
# Splits a legacy version string into digit runs, letter runs, dots, and
# dashes (the capturing group keeps the separators in the split output).
_legacy_version_component_re = re.compile(
    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)

# Normalizes alternate spellings of pre/post tags before comparison;
# "@" sorts before any letter, pushing dev releases earliest.
_legacy_version_replacement_map = {
    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
205
+
206
+
207
def _parse_version_parts(s):
    """Yield normalized, comparable tokens for a legacy version string."""
    for piece in _legacy_version_component_re.split(s):
        piece = _legacy_version_replacement_map.get(piece, piece)

        # Drop empty split artifacts and bare dots.
        if not piece or piece == ".":
            continue

        if piece[:1] in "0123456789":
            # Zero-pad numbers so string comparison matches numeric order.
            yield piece.zfill(8)
        else:
            # "*" sorts before digits, placing tags ahead of numeric parts.
            yield "*" + piece

    # ensure that alpha/beta/candidate are before final
    yield "*final"
222
+
223
+
224
def _legacy_cmpkey(version):
    """Build the sort key for a legacy (non-PEP 440) version string."""
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0, so this effectively sorts every
    # LegacyVersion (defacto setuptools scheme) before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior
    # to its adoption of the packaging library.
    tokens = []
    for token in _parse_version_parts(version.lower()):
        if token.startswith("*"):
            if token < "*final":
                # remove "-" before a prerelease tag
                while tokens and tokens[-1] == "*final-":
                    tokens.pop()

            # remove trailing zeros from each series of numeric parts
            while tokens and tokens[-1] == "00000000":
                tokens.pop()

        tokens.append(token)

    return epoch, tuple(tokens)
249
+
250
+
251
+ # Deliberately not anchored to the start and end of the string, to make it
252
+ # easier for 3rd party code to reuse
253
# Verbose-mode regex body matching a PEP 440 version identifier.
# Deliberately not anchored, so third-party code can embed it.
VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""
283
+
284
+
285
class Version(_BaseVersion):
    """A PEP 440 compliant version, parsed and comparable."""

    # Anchored form of the shared pattern; case-insensitive per PEP 440.
    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # Either the "-N" shorthand or an explicit post/rev/r tag.
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return f"<Version({repr(str(self))})>"

    def __str__(self):
        # Rebuild the canonical string segment by segment.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append(f"{self._version.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(f".post{self._version.post[1]}")

        # Development release
        if self._version.dev is not None:
            parts.append(f".dev{self._version.dev[1]}")

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything before the "+" (i.e. without the local segment).
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch and release only: no pre/post/dev/local segments.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append(f"{self._version.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # Returns the local segment string, or None (implicitly) if absent.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
+
391
+
392
+ def _parse_letter_version(letter, number):
393
+ if letter:
394
+ # We assume there is an implicit 0 in a pre-release if there is
395
+ # no numeral associated with it.
396
+ if number is None:
397
+ number = 0
398
+
399
+ # We normalize any letters to their lower-case form
400
+ letter = letter.lower()
401
+
402
+ # We consider some words to be alternate spellings of other words and
403
+ # in those cases we want to normalize the spellings to our preferred
404
+ # spelling.
405
+ if letter == "alpha":
406
+ letter = "a"
407
+ elif letter == "beta":
408
+ letter = "b"
409
+ elif letter in ["c", "pre", "preview"]:
410
+ letter = "rc"
411
+ elif letter in ["rev", "r"]:
412
+ letter = "post"
413
+
414
+ return letter, int(number)
415
+ if not letter and number:
416
+ # We assume that if we are given a number but not given a letter,
417
+ # then this is using the implicit post release syntax (e.g., 1.0-1)
418
+ letter = "post"
419
+
420
+ return letter, int(number)
421
+
422
+
423
+ _local_version_seperators = re.compile(r"[\._-]")
424
+
425
+
426
+ def _parse_local_version(local):
427
+ """
428
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
429
+ """
430
+ if local is not None:
431
+ return tuple(
432
+ part.lower() if not part.isdigit() else int(part)
433
+ for part in _local_version_seperators.split(local)
434
+ )
435
+
436
+
437
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the tuple used as a total-ordering sort key for a version.

    Missing segments are replaced with the ``Infinity``/``-Infinity``
    sentinels (defined elsewhere in this module) so that tuples of mixed
    presence/absence still compare per the PEP 440 rules.
    """
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non-zero, then take the rest,
    # re-reverse it back into the correct order, and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre-segment, but we _only_ want to do this
    # if there is no pre- or a post-segment. If we have one of those, then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post-segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alphanumeric segments sort before numeric segments
        # - Alphanumeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
venv/lib/python3.10/site-packages/scipy/_lib/_test_ccallback.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (23.2 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_call.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (49.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_test_deprecation_def.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (34.4 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/_testutils.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generic test utilities.
3
+
4
+ """
5
+
6
+ import os
7
+ import re
8
+ import sys
9
+ import numpy as np
10
+ import inspect
11
+ import sysconfig
12
+
13
+
14
+ __all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc', 'IS_MUSL']
15
+
16
+
17
+ IS_MUSL = False
18
+ # alternate way is
19
+ # from packaging.tags import sys_tags
20
+ # _tags = list(sys_tags())
21
+ # if 'musllinux' in _tags[0].platform:
22
+ _v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
23
+ if 'musl' in _v:
24
+ IS_MUSL = True
25
+
26
+
27
class FPUModeChangeWarning(RuntimeWarning):
    """Warning about FPU mode change"""
30
+
31
+
32
class PytestTester:
    """
    Run tests for this namespace

    ``scipy.test()`` runs tests for all of SciPy, with the default settings.
    When used from a submodule (e.g., ``scipy.cluster.test()``, only the tests
    for that namespace are run.

    Parameters
    ----------
    label : {'fast', 'full'}, optional
        Whether to run only the fast tests, or also those marked as slow.
        Default is 'fast'.
    verbose : int, optional
        Test output verbosity. Default is 1.
    extra_argv : list, optional
        Arguments to pass through to Pytest.
    doctests : bool, optional
        Whether to run doctests or not. Default is False.
    coverage : bool, optional
        Whether to run tests with code coverage measurements enabled.
        Default is False.
    tests : list of str, optional
        List of module names to run tests for. By default, uses the module
        from which the ``test`` function is called.
    parallel : int, optional
        Run tests in parallel with pytest-xdist, if number given is larger than
        1. Default is 1.

    """
    def __init__(self, module_name):
        # Dotted name of the namespace whose tests this instance runs.
        self.module_name = module_name

    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
                 coverage=False, tests=None, parallel=None):
        # Imported lazily so merely creating a PytestTester has no
        # pytest dependency.
        import pytest

        # The module must already be imported; its __path__ locates the
        # package directory for coverage measurement.
        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        pytest_args = ['--showlocals', '--tb=short']

        if doctests:
            raise ValueError("Doctests not supported")

        if extra_argv:
            pytest_args += list(extra_argv)

        # verbose=2 -> "-v", verbose=3 -> "-vv", etc.
        if verbose and int(verbose) > 1:
            pytest_args += ["-" + "v"*(int(verbose)-1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        # "fast" excludes tests marked slow; "full" runs everything;
        # any other label is passed through as a pytest marker expression.
        if label == "fast":
            pytest_args += ["-m", "not slow"]
        elif label != "full":
            pytest_args += ["-m", label]

        if tests is None:
            tests = [self.module_name]

        # Parallel runs require the pytest-xdist plugin; degrade to a
        # serial run with a warning when it is missing.
        if parallel is not None and parallel > 1:
            if _pytest_has_xdist():
                pytest_args += ['-n', str(parallel)]
            else:
                import warnings
                warnings.warn('Could not run tests in parallel because '
                              'pytest-xdist plugin is not available.',
                              stacklevel=2)

        pytest_args += ['--pyargs'] + list(tests)

        # pytest.main may call sys.exit; capture the code either way.
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            code = exc.code

        # True on a fully successful run.
        return (code == 0)
111
+
112
+
113
+ class _TestPythranFunc:
114
+ '''
115
+ These are situations that can be tested in our pythran tests:
116
+ - A function with multiple array arguments and then
117
+ other positional and keyword arguments.
118
+ - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`.
119
+ Note: list/tuple input is not yet tested!
120
+
121
+ `self.arguments`: A dictionary which key is the index of the argument,
122
+ value is tuple(array value, all supported dtypes)
123
+ `self.partialfunc`: A function used to freeze some non-array argument
124
+ that of no interests in the original function
125
+ '''
126
+ ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
127
+ ALL_FLOAT = [np.float32, np.float64]
128
+ ALL_COMPLEX = [np.complex64, np.complex128]
129
+
130
+ def setup_method(self):
131
+ self.arguments = {}
132
+ self.partialfunc = None
133
+ self.expected = None
134
+
135
+ def get_optional_args(self, func):
136
+ # get optional arguments with its default value,
137
+ # used for testing keywords
138
+ signature = inspect.signature(func)
139
+ optional_args = {}
140
+ for k, v in signature.parameters.items():
141
+ if v.default is not inspect.Parameter.empty:
142
+ optional_args[k] = v.default
143
+ return optional_args
144
+
145
+ def get_max_dtype_list_length(self):
146
+ # get the max supported dtypes list length in all arguments
147
+ max_len = 0
148
+ for arg_idx in self.arguments:
149
+ cur_len = len(self.arguments[arg_idx][1])
150
+ if cur_len > max_len:
151
+ max_len = cur_len
152
+ return max_len
153
+
154
+ def get_dtype(self, dtype_list, dtype_idx):
155
+ # get the dtype from dtype_list via index
156
+ # if the index is out of range, then return the last dtype
157
+ if dtype_idx > len(dtype_list)-1:
158
+ return dtype_list[-1]
159
+ else:
160
+ return dtype_list[dtype_idx]
161
+
162
+ def test_all_dtypes(self):
163
+ for type_idx in range(self.get_max_dtype_list_length()):
164
+ args_array = []
165
+ for arg_idx in self.arguments:
166
+ new_dtype = self.get_dtype(self.arguments[arg_idx][1],
167
+ type_idx)
168
+ args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
169
+ self.pythranfunc(*args_array)
170
+
171
+ def test_views(self):
172
+ args_array = []
173
+ for arg_idx in self.arguments:
174
+ args_array.append(self.arguments[arg_idx][0][::-1][::-1])
175
+ self.pythranfunc(*args_array)
176
+
177
+ def test_strided(self):
178
+ args_array = []
179
+ for arg_idx in self.arguments:
180
+ args_array.append(np.repeat(self.arguments[arg_idx][0],
181
+ 2, axis=0)[::2])
182
+ self.pythranfunc(*args_array)
183
+
184
+
185
+ def _pytest_has_xdist():
186
+ """
187
+ Check if the pytest-xdist plugin is installed, providing parallel tests
188
+ """
189
+ # Check xdist exists without importing, otherwise pytests emits warnings
190
+ from importlib.util import find_spec
191
+ return find_spec('xdist') is not None
192
+
193
+
194
def check_free_memory(free_mb):
    """
    Check *free_mb* of memory is available, otherwise do pytest.skip
    """
    import pytest

    try:
        # An explicit SCIPY_AVAILABLE_MEM override wins over runtime
        # detection; _parse_size understands '3k', '2Mib', bare MB, etc.
        mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
        msg = '{} MB memory required, but environment SCIPY_AVAILABLE_MEM={}'.format(
            free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
    except KeyError:
        # No override set: ask the system (psutil or /proc/meminfo).
        mem_free = _get_mem_available()
        if mem_free is None:
            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
                        "variable to free memory in MB to run the test.")
        msg = f'{free_mb} MB memory required, but {mem_free/1e6} MB available'

    # mem_free is in bytes, free_mb in megabytes.
    if mem_free < free_mb * 1e6:
        pytest.skip(msg)
213
+
214
+
215
+ def _parse_size(size_str):
216
+ suffixes = {'': 1e6,
217
+ 'b': 1.0,
218
+ 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
219
+ 'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
220
+ 'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
221
+ m = re.match(r'^\s*(\d+)\s*({})\s*$'.format('|'.join(suffixes.keys())),
222
+ size_str,
223
+ re.I)
224
+ if not m or m.group(2) not in suffixes:
225
+ raise ValueError("Invalid size string")
226
+
227
+ return float(m.group(1)) * suffixes[m.group(2)]
228
+
229
+
230
def _get_mem_available():
    """
    Get information about memory available, not counting swap.

    Returns the number of bytes available, or None when it cannot be
    determined on this platform.
    """
    # Preferred path: psutil gives a portable answer when installed.
    try:
        import psutil
        return psutil.virtual_memory().available
    except (ImportError, AttributeError):
        pass

    if sys.platform.startswith('linux'):
        # Fall back to /proc/meminfo; its values are reported in kB,
        # hence the * 1e3 conversion to bytes.
        info = {}
        with open('/proc/meminfo') as f:
            for line in f:
                p = line.split()
                info[p[0].strip(':').lower()] = float(p[1]) * 1e3

        if 'memavailable' in info:
            # Linux >= 3.14
            return info['memavailable']
        else:
            # Older kernels: approximate with free + page cache.
            return info['memfree'] + info['cached']

    return None
venv/lib/python3.10/site-packages/scipy/_lib/_threadsafety.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+
3
+ import scipy._lib.decorator
4
+
5
+
6
+ __all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
7
+
8
+
9
class ReentrancyError(RuntimeError):
    """Raised when a non-reentrant resource is entered a second time."""
11
+
12
+
13
class ReentrancyLock:
    """
    Threading lock that raises an exception for reentrant calls.

    Calls from different threads are serialized, and nested calls from the
    same thread result to an error.

    The object can be used as a context manager or to decorate functions
    via the decorate() method.

    """

    def __init__(self, err_msg):
        # An RLock lets the same thread re-acquire, which is exactly how
        # we detect (and reject) reentrancy via the _entered flag.
        self._rlock = threading.RLock()
        self._entered = False
        self._err_msg = err_msg

    def __enter__(self):
        self._rlock.acquire()
        if not self._entered:
            self._entered = True
            return
        # Same thread got through acquire() while already inside: bail.
        self._rlock.release()
        raise ReentrancyError(self._err_msg)

    def __exit__(self, type, value, traceback):
        self._entered = False
        self._rlock.release()

    def decorate(self, func):
        # Wrap `func` so every call runs inside this lock.
        def caller(func, *a, **kw):
            with self:
                return func(*a, **kw)
        return scipy._lib.decorator.decorate(func, caller)
46
+
47
+
48
def non_reentrant(err_msg=None):
    """
    Decorate a function with a threading lock and prevent reentrant calls.
    """
    def decorator(func):
        # Default message is derived from the wrapped function's name.
        message = err_msg
        if message is None:
            message = "%s is not re-entrant" % func.__name__
        return ReentrancyLock(message).decorate(func)
    return decorator
venv/lib/python3.10/site-packages/scipy/_lib/_tmpdirs.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ''' Contexts for *with* statement providing temporary directories
2
+ '''
3
+ import os
4
+ from contextlib import contextmanager
5
+ from shutil import rmtree
6
+ from tempfile import mkdtemp
7
+
8
+
9
+ @contextmanager
10
+ def tempdir():
11
+ """Create and return a temporary directory. This has the same
12
+ behavior as mkdtemp but can be used as a context manager.
13
+
14
+ Upon exiting the context, the directory and everything contained
15
+ in it are removed.
16
+
17
+ Examples
18
+ --------
19
+ >>> import os
20
+ >>> with tempdir() as tmpdir:
21
+ ... fname = os.path.join(tmpdir, 'example_file.txt')
22
+ ... with open(fname, 'wt') as fobj:
23
+ ... _ = fobj.write('a string\\n')
24
+ >>> os.path.exists(tmpdir)
25
+ False
26
+ """
27
+ d = mkdtemp()
28
+ yield d
29
+ rmtree(d)
30
+
31
+
32
@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    pwd = os.getcwd()
    d = mkdtemp()
    # try/finally so the original working directory is restored and the
    # temporary directory removed even when the with-body raises;
    # previously an exception left the process chdir'd into a leaked dir.
    try:
        os.chdir(d)
        yield d
    finally:
        os.chdir(pwd)
        rmtree(d)
55
+
56
+
57
@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    Useful when you want to use `in_tempdir` for the final test, but
    you are still debugging. For example, you may want to do this in the end:

    >>> with in_tempdir() as tmpdir:
    ...     # do something complicated which might break
    ...     pass

    But, indeed, the complicated thing does break, and meanwhile, the
    ``in_tempdir`` context manager wiped out the directory with the
    temporary files that you wanted for debugging. So, while debugging, you
    replace with something like:

    >>> with in_dir() as tmpdir: # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    You can then look at the temporary file outputs to debug what is happening,
    fix, and finally replace ``in_dir`` with ``in_tempdir`` again.
    """
    cwd = os.getcwd()
    if dir is None:
        # No target directory: stay where we are, nothing to restore.
        yield cwd
        return
    # try/finally so the original working directory is restored even when
    # the with-body raises; previously an exception left the cwd changed.
    try:
        os.chdir(dir)
        yield dir
    finally:
        os.chdir(cwd)
venv/lib/python3.10/site-packages/scipy/_lib/_util.py ADDED
@@ -0,0 +1,948 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from contextlib import contextmanager
3
+ import functools
4
+ import operator
5
+ import warnings
6
+ import numbers
7
+ from collections import namedtuple
8
+ import inspect
9
+ import math
10
+ from typing import (
11
+ Optional,
12
+ Union,
13
+ TYPE_CHECKING,
14
+ TypeVar,
15
+ )
16
+
17
+ import numpy as np
18
+ from scipy._lib._array_api import array_namespace
19
+
20
+
21
# Forward declarations so type checkers see one consistent binding
# regardless of which numpy-version branch runs below.
AxisError: type[Exception]
ComplexWarning: type[Warning]
VisibleDeprecationWarning: type[Warning]

# numpy >= 1.25 moved these to numpy.exceptions; earlier versions expose
# them at the top level and have no DTypePromotionError at all.
if np.lib.NumpyVersion(np.__version__) >= '1.25.0':
    from numpy.exceptions import (
        AxisError, ComplexWarning, VisibleDeprecationWarning,
        DTypePromotionError
    )
else:
    from numpy import (
        AxisError, ComplexWarning, VisibleDeprecationWarning  # noqa: F401
    )
    # Pre-1.25 numpy raised plain TypeError for promotion failures.
    DTypePromotionError = TypeError  # type: ignore

np_long: type
np_ulong: type

# numpy 2.0 reintroduces np.long/np.ulong; on dev builds accessing them
# may emit a FutureWarning, which we suppress during the probe.
if np.lib.NumpyVersion(np.__version__) >= "2.0.0.dev0":
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                r".*In the future `np\.long` will be defined as.*",
                FutureWarning,
            )
            np_long = np.long  # type: ignore[attr-defined]
            np_ulong = np.ulong  # type: ignore[attr-defined]
    except AttributeError:
        np_long = np.int_
        np_ulong = np.uint
else:
    np_long = np.int_
    np_ulong = np.uint

# Aliases used throughout scipy for "any int-like" / "any real scalar".
IntNumber = Union[int, np.integer]
DecimalNumber = Union[float, np.floating, np.integer]

copy_if_needed: Optional[bool]

# Value to pass as `copy=` to np.array/__array__ meaning "copy only if
# needed": None on numpy >= 2.0, False before; 2.0 dev builds are probed.
if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
    copy_if_needed = None
elif np.lib.NumpyVersion(np.__version__) < "1.28.0":
    copy_if_needed = False
else:
    # 2.0.0 dev versions, handle cases where copy may or may not exist
    try:
        np.array([1]).__array__(copy=None)  # type: ignore[call-overload]
        copy_if_needed = None
    except TypeError:
        copy_if_needed = False

# Since Generator was introduced in numpy 1.17, the following condition is
# needed for backward compatibility
if TYPE_CHECKING:
    SeedType = Optional[Union[IntNumber, np.random.Generator,
                              np.random.RandomState]]
    GeneratorType = TypeVar("GeneratorType", bound=Union[np.random.Generator,
                                                         np.random.RandomState])

try:
    from numpy.random import Generator as Generator
except ImportError:
    # Placeholder so isinstance checks against Generator stay valid on
    # numpy builds without numpy.random.Generator.
    class Generator:  # type: ignore[no-redef]
        pass
86
+
87
+
88
+ def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
89
+ """Return elements chosen from two possibilities depending on a condition
90
+
91
+ Equivalent to ``f(*arrays) if cond else fillvalue`` performed elementwise.
92
+
93
+ Parameters
94
+ ----------
95
+ cond : array
96
+ The condition (expressed as a boolean array).
97
+ arrays : tuple of array
98
+ Arguments to `f` (and `f2`). Must be broadcastable with `cond`.
99
+ f : callable
100
+ Where `cond` is True, output will be ``f(arr1[cond], arr2[cond], ...)``
101
+ fillvalue : object
102
+ If provided, value with which to fill output array where `cond` is
103
+ not True.
104
+ f2 : callable
105
+ If provided, output will be ``f2(arr1[cond], arr2[cond], ...)`` where
106
+ `cond` is not True.
107
+
108
+ Returns
109
+ -------
110
+ out : array
111
+ An array with elements from the output of `f` where `cond` is True
112
+ and `fillvalue` (or elements from the output of `f2`) elsewhere. The
113
+ returned array has data type determined by Type Promotion Rules
114
+ with the output of `f` and `fillvalue` (or the output of `f2`).
115
+
116
+ Notes
117
+ -----
118
+ ``xp.where(cond, x, fillvalue)`` requires explicitly forming `x` even where
119
+ `cond` is False. This function evaluates ``f(arr1[cond], arr2[cond], ...)``
120
+ onle where `cond` ``is True.
121
+
122
+ Examples
123
+ --------
124
+ >>> import numpy as np
125
+ >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
126
+ >>> def f(a, b):
127
+ ... return a*b
128
+ >>> _lazywhere(a > 2, (a, b), f, np.nan)
129
+ array([ nan, nan, 21., 32.])
130
+
131
+ """
132
+ xp = array_namespace(cond, *arrays)
133
+
134
+ if (f2 is fillvalue is None) or (f2 is not None and fillvalue is not None):
135
+ raise ValueError("Exactly one of `fillvalue` or `f2` must be given.")
136
+
137
+ args = xp.broadcast_arrays(cond, *arrays)
138
+ bool_dtype = xp.asarray([True]).dtype # numpy 1.xx doesn't have `bool`
139
+ cond, arrays = xp.astype(args[0], bool_dtype, copy=False), args[1:]
140
+
141
+ temp1 = xp.asarray(f(*(arr[cond] for arr in arrays)))
142
+
143
+ if f2 is None:
144
+ fillvalue = xp.asarray(fillvalue)
145
+ dtype = xp.result_type(temp1.dtype, fillvalue.dtype)
146
+ out = xp.full(cond.shape, fill_value=fillvalue, dtype=dtype)
147
+ else:
148
+ ncond = ~cond
149
+ temp2 = xp.asarray(f2(*(arr[ncond] for arr in arrays)))
150
+ dtype = xp.result_type(temp1, temp2)
151
+ out = xp.empty(cond.shape, dtype=dtype)
152
+ out[ncond] = temp2
153
+
154
+ out[cond] = temp1
155
+
156
+ return out
157
+
158
+
159
+ def _lazyselect(condlist, choicelist, arrays, default=0):
160
+ """
161
+ Mimic `np.select(condlist, choicelist)`.
162
+
163
+ Notice, it assumes that all `arrays` are of the same shape or can be
164
+ broadcasted together.
165
+
166
+ All functions in `choicelist` must accept array arguments in the order
167
+ given in `arrays` and must return an array of the same shape as broadcasted
168
+ `arrays`.
169
+
170
+ Examples
171
+ --------
172
+ >>> import numpy as np
173
+ >>> x = np.arange(6)
174
+ >>> np.select([x <3, x > 3], [x**2, x**3], default=0)
175
+ array([ 0, 1, 4, 0, 64, 125])
176
+
177
+ >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
178
+ array([ 0., 1., 4., 0., 64., 125.])
179
+
180
+ >>> a = -np.ones_like(x)
181
+ >>> _lazyselect([x < 3, x > 3],
182
+ ... [lambda x, a: x**2, lambda x, a: a * x**3],
183
+ ... (x, a), default=np.nan)
184
+ array([ 0., 1., 4., nan, -64., -125.])
185
+
186
+ """
187
+ arrays = np.broadcast_arrays(*arrays)
188
+ tcode = np.mintypecode([a.dtype.char for a in arrays])
189
+ out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
190
+ for func, cond in zip(choicelist, condlist):
191
+ if np.all(cond is False):
192
+ continue
193
+ cond, _ = np.broadcast_arrays(cond, arrays[0])
194
+ temp = tuple(np.extract(cond, arr) for arr in arrays)
195
+ np.place(out, cond, func(*temp))
196
+ return out
197
+
198
+
199
+ def _aligned_zeros(shape, dtype=float, order="C", align=None):
200
+ """Allocate a new ndarray with aligned memory.
201
+
202
+ Primary use case for this currently is working around a f2py issue
203
+ in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
204
+ not necessarily create arrays aligned up to it.
205
+
206
+ """
207
+ dtype = np.dtype(dtype)
208
+ if align is None:
209
+ align = dtype.alignment
210
+ if not hasattr(shape, '__len__'):
211
+ shape = (shape,)
212
+ size = functools.reduce(operator.mul, shape) * dtype.itemsize
213
+ buf = np.empty(size + align + 1, np.uint8)
214
+ offset = buf.__array_interface__['data'][0] % align
215
+ if offset != 0:
216
+ offset = align - offset
217
+ # Note: slices producing 0-size arrays do not necessarily change
218
+ # data pointer --- so we use and allocate size+1
219
+ buf = buf[offset:offset+size+1][:-1]
220
+ data = np.ndarray(shape, dtype, buf, order=order)
221
+ data.fill(0)
222
+ return data
223
+
224
+
225
+ def _prune_array(array):
226
+ """Return an array equivalent to the input array. If the input
227
+ array is a view of a much larger array, copy its contents to a
228
+ newly allocated array. Otherwise, return the input unchanged.
229
+ """
230
+ if array.base is not None and array.size < array.base.size // 2:
231
+ return array.copy()
232
+ return array
233
+
234
+
235
def float_factorial(n: int) -> float:
    """Compute the factorial and return as a float

    Returns infinity when result is too large for a double
    """
    # 171! is the first factorial that overflows IEEE double precision.
    if n < 171:
        return float(math.factorial(n))
    return np.inf
241
+
242
+
243
+ # copy-pasted from scikit-learn utils/validation.py
244
+ # change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
245
def check_random_state(seed):
    """Turn `seed` into a `np.random.RandomState` instance.

    Parameters
    ----------
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
        Random number generator.

    """
    if seed is None or seed is np.random:
        # The shared global RandomState singleton.
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        # A fresh stream deterministically seeded with the given integer.
        return np.random.RandomState(seed)
    if isinstance(seed, (np.random.RandomState, np.random.Generator)):
        # Already a generator object; pass it through untouched.
        return seed
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
273
+
274
+
275
+ def _asarray_validated(a, check_finite=True,
276
+ sparse_ok=False, objects_ok=False, mask_ok=False,
277
+ as_inexact=False):
278
+ """
279
+ Helper function for SciPy argument validation.
280
+
281
+ Many SciPy linear algebra functions do support arbitrary array-like
282
+ input arguments. Examples of commonly unsupported inputs include
283
+ matrices containing inf/nan, sparse matrix representations, and
284
+ matrices with complicated elements.
285
+
286
+ Parameters
287
+ ----------
288
+ a : array_like
289
+ The array-like input.
290
+ check_finite : bool, optional
291
+ Whether to check that the input matrices contain only finite numbers.
292
+ Disabling may give a performance gain, but may result in problems
293
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
294
+ Default: True
295
+ sparse_ok : bool, optional
296
+ True if scipy sparse matrices are allowed.
297
+ objects_ok : bool, optional
298
+ True if arrays with dype('O') are allowed.
299
+ mask_ok : bool, optional
300
+ True if masked arrays are allowed.
301
+ as_inexact : bool, optional
302
+ True to convert the input array to a np.inexact dtype.
303
+
304
+ Returns
305
+ -------
306
+ ret : ndarray
307
+ The converted validated array.
308
+
309
+ """
310
+ if not sparse_ok:
311
+ import scipy.sparse
312
+ if scipy.sparse.issparse(a):
313
+ msg = ('Sparse matrices are not supported by this function. '
314
+ 'Perhaps one of the scipy.sparse.linalg functions '
315
+ 'would work instead.')
316
+ raise ValueError(msg)
317
+ if not mask_ok:
318
+ if np.ma.isMaskedArray(a):
319
+ raise ValueError('masked arrays are not supported')
320
+ toarray = np.asarray_chkfinite if check_finite else np.asarray
321
+ a = toarray(a)
322
+ if not objects_ok:
323
+ if a.dtype is np.dtype('O'):
324
+ raise ValueError('object arrays are not supported')
325
+ if as_inexact:
326
+ if not np.issubdtype(a.dtype, np.inexact):
327
+ a = toarray(a, dtype=np.float64)
328
+ return a
329
+
330
+
331
+ def _validate_int(k, name, minimum=None):
332
+ """
333
+ Validate a scalar integer.
334
+
335
+ This function can be used to validate an argument to a function
336
+ that expects the value to be an integer. It uses `operator.index`
337
+ to validate the value (so, for example, k=2.0 results in a
338
+ TypeError).
339
+
340
+ Parameters
341
+ ----------
342
+ k : int
343
+ The value to be validated.
344
+ name : str
345
+ The name of the parameter.
346
+ minimum : int, optional
347
+ An optional lower bound.
348
+ """
349
+ try:
350
+ k = operator.index(k)
351
+ except TypeError:
352
+ raise TypeError(f'{name} must be an integer.') from None
353
+ if minimum is not None and k < minimum:
354
+ raise ValueError(f'{name} must be an integer not less '
355
+ f'than {minimum}') from None
356
+ return k
357
+
358
+
359
+ # Add a replacement for inspect.getfullargspec()/
360
+ # The version below is borrowed from Django,
361
+ # https://github.com/django/django/pull/4846.
362
+
363
+ # Note an inconsistency between inspect.getfullargspec(func) and
364
+ # inspect.signature(func). If `func` is a bound method, the latter does *not*
365
+ # list `self` as a first argument, while the former *does*.
366
+ # Hence, cook up a common ground replacement: `getfullargspec_no_self` which
367
+ # mimics `inspect.getfullargspec` but does not list `self`.
368
+ #
369
+ # This way, the caller code does not need to know whether it uses a legacy
370
+ # .getfullargspec or a bright and shiny .signature.
371
+
372
# Result type of `getfullargspec_no_self` below; mirrors the named tuple
# returned by inspect.getfullargspec().
FullArgSpec = namedtuple('FullArgSpec',
                         ['args', 'varargs', 'varkw', 'defaults',
                          'kwonlyargs', 'kwonlydefaults', 'annotations'])
375
+
376
+
377
def getfullargspec_no_self(func):
    """inspect.getfullargspec replacement using inspect.signature.

    If func is a bound method, do not list the 'self' parameter.

    Parameters
    ----------
    func : callable
        A callable to inspect

    Returns
    -------
    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                              kwonlydefaults, annotations)

    NOTE: if the first argument of `func` is self, it is *not*, I repeat
    *not*, included in fullargspec.args.
    This is done for consistency between inspect.getargspec() under
    Python 2.x, and inspect.signature() under Python 3.x.

    """
    P = inspect.Parameter
    parameters = inspect.signature(func).parameters.values()

    positional = []
    pos_defaults = []
    star_args = None
    star_kwargs = None
    kw_only = []
    kw_defaults = {}
    annotations = {}

    # Single pass over the parameters, classifying each by kind.
    for p in parameters:
        if p.annotation is not p.empty:
            annotations[p.name] = p.annotation
        if p.kind in (P.POSITIONAL_OR_KEYWORD, P.POSITIONAL_ONLY):
            positional.append(p.name)
            # Only POSITIONAL_OR_KEYWORD defaults go into `defaults`,
            # matching inspect.getfullargspec's historical behavior.
            if p.kind == P.POSITIONAL_OR_KEYWORD and p.default is not p.empty:
                pos_defaults.append(p.default)
        elif p.kind == P.VAR_POSITIONAL:
            star_args = p.name
        elif p.kind == P.VAR_KEYWORD:
            star_kwargs = p.name
        elif p.kind == P.KEYWORD_ONLY:
            kw_only.append(p.name)
            if p.default is not p.empty:
                kw_defaults[p.name] = p.default

    # Empty defaults collapse to None, as in inspect.getfullargspec.
    return FullArgSpec(positional, star_args, star_kwargs,
                       tuple(pos_defaults) or None, kw_only,
                       kw_defaults or None, annotations)
430
+
431
+
432
+ class _FunctionWrapper:
433
+ """
434
+ Object to wrap user's function, allowing picklability
435
+ """
436
+ def __init__(self, f, args):
437
+ self.f = f
438
+ self.args = [] if args is None else args
439
+
440
+ def __call__(self, x):
441
+ return self.f(x, *self.args)
442
+
443
+
444
class MapWrapper:
    """
    Parallelisation wrapper for working with map-like callables, such as
    `multiprocessing.Pool.map`.

    Parameters
    ----------
    pool : int or map-like callable
        If `pool` is an integer, then it specifies the number of threads to
        use for parallelization. If ``int(pool) == 1``, then no parallel
        processing is used and the map builtin is used.
        If ``pool == -1``, then the pool will utilize all available CPUs.
        If `pool` is a map-like callable that follows the same
        calling sequence as the built-in map function, then this callable is
        used for parallelization.
    """
    def __init__(self, pool=1):
        # `pool` holds the Pool object (or the user-supplied callable);
        # `_mapfunc` is what __call__ actually invokes.  Default: serial map.
        self.pool = None
        self._mapfunc = map
        # _own_pool records whether *we* created the Pool and are therefore
        # responsible for closing/terminating/joining it.
        self._own_pool = False

        if callable(pool):
            self.pool = pool
            self._mapfunc = self.pool
        else:
            from multiprocessing import Pool
            # user supplies a number
            if int(pool) == -1:
                # use as many processors as possible
                self.pool = Pool()
                self._mapfunc = self.pool.map
                self._own_pool = True
            elif int(pool) == 1:
                pass
            elif int(pool) > 1:
                # use the number of processors requested
                self.pool = Pool(processes=int(pool))
                self._mapfunc = self.pool.map
                self._own_pool = True
            else:
                raise RuntimeError("Number of workers specified must be -1,"
                                   " an int >= 1, or an object with a 'map' "
                                   "method")

    def __enter__(self):
        return self

    def terminate(self):
        # Only manage pools we created; a user-supplied pool/callable is the
        # caller's responsibility.
        if self._own_pool:
            self.pool.terminate()

    def join(self):
        if self._own_pool:
            self.pool.join()

    def close(self):
        if self._own_pool:
            self.pool.close()

    def __exit__(self, exc_type, exc_value, traceback):
        # close() stops new task submission, terminate() stops the workers;
        # both are called so worker processes do not outlive the context.
        if self._own_pool:
            self.pool.close()
            self.pool.terminate()

    def __call__(self, func, iterable):
        # only accept one iterable because that's all Pool.map accepts
        try:
            return self._mapfunc(func, iterable)
        except TypeError as e:
            # wrong number of arguments
            raise TypeError("The map-like callable must be of the"
                            " form f(func, iterable)") from e
516
+
517
+
518
def rng_integers(gen, low, high=None, size=None, dtype='int64',
                 endpoint=False):
    """
    Return random integers from low (inclusive) to high (exclusive), or if
    endpoint=True, low (inclusive) to high (inclusive). Replaces
    `RandomState.randint` (with endpoint=False) and
    `RandomState.random_integers` (with endpoint=True).

    Return random integers from the "discrete uniform" distribution of the
    specified dtype. If high is None (the default), then results are from
    0 to low.

    Parameters
    ----------
    gen : {None, np.random.RandomState, np.random.Generator}
        Random number generator. If None, then the np.random.RandomState
        singleton is used.
    low : int or array-like of ints
        Lowest (signed) integers to be drawn from the distribution (unless
        high=None, in which case this parameter is 0 and this value is used
        for high).
    high : int or array-like of ints
        If provided, one above the largest (signed) integer to be drawn from
        the distribution (see above for behavior if high=None). If array-like,
        must contain integer values.
    size : array-like of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. Default is None, in which case a single value is
        returned.
    dtype : {str, dtype}, optional
        Desired dtype of the result. The default value is 'int64'.
    endpoint : bool, optional
        If True, sample from the interval [low, high] instead of the default
        [low, high). Defaults to False.

    Returns
    -------
    out: int or ndarray of ints
        size-shaped array of random integers from the appropriate
        distribution, or a single such random int if size not provided.
    """
    # New-style Generator supports endpoint natively.
    if isinstance(gen, Generator):
        return gen.integers(low, high=high, size=size, dtype=dtype,
                            endpoint=endpoint)

    # Legacy RandomState path; None means the np.random singleton.
    rng = np.random.mtrand._rand if gen is None else gen
    if not endpoint:
        # randint's native half-open convention [low, high)
        return rng.randint(low, high=high, size=size, dtype=dtype)
    # Inclusive of endpoint.  low/high may be arrays, so shift via new
    # expressions rather than modifying in place.
    if high is None:
        return rng.randint(low + 1, size=size, dtype=dtype)
    return rng.randint(low, high=high + 1, size=size, dtype=dtype)
580
+
581
+
582
+ @contextmanager
583
+ def _fixed_default_rng(seed=1638083107694713882823079058616272161):
584
+ """Context with a fixed np.random.default_rng seed."""
585
+ orig_fun = np.random.default_rng
586
+ np.random.default_rng = lambda seed=seed: orig_fun(seed)
587
+ try:
588
+ yield
589
+ finally:
590
+ np.random.default_rng = orig_fun
591
+
592
+
593
+ def _rng_html_rewrite(func):
594
+ """Rewrite the HTML rendering of ``np.random.default_rng``.
595
+
596
+ This is intended to decorate
597
+ ``numpydoc.docscrape_sphinx.SphinxDocString._str_examples``.
598
+
599
+ Examples are only run by Sphinx when there are plot involved. Even so,
600
+ it does not change the result values getting printed.
601
+ """
602
+ # hexadecimal or number seed, case-insensitive
603
+ pattern = re.compile(r'np.random.default_rng\((0x[0-9A-F]+|\d+)\)', re.I)
604
+
605
+ def _wrapped(*args, **kwargs):
606
+ res = func(*args, **kwargs)
607
+ lines = [
608
+ re.sub(pattern, 'np.random.default_rng()', line)
609
+ for line in res
610
+ ]
611
+ return lines
612
+
613
+ return _wrapped
614
+
615
+
616
+ def _argmin(a, keepdims=False, axis=None):
617
+ """
618
+ argmin with a `keepdims` parameter.
619
+
620
+ See https://github.com/numpy/numpy/issues/8710
621
+
622
+ If axis is not None, a.shape[axis] must be greater than 0.
623
+ """
624
+ res = np.argmin(a, axis=axis)
625
+ if keepdims and axis is not None:
626
+ res = np.expand_dims(res, axis=axis)
627
+ return res
628
+
629
+
630
def _first_nonnan(a, axis):
    """
    Return the first non-nan value along the given axis.

    If a slice is all nan, nan is returned for that slice.

    The shape of the return value corresponds to ``keepdims=True``.

    Examples
    --------
    >>> import numpy as np
    >>> nan = np.nan
    >>> a = np.array([[ 3.,  3., nan,  3.],
                      [ 1., nan,  2.,  4.],
                      [nan, nan,  9., -1.],
                      [nan,  5.,  4.,  3.],
                      [ 2.,  2.,  2.,  2.],
                      [nan, nan, nan, nan]])
    >>> _first_nonnan(a, axis=0)
    array([[3., 3., 2., 3.]])
    >>> _first_nonnan(a, axis=1)
    array([[ 3.],
           [ 1.],
           [ 9.],
           [ 5.],
           [ 2.],
           [nan]])
    """
    # np.isnan(a) is False (0) at non-nan entries, so argmin of the mask
    # yields the index of the first non-nan value along `axis` (index 0 if
    # the whole slice is nan, which then extracts a nan -- as documented).
    mask = np.isnan(a)
    first = _argmin(mask, axis=axis, keepdims=True)
    return np.take_along_axis(a, first, axis=axis)
660
+
661
+
662
def _nan_allsame(a, axis, keepdims=False):
    """
    Determine if the values along an axis are all the same.

    nan values are ignored.

    `a` must be a numpy array.

    `axis` is assumed to be normalized; that is, 0 <= axis < a.ndim.

    For an axis of length 0, the result is True.  That is, we adopt the
    convention that ``allsame([])`` is True. (There are no values in the
    input that are different.)

    `True` is returned for slices that are all nan--not because all the
    values are the same, but because this is equivalent to ``allsame([])``.

    Examples
    --------
    >>> from numpy import nan, array
    >>> a = array([[ 3.,  3., nan,  3.],
    ...            [ 1., nan,  2.,  4.],
    ...            [nan, nan,  9., -1.],
    ...            [nan,  5.,  4.,  3.],
    ...            [ 2.,  2.,  2.,  2.],
    ...            [nan, nan, nan, nan]])
    >>> _nan_allsame(a, axis=1, keepdims=True)
    array([[ True],
           [False],
           [False],
           [False],
           [ True],
           [ True]])
    """
    if axis is None:
        if a.size == 0:
            return True
        # Flatten and treat as a single axis-0 problem.
        a = a.ravel()
        axis = 0
    elif a.shape[axis] == 0:
        # Empty axis: vacuously all-same.  Build the result shape (with or
        # without the kept axis) filled with True.
        out_shape = a.shape[:axis] + (1,) * keepdims + a.shape[axis + 1:]
        return np.full(out_shape, fill_value=True, dtype=bool)
    # A slice is all-same (ignoring nans) iff every entry equals the first
    # non-nan value or is itself nan.
    first = _first_nonnan(a, axis=axis)
    return ((first == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)
708
+
709
+
710
+ def _contains_nan(a, nan_policy='propagate', use_summation=True,
711
+ policies=None):
712
+ if not isinstance(a, np.ndarray):
713
+ use_summation = False # some array_likes ignore nans (e.g. pandas)
714
+ if policies is None:
715
+ policies = ['propagate', 'raise', 'omit']
716
+ if nan_policy not in policies:
717
+ raise ValueError("nan_policy must be one of {%s}" %
718
+ ', '.join("'%s'" % s for s in policies))
719
+
720
+ if np.issubdtype(a.dtype, np.inexact):
721
+ # The summation method avoids creating a (potentially huge) array.
722
+ if use_summation:
723
+ with np.errstate(invalid='ignore', over='ignore'):
724
+ contains_nan = np.isnan(np.sum(a))
725
+ else:
726
+ contains_nan = np.isnan(a).any()
727
+ elif np.issubdtype(a.dtype, object):
728
+ contains_nan = False
729
+ for el in a.ravel():
730
+ # isnan doesn't work on non-numeric elements
731
+ if np.issubdtype(type(el), np.number) and np.isnan(el):
732
+ contains_nan = True
733
+ break
734
+ else:
735
+ # Only `object` and `inexact` arrays can have NaNs
736
+ contains_nan = False
737
+
738
+ if contains_nan and nan_policy == 'raise':
739
+ raise ValueError("The input contains nan values")
740
+
741
+ return contains_nan, nan_policy
742
+
743
+
744
+ def _rename_parameter(old_name, new_name, dep_version=None):
745
+ """
746
+ Generate decorator for backward-compatible keyword renaming.
747
+
748
+ Apply the decorator generated by `_rename_parameter` to functions with a
749
+ recently renamed parameter to maintain backward-compatibility.
750
+
751
+ After decoration, the function behaves as follows:
752
+ If only the new parameter is passed into the function, behave as usual.
753
+ If only the old parameter is passed into the function (as a keyword), raise
754
+ a DeprecationWarning if `dep_version` is provided, and behave as usual
755
+ otherwise.
756
+ If both old and new parameters are passed into the function, raise a
757
+ DeprecationWarning if `dep_version` is provided, and raise the appropriate
758
+ TypeError (function got multiple values for argument).
759
+
760
+ Parameters
761
+ ----------
762
+ old_name : str
763
+ Old name of parameter
764
+ new_name : str
765
+ New name of parameter
766
+ dep_version : str, optional
767
+ Version of SciPy in which old parameter was deprecated in the format
768
+ 'X.Y.Z'. If supplied, the deprecation message will indicate that
769
+ support for the old parameter will be removed in version 'X.Y+2.Z'
770
+
771
+ Notes
772
+ -----
773
+ Untested with functions that accept *args. Probably won't work as written.
774
+
775
+ """
776
+ def decorator(fun):
777
+ @functools.wraps(fun)
778
+ def wrapper(*args, **kwargs):
779
+ if old_name in kwargs:
780
+ if dep_version:
781
+ end_version = dep_version.split('.')
782
+ end_version[1] = str(int(end_version[1]) + 2)
783
+ end_version = '.'.join(end_version)
784
+ message = (f"Use of keyword argument `{old_name}` is "
785
+ f"deprecated and replaced by `{new_name}`. "
786
+ f"Support for `{old_name}` will be removed "
787
+ f"in SciPy {end_version}.")
788
+ warnings.warn(message, DeprecationWarning, stacklevel=2)
789
+ if new_name in kwargs:
790
+ message = (f"{fun.__name__}() got multiple values for "
791
+ f"argument now known as `{new_name}`")
792
+ raise TypeError(message)
793
+ kwargs[new_name] = kwargs.pop(old_name)
794
+ return fun(*args, **kwargs)
795
+ return wrapper
796
+ return decorator
797
+
798
+
799
+ def _rng_spawn(rng, n_children):
800
+ # spawns independent RNGs from a parent RNG
801
+ bg = rng._bit_generator
802
+ ss = bg._seed_seq
803
+ child_rngs = [np.random.Generator(type(bg)(child_ss))
804
+ for child_ss in ss.spawn(n_children)]
805
+ return child_rngs
806
+
807
+
808
+ def _get_nan(*data):
809
+ # Get NaN of appropriate dtype for data
810
+ data = [np.asarray(item) for item in data]
811
+ try:
812
+ dtype = np.result_type(*data, np.half) # must be a float16 at least
813
+ except DTypePromotionError:
814
+ # fallback to float64
815
+ return np.array(np.nan, dtype=np.float64)[()]
816
+ return np.array(np.nan, dtype=dtype)[()]
817
+
818
+
819
def normalize_axis_index(axis, ndim):
    """Normalize `axis` into the range ``[0, ndim)``.

    Negative axes count from the end; an out-of-range axis raises AxisError.
    """
    if -ndim <= axis < ndim:
        return axis + ndim if axis < 0 else axis
    raise AxisError(
        f"axis {axis} is out of bounds for array of dimension {ndim}")
828
+
829
+
830
+ def _call_callback_maybe_halt(callback, res):
831
+ """Call wrapped callback; return True if algorithm should stop.
832
+
833
+ Parameters
834
+ ----------
835
+ callback : callable or None
836
+ A user-provided callback wrapped with `_wrap_callback`
837
+ res : OptimizeResult
838
+ Information about the current iterate
839
+
840
+ Returns
841
+ -------
842
+ halt : bool
843
+ True if minimization should stop
844
+
845
+ """
846
+ if callback is None:
847
+ return False
848
+ try:
849
+ callback(res)
850
+ return False
851
+ except StopIteration:
852
+ callback.stop_iteration = True
853
+ return True
854
+
855
+
856
+ class _RichResult(dict):
857
+ """ Container for multiple outputs with pretty-printing """
858
+ def __getattr__(self, name):
859
+ try:
860
+ return self[name]
861
+ except KeyError as e:
862
+ raise AttributeError(name) from e
863
+
864
+ __setattr__ = dict.__setitem__
865
+ __delattr__ = dict.__delitem__
866
+
867
+ def __repr__(self):
868
+ order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl',
869
+ 'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin',
870
+ 'converged', 'flag', 'function_calls', 'iterations',
871
+ 'root']
872
+ order_keys = getattr(self, '_order_keys', order_keys)
873
+ # 'slack', 'con' are redundant with residuals
874
+ # 'crossover_nit' is probably not interesting to most users
875
+ omit_keys = {'slack', 'con', 'crossover_nit', '_order_keys'}
876
+
877
+ def key(item):
878
+ try:
879
+ return order_keys.index(item[0].lower())
880
+ except ValueError: # item not in list
881
+ return np.inf
882
+
883
+ def omit_redundant(items):
884
+ for item in items:
885
+ if item[0] in omit_keys:
886
+ continue
887
+ yield item
888
+
889
+ def item_sorter(d):
890
+ return sorted(omit_redundant(d.items()), key=key)
891
+
892
+ if self.keys():
893
+ return _dict_formatter(self, sorter=item_sorter)
894
+ else:
895
+ return self.__class__.__name__ + "()"
896
+
897
+ def __dir__(self):
898
+ return list(self.keys())
899
+
900
+
901
+ def _indenter(s, n=0):
902
+ """
903
+ Ensures that lines after the first are indented by the specified amount
904
+ """
905
+ split = s.split("\n")
906
+ indent = " "*n
907
+ return ("\n" + indent).join(split)
908
+
909
+
910
+ def _float_formatter_10(x):
911
+ """
912
+ Returns a string representation of a float with exactly ten characters
913
+ """
914
+ if np.isposinf(x):
915
+ return " inf"
916
+ elif np.isneginf(x):
917
+ return " -inf"
918
+ elif np.isnan(x):
919
+ return " nan"
920
+ return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)
921
+
922
+
923
def _dict_formatter(d, n=0, mplus=1, sorter=None):
    """
    Pretty printer for dictionaries

    `n` keeps track of the starting indentation;
    lines are indented by this much after a line break.
    `mplus` is additional left padding applied to keys
    """
    if not isinstance(d, dict):
        # Leaf value: render via numpy's printer.  NumPy arrays print with
        # linewidth=76 by default; `n` is the indent at which a line begins
        # printing, so it is subtracted to avoid exceeding 76 characters
        # total.  `edgeitems`/`threshold` control array truncation; these
        # values tend to work well with OptimizeResult.
        with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
                             formatter={'float_kind': _float_formatter_10}):
            return str(d)
    width = max(map(len, list(d.keys()))) + mplus  # width to print keys
    rendered = []
    for k, v in sorter(d):
        # Right-justify the key to `width`; continuation lines of the value
        # are indented past "key: " (+2 accounts for ': ').
        body = _dict_formatter(v, width + n + 2, 0, sorter)
        rendered.append(k.rjust(width) + ': ' + _indenter(body, width + 2))
    return '\n'.join(rendered)
venv/lib/python3.10/site-packages/scipy/_lib/decorator.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ######################### LICENSE ############################ #
2
+
3
+ # Copyright (c) 2005-2015, Michele Simionato
4
+ # All rights reserved.
5
+
6
+ # Redistribution and use in source and binary forms, with or without
7
+ # modification, are permitted provided that the following conditions are
8
+ # met:
9
+
10
+ # Redistributions of source code must retain the above copyright
11
+ # notice, this list of conditions and the following disclaimer.
12
+ # Redistributions in bytecode form must reproduce the above copyright
13
+ # notice, this list of conditions and the following disclaimer in
14
+ # the documentation and/or other materials provided with the
15
+ # distribution.
16
+
17
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+ # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+ # HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22
+ # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23
+ # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24
+ # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25
+ # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26
+ # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27
+ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28
+ # DAMAGE.
29
+
30
+ """
31
+ Decorator module, see https://pypi.python.org/pypi/decorator
32
+ for the documentation.
33
+ """
34
+ import re
35
+ import sys
36
+ import inspect
37
+ import operator
38
+ import itertools
39
+ import collections
40
+
41
+ from inspect import getfullargspec
42
+
43
+ __version__ = '4.0.5'
44
+
45
+
46
def get_init(cls):
    """Return the ``__init__`` method of the given class."""
    init = cls.__init__
    return init
48
+
49
+
50
# getargspec has been deprecated in Python 3.5
# Four-field reduction of inspect.FullArgSpec, returned by `getargspec`
# below as a drop-in replacement for the removed inspect.getargspec.
ArgSpec = collections.namedtuple(
    'ArgSpec', 'args varargs varkw defaults')
53
+
54
+
55
def getargspec(f):
    """A replacement for inspect.getargspec"""
    full = getfullargspec(f)
    # Drop the keyword-only/annotation fields that the legacy API lacked.
    return ArgSpec(full.args, full.varargs, full.varkw, full.defaults)
59
+
60
+
61
# Matches "def NAME(" at the start of generated source; FunctionMaker.make
# uses the captured group to extract the generated function's name.
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
62
+
63
+
64
# basic functionality
class FunctionMaker:
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict, and
    methods update and make.
    """

    # Atomic get-and-increment provided by the GIL
    _compile_count = itertools.count()

    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                # Expose each positional arg as arg0, arg1, ... for use in
                # string templates expanded by make().
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                # `signature` is the declaration form; `shortsignature` is
                # the call form (e.g. 'a=None' vs 'a=a' for kw-only args).
                allargs = list(self.args)
                allshortargs = list(self.args)
                if self.varargs:
                    allargs.append('*' + self.varargs)
                    allshortargs.append('*' + self.varargs)
                elif self.kwonlyargs:
                    allargs.append('*')  # single star syntax
                for a in self.kwonlyargs:
                    allargs.append('%s=None' % a)
                    allshortargs.append(f'{a}={a}')
                if self.varkw:
                    allargs.append('**' + self.varkw)
                    allshortargs.append('**' + self.varkw)
                self.signature = ', '.join(allargs)
                self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non-function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            # Frame 3 is the caller of the public decorator machinery; its
            # module name becomes the fallback __module__.
            frame = sys._getframe(3)
        except AttributeError:  # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # _func_ and _call_ are reserved for the injected wrapped
            # function and caller; user names must not collide with them.
            if n in ('_func_', '_call_'):
                raise NameError(f'{n} is overridden in\n{src}')
        if not src.endswith('\n'):  # add a newline just for safety
            src += '\n'  # this is needed in old versions of Python

        # Ensure each generated function has a unique filename for profilers
        # (such as cProfile) that depend on the tuple of (<filename>,
        # <definition line>, <function name>) being unique.
        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
        try:
            code = compile(src, filename, 'single')
            exec(code, evaldict)
        except:  # noqa: E722
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature, and body.
        evaldict is the evaluation dictionary. If addsource is true, an
        attribute __source__ is added to the result. The attributes attrs
        are added, if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level under the generated `def` line.
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
198
+
199
+
200
def decorate(func, caller):
    """
    decorate(func, caller) decorates a function using a caller.
    """
    # Evaluate the generated wrapper in a copy of func's globals, with the
    # caller and the wrapped function injected under the reserved names.
    namespace = func.__globals__.copy()
    namespace['_call_'] = caller
    namespace['_func_'] = func
    wrapper = FunctionMaker.create(
        func, "return _call_(_func_, %(shortsignature)s)",
        namespace, __wrapped__=func)
    if hasattr(func, '__qualname__'):
        wrapper.__qualname__ = func.__qualname__
    return wrapper
213
+
214
+
215
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    # The caller may be a class, a plain function, or a callable object;
    # extract a name, the underlying function, and a docstring from each.
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        callerfunc = get_init(caller)
        doc = (f'decorator({caller.__name__}) converts functions/generators into '
               f'factories of {caller.__name__} objects')
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        callerfunc = caller
        doc = caller.__doc__
    else:  # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        callerfunc = caller.__call__.__func__
        doc = caller.__call__.__doc__
    evaldict = callerfunc.__globals__.copy()
    evaldict['_call_'] = caller
    evaldict['_decorate_'] = decorate
    # Generate a one-argument decorator named after the caller that simply
    # applies `decorate` with the captured caller.
    return FunctionMaker.create(
        '%s(func)' % name, 'return _decorate_(func, _call_)',
        evaldict, doc=doc, module=caller.__module__,
        __wrapped__=caller)
244
+
245
+
246
+ # ####################### contextmanager ####################### #
247
+
248
+ try: # Python >= 3.2
249
+ from contextlib import _GeneratorContextManager
250
+ except ImportError: # Python >= 2.5
251
+ from contextlib import GeneratorContextManager as _GeneratorContextManager
252
+
253
+
254
class ContextManager(_GeneratorContextManager):
    def __call__(self, func):
        """Context manager decorator"""
        # Wrap `func` so its whole body runs inside `self` (the context
        # manager), preserving func's signature.
        return FunctionMaker.create(
            func, "with _self_: return _func_(%(shortsignature)s)",
            dict(_self_=self, _func_=func), __wrapped__=func)


# _GeneratorContextManager.__init__ changed its signature across Python
# versions; introspect it and patch ContextManager.__init__ to match.
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g(*a, **k))
    ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
    pass
elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g, a, k)
    ContextManager.__init__ = __init__

contextmanager = decorator(ContextManager)
276
+
277
+
278
+ # ############################ dispatch_on ############################ #
279
+
280
def append(a, vancestors):
    """
    Append ``a`` to the list of the virtual ancestors, unless it is already
    included.
    """
    should_add = True
    for idx, ancestor in enumerate(vancestors):
        if issubclass(ancestor, a):
            # An equally or more specific class is already present.
            should_add = False
            break
        if issubclass(a, ancestor):
            # `a` is more specific: replace the existing entry in place and
            # keep scanning for further replaceable entries.
            vancestors[idx] = a
            should_add = False
    if should_add:
        vancestors.append(a)
295
+
296
+
297
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)

    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))

    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""

        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)

        # Maps tuples of registered types -> implementation functions.
        typemap = {}

        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # A registered type is a *virtual* ancestor of t if t
                    # conforms to it without inheriting from it (not in MRO).
                    if issubclass(t, type_) and type_ not in t.__mro__:
                        append(type_, ra)
            return [set(ra) for ra in ras]

        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        f'Ambiguous dispatch for {t}: {vas}')
                elif n_vas == 1:
                    va, = vas
                    # Build a throwaway class to linearize t with its
                    # virtual ancestor, reusing Python's C3 MRO algorithm.
                    mro = type('t', (t, va), {}).__mro__[1:]
                else:
                    mro = t.__mro__
                lists.append(mro[:-1])  # discard t and object
            return lists

        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)

            def dec(f):
                # The implementation must accept at least as many arguments
                # as there are dispatch arguments.
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec

        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = [tuple(a.__name__ for a in anc)
                   for anc in itertools.product(*ancestors(*types))]
            return lst

        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # Slow path: walk the cartesian product of the (virtual) MROs
            # looking for the most specific registered implementation.
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)

            # else call the default implementation
            return func(*args, **kw)

        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)

    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
venv/lib/python3.10/site-packages/scipy/_lib/deprecation.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from inspect import Parameter, signature
2
+ import functools
3
+ import warnings
4
+ from importlib import import_module
5
+
6
+
7
+ __all__ = ["_deprecated"]
8
+
9
+
10
+ # Object to use as default value for arguments to be deprecated. This should
11
+ # be used over 'None' as the user could parse 'None' as a positional argument
12
+ _NoValue = object()
13
+
14
+ def _sub_module_deprecation(*, sub_package, module, private_modules, all,
15
+ attribute, correct_module=None):
16
+ """Helper function for deprecating modules that are public but were
17
+ intended to be private.
18
+
19
+ Parameters
20
+ ----------
21
+ sub_package : str
22
+ Subpackage the module belongs to eg. stats
23
+ module : str
24
+ Public but intended private module to deprecate
25
+ private_modules : list
26
+ Private replacement(s) for `module`; should contain the
27
+ content of ``all``, possibly spread over several modules.
28
+ all : list
29
+ ``__all__`` belonging to `module`
30
+ attribute : str
31
+ The attribute in `module` being accessed
32
+ correct_module : str, optional
33
+ Module in `sub_package` that `attribute` should be imported from.
34
+ Default is that `attribute` should be imported from ``scipy.sub_package``.
35
+ """
36
+ if correct_module is not None:
37
+ correct_import = f"scipy.{sub_package}.{correct_module}"
38
+ else:
39
+ correct_import = f"scipy.{sub_package}"
40
+
41
+ if attribute not in all:
42
+ raise AttributeError(
43
+ f"`scipy.{sub_package}.{module}` has no attribute `{attribute}`; "
44
+ f"furthermore, `scipy.{sub_package}.{module}` is deprecated "
45
+ f"and will be removed in SciPy 2.0.0."
46
+ )
47
+
48
+ attr = getattr(import_module(correct_import), attribute, None)
49
+
50
+ if attr is not None:
51
+ message = (
52
+ f"Please import `{attribute}` from the `{correct_import}` namespace; "
53
+ f"the `scipy.{sub_package}.{module}` namespace is deprecated "
54
+ f"and will be removed in SciPy 2.0.0."
55
+ )
56
+ else:
57
+ message = (
58
+ f"`scipy.{sub_package}.{module}.{attribute}` is deprecated along with "
59
+ f"the `scipy.{sub_package}.{module}` namespace. "
60
+ f"`scipy.{sub_package}.{module}.{attribute}` will be removed "
61
+ f"in SciPy 1.14.0, and the `scipy.{sub_package}.{module}` namespace "
62
+ f"will be removed in SciPy 2.0.0."
63
+ )
64
+
65
+ warnings.warn(message, category=DeprecationWarning, stacklevel=3)
66
+
67
+ for module in private_modules:
68
+ try:
69
+ return getattr(import_module(f"scipy.{sub_package}.{module}"), attribute)
70
+ except AttributeError as e:
71
+ # still raise an error if the attribute isn't in any of the expected
72
+ # private modules
73
+ if module == private_modules[-1]:
74
+ raise e
75
+ continue
76
+
77
+
78
+ def _deprecated(msg, stacklevel=2):
79
+ """Deprecate a function by emitting a warning on use."""
80
+ def wrap(fun):
81
+ if isinstance(fun, type):
82
+ warnings.warn(
83
+ f"Trying to deprecate class {fun!r}",
84
+ category=RuntimeWarning, stacklevel=2)
85
+ return fun
86
+
87
+ @functools.wraps(fun)
88
+ def call(*args, **kwargs):
89
+ warnings.warn(msg, category=DeprecationWarning,
90
+ stacklevel=stacklevel)
91
+ return fun(*args, **kwargs)
92
+ call.__doc__ = fun.__doc__
93
+ return call
94
+
95
+ return wrap
96
+
97
+
98
+ class _DeprecationHelperStr:
99
+ """
100
+ Helper class used by deprecate_cython_api
101
+ """
102
+ def __init__(self, content, message):
103
+ self._content = content
104
+ self._message = message
105
+
106
+ def __hash__(self):
107
+ return hash(self._content)
108
+
109
+ def __eq__(self, other):
110
+ res = (self._content == other)
111
+ if res:
112
+ warnings.warn(self._message, category=DeprecationWarning,
113
+ stacklevel=2)
114
+ return res
115
+
116
+
117
def deprecate_cython_api(module, routine_name, new_name=None, message=None):
    """
    Deprecate an exported cdef function in a public Cython API module.

    Only functions can be deprecated; typedefs etc. cannot.

    Parameters
    ----------
    module : module
        Public Cython API module (e.g. scipy.linalg.cython_blas).
    routine_name : str
        Name of the routine to deprecate. May also be a fused-type
        routine (in which case all of its specializations are deprecated).
    new_name : str
        New name to include in the deprecation warning message
    message : str
        Additional text in the deprecation warning message

    Examples
    --------
    Usually, this function would be used in the top-level of the
    module ``.pyx`` file:

    >>> from scipy._lib.deprecation import deprecate_cython_api
    >>> import scipy.linalg.cython_blas as mod
    >>> deprecate_cython_api(mod, "dgemm", "dgemm_new",
    ...                      message="Deprecated in Scipy 1.5.0")
    >>> del deprecate_cython_api, mod

    After this, Cython modules that use the deprecated function emit a
    deprecation warning when they are imported.

    """
    old_name = f"{module.__name__}.{routine_name}"

    if new_name is None:
        depdoc = f"`{old_name}` is deprecated!"
    else:
        depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"

    if message is not None:
        depdoc += "\n" + message

    capi = module.__pyx_capi__

    # Fused-type routines are exported under mangled names of the form
    # ``__pyx_fuse_<j><routine_name>``; swap each specialization's key for
    # a warning-emitting proxy.
    num_fused = 0
    while True:
        mangled = f"__pyx_fuse_{num_fused}{routine_name}"
        if mangled not in capi:
            break
        capi[_DeprecationHelperStr(mangled, depdoc)] = capi.pop(mangled)
        num_fused += 1

    # Plain (non-fused) routine: wrap its key directly.
    if num_fused == 0:
        capi[_DeprecationHelperStr(routine_name, depdoc)] = capi.pop(routine_name)
177
+
178
+
179
+ # taken from scikit-learn, see
180
+ # https://github.com/scikit-learn/scikit-learn/blob/1.3.0/sklearn/utils/validation.py#L38
181
+ def _deprecate_positional_args(func=None, *, version=None):
182
+ """Decorator for methods that issues warnings for positional arguments.
183
+
184
+ Using the keyword-only argument syntax in pep 3102, arguments after the
185
+ * will issue a warning when passed as a positional argument.
186
+
187
+ Parameters
188
+ ----------
189
+ func : callable, default=None
190
+ Function to check arguments on.
191
+ version : callable, default=None
192
+ The version when positional arguments will result in error.
193
+ """
194
+ if version is None:
195
+ msg = "Need to specify a version where signature will be changed"
196
+ raise ValueError(msg)
197
+
198
+ def _inner_deprecate_positional_args(f):
199
+ sig = signature(f)
200
+ kwonly_args = []
201
+ all_args = []
202
+
203
+ for name, param in sig.parameters.items():
204
+ if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
205
+ all_args.append(name)
206
+ elif param.kind == Parameter.KEYWORD_ONLY:
207
+ kwonly_args.append(name)
208
+
209
+ @functools.wraps(f)
210
+ def inner_f(*args, **kwargs):
211
+ extra_args = len(args) - len(all_args)
212
+ if extra_args <= 0:
213
+ return f(*args, **kwargs)
214
+
215
+ # extra_args > 0
216
+ args_msg = [
217
+ f"{name}={arg}"
218
+ for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
219
+ ]
220
+ args_msg = ", ".join(args_msg)
221
+ warnings.warn(
222
+ (
223
+ f"You are passing {args_msg} as a positional argument. "
224
+ "Please change your invocation to use keyword arguments. "
225
+ f"From SciPy {version}, passing these as positional "
226
+ "arguments will result in an error."
227
+ ),
228
+ DeprecationWarning,
229
+ stacklevel=2,
230
+ )
231
+ kwargs.update(zip(sig.parameters, args))
232
+ return f(**kwargs)
233
+
234
+ return inner_f
235
+
236
+ if func is not None:
237
+ return _inner_deprecate_positional_args(func)
238
+
239
+ return _inner_deprecate_positional_args
venv/lib/python3.10/site-packages/scipy/_lib/doccer.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ''' Utilities to allow inserting docstring fragments for common
2
+ parameters into function and method docstrings'''
3
+
4
+ import sys
5
+
6
+ __all__ = [
7
+ 'docformat', 'inherit_docstring_from', 'indentcount_lines',
8
+ 'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring',
9
+ 'replace_notes_in_docstring', 'doc_replace'
10
+ ]
11
+
12
+
13
def docformat(docstring, docdict=None):
    '''Fill a function docstring from variables in dictionary

    Adapt the indent of the inserted docs

    Parameters
    ----------
    docstring : string
        docstring from function, possibly with dict formatting strings
    docdict : dict, optional
        dictionary with keys that match the dict formatting strings
        and values that are docstring fragments to be inserted. The
        indentation of the inserted docstrings is set to match the
        minimum indentation of the ``docstring`` by adding this
        indentation to all lines of the inserted string, except the
        first.

    Returns
    -------
    outstring : string
        string with requested ``docdict`` strings inserted

    Examples
    --------
    >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
    ' Test string with inserted value'
    >>> docformat('First line\\n  Second line\\n  %(value)s',
    ...           {'value': 'indented\\nstring'})
    'First line\\n  Second line\\n  indented\\n  string'
    '''
    # Nothing to do for an empty docstring or an empty fragment dict
    # (None is treated the same as an empty dict).
    if not docstring:
        return docstring
    if not docdict:
        return docstring

    doc_lines = docstring.expandtabs().splitlines()
    # Minimum indentation of the target docstring, ignoring its first line.
    if len(doc_lines) > 1:
        indent_width = indentcount_lines(doc_lines[1:])
    else:
        indent_width = 0
    pad = ' ' * indent_width

    # Re-indent every fragment so that continuation lines line up with the
    # docstring body; each fragment's first line keeps its own indentation.
    indented = {}
    for key, fragment in docdict.items():
        frag_lines = fragment.expandtabs().splitlines()
        if frag_lines:
            indented[key] = '\n'.join(
                frag_lines[:1] + [pad + ln for ln in frag_lines[1:]])
        else:
            # Empty fragment: insert verbatim.
            indented[key] = fragment
    return docstring % indented
70
+
71
+
72
def inherit_docstring_from(cls):
    """
    Decorator that fills '%(super)s' in a method's docstring with the
    docstring of the method of the same name from the class `cls`.

    If the decorated method has no docstring, it simply receives the
    docstring of `cls`'s method unchanged.

    Parameters
    ----------
    cls : Python class or instance
        A class with a method with the same name as the decorated method.
        The docstring of the method in this class replaces '%(super)s' in
        the docstring of the decorated method.

    Returns
    -------
    f : function
        The decorator function that modifies the __doc__ attribute
        of its argument.

    Examples
    --------
    >>> class Foo:
    ...     def func(self):
    ...         '''Do something useful.'''
    ...         return
    ...
    >>> class Bar(Foo):
    ...     @inherit_docstring_from(Foo)
    ...     def func(self):
    ...         '''%(super)s
    ...         Do it fast.
    ...         '''
    ...         return
    ...
    """
    def _doc(func):
        parent_doc = getattr(cls, func.__name__).__doc__
        own_doc = func.__doc__
        if own_doc is None:
            # No docstring of its own: inherit the parent's verbatim.
            func.__doc__ = parent_doc
        else:
            # Substitute the parent's docstring for every '%(super)s'.
            func.__doc__ = own_doc % {'super': parent_doc}
        return func
    return _doc
127
+
128
+
129
def extend_notes_in_docstring(cls, notes):
    """
    Decorator that replaces the decorated function's docstring with the
    docstring of the corresponding method in `cls`, extending that
    docstring's 'Notes' section with the given `notes`.
    """
    def _doc(func):
        parent_doc = getattr(cls, func.__name__).__doc__
        # Under ``python -OO`` there are no docstrings to copy.
        if parent_doc is None:
            return func
        # The Notes section ends where 'References' (or, failing that,
        # 'Examples') begins; otherwise insert at the very end.
        insert_at = parent_doc.find('        References\n')
        if insert_at == -1:
            insert_at = parent_doc.find('        Examples\n')
        if insert_at == -1:
            insert_at = len(parent_doc)
        func.__doc__ = parent_doc[:insert_at] + notes + parent_doc[insert_at:]
        return func
    return _doc
151
+
152
+
153
def replace_notes_in_docstring(cls, notes):
    """
    Decorator that replaces the decorated function's docstring with the
    docstring of the corresponding method in `cls`, replacing the content
    of that docstring's 'Notes' section with the given `notes`.
    """
    def _doc(func):
        cls_docstring = getattr(cls, func.__name__).__doc__
        notes_header = '        Notes\n        -----\n'
        # If python is called with -OO option,
        # there is no docstring
        if cls_docstring is None:
            return func
        start_of_notes = cls_docstring.find(notes_header)
        if start_of_notes == -1:
            # BUG FIX: previously a missing 'Notes' header made ``find``
            # return -1, and the slice below silently truncated/corrupted
            # the docstring. Leave the function untouched instead.
            return func
        # The Notes content ends where 'References' (or 'Examples')
        # begins; otherwise at the end of the docstring.
        end_of_notes = cls_docstring.find('        References\n')
        if end_of_notes == -1:
            end_of_notes = cls_docstring.find('        Examples\n')
        if end_of_notes == -1:
            end_of_notes = len(cls_docstring)
        func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
                        notes +
                        cls_docstring[end_of_notes:])
        return func
    return _doc
178
+
179
+
180
def indentcount_lines(lines):
    '''Minimum indent for all lines in line list

    Lines consisting only of whitespace are ignored; if there are no
    other lines the result is 0.

    >>> indentcount_lines(['  one', '   two', '    three'])
    2
    >>> indentcount_lines([])
    0
    >>> indentcount_lines(['    one'])
    4
    >>> indentcount_lines(['  '])
    0
    '''
    # Indent of each non-blank line; blank/whitespace-only lines carry no
    # indentation information.
    indents = (len(line) - len(line.lstrip())
               for line in lines if line.lstrip())
    return min(indents, default=0)
203
+
204
+
205
def filldoc(docdict, unindent_params=True):
    '''Return docstring decorator using docdict variable dictionary

    Parameters
    ----------
    docdict : dictionary
        dictionary containing name, docstring fragment pairs
    unindent_params : {False, True}, boolean, optional
        If True, strip common indentation from all parameters in
        docdict

    Returns
    -------
    decfunc : function
        decorator that applies dictionary to input function docstring

    '''
    # Normalize the fragments once so every decorated function shares the
    # same (optionally unindented) dictionary.
    fragments = unindent_dict(docdict) if unindent_params else docdict

    def decorate(func):
        # Fill the %(name)s placeholders in the function's docstring.
        func.__doc__ = docformat(func.__doc__, fragments)
        return func

    return decorate
229
+
230
+
231
def unindent_dict(docdict):
    ''' Unindent all strings in a docdict '''
    # Build a new dict rather than mutating the caller's.
    return {name: unindent_string(dstr) for name, dstr in docdict.items()}
237
+
238
+
239
def unindent_string(docstring):
    '''Set docstring to minimum indent for all lines, including first

    >>> unindent_string('  two')
    'two'
    >>> unindent_string('  two\\n   three')
    'two\\n three'
    '''
    lines = docstring.expandtabs().splitlines()
    strip_width = indentcount_lines(lines)
    if strip_width == 0:
        # Already flush left; return the input unchanged.
        return docstring
    return '\n'.join(line[strip_width:] for line in lines)
252
+
253
+
254
def doc_replace(obj, oldval, newval):
    """Decorator to take the docstring from obj, with oldval replaced by newval

    Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``

    Parameters
    ----------
    obj : object
        The object to take the docstring from.
    oldval : string
        The string to replace from the original docstring.
    newval : string
        The string to replace ``oldval`` with.
    """
    # ``__doc__`` may be None for optimized Python (-OO); fall back to ''.
    replaced_doc = (obj.__doc__ or '').replace(oldval, newval)

    def inner(func):
        func.__doc__ = replaced_doc
        return func

    return inner
venv/lib/python3.10/site-packages/scipy/_lib/messagestream.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (85.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc ADDED
Binary file (3.61 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc ADDED
Binary file (983 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc ADDED
Binary file (1.77 kB). View file