applied-ai-018 commited on
Commit
c5f0b3c
·
verified ·
1 Parent(s): 6cf19f1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py +0 -0
  3. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py +1225 -0
  7. venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py +460 -0
  8. venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py +12 -0
  9. venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so +0 -0
  11. venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/scipy/signal/__init__.py +346 -0
  14. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_ltisys.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_waveforms.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_wavelets.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/bsplines.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/filter_design.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/lti_conversion.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/ltisys.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/spline.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/scipy/signal/__pycache__/wavelets.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/scipy/signal/_filter_design.py +0 -0
  33. venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py +139 -0
  34. venv/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so +0 -0
  35. venv/lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so +0 -0
  37. venv/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so +0 -0
  38. venv/lib/python3.10/site-packages/scipy/signal/_waveforms.py +672 -0
  39. venv/lib/python3.10/site-packages/scipy/signal/filter_design.py +34 -0
  40. venv/lib/python3.10/site-packages/scipy/signal/tests/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:603d1addf9b3ceace3d238ec4f98ee3fd2966c42b5099e8ac2d5f3c4e99f9659
3
+ size 33555612
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_complex.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/__pycache__/_vertex.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py ADDED
@@ -0,0 +1,1225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base classes for low memory simplicial complex structures."""
2
+ import copy
3
+ import logging
4
+ import itertools
5
+ import decimal
6
+ from functools import cache
7
+
8
+ import numpy
9
+
10
+ from ._vertex import (VertexCacheField, VertexCacheIndex)
11
+
12
+
13
+ class Complex:
14
+ """
15
+ Base class for a simplicial complex described as a cache of vertices
16
+ together with their connections.
17
+
18
+ Important methods:
19
+ Domain triangulation:
20
+ Complex.triangulate, Complex.split_generation
21
+ Triangulating arbitrary points (must be traingulable,
22
+ may exist outside domain):
23
+ Complex.triangulate(sample_set)
24
+ Converting another simplicial complex structure data type to the
25
+ structure used in Complex (ex. OBJ wavefront)
26
+ Complex.convert(datatype, data)
27
+
28
+ Important objects:
29
+ HC.V: The cache of vertices and their connection
30
+ HC.H: Storage structure of all vertex groups
31
+
32
+ Parameters
33
+ ----------
34
+ dim : int
35
+ Spatial dimensionality of the complex R^dim
36
+ domain : list of tuples, optional
37
+ The bounds [x_l, x_u]^dim of the hyperrectangle space
38
+ ex. The default domain is the hyperrectangle [0, 1]^dim
39
+ Note: The domain must be convex, non-convex spaces can be cut
40
+ away from this domain using the non-linear
41
+ g_cons functions to define any arbitrary domain
42
+ (these domains may also be disconnected from each other)
43
+ sfield :
44
+ A scalar function defined in the associated domain f: R^dim --> R
45
+ sfield_args : tuple
46
+ Additional arguments to be passed to `sfield`
47
+ vfield :
48
+ A scalar function defined in the associated domain
49
+ f: R^dim --> R^m
50
+ (for example a gradient function of the scalar field)
51
+ vfield_args : tuple
52
+ Additional arguments to be passed to vfield
53
+ symmetry : None or list
54
+ Specify if the objective function contains symmetric variables.
55
+ The search space (and therefore performance) is decreased by up to
56
+ O(n!) times in the fully symmetric case.
57
+
58
+ E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
59
+
60
+ In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
61
+ x_6 are symmetric to x_4, this can be specified to the solver as:
62
+
63
+ symmetry = [0, # Variable 1
64
+ 0, # symmetric to variable 1
65
+ 0, # symmetric to variable 1
66
+ 3, # Variable 4
67
+ 3, # symmetric to variable 4
68
+ 3, # symmetric to variable 4
69
+ ]
70
+
71
+ constraints : dict or sequence of dict, optional
72
+ Constraints definition.
73
+ Function(s) ``R**n`` in the form::
74
+
75
+ g(x) <= 0 applied as g : R^n -> R^m
76
+ h(x) == 0 applied as h : R^n -> R^p
77
+
78
+ Each constraint is defined in a dictionary with fields:
79
+
80
+ type : str
81
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
82
+ fun : callable
83
+ The function defining the constraint.
84
+ jac : callable, optional
85
+ The Jacobian of `fun` (only for SLSQP).
86
+ args : sequence, optional
87
+ Extra arguments to be passed to the function and Jacobian.
88
+
89
+ Equality constraint means that the constraint function result is to
90
+ be zero whereas inequality means that it is to be
91
+ non-negative.constraints : dict or sequence of dict, optional
92
+ Constraints definition.
93
+ Function(s) ``R**n`` in the form::
94
+
95
+ g(x) <= 0 applied as g : R^n -> R^m
96
+ h(x) == 0 applied as h : R^n -> R^p
97
+
98
+ Each constraint is defined in a dictionary with fields:
99
+
100
+ type : str
101
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
102
+ fun : callable
103
+ The function defining the constraint.
104
+ jac : callable, optional
105
+ The Jacobian of `fun` (unused).
106
+ args : sequence, optional
107
+ Extra arguments to be passed to the function and Jacobian.
108
+
109
+ Equality constraint means that the constraint function result is to
110
+ be zero whereas inequality means that it is to be non-negative.
111
+
112
+ workers : int optional
113
+ Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
114
+ functions in parallel.
115
+ """
116
+ def __init__(self, dim, domain=None, sfield=None, sfield_args=(),
117
+ symmetry=None, constraints=None, workers=1):
118
+ self.dim = dim
119
+
120
+ # Domains
121
+ self.domain = domain
122
+ if domain is None:
123
+ self.bounds = [(0.0, 1.0), ] * dim
124
+ else:
125
+ self.bounds = domain
126
+ self.symmetry = symmetry
127
+ # here in init to avoid if checks
128
+
129
+ # Field functions
130
+ self.sfield = sfield
131
+ self.sfield_args = sfield_args
132
+
133
+ # Process constraints
134
+ # Constraints
135
+ # Process constraint dict sequence:
136
+ if constraints is not None:
137
+ self.min_cons = constraints
138
+ self.g_cons = []
139
+ self.g_args = []
140
+ if not isinstance(constraints, (tuple, list)):
141
+ constraints = (constraints,)
142
+
143
+ for cons in constraints:
144
+ if cons['type'] in ('ineq'):
145
+ self.g_cons.append(cons['fun'])
146
+ try:
147
+ self.g_args.append(cons['args'])
148
+ except KeyError:
149
+ self.g_args.append(())
150
+ self.g_cons = tuple(self.g_cons)
151
+ self.g_args = tuple(self.g_args)
152
+ else:
153
+ self.g_cons = None
154
+ self.g_args = None
155
+
156
+ # Homology properties
157
+ self.gen = 0
158
+ self.perm_cycle = 0
159
+
160
+ # Every cell is stored in a list of its generation,
161
+ # ex. the initial cell is stored in self.H[0]
162
+ # 1st get new cells are stored in self.H[1] etc.
163
+ # When a cell is sub-generated it is removed from this list
164
+
165
+ self.H = [] # Storage structure of vertex groups
166
+
167
+ # Cache of all vertices
168
+ if (sfield is not None) or (self.g_cons is not None):
169
+ # Initiate a vertex cache and an associated field cache, note that
170
+ # the field case is always initiated inside the vertex cache if an
171
+ # associated field scalar field is defined:
172
+ if sfield is not None:
173
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
174
+ g_cons=self.g_cons,
175
+ g_cons_args=self.g_args,
176
+ workers=workers)
177
+ elif self.g_cons is not None:
178
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
179
+ g_cons=self.g_cons,
180
+ g_cons_args=self.g_args,
181
+ workers=workers)
182
+ else:
183
+ self.V = VertexCacheIndex()
184
+
185
+ self.V_non_symm = [] # List of non-symmetric vertices
186
+
187
+ def __call__(self):
188
+ return self.H
189
+
190
+ # %% Triangulation methods
191
+ def cyclic_product(self, bounds, origin, supremum, centroid=True):
192
+ """Generate initial triangulation using cyclic product"""
193
+ # Define current hyperrectangle
194
+ vot = tuple(origin)
195
+ vut = tuple(supremum) # Hyperrectangle supremum
196
+ self.V[vot]
197
+ vo = self.V[vot]
198
+ yield vo.x
199
+ self.V[vut].connect(self.V[vot])
200
+ yield vut
201
+ # Cyclic group approach with second x_l --- x_u operation.
202
+
203
+ # These containers store the "lower" and "upper" vertices
204
+ # corresponding to the origin or supremum of every C2 group.
205
+ # It has the structure of `dim` times embedded lists each containing
206
+ # these vertices as the entire complex grows. Bounds[0] has to be done
207
+ # outside the loops before we have symmetric containers.
208
+ # NOTE: This means that bounds[0][1] must always exist
209
+ C0x = [[self.V[vot]]]
210
+ a_vo = copy.copy(list(origin))
211
+ a_vo[0] = vut[0] # Update aN Origin
212
+ a_vo = self.V[tuple(a_vo)]
213
+ # self.V[vot].connect(self.V[tuple(a_vo)])
214
+ self.V[vot].connect(a_vo)
215
+ yield a_vo.x
216
+ C1x = [[a_vo]]
217
+ # C1x = [[self.V[tuple(a_vo)]]]
218
+ ab_C = [] # Container for a + b operations
219
+
220
+ # Loop over remaining bounds
221
+ for i, x in enumerate(bounds[1:]):
222
+ # Update lower and upper containers
223
+ C0x.append([])
224
+ C1x.append([])
225
+ # try to access a second bound (if not, C1 is symmetric)
226
+ try:
227
+ # Early try so that we don't have to copy the cache before
228
+ # moving on to next C1/C2: Try to add the operation of a new
229
+ # C2 product by accessing the upper bound
230
+ x[1]
231
+ # Copy lists for iteration
232
+ cC0x = [x[:] for x in C0x[:i + 1]]
233
+ cC1x = [x[:] for x in C1x[:i + 1]]
234
+ for j, (VL, VU) in enumerate(zip(cC0x, cC1x)):
235
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
236
+ # Build aN vertices for each lower-upper pair in N:
237
+ a_vl = list(vl.x)
238
+ a_vu = list(vu.x)
239
+ a_vl[i + 1] = vut[i + 1]
240
+ a_vu[i + 1] = vut[i + 1]
241
+ a_vl = self.V[tuple(a_vl)]
242
+
243
+ # Connect vertices in N to corresponding vertices
244
+ # in aN:
245
+ vl.connect(a_vl)
246
+
247
+ yield a_vl.x
248
+
249
+ a_vu = self.V[tuple(a_vu)]
250
+ # Connect vertices in N to corresponding vertices
251
+ # in aN:
252
+ vu.connect(a_vu)
253
+
254
+ # Connect new vertex pair in aN:
255
+ a_vl.connect(a_vu)
256
+
257
+ # Connect lower pair to upper (triangulation
258
+ # operation of a + b (two arbitrary operations):
259
+ vl.connect(a_vu)
260
+ ab_C.append((vl, a_vu))
261
+
262
+ # Update the containers
263
+ C0x[i + 1].append(vl)
264
+ C0x[i + 1].append(vu)
265
+ C1x[i + 1].append(a_vl)
266
+ C1x[i + 1].append(a_vu)
267
+
268
+ # Update old containers
269
+ C0x[j].append(a_vl)
270
+ C1x[j].append(a_vu)
271
+
272
+ # Yield new points
273
+ yield a_vu.x
274
+
275
+ # Try to connect aN lower source of previous a + b
276
+ # operation with a aN vertex
277
+ ab_Cc = copy.copy(ab_C)
278
+
279
+ for vp in ab_Cc:
280
+ b_v = list(vp[0].x)
281
+ ab_v = list(vp[1].x)
282
+ b_v[i + 1] = vut[i + 1]
283
+ ab_v[i + 1] = vut[i + 1]
284
+ b_v = self.V[tuple(b_v)] # b + vl
285
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
286
+ # Note o---o is already connected
287
+ vp[0].connect(ab_v) # o-s
288
+ b_v.connect(ab_v) # s-s
289
+
290
+ # Add new list of cross pairs
291
+ ab_C.append((vp[0], ab_v))
292
+ ab_C.append((b_v, ab_v))
293
+
294
+ except IndexError:
295
+ cC0x = C0x[i]
296
+ cC1x = C1x[i]
297
+ VL, VU = cC0x, cC1x
298
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
299
+ # Build aN vertices for each lower-upper pair in N:
300
+ a_vu = list(vu.x)
301
+ a_vu[i + 1] = vut[i + 1]
302
+ # Connect vertices in N to corresponding vertices
303
+ # in aN:
304
+ a_vu = self.V[tuple(a_vu)]
305
+ # Connect vertices in N to corresponding vertices
306
+ # in aN:
307
+ vu.connect(a_vu)
308
+ # Connect new vertex pair in aN:
309
+ # a_vl.connect(a_vu)
310
+ # Connect lower pair to upper (triangulation
311
+ # operation of a + b (two arbitrary operations):
312
+ vl.connect(a_vu)
313
+ ab_C.append((vl, a_vu))
314
+ C0x[i + 1].append(vu)
315
+ C1x[i + 1].append(a_vu)
316
+ # Yield new points
317
+ a_vu.connect(self.V[vut])
318
+ yield a_vu.x
319
+ ab_Cc = copy.copy(ab_C)
320
+ for vp in ab_Cc:
321
+ if vp[1].x[i] == vut[i]:
322
+ ab_v = list(vp[1].x)
323
+ ab_v[i + 1] = vut[i + 1]
324
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
325
+ # Note o---o is already connected
326
+ vp[0].connect(ab_v) # o-s
327
+
328
+ # Add new list of cross pairs
329
+ ab_C.append((vp[0], ab_v))
330
+
331
+ # Clean class trash
332
+ try:
333
+ del C0x
334
+ del cC0x
335
+ del C1x
336
+ del cC1x
337
+ del ab_C
338
+ del ab_Cc
339
+ except UnboundLocalError:
340
+ pass
341
+
342
+ # Extra yield to ensure that the triangulation is completed
343
+ if centroid:
344
+ vo = self.V[vot]
345
+ vs = self.V[vut]
346
+ # Disconnect the origin and supremum
347
+ vo.disconnect(vs)
348
+ # Build centroid
349
+ vc = self.split_edge(vot, vut)
350
+ for v in vo.nn:
351
+ v.connect(vc)
352
+ yield vc.x
353
+ return vc.x
354
+ else:
355
+ yield vut
356
+ return vut
357
+
358
+ def triangulate(self, n=None, symmetry=None, centroid=True,
359
+ printout=False):
360
+ """
361
+ Triangulate the initial domain, if n is not None then a limited number
362
+ of points will be generated
363
+
364
+ Parameters
365
+ ----------
366
+ n : int, Number of points to be sampled.
367
+ symmetry :
368
+
369
+ Ex. Dictionary/hashtable
370
+ f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
371
+
372
+ symmetry = symmetry[0]: 0, # Variable 1
373
+ symmetry[1]: 0, # symmetric to variable 1
374
+ symmetry[2]: 0, # symmetric to variable 1
375
+ symmetry[3]: 3, # Variable 4
376
+ symmetry[4]: 3, # symmetric to variable 4
377
+ symmetry[5]: 3, # symmetric to variable 4
378
+ }
379
+ centroid : bool, if True add a central point to the hypercube
380
+ printout : bool, if True print out results
381
+
382
+ NOTES:
383
+ ------
384
+ Rather than using the combinatorial algorithm to connect vertices we
385
+ make the following observation:
386
+
387
+ The bound pairs are similar a C2 cyclic group and the structure is
388
+ formed using the cartesian product:
389
+
390
+ H = C2 x C2 x C2 ... x C2 (dim times)
391
+
392
+ So construct any normal subgroup N and consider H/N first, we connect
393
+ all vertices within N (ex. N is C2 (the first dimension), then we move
394
+ to a left coset aN (an operation moving around the defined H/N group by
395
+ for example moving from the lower bound in C2 (dimension 2) to the
396
+ higher bound in C2. During this operation connection all the vertices.
397
+ Now repeat the N connections. Note that these elements can be connected
398
+ in parallel.
399
+ """
400
+ # Inherit class arguments
401
+ if symmetry is None:
402
+ symmetry = self.symmetry
403
+ # Build origin and supremum vectors
404
+ origin = [i[0] for i in self.bounds]
405
+ self.origin = origin
406
+ supremum = [i[1] for i in self.bounds]
407
+
408
+ self.supremum = supremum
409
+
410
+ if symmetry is None:
411
+ cbounds = self.bounds
412
+ else:
413
+ cbounds = copy.copy(self.bounds)
414
+ for i, j in enumerate(symmetry):
415
+ if i is not j:
416
+ # pop second entry on second symmetry vars
417
+ cbounds[i] = [self.bounds[symmetry[i]][0]]
418
+ # Sole (first) entry is the sup value and there is no
419
+ # origin:
420
+ cbounds[i] = [self.bounds[symmetry[i]][1]]
421
+ if (self.bounds[symmetry[i]] is not
422
+ self.bounds[symmetry[j]]):
423
+ logging.warning(f"Variable {i} was specified as "
424
+ f"symmetetric to variable {j}, however"
425
+ f", the bounds {i} ="
426
+ f" {self.bounds[symmetry[i]]} and {j}"
427
+ f" ="
428
+ f" {self.bounds[symmetry[j]]} do not "
429
+ f"match, the mismatch was ignored in "
430
+ f"the initial triangulation.")
431
+ cbounds[i] = self.bounds[symmetry[j]]
432
+
433
+ if n is None:
434
+ # Build generator
435
+ self.cp = self.cyclic_product(cbounds, origin, supremum, centroid)
436
+ for i in self.cp:
437
+ i
438
+
439
+ try:
440
+ self.triangulated_vectors.append((tuple(self.origin),
441
+ tuple(self.supremum)))
442
+ except (AttributeError, KeyError):
443
+ self.triangulated_vectors = [(tuple(self.origin),
444
+ tuple(self.supremum))]
445
+
446
+ else:
447
+ # Check if generator already exists
448
+ try:
449
+ self.cp
450
+ except (AttributeError, KeyError):
451
+ self.cp = self.cyclic_product(cbounds, origin, supremum,
452
+ centroid)
453
+
454
+ try:
455
+ while len(self.V.cache) < n:
456
+ next(self.cp)
457
+ except StopIteration:
458
+ try:
459
+ self.triangulated_vectors.append((tuple(self.origin),
460
+ tuple(self.supremum)))
461
+ except (AttributeError, KeyError):
462
+ self.triangulated_vectors = [(tuple(self.origin),
463
+ tuple(self.supremum))]
464
+
465
+ if printout:
466
+ # for v in self.C0():
467
+ # v.print_out()
468
+ for v in self.V.cache:
469
+ self.V[v].print_out()
470
+
471
+ return
472
+
473
+ def refine(self, n=1):
474
+ if n is None:
475
+ try:
476
+ self.triangulated_vectors
477
+ self.refine_all()
478
+ return
479
+ except AttributeError as ae:
480
+ if str(ae) == "'Complex' object has no attribute " \
481
+ "'triangulated_vectors'":
482
+ self.triangulate(symmetry=self.symmetry)
483
+ return
484
+ else:
485
+ raise
486
+
487
+ nt = len(self.V.cache) + n # Target number of total vertices
488
+ # In the outer while loop we iterate until we have added an extra `n`
489
+ # vertices to the complex:
490
+ while len(self.V.cache) < nt: # while loop 1
491
+ try: # try 1
492
+ # Try to access triangulated_vectors, this should only be
493
+ # defined if an initial triangulation has already been
494
+ # performed:
495
+ self.triangulated_vectors
496
+ # Try a usual iteration of the current generator, if it
497
+ # does not exist or is exhausted then produce a new generator
498
+ try: # try 2
499
+ next(self.rls)
500
+ except (AttributeError, StopIteration, KeyError):
501
+ vp = self.triangulated_vectors[0]
502
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
503
+ next(self.rls)
504
+
505
+ except (AttributeError, KeyError):
506
+ # If an initial triangulation has not been completed, then
507
+ # we start/continue the initial triangulation targeting `nt`
508
+ # vertices, if nt is greater than the initial number of
509
+ # vertices then the `refine` routine will move back to try 1.
510
+ self.triangulate(nt, self.symmetry)
511
+ return
512
+
513
+ def refine_all(self, centroids=True):
514
+ """Refine the entire domain of the current complex."""
515
+ try:
516
+ self.triangulated_vectors
517
+ tvs = copy.copy(self.triangulated_vectors)
518
+ for i, vp in enumerate(tvs):
519
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
520
+ for i in self.rls:
521
+ i
522
+ except AttributeError as ae:
523
+ if str(ae) == "'Complex' object has no attribute " \
524
+ "'triangulated_vectors'":
525
+ self.triangulate(symmetry=self.symmetry, centroid=centroids)
526
+ else:
527
+ raise
528
+
529
+ # This adds a centroid to every new sub-domain generated and defined
530
+ # by self.triangulated_vectors, in addition the vertices ! to complete
531
+ # the triangulation
532
+ return
533
+
534
+ def refine_local_space(self, origin, supremum, bounds, centroid=1):
535
+ # Copy for later removal
536
+ origin_c = copy.copy(origin)
537
+ supremum_c = copy.copy(supremum)
538
+
539
+ # Initiate local variables redefined in later inner `for` loop:
540
+ vl, vu, a_vu = None, None, None
541
+
542
+ # Change the vector orientation so that it is only increasing
543
+ s_ov = list(origin)
544
+ s_origin = list(origin)
545
+ s_sv = list(supremum)
546
+ s_supremum = list(supremum)
547
+ for i, vi in enumerate(s_origin):
548
+ if s_ov[i] > s_sv[i]:
549
+ s_origin[i] = s_sv[i]
550
+ s_supremum[i] = s_ov[i]
551
+
552
+ vot = tuple(s_origin)
553
+ vut = tuple(s_supremum) # Hyperrectangle supremum
554
+
555
+ vo = self.V[vot] # initiate if doesn't exist yet
556
+ vs = self.V[vut]
557
+ # Start by finding the old centroid of the new space:
558
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
559
+
560
+ # Find set of extreme vertices in current local space
561
+ sup_set = copy.copy(vco.nn)
562
+ # Cyclic group approach with second x_l --- x_u operation.
563
+
564
+ # These containers store the "lower" and "upper" vertices
565
+ # corresponding to the origin or supremum of every C2 group.
566
+ # It has the structure of `dim` times embedded lists each containing
567
+ # these vertices as the entire complex grows. Bounds[0] has to be done
568
+ # outside the loops before we have symmetric containers.
569
+ # NOTE: This means that bounds[0][1] must always exist
570
+
571
+ a_vl = copy.copy(list(vot))
572
+ a_vl[0] = vut[0] # Update aN Origin
573
+ if tuple(a_vl) not in self.V.cache:
574
+ vo = self.V[vot] # initiate if doesn't exist yet
575
+ vs = self.V[vut]
576
+ # Start by finding the old centroid of the new space:
577
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
578
+
579
+ # Find set of extreme vertices in current local space
580
+ sup_set = copy.copy(vco.nn)
581
+ a_vl = copy.copy(list(vot))
582
+ a_vl[0] = vut[0] # Update aN Origin
583
+ a_vl = self.V[tuple(a_vl)]
584
+ else:
585
+ a_vl = self.V[tuple(a_vl)]
586
+
587
+ c_v = self.split_edge(vo.x, a_vl.x)
588
+ c_v.connect(vco)
589
+ yield c_v.x
590
+ Cox = [[vo]]
591
+ Ccx = [[c_v]]
592
+ Cux = [[a_vl]]
593
+ ab_C = [] # Container for a + b operations
594
+ s_ab_C = [] # Container for symmetric a + b operations
595
+
596
+ # Loop over remaining bounds
597
+ for i, x in enumerate(bounds[1:]):
598
+ # Update lower and upper containers
599
+ Cox.append([])
600
+ Ccx.append([])
601
+ Cux.append([])
602
+ # try to access a second bound (if not, C1 is symmetric)
603
+ try:
604
+ t_a_vl = list(vot)
605
+ t_a_vl[i + 1] = vut[i + 1]
606
+
607
+ # New: lists are used anyway, so copy all
608
+ # %%
609
+ # Copy lists for iteration
610
+ cCox = [x[:] for x in Cox[:i + 1]]
611
+ cCcx = [x[:] for x in Ccx[:i + 1]]
612
+ cCux = [x[:] for x in Cux[:i + 1]]
613
+ # Try to connect aN lower source of previous a + b
614
+ # operation with a aN vertex
615
+ ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the
616
+ # (VL, VC, VU) for-loop, but we use the copy of the list in the
617
+ # ab_Cc for-loop.
618
+ s_ab_Cc = copy.copy(s_ab_C)
619
+
620
+ # Early try so that we don't have to copy the cache before
621
+ # moving on to next C1/C2: Try to add the operation of a new
622
+ # C2 product by accessing the upper bound
623
+ if tuple(t_a_vl) not in self.V.cache:
624
+ # Raise error to continue symmetric refine
625
+ raise IndexError
626
+ t_a_vu = list(vut)
627
+ t_a_vu[i + 1] = vut[i + 1]
628
+ if tuple(t_a_vu) not in self.V.cache:
629
+ # Raise error to continue symmetric refine:
630
+ raise IndexError
631
+
632
+ for vectors in s_ab_Cc:
633
+ # s_ab_C.append([c_vc, vl, vu, a_vu])
634
+ bc_vc = list(vectors[0].x)
635
+ b_vl = list(vectors[1].x)
636
+ b_vu = list(vectors[2].x)
637
+ ba_vu = list(vectors[3].x)
638
+
639
+ bc_vc[i + 1] = vut[i + 1]
640
+ b_vl[i + 1] = vut[i + 1]
641
+ b_vu[i + 1] = vut[i + 1]
642
+ ba_vu[i + 1] = vut[i + 1]
643
+
644
+ bc_vc = self.V[tuple(bc_vc)]
645
+ bc_vc.connect(vco) # NOTE: Unneeded?
646
+ yield bc_vc
647
+
648
+ # Split to centre, call this centre group "d = 0.5*a"
649
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
650
+ d_bc_vc.connect(bc_vc)
651
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
652
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
653
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
654
+ yield d_bc_vc.x
655
+ b_vl = self.V[tuple(b_vl)]
656
+ bc_vc.connect(b_vl) # Connect aN cross pairs
657
+ d_bc_vc.connect(b_vl) # Connect all to centroid
658
+
659
+ yield b_vl
660
+ b_vu = self.V[tuple(b_vu)]
661
+ bc_vc.connect(b_vu) # Connect aN cross pairs
662
+ d_bc_vc.connect(b_vu) # Connect all to centroid
663
+
664
+ b_vl_c = self.split_edge(b_vu.x, b_vl.x)
665
+ bc_vc.connect(b_vl_c)
666
+
667
+ yield b_vu
668
+ ba_vu = self.V[tuple(ba_vu)]
669
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
670
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
671
+
672
+ # Split the a + b edge of the initial triangulation:
673
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
674
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
675
+ b_vu_c = self.split_edge(b_vu.x, ba_vu.x)
676
+ bc_vc.connect(b_vu_c)
677
+ yield os_v.x # often equal to vco, but not always
678
+ yield ss_v.x # often equal to bc_vu, but not always
679
+ yield ba_vu
680
+ # Split remaining to centre, call this centre group
681
+ # "d = 0.5*a"
682
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
683
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
684
+ yield d_bc_vc.x
685
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
686
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
687
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
688
+ yield d_b_vl.x
689
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
690
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
691
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
692
+ yield d_b_vu.x
693
+ d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x)
694
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
695
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
696
+ yield d_ba_vu
697
+
698
+ # comb = [c_vc, vl, vu, a_vl, a_vu,
699
+ # bc_vc, b_vl, b_vu, ba_vl, ba_vu]
700
+ comb = [vl, vu, a_vu,
701
+ b_vl, b_vu, ba_vu]
702
+ comb_iter = itertools.combinations(comb, 2)
703
+ for vecs in comb_iter:
704
+ self.split_edge(vecs[0].x, vecs[1].x)
705
+ # Add new list of cross pairs
706
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
707
+ ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev
708
+
709
+ for vectors in ab_Cc:
710
+ bc_vc = list(vectors[0].x)
711
+ b_vl = list(vectors[1].x)
712
+ b_vu = list(vectors[2].x)
713
+ ba_vl = list(vectors[3].x)
714
+ ba_vu = list(vectors[4].x)
715
+ bc_vc[i + 1] = vut[i + 1]
716
+ b_vl[i + 1] = vut[i + 1]
717
+ b_vu[i + 1] = vut[i + 1]
718
+ ba_vl[i + 1] = vut[i + 1]
719
+ ba_vu[i + 1] = vut[i + 1]
720
+ bc_vc = self.V[tuple(bc_vc)]
721
+ bc_vc.connect(vco) # NOTE: Unneeded?
722
+ yield bc_vc
723
+
724
+ # Split to centre, call this centre group "d = 0.5*a"
725
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
726
+ d_bc_vc.connect(bc_vc)
727
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
728
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
729
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
730
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
731
+ yield d_bc_vc.x
732
+ b_vl = self.V[tuple(b_vl)]
733
+ bc_vc.connect(b_vl) # Connect aN cross pairs
734
+ d_bc_vc.connect(b_vl) # Connect all to centroid
735
+ yield b_vl
736
+ b_vu = self.V[tuple(b_vu)]
737
+ bc_vc.connect(b_vu) # Connect aN cross pairs
738
+ d_bc_vc.connect(b_vu) # Connect all to centroid
739
+ yield b_vu
740
+ ba_vl = self.V[tuple(ba_vl)]
741
+ bc_vc.connect(ba_vl) # Connect aN cross pairs
742
+ d_bc_vc.connect(ba_vl) # Connect all to centroid
743
+ self.split_edge(b_vu.x, ba_vl.x)
744
+ yield ba_vl
745
+ ba_vu = self.V[tuple(ba_vu)]
746
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
747
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
748
+ # Split the a + b edge of the initial triangulation:
749
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
750
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
751
+ yield os_v.x # often equal to vco, but not always
752
+ yield ss_v.x # often equal to bc_vu, but not always
753
+ yield ba_vu
754
+ # Split remaining to centre, call this centre group
755
+ # "d = 0.5*a"
756
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
757
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
758
+ yield d_bc_vc.x
759
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
760
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
761
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
762
+ yield d_b_vl.x
763
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
764
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
765
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
766
+ yield d_b_vu.x
767
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
768
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
769
+ d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs
770
+ yield d_ba_vl
771
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
772
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
773
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
774
+ yield d_ba_vu
775
+ c_vc, vl, vu, a_vl, a_vu = vectors
776
+
777
+ comb = [vl, vu, a_vl, a_vu,
778
+ b_vl, b_vu, ba_vl, ba_vu]
779
+ comb_iter = itertools.combinations(comb, 2)
780
+ for vecs in comb_iter:
781
+ self.split_edge(vecs[0].x, vecs[1].x)
782
+
783
+ # Add new list of cross pairs
784
+ ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu))
785
+ ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu))
786
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
787
+ ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl))
788
+
789
+ for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)):
790
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
791
+ # Build aN vertices for each lower-upper C3 group in N:
792
+ a_vl = list(vl.x)
793
+ a_vu = list(vu.x)
794
+ a_vl[i + 1] = vut[i + 1]
795
+ a_vu[i + 1] = vut[i + 1]
796
+ a_vl = self.V[tuple(a_vl)]
797
+ a_vu = self.V[tuple(a_vu)]
798
+ # Note, build (a + vc) later for consistent yields
799
+ # Split the a + b edge of the initial triangulation:
800
+ c_vc = self.split_edge(vl.x, a_vu.x)
801
+ self.split_edge(vl.x, vu.x) # Equal to vc
802
+ # Build cN vertices for each lower-upper C3 group in N:
803
+ c_vc.connect(vco)
804
+ c_vc.connect(vc)
805
+ c_vc.connect(vl) # Connect c + ac operations
806
+ c_vc.connect(vu) # Connect c + ac operations
807
+ c_vc.connect(a_vl) # Connect c + ac operations
808
+ c_vc.connect(a_vu) # Connect c + ac operations
809
+ yield c_vc.x
810
+ c_vl = self.split_edge(vl.x, a_vl.x)
811
+ c_vl.connect(vco)
812
+ c_vc.connect(c_vl) # Connect cN group vertices
813
+ yield c_vl.x
814
+ # yield at end of loop:
815
+ c_vu = self.split_edge(vu.x, a_vu.x)
816
+ c_vu.connect(vco)
817
+ # Connect remaining cN group vertices
818
+ c_vc.connect(c_vu) # Connect cN group vertices
819
+ yield c_vu.x
820
+
821
+ a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ?
822
+ a_vc.connect(vco)
823
+ a_vc.connect(c_vc)
824
+
825
+ # Storage for connecting c + ac operations:
826
+ ab_C.append((c_vc, vl, vu, a_vl, a_vu))
827
+
828
+ # Update the containers
829
+ Cox[i + 1].append(vl)
830
+ Cox[i + 1].append(vc)
831
+ Cox[i + 1].append(vu)
832
+ Ccx[i + 1].append(c_vl)
833
+ Ccx[i + 1].append(c_vc)
834
+ Ccx[i + 1].append(c_vu)
835
+ Cux[i + 1].append(a_vl)
836
+ Cux[i + 1].append(a_vc)
837
+ Cux[i + 1].append(a_vu)
838
+
839
+ # Update old containers
840
+ Cox[j].append(c_vl) # !
841
+ Cox[j].append(a_vl)
842
+ Ccx[j].append(c_vc) # !
843
+ Ccx[j].append(a_vc) # !
844
+ Cux[j].append(c_vu) # !
845
+ Cux[j].append(a_vu)
846
+
847
+ # Yield new points
848
+ yield a_vc.x
849
+
850
+ except IndexError:
851
+ for vectors in ab_Cc:
852
+ ba_vl = list(vectors[3].x)
853
+ ba_vu = list(vectors[4].x)
854
+ ba_vl[i + 1] = vut[i + 1]
855
+ ba_vu[i + 1] = vut[i + 1]
856
+ ba_vu = self.V[tuple(ba_vu)]
857
+ yield ba_vu
858
+ d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s
859
+ yield ba_vu
860
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
861
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
862
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
863
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
864
+ yield d_bc_vc.x
865
+ ba_vl = self.V[tuple(ba_vl)]
866
+ yield ba_vl
867
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
868
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
869
+ d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x)
870
+ yield d_ba_vl
871
+ yield d_ba_vu
872
+ yield d_ba_vc
873
+ c_vc, vl, vu, a_vl, a_vu = vectors
874
+ comb = [vl, vu, a_vl, a_vu,
875
+ ba_vl,
876
+ ba_vu]
877
+ comb_iter = itertools.combinations(comb, 2)
878
+ for vecs in comb_iter:
879
+ self.split_edge(vecs[0].x, vecs[1].x)
880
+
881
+ # Copy lists for iteration
882
+ cCox = Cox[i]
883
+ cCcx = Ccx[i]
884
+ cCux = Cux[i]
885
+ VL, VC, VU = cCox, cCcx, cCux
886
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
887
+ # Build aN vertices for each lower-upper pair in N:
888
+ a_vu = list(vu.x)
889
+ a_vu[i + 1] = vut[i + 1]
890
+
891
+ # Connect vertices in N to corresponding vertices
892
+ # in aN:
893
+ a_vu = self.V[tuple(a_vu)]
894
+ yield a_vl.x
895
+ # Split the a + b edge of the initial triangulation:
896
+ c_vc = self.split_edge(vl.x, a_vu.x)
897
+ self.split_edge(vl.x, vu.x) # Equal to vc
898
+ c_vc.connect(vco)
899
+ c_vc.connect(vc)
900
+ c_vc.connect(vl) # Connect c + ac operations
901
+ c_vc.connect(vu) # Connect c + ac operations
902
+ c_vc.connect(a_vu) # Connect c + ac operations
903
+ yield (c_vc.x)
904
+ c_vu = self.split_edge(vu.x,
905
+ a_vu.x) # yield at end of loop
906
+ c_vu.connect(vco)
907
+ # Connect remaining cN group vertices
908
+ c_vc.connect(c_vu) # Connect cN group vertices
909
+ yield (c_vu.x)
910
+
911
+ # Update the containers
912
+ Cox[i + 1].append(vu)
913
+ Ccx[i + 1].append(c_vu)
914
+ Cux[i + 1].append(a_vu)
915
+
916
+ # Update old containers
917
+ s_ab_C.append([c_vc, vl, vu, a_vu])
918
+
919
+ yield a_vu.x
920
+
921
+ # Clean class trash
922
+ try:
923
+ del Cox
924
+ del Ccx
925
+ del Cux
926
+ del ab_C
927
+ del ab_Cc
928
+ except UnboundLocalError:
929
+ pass
930
+
931
+ try:
932
+ self.triangulated_vectors.remove((tuple(origin_c),
933
+ tuple(supremum_c)))
934
+ except ValueError:
935
+ # Turn this into a logging warning?
936
+ pass
937
+ # Add newly triangulated vectors:
938
+ for vs in sup_set:
939
+ self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x)))
940
+
941
+ # Extra yield to ensure that the triangulation is completed
942
+ if centroid:
943
+ vcn_set = set()
944
+ c_nn_lists = []
945
+ for vs in sup_set:
946
+ # Build centroid
947
+ c_nn = self.vpool(vco.x, vs.x)
948
+ try:
949
+ c_nn.remove(vcn_set)
950
+ except KeyError:
951
+ pass
952
+ c_nn_lists.append(c_nn)
953
+
954
+ for c_nn in c_nn_lists:
955
+ try:
956
+ c_nn.remove(vcn_set)
957
+ except KeyError:
958
+ pass
959
+
960
+ for vs, c_nn in zip(sup_set, c_nn_lists):
961
+ # Build centroid
962
+ vcn = self.split_edge(vco.x, vs.x)
963
+ vcn_set.add(vcn)
964
+ try: # Shouldn't be needed?
965
+ c_nn.remove(vcn_set)
966
+ except KeyError:
967
+ pass
968
+ for vnn in c_nn:
969
+ vcn.connect(vnn)
970
+ yield vcn.x
971
+ else:
972
+ pass
973
+
974
+ yield vut
975
+ return
976
+
977
def refine_star(self, v):
    """Refine the star domain of a vertex `v`.

    Every edge from `v` to a neighbour, and every edge between two
    neighbours of `v`, is bisected; the new midpoints are mutually
    connected so the refined star stays a simplicial complex.
    """
    # Snapshot the neighbourhood structure before splitting mutates it.
    neighbours = copy.copy(v.nn)
    neighbour_nns = [copy.copy(nbr.nn) for nbr in neighbours]

    new_midpoints = set()
    for nbr, nbr_nn in zip(neighbours, neighbour_nns):
        # Neighbours of `nbr` that are also neighbours of `v`.
        shared = nbr_nn.intersection(neighbours)

        # Bisect the edge v--nbr and connect the midpoint to every
        # previously created midpoint in this star.
        mid_v_nbr = self.split_edge(v.x, nbr.x)
        for other_mid in new_midpoints:
            mid_v_nbr.connect(other_mid)
        new_midpoints.add(mid_v_nbr)

        # Bisect each nbr--v2 edge and connect its midpoint to the
        # v--nbr midpoint.
        for v2 in shared:
            mid_nbr_v2 = self.split_edge(nbr.x, v2.x)
            mid_v_nbr.connect(mid_nbr_v2)
    return
997
+
998
@cache
def split_edge(self, v1, v2):
    """Split the edge between coordinates `v1` and `v2` at its midpoint.

    The original edge (if present) is removed and both endpoints are
    connected to the new midpoint vertex, which is returned.

    NOTE(review): the cache memoises on the coordinate pair (and
    `self`), so repeated splits of the same edge are idempotent; this
    also keeps a reference to `self` alive for the cache's lifetime —
    confirm memory impact for large complexes.
    """
    v1 = self.V[v1]
    v2 = self.V[v2]
    # Destroy original edge, if it exists:
    v1.disconnect(v2)
    # Compute vertex on centre of edge:
    try:
        vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a
    except TypeError:  # Allow for decimal operations
        # Vertices backed by `decimal.Decimal` coordinates cannot be
        # divided by a float; fall back to a Decimal divisor.
        vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a

    vc = self.V[tuple(vct)]
    # Connect to original 2 vertices to the new centre vertex
    vc.connect(v1)
    vc.connect(v2)
    return vc
1015
+
1016
def vpool(self, origin, supremum):
    """Return the pool of neighbour vertices lying inside the
    hyperrectangle spanned by `origin` and `supremum`.

    The candidate set is the union of the neighbourhoods of the two
    corner vertices; candidates with any coordinate outside the
    elementwise bounds are filtered out.
    """
    o_t = tuple(origin)
    s_t = tuple(supremum)
    # Initiate vertices in case they don't exist
    v_origin = self.V[o_t]
    v_sup = self.V[s_t]

    # Elementwise lower/upper bounds of the refinement hyperrectangle
    # (origin/supremum need not be ordered per coordinate).
    lower = [min(oi, si) for oi, si in zip(o_t, s_t)]
    upper = [max(oi, si) for oi, si in zip(o_t, s_t)]

    # NOTE: sets/lists are used here because numpy arrays may not
    # scale well to thousands of dimensions.
    candidates = set()
    candidates.update(v_origin.nn)
    candidates.update(v_sup.nn)

    pool = set()
    for vn in candidates:
        inside = all(lo <= xi <= hi
                     for lo, xi, hi in zip(lower, vn.x, upper))
        if inside:
            pool.add(vn)
    return pool
1051
+
1052
def vf_to_vv(self, vertices, simplices):
    """
    Convert a vertex-face mesh to a vertex-vertex mesh used by this class

    Parameters
    ----------
    vertices : list
        Vertices
    simplices : list
        Simplices
    """
    if self.dim > 1:
        for s in simplices:
            # NOTE(review): combinations of size `self.dim` are
            # generated here, but only the first two entries of each
            # combination are connected below — confirm this is the
            # intended edge extraction (matches upstream behaviour).
            edges = itertools.combinations(s, self.dim)
            for e in edges:
                self.V[tuple(vertices[e[0]])].connect(
                    self.V[tuple(vertices[e[1]])])
    else:
        # 1-D case: each "simplex" is itself an edge (index pair).
        for e in simplices:
            self.V[tuple(vertices[e[0]])].connect(
                self.V[tuple(vertices[e[1]])])
    return
1074
+
1075
def connect_vertex_non_symm(self, v_x, near=None):
    """
    Adds a vertex at coords v_x to the complex that is not symmetric to the
    initial triangulation and sub-triangulation.

    If near is specified (for example; a star domain or collections of
    cells known to contain v) then only those simplices containd in near
    will be searched, this greatly speeds up the process.

    If near is not specified this method will search the entire simplicial
    complex structure.

    Parameters
    ----------
    v_x : tuple
        Coordinates of non-symmetric vertex
    near : set or list
        List of vertices, these are points near v to check for
    """
    if near is None:
        star = self.V
    else:
        star = near
    # Create the vertex origin
    if tuple(v_x) in self.V.cache:
        # NOTE(review): if v_x is already cached and is NOT recorded as
        # non-symmetric, the method returns None without attempting any
        # connection — presumably because a symmetric vertex is already
        # fully connected; confirm intended.
        if self.V[v_x] in self.V_non_symm:
            pass
        else:
            return

    # Touch the cache so the vertex object exists before connecting.
    self.V[v_x]
    found_nn = False
    S_rows = []
    for v in star:
        S_rows.append(v.x)

    S_rows = numpy.array(S_rows)
    # Precompute candidate-to-target difference vectors for in_simplex.
    A = numpy.array(S_rows) - numpy.array(v_x)
    # Iterate through all the possible simplices of S_rows
    for s_i in itertools.combinations(range(S_rows.shape[0]),
                                      r=self.dim + 1):
        # Check if connected, else s_i is not a simplex
        valid_simplex = True
        for i in itertools.combinations(s_i, r=2):
            # Every combination of vertices must be connected, we check of
            # the current iteration of all combinations of s_i are
            # connected we break the loop if it is not.
            if ((self.V[tuple(S_rows[i[1]])] not in
                    self.V[tuple(S_rows[i[0]])].nn)
                and (self.V[tuple(S_rows[i[0]])] not in
                     self.V[tuple(S_rows[i[1]])].nn)):
                valid_simplex = False
                break

        # NOTE(review): `S_rows[tuple([s_i])]` is legacy numpy fancy
        # indexing (equivalent to `S_rows[s_i, :]`) and is deprecated
        # in modern numpy — confirm against the supported numpy range.
        S = S_rows[tuple([s_i])]
        if valid_simplex:
            # A degenerate (linearly dependent) vertex set is not a
            # usable simplex even if fully connected.
            if self.deg_simplex(S, proj=None):
                valid_simplex = False

        # If s_i is a valid simplex we can test if v_x is inside si
        if valid_simplex:
            # Find the A_j0 value from the precalculated values
            A_j0 = A[tuple([s_i])]
            if self.in_simplex(S, v_x, A_j0):
                found_nn = True
                # breaks the main for loop, s_i is the target simplex:
                break

    # Connect the simplex to point
    if found_nn:
        for i in s_i:
            self.V[v_x].connect(self.V[tuple(S_rows[i])])
    # Attached the simplex to storage for all non-symmetric vertices
    self.V_non_symm.append(self.V[v_x])
    # this bool value indicates a successful connection if True:
    return found_nn
1151
+
1152
def in_simplex(self, S, v_x, A_j0=None):
    """Check if a vector v_x is in simplex `S`.

    Uses the standard orientation test: `v_x` lies inside the simplex
    iff the determinant signs of the facet matrices (with `v_x`
    substituted) all agree with the orientation of the simplex itself.

    Parameters
    ----------
    S : array_like
        Array containing simplex entries of vertices as rows
    v_x :
        A candidate vertex
    A_j0 : array, optional,
        Allows for A_j0 to be pre-calculated

    Returns
    -------
    res : boolean
        True if `v_x` is in `S`
    """
    # Edge-vector matrix of the simplex; its determinant sign gives
    # the simplex orientation.
    A_11 = numpy.delete(S, 0, 0) - S[0]

    sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))
    if sign_det_A_11 == 0:
        # NOTE: We keep the variable A_11, but we loop through A_jj
        # ind=
        # while sign_det_A_11 == 0:
        #    A_11 = numpy.delete(S, ind, 0) - S[ind]
        #    sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11))

        sign_det_A_11 = -1  # TODO: Choose another det of j instead?
        # TODO: Unlikely to work in many cases

    if A_j0 is None:
        A_j0 = S - v_x

    for d in range(self.dim + 1):
        # Expected orientation of the d-th facet alternates in sign.
        det_A_jj = (-1)**d * sign_det_A_11
        # TODO: Note that scipy might be faster to add as an optional
        #       dependency
        sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d,
                                                                 0)))
        # TODO: Note if sign_det_A_j0 == then the point is coplanar to the
        #       current simplex facet, so perhaps return True and attach?
        if det_A_jj == sign_det_A_j0:
            continue
        else:
            # One facet orientation disagrees: v_x is outside.
            return False

    return True
1199
+
1200
def deg_simplex(self, S, proj=None):
    """Test a simplex S for degeneracy (linear dependence in R^dim).

    Parameters
    ----------
    S : np.array
        Simplex with rows as vertex vectors
    proj : array, optional,
        If the projection S[1:] - S[0] is already
        computed it can be added as an optional argument.

    Returns
    -------
    bool
        True if the simplex is degenerate, False otherwise.
    """
    # Project all vertices onto the first one; a zero determinant of
    # the resulting edge-vector matrix means the edges are linearly
    # dependent in R^dim, i.e. the simplex is degenerate.
    if proj is None:
        proj = S[1:] - S[0]

    # TODO: Is checking the projection of one vertex against faces of other
    #       vertices sufficient? Or do we need to check more vertices in
    #       dimensions higher than 2?
    # TODO: Literature seems to suggest using proj.T, but why is this
    #       needed?
    # TODO: Replace exact zero comparison with a tolerance?
    return bool(numpy.linalg.det(proj) == 0.0)
venv/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_vertex.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ from abc import ABC, abstractmethod
3
+
4
+ import numpy as np
5
+
6
+ from scipy._lib._util import MapWrapper
7
+
8
+
9
class VertexBase(ABC):
    """
    Base class for a vertex.
    """
    def __init__(self, x, nn=None, index=None):
        """
        Initiation of a vertex object.

        Parameters
        ----------
        x : tuple or vector
            The geometric location (domain).
        nn : list, optional
            Nearest neighbour list.
        index : int, optional
            Index of vertex.
        """
        self.x = x
        self.hash = hash(self.x)  # Save precomputed hash

        if nn is not None:
            self.nn = set(nn)  # can use .indexupdate to add a new list
        else:
            self.nn = set()

        self.index = index

    def __hash__(self):
        # Vertices are immutable in `x`, so the hash computed at
        # construction time is reused for every lookup.
        return self.hash

    def __getattr__(self, item):
        # Lazily materialise the numpy view `x_a` of the coordinate
        # tuple on first access; any other missing attribute raises.
        if item not in ['x_a']:
            raise AttributeError(f"{type(self)} object has no attribute "
                                 f"'{item}'")
        if item == 'x_a':
            # Caching on the instance means __getattr__ is only hit once.
            self.x_a = np.array(self.x)
            return self.x_a

    @abstractmethod
    def connect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    @abstractmethod
    def disconnect(self, v):
        raise NotImplementedError("This method is only implemented with an "
                                  "associated child of the base class.")

    def star(self):
        """Returns the star domain ``st(v)`` of the vertex.

        Parameters
        ----------
        v :
            The vertex ``v`` in ``st(v)``

        Returns
        -------
        st : set
            A set containing all the vertices in ``st(v)``
        """
        # NOTE(review): `self.st` aliases `self.nn`, so adding `self`
        # below also inserts the vertex into its own neighbour set —
        # appears intentional upstream; confirm before changing.
        self.st = self.nn
        self.st.add(self)
        return self.st
73
+
74
+
75
class VertexScalarField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R associated with
    the geometry built from the VertexBase class
    """

    def __init__(self, x, field=None, nn=None, index=None, field_args=(),
                 g_cons=None, g_cons_args=()):
        """
        Parameters
        ----------
        x : tuple,
            vector of vertex coordinates
        field : callable, optional
            a scalar field f: R^n --> R associated with the geometry
        nn : list, optional
            list of nearest neighbours
        index : int, optional
            index of the vertex
        field_args : tuple, optional
            additional arguments to be passed to field
        g_cons : callable, optional
            constraints on the vertex
        g_cons_args : tuple, optional
            additional arguments to be passed to g_cons
        """
        super().__init__(x, nn=nn, index=index)

        # `self.f` and `self.feasible` are attached externally by the
        # vertex cache so field evaluation can be parallelised; they
        # are deliberately not initialised here.

        # Dirty flags: when True, the cached minimiser/maximiser
        # results must be recomputed.
        self.check_min = True
        self.check_max = True

    def connect(self, v):
        """Connects self to another vertex object v.

        Parameters
        ----------
        v : VertexBase or VertexScalarField object
        """
        # Ignore self-loops and already-existing edges.
        if v is self or v in self.nn:
            return
        self.nn.add(v)
        v.nn.add(self)
        self._invalidate_extrema(v)

    def disconnect(self, v):
        """Remove the edge between self and `v`, if it exists."""
        if v not in self.nn:
            return
        self.nn.remove(v)
        v.nn.remove(self)
        self._invalidate_extrema(v)

    def _invalidate_extrema(self, v):
        # The local topology changed, so cached homology flags on both
        # endpoints are stale.
        self.check_min = True
        self.check_max = True
        v.check_min = True
        v.check_max = True

    def minimiser(self):
        """Check whether this vertex is strictly less than all its
        neighbours"""
        if self.check_min:
            self._min = all(self.f < nbr.f for nbr in self.nn)
            self.check_min = False
        return self._min

    def maximiser(self):
        """
        Check whether this vertex is strictly greater than all its
        neighbours.
        """
        if self.check_max:
            self._max = all(self.f > nbr.f for nbr in self.nn)
            self.check_max = False
        return self._max
163
+
164
+
165
class VertexVectorField(VertexBase):
    """
    Add homology properties of a scalar field f: R^n --> R^m associated with
    the geometry built from the VertexBase class.
    """

    def __init__(self, x, sfield=None, vfield=None, field_args=(),
                 vfield_args=(), g_cons=None,
                 g_cons_args=(), nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

        # Placeholder: vector-field homology support is unfinished, so
        # instantiation is deliberately blocked.
        raise NotImplementedError("This class is still a work in progress")
177
+
178
+
179
class VertexCacheBase:
    """Base class for a vertex cache for a simplicial complex."""

    def __init__(self):
        # Ordered mapping from coordinate tuples to vertex objects;
        # insertion order is the vertex creation order.
        self.cache = collections.OrderedDict()
        self.nfev = 0  # Feasible points
        self.index = -1  # Index of the most recently created vertex

    def __iter__(self):
        # Iterate over vertex objects in creation order.
        yield from self.cache.values()

    def size(self):
        """Returns the size of the vertex cache."""
        return self.index + 1

    def print_out(self):
        """Print a summary of the cache followed by every vertex."""
        header = f"Vertex cache of size: {len(self.cache)}:"
        rule = '=' * len(header)
        print(rule)
        print(header)
        print(rule)
        for vertex in self.cache.values():
            vertex.print_out()
203
+
204
+
205
class VertexCube(VertexBase):
    """Vertex class to be used for a pure simplicial complex with no associated
    differential geometry (single level domain that exists in R^n)"""

    def __init__(self, x, nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

    def connect(self, v):
        # Symmetric edge insertion; self-loops and duplicate edges are
        # silently ignored.
        if v is self or v in self.nn:
            return
        self.nn.add(v)
        v.nn.add(self)

    def disconnect(self, v):
        # Symmetric edge removal; absent edges are silently ignored.
        if v not in self.nn:
            return
        self.nn.remove(v)
        v.nn.remove(self)
220
+
221
+
222
class VertexCacheIndex(VertexCacheBase):
    def __init__(self):
        """
        Class for a vertex cache for a simplicial complex without an associated
        field. Useful only for building and visualising a domain complex.

        Parameters
        ----------
        """
        super().__init__()
        # Vertex factory used by __getitem__ for cache misses.
        self.Vertex = VertexCube

    def __getitem__(self, x, nn=None):
        # EAFP: the common case is a cache hit.
        try:
            return self.cache[x]
        except KeyError:
            self.index += 1
            vertex = self.Vertex(x, index=self.index)
            # NOTE: logging each new vertex was removed upstream for a
            # surprisingly large performance gain.
            self.cache[x] = vertex
            return vertex
245
+
246
+
247
class VertexCacheField(VertexCacheBase):
    def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(),
                 workers=1):
        """
        Class for a vertex cache for a simplicial complex with an associated
        field.

        Parameters
        ----------
        field : callable
            Scalar or vector field callable.
        field_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            field function
        g_cons : dict or sequence of dict, optional
            Constraints definition.
            Function(s) ``R**n`` in the form::
        g_cons_args : tuple, optional
            Any additional fixed parameters needed to completely specify the
            constraint functions
        workers : int optional
            Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
            functions in parallel.

        """
        super().__init__()
        self.index = -1
        self.Vertex = VertexScalarField
        self.field = field
        self.field_args = field_args
        self.wfield = FieldWrapper(field, field_args)  # if workers is not 1

        self.g_cons = g_cons
        self.g_cons_args = g_cons_args
        self.wgcons = ConstraintWrapper(g_cons, g_cons_args)
        self.gpool = set()  # A set of tuples to process for feasibility

        # Field processing objects
        self.fpool = set()  # A set of tuples to process for scalar function
        self.sfc_lock = False  # True if self.fpool is non-Empty

        self.workers = workers
        self._mapwrapper = MapWrapper(workers)

        # Bind serial or parallel pool processors once, so hot-path
        # callers do not branch on `workers` per call.
        if workers == 1:
            self.process_gpool = self.proc_gpool
            if g_cons is None:
                self.process_fpool = self.proc_fpool_nog
            else:
                self.process_fpool = self.proc_fpool_g
        else:
            self.process_gpool = self.pproc_gpool
            if g_cons is None:
                self.process_fpool = self.pproc_fpool_nog
            else:
                self.process_fpool = self.pproc_fpool_g

    def __getitem__(self, x, nn=None):
        # EAFP: the common case is a cache hit; on a miss the new
        # vertex is queued in both processing pools.
        try:
            return self.cache[x]
        except KeyError:
            self.index += 1
            xval = self.Vertex(x, field=self.field, nn=nn, index=self.index,
                               field_args=self.field_args,
                               g_cons=self.g_cons,
                               g_cons_args=self.g_cons_args)

            self.cache[x] = xval  # Define in cache
            self.gpool.add(xval)  # Add to pool for processing feasibility
            self.fpool.add(xval)  # Add to pool for processing field values
            return self.cache[x]

    def __getstate__(self):
        # Drop the unpicklable worker pool before serialisation.
        # NOTE(review): no attribute named 'pool' is assigned anywhere
        # in this class (the pool lives in `self._mapwrapper`), so this
        # `del` would raise KeyError if pickling were attempted —
        # confirm against upstream usage.
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict

    def process_pools(self):
        # Feasibility first, then field values, then homology flags.
        if self.g_cons is not None:
            self.process_gpool()
        self.process_fpool()
        self.proc_minimisers()

    def feasibility_check(self, v):
        """Mark `v` infeasible (and its field value inf) if any
        constraint is violated at its coordinates."""
        v.feasible = True
        for g, args in zip(self.g_cons, self.g_cons_args):
            # constraint may return more than 1 value.
            if np.any(g(v.x_a, *args) < 0.0):
                v.f = np.inf
                v.feasible = False
                break

    def compute_sfield(self, v):
        """Compute the scalar field values of a vertex object `v`.

        Parameters
        ----------
        v : VertexBase or VertexScalarField object
        """
        try:
            v.f = self.field(v.x_a, *self.field_args)
            self.nfev += 1
        except AttributeError:
            # No field callable supplied; treat the value as infinite.
            v.f = np.inf
            # logging.warning(f"Field function not found at x = {self.x_a}")
        if np.isnan(v.f):
            v.f = np.inf

    def proc_gpool(self):
        """Process all constraints."""
        if self.g_cons is not None:
            for v in self.gpool:
                self.feasibility_check(v)
        # Clean the pool
        self.gpool = set()

    def pproc_gpool(self):
        """Process all constraints in parallel."""
        # NOTE(review): unlike proc_gpool, this does not clear
        # self.gpool afterwards — confirm whether that is intentional.
        gpool_l = []
        for v in self.gpool:
            gpool_l.append(v.x_a)

        G = self._mapwrapper(self.wgcons.gcons, gpool_l)
        for v, g in zip(self.gpool, G):
            v.feasible = g  # set vertex object attribute v.feasible = g (bool)

    def proc_fpool_g(self):
        """Process all field functions with constraints supplied."""
        for v in self.fpool:
            if v.feasible:
                self.compute_sfield(v)
        # Clean the pool
        self.fpool = set()

    def proc_fpool_nog(self):
        """Process all field functions with no constraints supplied."""
        for v in self.fpool:
            self.compute_sfield(v)
        # Clean the pool
        self.fpool = set()

    def pproc_fpool_g(self):
        """
        Process all field functions with constraints supplied in parallel.
        """
        # NOTE(review): bare attribute access below has no effect;
        # presumably a leftover from forcing lazy initialisation.
        self.wfield.func
        fpool_l = []
        for v in self.fpool:
            if v.feasible:
                fpool_l.append(v.x_a)
            else:
                v.f = np.inf
        F = self._mapwrapper(self.wfield.func, fpool_l)
        for va, f in zip(fpool_l, F):
            vt = tuple(va)
            self[vt].f = f  # set vertex object attribute v.f = f
            self.nfev += 1
        # Clean the pool
        self.fpool = set()

    def pproc_fpool_nog(self):
        """
        Process all field functions with no constraints supplied in parallel.
        """
        # NOTE(review): bare attribute access below has no effect;
        # presumably a leftover from forcing lazy initialisation.
        self.wfield.func
        fpool_l = []
        for v in self.fpool:
            fpool_l.append(v.x_a)
        F = self._mapwrapper(self.wfield.func, fpool_l)
        for va, f in zip(fpool_l, F):
            vt = tuple(va)
            self[vt].f = f  # set vertex object attribute v.f = f
            self.nfev += 1
        # Clean the pool
        self.fpool = set()

    def proc_minimisers(self):
        """Check for minimisers."""
        for v in self:
            v.minimiser()
            v.maximiser()
428
+
429
+
430
class ConstraintWrapper:
    """Object to wrap constraints to pass to `multiprocessing.Pool`."""

    def __init__(self, g_cons, g_cons_args):
        self.g_cons = g_cons
        self.g_cons_args = g_cons_args

    def gcons(self, v_x_a):
        """Return True iff every constraint is satisfied at `v_x_a`."""
        for g, args in zip(self.g_cons, self.g_cons_args):
            # A constraint callable may return a vector of values; the
            # point is infeasible if any component is negative.
            if np.any(g(v_x_a, *args) < 0.0):
                return False
        return True
444
+
445
+
446
class FieldWrapper:
    """Object to wrap field to pass to `multiprocessing.Pool`."""

    def __init__(self, field, field_args):
        self.field = field
        self.field_args = field_args

    def func(self, v_x_a):
        """Evaluate the wrapped field at `v_x_a`.

        Failures during evaluation and NaN results are both mapped to
        +inf so the caller treats such points as infeasible.
        """
        try:
            result = self.field(v_x_a, *self.field_args)
        except Exception:
            return np.inf
        return np.inf if np.isnan(result) else result
venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._trlib import TRLIBQuadraticSubproblem
2
+
3
+ __all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem']
4
+
5
+
6
+ def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False):
7
+ def subproblem_factory(x, fun, jac, hess, hessp):
8
+ return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp,
9
+ tol_rel_i=tol_rel_i,
10
+ tol_rel_b=tol_rel_b,
11
+ disp=disp)
12
+ return subproblem_factory
venv/lib/python3.10/site-packages/scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (708 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (381 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc ADDED
Binary file (7.24 kB). View file
 
venv/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__init__.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =======================================
3
+ Signal processing (:mod:`scipy.signal`)
4
+ =======================================
5
+
6
+ Convolution
7
+ ===========
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ convolve -- N-D convolution.
13
+ correlate -- N-D correlation.
14
+ fftconvolve -- N-D convolution using the FFT.
15
+ oaconvolve -- N-D convolution using the overlap-add method.
16
+ convolve2d -- 2-D convolution (more options).
17
+ correlate2d -- 2-D correlation (more options).
18
+ sepfir2d -- Convolve with a 2-D separable FIR filter.
19
+ choose_conv_method -- Chooses faster of FFT and direct convolution methods.
20
+ correlation_lags -- Determines lag indices for 1D cross-correlation.
21
+
22
+ B-splines
23
+ =========
24
+
25
+ .. autosummary::
26
+ :toctree: generated/
27
+
28
+ gauss_spline -- Gaussian approximation to the B-spline basis function.
29
+ cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
30
+ qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
31
+ cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
32
+ qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
33
+ cspline1d_eval -- Evaluate a cubic spline at the given points.
34
+ qspline1d_eval -- Evaluate a quadratic spline at the given points.
35
+ spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
36
+
37
+ Filtering
38
+ =========
39
+
40
+ .. autosummary::
41
+ :toctree: generated/
42
+
43
+ order_filter -- N-D order filter.
44
+ medfilt -- N-D median filter.
45
+ medfilt2d -- 2-D median filter (faster).
46
+ wiener -- N-D Wiener filter.
47
+
48
+ symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
49
+ symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
50
+ lfilter -- 1-D FIR and IIR digital linear filtering.
51
+ lfiltic -- Construct initial conditions for `lfilter`.
52
+ lfilter_zi -- Compute an initial state zi for the lfilter function that
53
+ -- corresponds to the steady state of the step response.
54
+ filtfilt -- A forward-backward filter.
55
+ savgol_filter -- Filter a signal using the Savitzky-Golay filter.
56
+
57
+ deconvolve -- 1-D deconvolution using lfilter.
58
+
59
+ sosfilt -- 1-D IIR digital linear filtering using
60
+ -- a second-order sections filter representation.
61
+ sosfilt_zi -- Compute an initial state zi for the sosfilt function that
62
+ -- corresponds to the steady state of the step response.
63
+ sosfiltfilt -- A forward-backward filter for second-order sections.
64
+ hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
65
+ hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
66
+
67
+ decimate -- Downsample a signal.
68
+ detrend -- Remove linear and/or constant trends from data.
69
+ resample -- Resample using Fourier method.
70
+ resample_poly -- Resample using polyphase filtering method.
71
+ upfirdn -- Upsample, apply FIR filter, downsample.
72
+
73
+ Filter design
74
+ =============
75
+
76
+ .. autosummary::
77
+ :toctree: generated/
78
+
79
+ bilinear -- Digital filter from an analog filter using
80
+ -- the bilinear transform.
81
+ bilinear_zpk -- Digital filter from an analog filter using
82
+ -- the bilinear transform.
83
+ findfreqs -- Find array of frequencies for computing filter response.
84
+ firls -- FIR filter design using least-squares error minimization.
85
+ firwin -- Windowed FIR filter design, with frequency response
86
+ -- defined as pass and stop bands.
87
+ firwin2 -- Windowed FIR filter design, with arbitrary frequency
88
+ -- response.
89
+ freqs -- Analog filter frequency response from TF coefficients.
90
+ freqs_zpk -- Analog filter frequency response from ZPK coefficients.
91
+ freqz -- Digital filter frequency response from TF coefficients.
92
+ freqz_zpk -- Digital filter frequency response from ZPK coefficients.
93
+ sosfreqz -- Digital filter frequency response for SOS format filter.
94
+ gammatone -- FIR and IIR gammatone filter design.
95
+ group_delay -- Digital filter group delay.
96
+ iirdesign -- IIR filter design given bands and gains.
97
+ iirfilter -- IIR filter design given order and critical frequencies.
98
+ kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
99
+ -- the number of taps and the transition width at
100
+ -- discontinuities in the frequency response.
101
+ kaiser_beta -- Compute the Kaiser parameter beta, given the desired
102
+ -- FIR filter attenuation.
103
+ kaiserord -- Design a Kaiser window to limit ripple and width of
104
+ -- transition region.
105
+ minimum_phase -- Convert a linear phase FIR filter to minimum phase.
106
+ savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
107
+ -- filter.
108
+ remez -- Optimal FIR filter design.
109
+
110
+ unique_roots -- Unique roots and their multiplicities.
111
+ residue -- Partial fraction expansion of b(s) / a(s).
112
+ residuez -- Partial fraction expansion of b(z) / a(z).
113
+ invres -- Inverse partial fraction expansion for analog filter.
114
+ invresz -- Inverse partial fraction expansion for digital filter.
115
+ BadCoefficients -- Warning on badly conditioned filter coefficients.
116
+
117
+ Lower-level filter design functions:
118
+
119
+ .. autosummary::
120
+ :toctree: generated/
121
+
122
+ abcd_normalize -- Check state-space matrices and ensure they are rank-2.
123
+ band_stop_obj -- Band Stop Objective Function for order minimization.
124
+ besselap -- Return (z,p,k) for analog prototype of Bessel filter.
125
+ buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
126
+ cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
127
+ cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
128
+ cmplx_sort -- Sort roots based on magnitude.
129
+ ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
130
+ lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
131
+ lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
132
+ lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
133
+ lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
134
+ lp2hp -- Transform a lowpass filter prototype to a highpass filter.
135
+ lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
136
+ lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
137
+ lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
138
+ normalize -- Normalize polynomial representation of a transfer function.
139
+
140
+
141
+
142
+ Matlab-style IIR filter design
143
+ ==============================
144
+
145
+ .. autosummary::
146
+ :toctree: generated/
147
+
148
+ butter -- Butterworth
149
+ buttord
150
+ cheby1 -- Chebyshev Type I
151
+ cheb1ord
152
+ cheby2 -- Chebyshev Type II
153
+ cheb2ord
154
+ ellip -- Elliptic (Cauer)
155
+ ellipord
156
+ bessel -- Bessel (no order selection available -- try butterod)
157
+ iirnotch -- Design second-order IIR notch digital filter.
158
+ iirpeak -- Design second-order IIR peak (resonant) digital filter.
159
+ iircomb -- Design IIR comb filter.
160
+
161
+ Continuous-time linear systems
162
+ ==============================
163
+
164
+ .. autosummary::
165
+ :toctree: generated/
166
+
167
+ lti -- Continuous-time linear time invariant system base class.
168
+ StateSpace -- Linear time invariant system in state space form.
169
+ TransferFunction -- Linear time invariant system in transfer function form.
170
+ ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
171
+ lsim -- Continuous-time simulation of output to linear system.
172
+ impulse -- Impulse response of linear, time-invariant (LTI) system.
173
+ step -- Step response of continuous-time LTI system.
174
+ freqresp -- Frequency response of a continuous-time LTI system.
175
+ bode -- Bode magnitude and phase data (continuous-time LTI).
176
+
177
+ Discrete-time linear systems
178
+ ============================
179
+
180
+ .. autosummary::
181
+ :toctree: generated/
182
+
183
+ dlti -- Discrete-time linear time invariant system base class.
184
+ StateSpace -- Linear time invariant system in state space form.
185
+ TransferFunction -- Linear time invariant system in transfer function form.
186
+ ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
187
+ dlsim -- Simulation of output to a discrete-time linear system.
188
+ dimpulse -- Impulse response of a discrete-time LTI system.
189
+ dstep -- Step response of a discrete-time LTI system.
190
+ dfreqresp -- Frequency response of a discrete-time LTI system.
191
+ dbode -- Bode magnitude and phase data (discrete-time LTI).
192
+
193
+ LTI representations
194
+ ===================
195
+
196
+ .. autosummary::
197
+ :toctree: generated/
198
+
199
+ tf2zpk -- Transfer function to zero-pole-gain.
200
+ tf2sos -- Transfer function to second-order sections.
201
+ tf2ss -- Transfer function to state-space.
202
+ zpk2tf -- Zero-pole-gain to transfer function.
203
+ zpk2sos -- Zero-pole-gain to second-order sections.
204
+ zpk2ss -- Zero-pole-gain to state-space.
205
+ ss2tf -- State-pace to transfer function.
206
+ ss2zpk -- State-space to pole-zero-gain.
207
+ sos2zpk -- Second-order sections to zero-pole-gain.
208
+ sos2tf -- Second-order sections to transfer function.
209
+ cont2discrete -- Continuous-time to discrete-time LTI conversion.
210
+ place_poles -- Pole placement.
211
+
212
+ Waveforms
213
+ =========
214
+
215
+ .. autosummary::
216
+ :toctree: generated/
217
+
218
+ chirp -- Frequency swept cosine signal, with several freq functions.
219
+ gausspulse -- Gaussian modulated sinusoid.
220
+ max_len_seq -- Maximum length sequence.
221
+ sawtooth -- Periodic sawtooth.
222
+ square -- Square wave.
223
+ sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial.
224
+ unit_impulse -- Discrete unit impulse.
225
+
226
+ Window functions
227
+ ================
228
+
229
+ For window functions, see the `scipy.signal.windows` namespace.
230
+
231
+ In the `scipy.signal` namespace, there is a convenience function to
232
+ obtain these windows by name:
233
+
234
+ .. autosummary::
235
+ :toctree: generated/
236
+
237
+ get_window -- Return a window of a given length and type.
238
+
239
+ Wavelets
240
+ ========
241
+
242
+ .. autosummary::
243
+ :toctree: generated/
244
+
245
+ cascade -- Compute scaling function and wavelet from coefficients.
246
+ daub -- Return low-pass.
247
+ morlet -- Complex Morlet wavelet.
248
+ qmf -- Return quadrature mirror filter from low-pass.
249
+ ricker -- Return ricker wavelet.
250
+ morlet2 -- Return Morlet wavelet, compatible with cwt.
251
+ cwt -- Perform continuous wavelet transform.
252
+
253
+ Peak finding
254
+ ============
255
+
256
+ .. autosummary::
257
+ :toctree: generated/
258
+
259
+ argrelmin -- Calculate the relative minima of data.
260
+ argrelmax -- Calculate the relative maxima of data.
261
+ argrelextrema -- Calculate the relative extrema of data.
262
+ find_peaks -- Find a subset of peaks inside a signal.
263
+ find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
264
+ peak_prominences -- Calculate the prominence of each peak in a signal.
265
+ peak_widths -- Calculate the width of each peak in a signal.
266
+
267
+ Spectral analysis
268
+ =================
269
+
270
+ .. autosummary::
271
+ :toctree: generated/
272
+
273
+ periodogram -- Compute a (modified) periodogram.
274
+ welch -- Compute a periodogram using Welch's method.
275
+ csd -- Compute the cross spectral density, using Welch's method.
276
+ coherence -- Compute the magnitude squared coherence, using Welch's method.
277
+ spectrogram -- Compute the spectrogram (legacy).
278
+ lombscargle -- Computes the Lomb-Scargle periodogram.
279
+ vectorstrength -- Computes the vector strength.
280
+ ShortTimeFFT -- Interface for calculating the \
281
+ :ref:`Short Time Fourier Transform <tutorial_stft>` and \
282
+ its inverse.
283
+ stft -- Compute the Short Time Fourier Transform (legacy).
284
+ istft -- Compute the Inverse Short Time Fourier Transform (legacy).
285
+ check_COLA -- Check the COLA constraint for iSTFT reconstruction.
286
+ check_NOLA -- Check the NOLA constraint for iSTFT reconstruction.
287
+
288
+ Chirp Z-transform and Zoom FFT
289
+ ============================================
290
+
291
+ .. autosummary::
292
+ :toctree: generated/
293
+
294
+ czt - Chirp z-transform convenience function
295
+ zoom_fft - Zoom FFT convenience function
296
+ CZT - Chirp z-transform function generator
297
+ ZoomFFT - Zoom FFT function generator
298
+ czt_points - Output the z-plane points sampled by a chirp z-transform
299
+
300
+ The functions are simpler to use than the classes, but are less efficient when
301
+ using the same transform on many arrays of the same length, since they
302
+ repeatedly generate the same chirp signal with every call. In these cases,
303
+ use the classes to create a reusable function instead.
304
+
305
+ """
306
+
307
+ from . import _sigtools, windows
308
+ from ._waveforms import *
309
+ from ._max_len_seq import max_len_seq
310
+ from ._upfirdn import upfirdn
311
+
312
+ from ._spline import (
313
+ cspline2d,
314
+ qspline2d,
315
+ sepfir2d,
316
+ symiirorder1,
317
+ symiirorder2,
318
+ )
319
+
320
+ from ._bsplines import *
321
+ from ._filter_design import *
322
+ from ._fir_filter_design import *
323
+ from ._ltisys import *
324
+ from ._lti_conversion import *
325
+ from ._signaltools import *
326
+ from ._savitzky_golay import savgol_coeffs, savgol_filter
327
+ from ._spectral_py import *
328
+ from ._short_time_fft import *
329
+ from ._wavelets import *
330
+ from ._peak_finding import *
331
+ from ._czt import *
332
+ from .windows import get_window # keep this one in signal namespace
333
+
334
+ # Deprecated namespaces, to be removed in v2.0.0
335
+ from . import (
336
+ bsplines, filter_design, fir_filter_design, lti_conversion, ltisys,
337
+ spectral, signaltools, waveforms, wavelets, spline
338
+ )
339
+
340
+ __all__ = [
341
+ s for s in dir() if not s.startswith("_")
342
+ ]
343
+
344
+ from scipy._lib._testutils import PytestTester
345
+ test = PytestTester(__name__)
346
+ del PytestTester
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc ADDED
Binary file (168 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc ADDED
Binary file (42 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_ltisys.cpython-310.pyc ADDED
Binary file (94.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc ADDED
Binary file (4.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc ADDED
Binary file (44.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc ADDED
Binary file (67.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc ADDED
Binary file (133 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_waveforms.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/_wavelets.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/bsplines.cpython-310.pyc ADDED
Binary file (769 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/filter_design.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc ADDED
Binary file (804 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/lti_conversion.cpython-310.pyc ADDED
Binary file (755 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/ltisys.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/spline.cpython-310.pyc ADDED
Binary file (889 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/signal/__pycache__/wavelets.cpython-310.pyc ADDED
Binary file (669 Bytes). View file
 
venv/lib/python3.10/site-packages/scipy/signal/_filter_design.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/scipy/signal/_max_len_seq.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Eric Larson
2
+ # 2014
3
+
4
+ """Tools for MLS generation"""
5
+
6
+ import numpy as np
7
+
8
+ from ._max_len_seq_inner import _max_len_seq_inner
9
+
10
+ __all__ = ['max_len_seq']
11
+
12
+
13
+ # These are definitions of linear shift register taps for use in max_len_seq()
14
+ _mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
15
+ 9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
16
+ 14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
17
+ 18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
18
+ 23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
19
+ 27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
20
+ 31: [28], 32: [31, 30, 10]}
21
+
22
+ def max_len_seq(nbits, state=None, length=None, taps=None):
23
+ """
24
+ Maximum length sequence (MLS) generator.
25
+
26
+ Parameters
27
+ ----------
28
+ nbits : int
29
+ Number of bits to use. Length of the resulting sequence will
30
+ be ``(2**nbits) - 1``. Note that generating long sequences
31
+ (e.g., greater than ``nbits == 16``) can take a long time.
32
+ state : array_like, optional
33
+ If array, must be of length ``nbits``, and will be cast to binary
34
+ (bool) representation. If None, a seed of ones will be used,
35
+ producing a repeatable representation. If ``state`` is all
36
+ zeros, an error is raised as this is invalid. Default: None.
37
+ length : int, optional
38
+ Number of samples to compute. If None, the entire length
39
+ ``(2**nbits) - 1`` is computed.
40
+ taps : array_like, optional
41
+ Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
42
+ If None, taps will be automatically selected (for up to
43
+ ``nbits == 32``).
44
+
45
+ Returns
46
+ -------
47
+ seq : array
48
+ Resulting MLS sequence of 0's and 1's.
49
+ state : array
50
+ The final state of the shift register.
51
+
52
+ Notes
53
+ -----
54
+ The algorithm for MLS generation is generically described in:
55
+
56
+ https://en.wikipedia.org/wiki/Maximum_length_sequence
57
+
58
+ The default values for taps are specifically taken from the first
59
+ option listed for each value of ``nbits`` in:
60
+
61
+ https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
62
+
63
+ .. versionadded:: 0.15.0
64
+
65
+ Examples
66
+ --------
67
+ MLS uses binary convention:
68
+
69
+ >>> from scipy.signal import max_len_seq
70
+ >>> max_len_seq(4)[0]
71
+ array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
72
+
73
+ MLS has a white spectrum (except for DC):
74
+
75
+ >>> import numpy as np
76
+ >>> import matplotlib.pyplot as plt
77
+ >>> from numpy.fft import fft, ifft, fftshift, fftfreq
78
+ >>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
79
+ >>> spec = fft(seq)
80
+ >>> N = len(seq)
81
+ >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
82
+ >>> plt.margins(0.1, 0.1)
83
+ >>> plt.grid(True)
84
+ >>> plt.show()
85
+
86
+ Circular autocorrelation of MLS is an impulse:
87
+
88
+ >>> acorrcirc = ifft(spec * np.conj(spec)).real
89
+ >>> plt.figure()
90
+ >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
91
+ >>> plt.margins(0.1, 0.1)
92
+ >>> plt.grid(True)
93
+ >>> plt.show()
94
+
95
+ Linear autocorrelation of MLS is approximately an impulse:
96
+
97
+ >>> acorr = np.correlate(seq, seq, 'full')
98
+ >>> plt.figure()
99
+ >>> plt.plot(np.arange(-N+1, N), acorr, '.-')
100
+ >>> plt.margins(0.1, 0.1)
101
+ >>> plt.grid(True)
102
+ >>> plt.show()
103
+
104
+ """
105
+ taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64
106
+ if taps is None:
107
+ if nbits not in _mls_taps:
108
+ known_taps = np.array(list(_mls_taps.keys()))
109
+ raise ValueError(f'nbits must be between {known_taps.min()} and '
110
+ f'{known_taps.max()} if taps is None')
111
+ taps = np.array(_mls_taps[nbits], taps_dtype)
112
+ else:
113
+ taps = np.unique(np.array(taps, taps_dtype))[::-1]
114
+ if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
115
+ raise ValueError('taps must be non-empty with values between '
116
+ 'zero and nbits (inclusive)')
117
+ taps = np.array(taps) # needed for Cython and Pythran
118
+ n_max = (2**nbits) - 1
119
+ if length is None:
120
+ length = n_max
121
+ else:
122
+ length = int(length)
123
+ if length < 0:
124
+ raise ValueError('length must be greater than or equal to 0')
125
+ # We use int8 instead of bool here because NumPy arrays of bools
126
+ # don't seem to work nicely with Cython
127
+ if state is None:
128
+ state = np.ones(nbits, dtype=np.int8, order='c')
129
+ else:
130
+ # makes a copy if need be, ensuring it's 0's and 1's
131
+ state = np.array(state, dtype=bool, order='c').astype(np.int8)
132
+ if state.ndim != 1 or state.size != nbits:
133
+ raise ValueError('state must be a 1-D array of size nbits')
134
+ if np.all(state == 0):
135
+ raise ValueError('state must not be all zeros')
136
+
137
+ seq = np.empty(length, dtype=np.int8, order='c')
138
+ state = _max_len_seq_inner(taps, state, nbits, length, seq)
139
+ return seq, state
venv/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (305 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (303 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (85.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (395 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/_waveforms.py ADDED
@@ -0,0 +1,672 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Travis Oliphant
2
+ # 2003
3
+ #
4
+ # Feb. 2010: Updated by Warren Weckesser:
5
+ # Rewrote much of chirp()
6
+ # Added sweep_poly()
7
+ import numpy as np
8
+ from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
9
+ exp, cos, sin, polyval, polyint
10
+
11
+
12
+ __all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
13
+ 'unit_impulse']
14
+
15
+
16
+ def sawtooth(t, width=1):
17
+ """
18
+ Return a periodic sawtooth or triangle waveform.
19
+
20
+ The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
21
+ interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
22
+ ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
23
+
24
+ Note that this is not band-limited. It produces an infinite number
25
+ of harmonics, which are aliased back and forth across the frequency
26
+ spectrum.
27
+
28
+ Parameters
29
+ ----------
30
+ t : array_like
31
+ Time.
32
+ width : array_like, optional
33
+ Width of the rising ramp as a proportion of the total cycle.
34
+ Default is 1, producing a rising ramp, while 0 produces a falling
35
+ ramp. `width` = 0.5 produces a triangle wave.
36
+ If an array, causes wave shape to change over time, and must be the
37
+ same length as t.
38
+
39
+ Returns
40
+ -------
41
+ y : ndarray
42
+ Output array containing the sawtooth waveform.
43
+
44
+ Examples
45
+ --------
46
+ A 5 Hz waveform sampled at 500 Hz for 1 second:
47
+
48
+ >>> import numpy as np
49
+ >>> from scipy import signal
50
+ >>> import matplotlib.pyplot as plt
51
+ >>> t = np.linspace(0, 1, 500)
52
+ >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
53
+
54
+ """
55
+ t, w = asarray(t), asarray(width)
56
+ w = asarray(w + (t - t))
57
+ t = asarray(t + (w - w))
58
+ if t.dtype.char in ['fFdD']:
59
+ ytype = t.dtype.char
60
+ else:
61
+ ytype = 'd'
62
+ y = zeros(t.shape, ytype)
63
+
64
+ # width must be between 0 and 1 inclusive
65
+ mask1 = (w > 1) | (w < 0)
66
+ place(y, mask1, nan)
67
+
68
+ # take t modulo 2*pi
69
+ tmod = mod(t, 2 * pi)
70
+
71
+ # on the interval 0 to width*2*pi function is
72
+ # tmod / (pi*w) - 1
73
+ mask2 = (1 - mask1) & (tmod < w * 2 * pi)
74
+ tsub = extract(mask2, tmod)
75
+ wsub = extract(mask2, w)
76
+ place(y, mask2, tsub / (pi * wsub) - 1)
77
+
78
+ # on the interval width*2*pi to 2*pi function is
79
+ # (pi*(w+1)-tmod) / (pi*(1-w))
80
+
81
+ mask3 = (1 - mask1) & (1 - mask2)
82
+ tsub = extract(mask3, tmod)
83
+ wsub = extract(mask3, w)
84
+ place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
85
+ return y
86
+
87
+
88
+ def square(t, duty=0.5):
89
+ """
90
+ Return a periodic square-wave waveform.
91
+
92
+ The square wave has a period ``2*pi``, has value +1 from 0 to
93
+ ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
94
+ the interval [0,1].
95
+
96
+ Note that this is not band-limited. It produces an infinite number
97
+ of harmonics, which are aliased back and forth across the frequency
98
+ spectrum.
99
+
100
+ Parameters
101
+ ----------
102
+ t : array_like
103
+ The input time array.
104
+ duty : array_like, optional
105
+ Duty cycle. Default is 0.5 (50% duty cycle).
106
+ If an array, causes wave shape to change over time, and must be the
107
+ same length as t.
108
+
109
+ Returns
110
+ -------
111
+ y : ndarray
112
+ Output array containing the square waveform.
113
+
114
+ Examples
115
+ --------
116
+ A 5 Hz waveform sampled at 500 Hz for 1 second:
117
+
118
+ >>> import numpy as np
119
+ >>> from scipy import signal
120
+ >>> import matplotlib.pyplot as plt
121
+ >>> t = np.linspace(0, 1, 500, endpoint=False)
122
+ >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
123
+ >>> plt.ylim(-2, 2)
124
+
125
+ A pulse-width modulated sine wave:
126
+
127
+ >>> plt.figure()
128
+ >>> sig = np.sin(2 * np.pi * t)
129
+ >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
130
+ >>> plt.subplot(2, 1, 1)
131
+ >>> plt.plot(t, sig)
132
+ >>> plt.subplot(2, 1, 2)
133
+ >>> plt.plot(t, pwm)
134
+ >>> plt.ylim(-1.5, 1.5)
135
+
136
+ """
137
+ t, w = asarray(t), asarray(duty)
138
+ w = asarray(w + (t - t))
139
+ t = asarray(t + (w - w))
140
+ if t.dtype.char in ['fFdD']:
141
+ ytype = t.dtype.char
142
+ else:
143
+ ytype = 'd'
144
+
145
+ y = zeros(t.shape, ytype)
146
+
147
+ # width must be between 0 and 1 inclusive
148
+ mask1 = (w > 1) | (w < 0)
149
+ place(y, mask1, nan)
150
+
151
+ # on the interval 0 to duty*2*pi function is 1
152
+ tmod = mod(t, 2 * pi)
153
+ mask2 = (1 - mask1) & (tmod < w * 2 * pi)
154
+ place(y, mask2, 1)
155
+
156
+ # on the interval duty*2*pi to 2*pi function is
157
+ # (pi*(w+1)-tmod) / (pi*(1-w))
158
+ mask3 = (1 - mask1) & (1 - mask2)
159
+ place(y, mask3, -1)
160
+ return y
161
+
162
+
163
+ def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
164
+ retenv=False):
165
+ """
166
+ Return a Gaussian modulated sinusoid:
167
+
168
+ ``exp(-a t^2) exp(1j*2*pi*fc*t).``
169
+
170
+ If `retquad` is True, then return the real and imaginary parts
171
+ (in-phase and quadrature).
172
+ If `retenv` is True, then return the envelope (unmodulated signal).
173
+ Otherwise, return the real part of the modulated sinusoid.
174
+
175
+ Parameters
176
+ ----------
177
+ t : ndarray or the string 'cutoff'
178
+ Input array.
179
+ fc : float, optional
180
+ Center frequency (e.g. Hz). Default is 1000.
181
+ bw : float, optional
182
+ Fractional bandwidth in frequency domain of pulse (e.g. Hz).
183
+ Default is 0.5.
184
+ bwr : float, optional
185
+ Reference level at which fractional bandwidth is calculated (dB).
186
+ Default is -6.
187
+ tpr : float, optional
188
+ If `t` is 'cutoff', then the function returns the cutoff
189
+ time for when the pulse amplitude falls below `tpr` (in dB).
190
+ Default is -60.
191
+ retquad : bool, optional
192
+ If True, return the quadrature (imaginary) as well as the real part
193
+ of the signal. Default is False.
194
+ retenv : bool, optional
195
+ If True, return the envelope of the signal. Default is False.
196
+
197
+ Returns
198
+ -------
199
+ yI : ndarray
200
+ Real part of signal. Always returned.
201
+ yQ : ndarray
202
+ Imaginary part of signal. Only returned if `retquad` is True.
203
+ yenv : ndarray
204
+ Envelope of signal. Only returned if `retenv` is True.
205
+
206
+ See Also
207
+ --------
208
+ scipy.signal.morlet
209
+
210
+ Examples
211
+ --------
212
+ Plot real component, imaginary component, and envelope for a 5 Hz pulse,
213
+ sampled at 100 Hz for 2 seconds:
214
+
215
+ >>> import numpy as np
216
+ >>> from scipy import signal
217
+ >>> import matplotlib.pyplot as plt
218
+ >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
219
+ >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
220
+ >>> plt.plot(t, i, t, q, t, e, '--')
221
+
222
+ """
223
+ if fc < 0:
224
+ raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
225
+ if bw <= 0:
226
+ raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
227
+ if bwr >= 0:
228
+ raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
229
+ "be < 0 dB" % bwr)
230
+
231
+ # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
232
+
233
+ ref = pow(10.0, bwr / 20.0)
234
+ # fdel = fc*bw/2: g(fdel) = ref --- solve this for a
235
+ #
236
+ # pi^2/a * fc^2 * bw^2 /4=-log(ref)
237
+ a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
238
+
239
+ if isinstance(t, str):
240
+ if t == 'cutoff': # compute cut_off point
241
+ # Solve exp(-a tc**2) = tref for tc
242
+ # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
243
+ if tpr >= 0:
244
+ raise ValueError("Reference level for time cutoff must "
245
+ "be < 0 dB")
246
+ tref = pow(10.0, tpr / 20.0)
247
+ return sqrt(-log(tref) / a)
248
+ else:
249
+ raise ValueError("If `t` is a string, it must be 'cutoff'")
250
+
251
+ yenv = exp(-a * t * t)
252
+ yI = yenv * cos(2 * pi * fc * t)
253
+ yQ = yenv * sin(2 * pi * fc * t)
254
+ if not retquad and not retenv:
255
+ return yI
256
+ if not retquad and retenv:
257
+ return yI, yenv
258
+ if retquad and not retenv:
259
+ return yI, yQ
260
+ if retquad and retenv:
261
+ return yI, yQ, yenv
262
+
263
+
264
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    In the following, 'Hz' should be read as 'cycles per unit'; the unit
    need not be one second, and `t` may equally well be a spatial
    coordinate.  The units of rotation are cycles, not radians.

    Parameters
    ----------
    t : array_like
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (e.g. Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (e.g. Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep.  Default is 'linear'.  The short aliases
        'lin'/'li', 'quad'/'q', 'log'/'lo' and 'hyp' are also accepted.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'.  It determines whether the
        vertex of the parabola that is the graph of the frequency is at
        t=0 (True, the default) or at t=t1 (False).

    Returns
    -------
    y : ndarray
        A numpy array containing the signal evaluated at `t` with the
        requested time-varying frequency.  More precisely, the function
        returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
        (from 0 to `t`) of ``2*pi*f(t)``, and ``f(t)`` is determined by
        `method`:

        * linear:      ``f(t) = f0 + (f1 - f0) * t / t1``
        * quadratic:   parabola through ``(0, f0)`` and ``(t1, f1)``;
          vertex placement controlled by `vertex_zero`
        * logarithmic: ``f(t) = f0 * (f1/f0)**(t/t1)``; `f0` and `f1`
          must be nonzero and have the same sign
        * hyperbolic:  ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``;
          `f0` and `f1` must be nonzero

    See Also
    --------
    sweep_poly

    Examples
    --------
    Plot the waveform for a linear chirp from 6 Hz to 1 Hz over 10 seconds:

    >>> import numpy as np
    >>> from scipy.signal import chirp
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 10, 1500)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, w)
    >>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
    >>> plt.xlabel('t (sec)')
    >>> plt.show()
    """
    # The phase integral is computed in _chirp_phase, which keeps that
    # piece independently testable.
    phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    # `phi` is given in degrees; convert to radians before adding.
    return cos(phase + phi * (pi / 180))
420
+
421
+
422
+ def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
423
+ """
424
+ Calculate the phase used by `chirp` to generate its output.
425
+
426
+ See `chirp` for a description of the arguments.
427
+
428
+ """
429
+ t = asarray(t)
430
+ f0 = float(f0)
431
+ t1 = float(t1)
432
+ f1 = float(f1)
433
+ if method in ['linear', 'lin', 'li']:
434
+ beta = (f1 - f0) / t1
435
+ phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
436
+
437
+ elif method in ['quadratic', 'quad', 'q']:
438
+ beta = (f1 - f0) / (t1 ** 2)
439
+ if vertex_zero:
440
+ phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
441
+ else:
442
+ phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
443
+
444
+ elif method in ['logarithmic', 'log', 'lo']:
445
+ if f0 * f1 <= 0.0:
446
+ raise ValueError("For a logarithmic chirp, f0 and f1 must be "
447
+ "nonzero and have the same sign.")
448
+ if f0 == f1:
449
+ phase = 2 * pi * f0 * t
450
+ else:
451
+ beta = t1 / log(f1 / f0)
452
+ phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
453
+
454
+ elif method in ['hyperbolic', 'hyp']:
455
+ if f0 == 0 or f1 == 0:
456
+ raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
457
+ "nonzero.")
458
+ if f0 == f1:
459
+ # Degenerate case: constant frequency.
460
+ phase = 2 * pi * f0 * t
461
+ else:
462
+ # Singular point: the instantaneous frequency blows up
463
+ # when t == sing.
464
+ sing = -f1 * t1 / (f0 - f1)
465
+ phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
466
+
467
+ else:
468
+ raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
469
+ " or 'hyperbolic', but a value of %r was given."
470
+ % method)
471
+
472
+ return phase
473
+
474
+
475
def sweep_poly(t, poly, phi=0):
    """
    Frequency-swept cosine generator, with a time-dependent frequency.

    Generates a sinusoid whose instantaneous frequency at time `t` is
    given by the polynomial `poly`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1-D array_like or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  If `poly` is a
        list or ndarray of length n, the instantaneous frequency is

        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``

        If `poly` is an instance of numpy.poly1d, the instantaneous
        frequency is ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees, Default: 0.

    Returns
    -------
    sweep_poly : ndarray
        A numpy array containing the signal evaluated at `t` with the
        requested time-varying frequency.  More precisely, the function
        returns ``cos(phase + (pi/180)*phi)``, where `phase` is the
        integral (from 0 to t) of ``2 * pi * f(t)``, with ``f(t)`` as
        defined above.

    See Also
    --------
    chirp

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    Compute the waveform with instantaneous frequency::

        f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2

    over the interval 0 <= t <= 10.

    >>> import numpy as np
    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)
    """
    # The phase integral is computed in _sweep_poly_phase, which keeps
    # that piece independently testable.
    phase = _sweep_poly_phase(t, poly)
    # `phi` is given in degrees; convert to radians before adding.
    return cos(phase + phi * (pi / 180))
571
+
572
+
573
+ def _sweep_poly_phase(t, poly):
574
+ """
575
+ Calculate the phase used by sweep_poly to generate its output.
576
+
577
+ See `sweep_poly` for a description of the arguments.
578
+
579
+ """
580
+ # polyint handles lists, ndarrays and instances of poly1d automatically.
581
+ intpoly = polyint(poly)
582
+ phase = 2 * pi * polyval(intpoly, t)
583
+ return phase
584
+
585
+
586
def unit_impulse(shape, idx=None, dtype=float):
    """
    Unit impulse signal (discrete delta function) or unit basis vector.

    Parameters
    ----------
    shape : int or tuple of int
        Number of samples in the output (1-D), or a tuple that represents
        the shape of the output (N-D).
    idx : None or int or tuple of int or 'mid', optional
        Index at which the value is 1.  If None, defaults to the 0th
        element.  If ``idx='mid'``, the impulse is centered at
        ``shape // 2`` in all dimensions.  If an int, the impulse is at
        `idx` in all dimensions.
    dtype : data-type, optional
        The desired data-type for the array, e.g., ``numpy.int8``.
        Default is ``numpy.float64``.

    Returns
    -------
    y : ndarray
        Output array containing an impulse signal.

    Notes
    -----
    The 1D case is also known as the Kronecker delta.

    .. versionadded:: 0.19.0

    Examples
    --------
    An impulse at the 0th element (:math:`\\delta[n]`):

    >>> from scipy import signal
    >>> signal.unit_impulse(8)
    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])

    Impulse offset by 2 samples (:math:`\\delta[n-2]`):

    >>> signal.unit_impulse(7, 2)
    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])

    2-dimensional impulse, centered:

    >>> signal.unit_impulse((3, 3), 'mid')
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    """
    impulse = zeros(shape, dtype)

    # Normalize `shape` to a 1-D array so scalar shapes get a length.
    dims = np.atleast_1d(shape)
    ndim = len(dims)

    # Resolve `idx` into a concrete index tuple.
    if idx is None:
        target = (0,) * ndim
    elif idx == 'mid':
        target = tuple(dims // 2)
    elif not hasattr(idx, "__iter__"):
        # A scalar index is broadcast across every dimension.
        target = (idx,) * ndim
    else:
        target = idx

    impulse[target] = 1
    return impulse
venv/lib/python3.10/site-packages/scipy/signal/filter_design.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically importable from ``scipy.signal.filter_design``.  Access
# to any of them is intercepted by the module-level ``__getattr__``, which
# issues a deprecation warning and forwards to the private implementation.
__all__ = [  # noqa: F822
    'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
    'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
    'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
    'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
    'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
    'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
    'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
    'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
    'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
    'gammatone', 'iircomb',
    'atleast_1d', 'poly', 'polyval', 'roots', 'resize', 'absolute',
    'tan', 'log10', 'arcsinh', 'exp', 'arccosh',
    'ceil', 'conjugate', 'append', 'prod', 'full', 'array', 'mintypecode',
    'npp_polyval', 'polyvalfromroots', 'optimize', 'sp_fft', 'comb',
    'float_factorial', 'abs', 'maxflat', 'yulewalk',
    'EPSILON', 'filter_dict', 'band_dict', 'bessel_norms'
]
25
+
26
+
27
def __dir__():
    """Expose the deprecated public names of this shim to ``dir()``."""
    return __all__
29
+
30
+
31
def __getattr__(name):
    """Resolve deprecated attribute access via the central shim helper.

    Emits a deprecation warning and forwards lookups of the names in
    ``__all__`` to the private ``_filter_design`` module.
    """
    return _sub_module_deprecation(
        sub_package="signal",
        module="filter_design",
        private_modules=["_filter_design"],
        all=__all__,
        attribute=name,
    )
venv/lib/python3.10/site-packages/scipy/signal/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc ADDED
Binary file (4.43 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc ADDED
Binary file (3.48 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc ADDED
Binary file (7.27 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc ADDED
Binary file (118 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc ADDED
Binary file (23.8 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc ADDED
Binary file (36.3 kB). View file
 
venv/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc ADDED
Binary file (2.07 kB). View file