applied-ai-018 committed
Commit 5a22ae9 · verified · 1 Parent(s): 5e7eb69

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +2 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_arraytools.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_bsplines.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_czt.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_ltisys.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_spectral_py.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_upfirdn.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_waveforms.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_wavelets.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/bsplines.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/filter_design.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/lti_conversion.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/ltisys.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/signaltools.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/spectral.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/spline.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/waveforms.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/wavelets.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/signal/_czt.py +575 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py +1301 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/signal/_ltisys.py +0 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/signal/_peak_finding.py +1312 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py +357 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/signal/_upfirdn.py +216 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/signal/_wavelets.py +556 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/signal/bsplines.py +23 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/signal/filter_design.py +34 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py +22 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/signal/signaltools.py +29 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__init__.py +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_short_time_fft.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -164,3 +164,5 @@ env-llmeval/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs
164
  env-llmeval/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
165
  env-llmeval/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
166
env-llmeval/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
167
+ env-llmeval/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
168
+ env-llmeval/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.3 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_arraytools.cpython-310.pyc ADDED
Binary file (8 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_bsplines.cpython-310.pyc ADDED
Binary file (14.4 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_czt.cpython-310.pyc ADDED
Binary file (19.6 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc ADDED
Binary file (168 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc ADDED
Binary file (42 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc ADDED
Binary file (14.1 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_ltisys.cpython-310.pyc ADDED
Binary file (94.5 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc ADDED
Binary file (4.7 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc ADDED
Binary file (44.7 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_short_time_fft.cpython-310.pyc ADDED
Binary file (67.3 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_signaltools.cpython-310.pyc ADDED
Binary file (133 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_spectral_py.cpython-310.pyc ADDED
Binary file (69.1 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_upfirdn.cpython-310.pyc ADDED
Binary file (6.28 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_waveforms.cpython-310.pyc ADDED
Binary file (18.8 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/_wavelets.cpython-310.pyc ADDED
Binary file (15.5 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/bsplines.cpython-310.pyc ADDED
Binary file (766 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/filter_design.cpython-310.pyc ADDED
Binary file (1.34 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc ADDED
Binary file (801 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/lti_conversion.cpython-310.pyc ADDED
Binary file (752 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/ltisys.cpython-310.pyc ADDED
Binary file (1.09 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/signaltools.cpython-310.pyc ADDED
Binary file (1.12 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/spectral.cpython-310.pyc ADDED
Binary file (757 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/spline.cpython-310.pyc ADDED
Binary file (886 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/waveforms.cpython-310.pyc ADDED
Binary file (715 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/__pycache__/wavelets.cpython-310.pyc ADDED
Binary file (666 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/_czt.py ADDED
@@ -0,0 +1,575 @@
1
+ # This program is public domain
2
+ # Authors: Paul Kienzle, Nadav Horesh
3
+ """
4
+ Chirp z-transform.
5
+
6
+ We provide two interfaces to the chirp z-transform: an object interface
7
+ which precalculates part of the transform and can be applied efficiently
8
+ to many different data sets, and a functional interface which is applied
9
+ only to the given data set.
10
+
11
+ Transforms
12
+ ----------
13
+
14
+ CZT : callable (x, axis=-1) -> array
15
+ Define a chirp z-transform that can be applied to different signals.
16
+ ZoomFFT : callable (x, axis=-1) -> array
17
+ Define a Fourier transform on a range of frequencies.
18
+
19
+ Functions
20
+ ---------
21
+
22
+ czt : array
23
+ Compute the chirp z-transform for a signal.
24
+ zoom_fft : array
25
+ Compute the Fourier transform on a range of frequencies.
26
+ """
27
+
28
+ import cmath
29
+ import numbers
30
+ import numpy as np
31
+ from numpy import pi, arange
32
+ from scipy.fft import fft, ifft, next_fast_len
33
+
34
+ __all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points']
35
+
36
+
37
+ def _validate_sizes(n, m):
38
+ if n < 1 or not isinstance(n, numbers.Integral):
39
+ raise ValueError('Invalid number of CZT data '
40
+ f'points ({n}) specified. '
41
+ 'n must be positive and integer type.')
42
+
43
+ if m is None:
44
+ m = n
45
+ elif m < 1 or not isinstance(m, numbers.Integral):
46
+ raise ValueError('Invalid number of CZT output '
47
+ f'points ({m}) specified. '
48
+ 'm must be positive and integer type.')
49
+
50
+ return m
51
+
52
+
53
+ def czt_points(m, w=None, a=1+0j):
54
+ """
55
+ Return the points at which the chirp z-transform is computed.
56
+
57
+ Parameters
58
+ ----------
59
+ m : int
60
+ The number of points desired.
61
+ w : complex, optional
62
+ The ratio between points in each step.
63
+ Defaults to equally spaced points around the entire unit circle.
64
+ a : complex, optional
65
+ The starting point in the complex plane. Default is 1+0j.
66
+
67
+ Returns
68
+ -------
69
+ out : ndarray
70
+ The points in the Z plane at which `CZT` samples the z-transform,
71
+ when called with arguments `m`, `w`, and `a`, as complex numbers.
72
+
73
+ See Also
74
+ --------
75
+ CZT : Class that creates a callable chirp z-transform function.
76
+ czt : Convenience function for quickly calculating CZT.
77
+
78
+ Examples
79
+ --------
80
+ Plot the points of a 16-point FFT:
81
+
82
+ >>> import numpy as np
83
+ >>> from scipy.signal import czt_points
84
+ >>> points = czt_points(16)
85
+ >>> import matplotlib.pyplot as plt
86
+ >>> plt.plot(points.real, points.imag, 'o')
87
+ >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
88
+ >>> plt.axis('equal')
89
+ >>> plt.show()
90
+
91
+ and a 91-point logarithmic spiral that crosses the unit circle:
92
+
93
+ >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6)
94
+ >>> points = czt_points(m, w, a)
95
+ >>> plt.plot(points.real, points.imag, 'o')
96
+ >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
97
+ >>> plt.axis('equal')
98
+ >>> plt.show()
99
+ """
100
+ m = _validate_sizes(1, m)
101
+
102
+ k = arange(m)
103
+
104
+ a = 1.0 * a # at least float
105
+
106
+ if w is None:
107
+ # Nothing specified, default to FFT
108
+ return a * np.exp(2j * pi * k / m)
109
+ else:
110
+ # w specified
111
+ w = 1.0 * w # at least float
112
+ return a * w**-k
113
+
114
+
115
+ class CZT:
116
+ """
117
+ Create a callable chirp z-transform function.
118
+
119
+ Transform to compute the frequency response around a spiral.
120
+ Objects of this class are callables which can compute the
121
+ chirp z-transform on their inputs. This object precalculates the constant
122
+ chirps used in the given transform.
123
+
124
+ Parameters
125
+ ----------
126
+ n : int
127
+ The size of the signal.
128
+ m : int, optional
129
+ The number of output points desired. Default is `n`.
130
+ w : complex, optional
131
+ The ratio between points in each step. This must be precise or the
132
+ accumulated error will degrade the tail of the output sequence.
133
+ Defaults to equally spaced points around the entire unit circle.
134
+ a : complex, optional
135
+ The starting point in the complex plane. Default is 1+0j.
136
+
137
+ Returns
138
+ -------
139
+ f : CZT
140
+ Callable object ``f(x, axis=-1)`` for computing the chirp z-transform
141
+ on `x`.
142
+
143
+ See Also
144
+ --------
145
+ czt : Convenience function for quickly calculating CZT.
146
+ ZoomFFT : Class that creates a callable partial FFT function.
147
+
148
+ Notes
149
+ -----
150
+ The defaults are chosen such that ``f(x)`` is equivalent to
151
+ ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to
152
+ ``fft.fft(x, m)``.
153
+
154
+ If `w` does not lie on the unit circle, then the transform will be
155
+ around a spiral with exponentially-increasing radius. Regardless,
156
+ angle will increase linearly.
157
+
158
+ For transforms that do lie on the unit circle, accuracy is better when
159
+ using `ZoomFFT`, since any numerical error in `w` is
160
+ accumulated for long data lengths, drifting away from the unit circle.
161
+
162
+ The chirp z-transform can be faster than an equivalent FFT with
163
+ zero padding. Try it with your own array sizes to see.
164
+
165
+ However, the chirp z-transform is considerably less precise than the
166
+ equivalent zero-padded FFT.
167
+
168
+ As this CZT is implemented using the Bluestein algorithm, it can compute
169
+ large prime-length Fourier transforms in O(N log N) time, rather than the
170
+ O(N**2) time required by the direct DFT calculation. (`scipy.fft` also
171
+ uses Bluestein's algorithm.)
172
+
173
+ (The name "chirp z-transform" comes from the use of a chirp in the
174
+ Bluestein algorithm. It does not decompose signals into chirps, like
175
+ other transforms with "chirp" in the name.)
176
+
177
+ References
178
+ ----------
179
+ .. [1] Leo I. Bluestein, "A linear filtering approach to the computation
180
+ of the discrete Fourier transform," Northeast Electronics Research
181
+ and Engineering Meeting Record 10, 218-219 (1968).
182
+ .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and
183
+ its application," Bell Syst. Tech. J. 48, 1249-1292 (1969).
184
+
185
+ Examples
186
+ --------
187
+ Compute multiple prime-length FFTs:
188
+
189
+ >>> from scipy.signal import CZT
190
+ >>> import numpy as np
191
+ >>> a = np.random.rand(7)
192
+ >>> b = np.random.rand(7)
193
+ >>> c = np.random.rand(7)
194
+ >>> czt_7 = CZT(n=7)
195
+ >>> A = czt_7(a)
196
+ >>> B = czt_7(b)
197
+ >>> C = czt_7(c)
198
+
199
+ Display the points at which the FFT is calculated:
200
+
201
+ >>> czt_7.points()
202
+ array([ 1.00000000+0.j , 0.62348980+0.78183148j,
203
+ -0.22252093+0.97492791j, -0.90096887+0.43388374j,
204
+ -0.90096887-0.43388374j, -0.22252093-0.97492791j,
205
+ 0.62348980-0.78183148j])
206
+ >>> import matplotlib.pyplot as plt
207
+ >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o')
208
+ >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
209
+ >>> plt.axis('equal')
210
+ >>> plt.show()
211
+ """
212
+
213
+ def __init__(self, n, m=None, w=None, a=1+0j):
214
+ m = _validate_sizes(n, m)
215
+
216
+ k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))
217
+
218
+ if w is None:
219
+ # Nothing specified, default to FFT-like
220
+ w = cmath.exp(-2j*pi/m)
221
+ wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m)
222
+ else:
223
+ # w specified
224
+ wk2 = w**(k**2/2.)
225
+
226
+ a = 1.0 * a # at least float
227
+
228
+ self.w, self.a = w, a
229
+ self.m, self.n = m, n
230
+
231
+ nfft = next_fast_len(n + m - 1)
232
+ self._Awk2 = a**-k[:n] * wk2[:n]
233
+ self._nfft = nfft
234
+ self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
235
+ self._wk2 = wk2[:m]
236
+ self._yidx = slice(n-1, n+m-1)
237
+
238
+ def __call__(self, x, *, axis=-1):
239
+ """
240
+ Calculate the chirp z-transform of a signal.
241
+
242
+ Parameters
243
+ ----------
244
+ x : array
245
+ The signal to transform.
246
+ axis : int, optional
247
+ Axis over which to compute the FFT. If not given, the last axis is
248
+ used.
249
+
250
+ Returns
251
+ -------
252
+ out : ndarray
253
+ An array of the same dimensions as `x`, but with the length of the
254
+ transformed axis set to `m`.
255
+ """
256
+ x = np.asarray(x)
257
+ if x.shape[axis] != self.n:
258
+ raise ValueError(f"CZT defined for length {self.n}, not "
259
+ f"{x.shape[axis]}")
260
+ # Calculate transpose coordinates, to allow operation on any given axis
261
+ trnsp = np.arange(x.ndim)
262
+ trnsp[[axis, -1]] = [-1, axis]
263
+ x = x.transpose(*trnsp)
264
+ y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft))
265
+ y = y[..., self._yidx] * self._wk2
266
+ return y.transpose(*trnsp)
267
+
268
+ def points(self):
269
+ """
270
+ Return the points at which the chirp z-transform is computed.
271
+ """
272
+ return czt_points(self.m, self.w, self.a)
273
+
274
+
275
+ class ZoomFFT(CZT):
276
+ """
277
+ Create a callable zoom FFT transform function.
278
+
279
+ This is a specialization of the chirp z-transform (`CZT`) for a set of
280
+ equally-spaced frequencies around the unit circle, used to calculate a
281
+ section of the FFT more efficiently than calculating the entire FFT and
282
+ truncating.
283
+
284
+ Parameters
285
+ ----------
286
+ n : int
287
+ The size of the signal.
288
+ fn : array_like
289
+ A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
290
+ scalar, for which the range [0, `fn`] is assumed.
291
+ m : int, optional
292
+ The number of points to evaluate. Default is `n`.
293
+ fs : float, optional
294
+ The sampling frequency. If ``fs=10`` represented 10 kHz, for example,
295
+ then `f1` and `f2` would also be given in kHz.
296
+ The default sampling frequency is 2, so `f1` and `f2` should be
297
+ in the range [0, 1] to keep the transform below the Nyquist
298
+ frequency.
299
+ endpoint : bool, optional
300
+ If True, `f2` is the last sample. Otherwise, it is not included.
301
+ Default is False.
302
+
303
+ Returns
304
+ -------
305
+ f : ZoomFFT
306
+ Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`.
307
+
308
+ See Also
309
+ --------
310
+ zoom_fft : Convenience function for calculating a zoom FFT.
311
+
312
+ Notes
313
+ -----
314
+ The defaults are chosen such that ``f(x, 2)`` is equivalent to
315
+ ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to
316
+ ``fft.fft(x, m)``.
317
+
318
+ Sampling frequency is 1/dt, the time step between samples in the
319
+ signal `x`. The unit circle corresponds to frequencies from 0 up
320
+ to the sampling frequency. The default sampling frequency of 2
321
+ means that `f1`, `f2` values up to the Nyquist frequency are in the
322
+ range [0, 1). For `f1`, `f2` values expressed in radians, a sampling
323
+ frequency of 2*pi should be used.
324
+
325
+ Remember that a zoom FFT can only interpolate the points of the existing
326
+ FFT. It cannot help to resolve two separate nearby frequencies.
327
+ Frequency resolution can only be increased by increasing acquisition
328
+ time.
329
+
330
+ These functions are implemented using Bluestein's algorithm (as is
331
+ `scipy.fft`). [2]_
332
+
333
+ References
334
+ ----------
335
+ .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
336
+ applications", pg 29 (1970)
337
+ https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
338
+ .. [2] Leo I. Bluestein, "A linear filtering approach to the computation
339
+ of the discrete Fourier transform," Northeast Electronics Research
340
+ and Engineering Meeting Record 10, 218-219 (1968).
341
+
342
+ Examples
343
+ --------
344
+ To plot the transform results use something like the following:
345
+
346
+ >>> import numpy as np
347
+ >>> from scipy.signal import ZoomFFT
348
+ >>> t = np.linspace(0, 1, 1021)
349
+ >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
350
+ >>> f1, f2 = 5, 27
351
+ >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021)
352
+ >>> X = transform(x)
353
+ >>> f = np.linspace(f1, f2, len(x))
354
+ >>> import matplotlib.pyplot as plt
355
+ >>> plt.plot(f, 20*np.log10(np.abs(X)))
356
+ >>> plt.show()
357
+ """
358
+
359
+ def __init__(self, n, fn, m=None, *, fs=2, endpoint=False):
360
+ m = _validate_sizes(n, m)
361
+
362
+ k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))
363
+
364
+ if np.size(fn) == 2:
365
+ f1, f2 = fn
366
+ elif np.size(fn) == 1:
367
+ f1, f2 = 0.0, fn
368
+ else:
369
+ raise ValueError('fn must be a scalar or 2-length sequence')
370
+
371
+ self.f1, self.f2, self.fs = f1, f2, fs
372
+
373
+ if endpoint:
374
+ scale = ((f2 - f1) * m) / (fs * (m - 1))
375
+ else:
376
+ scale = (f2 - f1) / fs
377
+ a = cmath.exp(2j * pi * f1/fs)
378
+ wk2 = np.exp(-(1j * pi * scale * k**2) / m)
379
+
380
+ self.w = cmath.exp(-2j*pi/m * scale)
381
+ self.a = a
382
+ self.m, self.n = m, n
383
+
384
+ ak = np.exp(-2j * pi * f1/fs * k[:n])
385
+ self._Awk2 = ak * wk2[:n]
386
+
387
+ nfft = next_fast_len(n + m - 1)
388
+ self._nfft = nfft
389
+ self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
390
+ self._wk2 = wk2[:m]
391
+ self._yidx = slice(n-1, n+m-1)
392
+
393
+
394
+ def czt(x, m=None, w=None, a=1+0j, *, axis=-1):
395
+ """
396
+ Compute the frequency response around a spiral in the Z plane.
397
+
398
+ Parameters
399
+ ----------
400
+ x : array
401
+ The signal to transform.
402
+ m : int, optional
403
+ The number of output points desired. Default is the length of the
404
+ input data.
405
+ w : complex, optional
406
+ The ratio between points in each step. This must be precise or the
407
+ accumulated error will degrade the tail of the output sequence.
408
+ Defaults to equally spaced points around the entire unit circle.
409
+ a : complex, optional
410
+ The starting point in the complex plane. Default is 1+0j.
411
+ axis : int, optional
412
+ Axis over which to compute the FFT. If not given, the last axis is
413
+ used.
414
+
415
+ Returns
416
+ -------
417
+ out : ndarray
418
+ An array of the same dimensions as `x`, but with the length of the
419
+ transformed axis set to `m`.
420
+
421
+ See Also
422
+ --------
423
+ CZT : Class that creates a callable chirp z-transform function.
424
+ zoom_fft : Convenience function for partial FFT calculations.
425
+
426
+ Notes
427
+ -----
428
+ The defaults are chosen such that ``signal.czt(x)`` is equivalent to
429
+ ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is
430
+ equivalent to ``fft.fft(x, m)``.
431
+
432
+ If the transform needs to be repeated, use `CZT` to construct a
433
+ specialized transform function which can be reused without
434
+ recomputing constants.
435
+
436
+ An example application is in system identification, repeatedly evaluating
437
+ small slices of the z-transform of a system, around where a pole is
438
+ expected to exist, to refine the estimate of the pole's true location. [1]_
439
+
440
+ References
441
+ ----------
442
+ .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
443
+ applications", pg 20 (1970)
444
+ https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
445
+
446
+ Examples
447
+ --------
448
+ Generate a sinusoid:
449
+
450
+ >>> import numpy as np
451
+ >>> f1, f2, fs = 8, 10, 200 # Hz
452
+ >>> t = np.linspace(0, 1, fs, endpoint=False)
453
+ >>> x = np.sin(2*np.pi*t*f2)
454
+ >>> import matplotlib.pyplot as plt
455
+ >>> plt.plot(t, x)
456
+ >>> plt.axis([0, 1, -1.1, 1.1])
457
+ >>> plt.show()
458
+
459
+ Its discrete Fourier transform has all of its energy in a single frequency
460
+ bin:
461
+
462
+ >>> from scipy.fft import rfft, rfftfreq
463
+ >>> from scipy.signal import czt, czt_points
464
+ >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
465
+ >>> plt.margins(0, 0.1)
466
+ >>> plt.show()
467
+
468
+ However, if the sinusoid is exponentially decaying:
469
+
470
+ >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2)
471
+ >>> plt.plot(t, x)
472
+ >>> plt.axis([0, 1, -1.1, 1.1])
473
+ >>> plt.show()
474
+
475
+ the DFT will have spectral leakage:
476
+
477
+ >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
478
+ >>> plt.margins(0, 0.1)
479
+ >>> plt.show()
480
+
481
+ While the DFT always samples the z-transform around the unit circle, the
482
+ chirp z-transform allows us to sample the Z-transform along any
483
+ logarithmic spiral, such as a circle with radius smaller than unity:
484
+
485
+ >>> M = fs // 2 # Just positive frequencies, like rfft
486
+ >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1
487
+ >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle
488
+ >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist
489
+ >>> plt.plot(points.real, points.imag, '.')
490
+ >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
491
+ >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05])
492
+ >>> plt.show()
493
+
494
+ With the correct radius, this transforms the decaying sinusoid (and others
495
+ with the same decay rate) without spectral leakage:
496
+
497
+ >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft
498
+ >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma
499
+ >>> plt.plot(freqs, abs(z_vals))
500
+ >>> plt.margins(0, 0.1)
501
+ >>> plt.show()
502
+ """
503
+ x = np.asarray(x)
504
+ transform = CZT(x.shape[axis], m=m, w=w, a=a)
505
+ return transform(x, axis=axis)
506
+
507
+
508
+ def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1):
509
+ """
510
+ Compute the DFT of `x` only for frequencies in range `fn`.
511
+
512
+ Parameters
513
+ ----------
514
+ x : array
515
+ The signal to transform.
516
+ fn : array_like
517
+ A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
518
+ scalar, for which the range [0, `fn`] is assumed.
519
+ m : int, optional
520
+ The number of points to evaluate. The default is the length of `x`.
521
+ fs : float, optional
522
+ The sampling frequency. If ``fs=10`` represented 10 kHz, for example,
523
+ then `f1` and `f2` would also be given in kHz.
524
+ The default sampling frequency is 2, so `f1` and `f2` should be
525
+ in the range [0, 1] to keep the transform below the Nyquist
526
+ frequency.
527
+ endpoint : bool, optional
528
+ If True, `f2` is the last sample. Otherwise, it is not included.
529
+ Default is False.
530
+ axis : int, optional
531
+ Axis over which to compute the FFT. If not given, the last axis is
532
+ used.
533
+
534
+ Returns
535
+ -------
536
+ out : ndarray
537
+ The transformed signal. The Fourier transform will be calculated
538
+ at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m.
539
+
540
+ See Also
541
+ --------
542
+ ZoomFFT : Class that creates a callable partial FFT function.
543
+
544
+ Notes
545
+ -----
546
+ The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent
547
+ to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)``
548
+ is equivalent to ``fft.fft(x, m)``.
549
+
550
+ To graph the magnitude of the resulting transform, use::
551
+
552
+ plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m)))
553
+
554
+ If the transform needs to be repeated, use `ZoomFFT` to construct
555
+ a specialized transform function which can be reused without
556
+ recomputing constants.
557
+
558
+ Examples
559
+ --------
560
+ To plot the transform results use something like the following:
561
+
562
+ >>> import numpy as np
563
+ >>> from scipy.signal import zoom_fft
564
+ >>> t = np.linspace(0, 1, 1021)
565
+ >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
566
+ >>> f1, f2 = 5, 27
567
+ >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021)
568
+ >>> f = np.linspace(f1, f2, len(x))
569
+ >>> import matplotlib.pyplot as plt
570
+ >>> plt.plot(f, 20*np.log10(np.abs(X)))
571
+ >>> plt.show()
572
+ """
573
+ x = np.asarray(x)
574
+ transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint)
575
+ return transform(x, axis=axis)
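
A minimal usage sketch of the `czt` and `ZoomFFT` interfaces defined in the file above (this snippet is not part of the uploaded file; the test signal and parameter values are illustrative):

>>> import numpy as np
>>> from scipy.signal import czt, ZoomFFT
>>> x = np.sin(2 * np.pi * 7 * np.arange(128) / 128)  # illustrative 128-sample tone
>>> X = czt(x)                        # with defaults, czt matches fft.fft(x)
>>> np.allclose(X, np.fft.fft(x))
True
>>> f = ZoomFFT(len(x), [0.0, 0.25], m=64, fs=1.0)  # zoom on 0-0.25 cycles/sample
>>> f(x).shape                        # 64 bins covering just that band
(64,)
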
env-llmeval/lib/python3.10/site-packages/scipy/signal/_fir_filter_design.py ADDED
@@ -0,0 +1,1301 @@
1
+ """Functions for FIR filter design."""
2
+
3
+ from math import ceil, log
4
+ import operator
5
+ import warnings
6
+
7
+ import numpy as np
8
+ from numpy.fft import irfft, fft, ifft
9
+ from scipy.special import sinc
10
+ from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
11
+ lstsq)
12
+ from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
13
+ from scipy.signal._arraytools import _validate_fs
14
+
15
+ from . import _sigtools
16
+
17
+ __all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
18
+ 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
19
+
20
+
21
+ def _get_fs(fs, nyq):
22
+ """
23
+ Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
24
+ """
25
+ if nyq is _NoValue and fs is None:
26
+ fs = 2
27
+ elif nyq is not _NoValue:
28
+ if fs is not None:
29
+ raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
30
+ msg = ("Keyword argument 'nyq' is deprecated in favour of 'fs' and "
31
+ "will be removed in SciPy 1.14.0.")
32
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
33
+ if nyq is None:
34
+ fs = 2
35
+ else:
36
+ fs = 2*nyq
37
+ return fs
38
+
39
+
40
+ # Some notes on function parameters:
41
+ #
42
+ # `cutoff` and `width` are given as numbers between 0 and 1. These are
43
+ # relative frequencies, expressed as a fraction of the Nyquist frequency.
44
+ # For example, if the Nyquist frequency is 2 kHz, then width=0.15 is a width
45
+ # of 300 Hz.
46
+ #
47
+ # The `order` of a FIR filter is one less than the number of taps.
48
+ # This is a potential source of confusion, so in the following code,
49
+ # we will always use the number of taps as the parameterization of
50
+ # the 'size' of the filter. The "number of taps" means the number
51
+ # of coefficients, which is the same as the length of the impulse
52
+ # response of the filter.
53
+
54
+
55
+ def kaiser_beta(a):
56
+ """Compute the Kaiser parameter `beta`, given the attenuation `a`.
57
+
58
+ Parameters
59
+ ----------
60
+ a : float
61
+ The desired attenuation in the stopband and maximum ripple in
62
+ the passband, in dB. This should be a *positive* number.
63
+
64
+ Returns
65
+ -------
66
+ beta : float
67
+ The `beta` parameter to be used in the formula for a Kaiser window.
68
+
69
+ References
70
+ ----------
71
+ Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
72
+
73
+ Examples
74
+ --------
75
+ Suppose we want to design a lowpass filter, with 65 dB attenuation
76
+ in the stop band. The Kaiser window parameter to be used in the
77
+ window method is computed by ``kaiser_beta(65)``:
78
+
79
+ >>> from scipy.signal import kaiser_beta
80
+ >>> kaiser_beta(65)
81
+ 6.20426
82
+
83
+ """
84
+ if a > 50:
85
+ beta = 0.1102 * (a - 8.7)
86
+ elif a > 21:
87
+ beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
88
+ else:
89
+ beta = 0.0
90
+ return beta
91
+
92
+
93
+ def kaiser_atten(numtaps, width):
94
+ """Compute the attenuation of a Kaiser FIR filter.
95
+
96
+ Given the number of taps `N` and the transition width `width`, compute the
97
+ attenuation `a` in dB, given by Kaiser's formula:
98
+
99
+ a = 2.285 * (N - 1) * pi * width + 7.95
100
+
101
+ Parameters
102
+ ----------
103
+ numtaps : int
104
+ The number of taps in the FIR filter.
105
+ width : float
106
+ The desired width of the transition region between passband and
107
+ stopband (or, in general, at any discontinuity) for the filter,
108
+ expressed as a fraction of the Nyquist frequency.
109
+
110
+ Returns
111
+ -------
112
+ a : float
113
+ The attenuation of the ripple, in dB.
114
+
115
+ See Also
116
+ --------
117
+ kaiserord, kaiser_beta
118
+
119
+ Examples
120
+ --------
121
+ Suppose we want to design a FIR filter using the Kaiser window method
122
+ that will have 211 taps and a transition width of 9 Hz for a signal that
123
+ is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
124
+ the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
125
+ is computed as follows:
126
+
127
+ >>> from scipy.signal import kaiser_atten
128
+ >>> kaiser_atten(211, 0.0375)
129
+ 64.48099630593983
130
+
131
+ """
132
+ a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
133
+ return a
134
+
135
+
136
+ def kaiserord(ripple, width):
137
+ """
138
+ Determine the filter window parameters for the Kaiser window method.
139
+
140
+ The parameters returned by this function are generally used to create
141
+ a finite impulse response filter using the window method, with either
142
+ `firwin` or `firwin2`.
143
+
144
+ Parameters
145
+ ----------
146
+ ripple : float
147
+ Upper bound for the deviation (in dB) of the magnitude of the
148
+ filter's frequency response from that of the desired filter (not
149
+ including frequencies in any transition intervals). That is, if w
150
+ is the frequency expressed as a fraction of the Nyquist frequency,
151
+ A(w) is the actual frequency response of the filter and D(w) is the
152
+ desired frequency response, the design requirement is that::
153
+
154
+ abs(A(w) - D(w))) < 10**(-ripple/20)
155
+
156
+ for 0 <= w <= 1 and w not in a transition interval.
157
+ width : float
158
+ Width of transition region, normalized so that 1 corresponds to pi
159
+ radians / sample. That is, the frequency is expressed as a fraction
160
+ of the Nyquist frequency.
161
+
162
+ Returns
163
+ -------
164
+ numtaps : int
165
+ The length of the Kaiser window.
166
+ beta : float
167
+ The beta parameter for the Kaiser window.
168
+
169
+ See Also
170
+ --------
171
+ kaiser_beta, kaiser_atten
172
+
173
+ Notes
174
+ -----
175
+ There are several ways to obtain the Kaiser window:
176
+
177
+ - ``signal.windows.kaiser(numtaps, beta, sym=True)``
178
+ - ``signal.get_window(beta, numtaps)``
179
+ - ``signal.get_window(('kaiser', beta), numtaps)``
180
+
181
+ The empirical equations discovered by Kaiser are used.
182
+
183
+ References
184
+ ----------
185
+ Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
186
+
187
+ Examples
188
+ --------
189
+ We will use the Kaiser window method to design a lowpass FIR filter
190
+ for a signal that is sampled at 1000 Hz.
191
+
192
+ We want at least 65 dB rejection in the stop band, and in the pass
193
+ band the gain should vary no more than 0.5%.
194
+
195
+ We want a cutoff frequency of 175 Hz, with a transition between the
196
+ pass band and the stop band of 24 Hz. That is, in the band [0, 163],
197
+ the gain varies no more than 0.5%, and in the band [187, 500], the
198
+ signal is attenuated by at least 65 dB.
199
+
200
+ >>> import numpy as np
201
+ >>> from scipy.signal import kaiserord, firwin, freqz
202
+ >>> import matplotlib.pyplot as plt
203
+ >>> fs = 1000.0
204
+ >>> cutoff = 175
205
+ >>> width = 24
206
+
207
+ The Kaiser method accepts just a single parameter to control the pass
208
+ band ripple and the stop band rejection, so we use the more restrictive
209
+ of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
210
+ so we will use 65 dB as the design parameter.
211
+
212
+ Use `kaiserord` to determine the length of the filter and the
213
+ parameter for the Kaiser window.
214
+
215
+ >>> numtaps, beta = kaiserord(65, width/(0.5*fs))
216
+ >>> numtaps
217
+ 167
218
+ >>> beta
219
+ 6.20426
220
+
221
+ Use `firwin` to create the FIR filter.
222
+
223
+ >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
224
+ ... scale=False, fs=fs)
225
+
226
+ Compute the frequency response of the filter. ``w`` is the array of
227
+ frequencies, and ``h`` is the corresponding complex array of frequency
228
+ responses.
229
+
230
+ >>> w, h = freqz(taps, worN=8000)
231
+ >>> w *= 0.5*fs/np.pi # Convert w to Hz.
232
+
233
+ Compute the deviation of the magnitude of the filter's response from
234
+ that of the ideal lowpass filter. Values in the transition region are
235
+ set to ``nan``, so they won't appear in the plot.
236
+
237
+ >>> ideal = w < cutoff # The "ideal" frequency response.
238
+ >>> deviation = np.abs(np.abs(h) - ideal)
239
+ >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
240
+
241
+ Plot the deviation. A close look at the left end of the stop band shows
242
+ that the requirement for 65 dB attenuation is violated in the first lobe
243
+ by about 0.125 dB. This is not unusual for the Kaiser window method.
244
+
245
+ >>> plt.plot(w, 20*np.log10(np.abs(deviation)))
246
+ >>> plt.xlim(0, 0.5*fs)
247
+ >>> plt.ylim(-90, -60)
248
+ >>> plt.grid(alpha=0.25)
249
+ >>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
250
+ >>> plt.xlabel('Frequency (Hz)')
251
+ >>> plt.ylabel('Deviation from ideal (dB)')
252
+ >>> plt.title('Lowpass Filter Frequency Response')
253
+ >>> plt.show()
254
+
255
+ """
256
+ A = abs(ripple) # in case somebody is confused as to what's meant
257
+ if A < 8:
258
+ # Formula for N is not valid in this range.
259
+ raise ValueError("Requested maximum ripple attenuation %f is too "
260
+ "small for the Kaiser formula." % A)
261
+ beta = kaiser_beta(A)
262
+
263
+ # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
264
+ # order, so we have to add 1 to get the number of taps.
265
+ numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
266
+
267
+ return int(ceil(numtaps)), beta
268
+
269
+
270
+ @_deprecate_positional_args(version="1.14")
271
+ def firwin(numtaps, cutoff, *, width=None, window='hamming', pass_zero=True,
272
+ scale=True, nyq=_NoValue, fs=None):
273
+ """
274
+ FIR filter design using the window method.
275
+
276
+ This function computes the coefficients of a finite impulse response
277
+ filter. The filter will have linear phase; it will be Type I if
278
+ `numtaps` is odd and Type II if `numtaps` is even.
279
+
280
+ Type II filters always have zero response at the Nyquist frequency, so a
281
+ ValueError exception is raised if firwin is called with `numtaps` even and
282
+ having a passband whose right end is at the Nyquist frequency.
283
+
284
+ Parameters
285
+ ----------
286
+ numtaps : int
287
+ Length of the filter (number of coefficients, i.e. the filter
288
+ order + 1). `numtaps` must be odd if a passband includes the
289
+ Nyquist frequency.
290
+ cutoff : float or 1-D array_like
291
+ Cutoff frequency of filter (expressed in the same units as `fs`)
292
+ OR an array of cutoff frequencies (that is, band edges). In the
293
+ latter case, the frequencies in `cutoff` should be positive and
294
+ monotonically increasing between 0 and `fs/2`. The values 0 and
295
+ `fs/2` must not be included in `cutoff`.
296
+ width : float or None, optional
297
+ If `width` is not None, then assume it is the approximate width
298
+ of the transition region (expressed in the same units as `fs`)
299
+ for use in Kaiser FIR filter design. In this case, the `window`
300
+ argument is ignored.
301
+ window : string or tuple of string and parameter values, optional
302
+ Desired window to use. See `scipy.signal.get_window` for a list
303
+ of windows and required parameters.
304
+ pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
305
+ If True, the gain at the frequency 0 (i.e., the "DC gain") is 1.
306
+ If False, the DC gain is 0. Can also be a string argument for the
307
+ desired filter type (equivalent to ``btype`` in IIR design functions).
308
+
309
+ .. versionadded:: 1.3.0
310
+ Support for string arguments.
311
+ scale : bool, optional
312
+ Set to True to scale the coefficients so that the frequency
313
+ response is exactly unity at a certain frequency.
314
+ That frequency is either:
315
+
316
+ - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
317
+ is True)
318
+ - `fs/2` (the Nyquist frequency) if the first passband ends at
319
+ `fs/2` (i.e the filter is a single band highpass filter);
320
+ center of first passband otherwise
321
+
322
+ nyq : float, optional, deprecated
323
+ This is the Nyquist frequency. Each frequency in `cutoff` must be
324
+ between 0 and `nyq`. Default is 1.
325
+
326
+ .. deprecated:: 1.0.0
327
+ `firwin` keyword argument `nyq` is deprecated in favour of `fs` and
328
+ will be removed in SciPy 1.14.0.
329
+ fs : float, optional
330
+ The sampling frequency of the signal. Each frequency in `cutoff`
331
+ must be between 0 and ``fs/2``. Default is 2.
332
+
333
+ Returns
334
+ -------
335
+ h : (numtaps,) ndarray
336
+ Coefficients of length `numtaps` FIR filter.
337
+
338
+ Raises
339
+ ------
340
+ ValueError
341
+ If any value in `cutoff` is less than or equal to 0 or greater
342
+ than or equal to ``fs/2``, if the values in `cutoff` are not strictly
343
+ monotonically increasing, or if `numtaps` is even but a passband
344
+ includes the Nyquist frequency.
345
+
346
+ See Also
347
+ --------
348
+ firwin2
349
+ firls
350
+ minimum_phase
351
+ remez
352
+
353
+ Examples
354
+ --------
355
+ Low-pass from 0 to f:
356
+
357
+ >>> from scipy import signal
358
+ >>> numtaps = 3
359
+ >>> f = 0.1
360
+ >>> signal.firwin(numtaps, f)
361
+ array([ 0.06799017, 0.86401967, 0.06799017])
362
+
363
+ Use a specific window function:
364
+
365
+ >>> signal.firwin(numtaps, f, window='nuttall')
366
+ array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
367
+
368
+ High-pass ('stop' from 0 to f):
369
+
370
+ >>> signal.firwin(numtaps, f, pass_zero=False)
371
+ array([-0.00859313, 0.98281375, -0.00859313])
372
+
373
+ Band-pass:
374
+
375
+ >>> f1, f2 = 0.1, 0.2
376
+ >>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
377
+ array([ 0.06301614, 0.88770441, 0.06301614])
378
+
379
+ Band-stop:
380
+
381
+ >>> signal.firwin(numtaps, [f1, f2])
382
+ array([-0.00801395, 1.0160279 , -0.00801395])
383
+
384
+ Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
385
+
386
+ >>> f3, f4 = 0.3, 0.4
387
+ >>> signal.firwin(numtaps, [f1, f2, f3, f4])
388
+ array([-0.01376344, 1.02752689, -0.01376344])
389
+
390
+ Multi-band (passbands are [f1, f2] and [f3,f4]):
391
+
392
+ >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
393
+ array([ 0.04890915, 0.91284326, 0.04890915])
394
+
395
+ """
396
+ # The major enhancements to this function added in November 2010 were
397
+ # developed by Tom Krauss (see ticket #902).
398
+ fs = _validate_fs(fs, allow_none=True)
399
+
400
+ nyq = 0.5 * _get_fs(fs, nyq)
401
+
402
+ cutoff = np.atleast_1d(cutoff) / float(nyq)
403
+
404
+ # Check for invalid input.
405
+ if cutoff.ndim > 1:
406
+ raise ValueError("The cutoff argument must be at most "
407
+ "one-dimensional.")
408
+ if cutoff.size == 0:
409
+ raise ValueError("At least one cutoff frequency must be given.")
410
+ if cutoff.min() <= 0 or cutoff.max() >= 1:
411
+ raise ValueError("Invalid cutoff frequency: frequencies must be "
412
+ "greater than 0 and less than fs/2.")
413
+ if np.any(np.diff(cutoff) <= 0):
414
+ raise ValueError("Invalid cutoff frequencies: the frequencies "
415
+ "must be strictly increasing.")
416
+
417
+ if width is not None:
418
+ # A width was given. Find the beta parameter of the Kaiser window
419
+ # and set `window`. This overrides the value of `window` passed in.
420
+ atten = kaiser_atten(numtaps, float(width) / nyq)
421
+ beta = kaiser_beta(atten)
422
+ window = ('kaiser', beta)
423
+
424
+ if isinstance(pass_zero, str):
425
+ if pass_zero in ('bandstop', 'lowpass'):
426
+ if pass_zero == 'lowpass':
427
+ if cutoff.size != 1:
428
+ raise ValueError('cutoff must have one element if '
429
+ f'pass_zero=="lowpass", got {cutoff.shape}')
430
+ elif cutoff.size <= 1:
431
+ raise ValueError('cutoff must have at least two elements if '
432
+ f'pass_zero=="bandstop", got {cutoff.shape}')
433
+ pass_zero = True
434
+ elif pass_zero in ('bandpass', 'highpass'):
435
+ if pass_zero == 'highpass':
436
+ if cutoff.size != 1:
437
+ raise ValueError('cutoff must have one element if '
438
+ f'pass_zero=="highpass", got {cutoff.shape}')
439
+ elif cutoff.size <= 1:
440
+ raise ValueError('cutoff must have at least two elements if '
441
+ f'pass_zero=="bandpass", got {cutoff.shape}')
442
+ pass_zero = False
443
+ else:
444
+ raise ValueError('pass_zero must be True, False, "bandpass", '
445
+ '"lowpass", "highpass", or "bandstop", got '
446
+ f'{pass_zero}')
447
+ pass_zero = bool(operator.index(pass_zero)) # ensure bool-like
448
+
449
+ pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
450
+ if pass_nyquist and numtaps % 2 == 0:
451
+ raise ValueError("A filter with an even number of coefficients must "
452
+ "have zero response at the Nyquist frequency.")
453
+
454
+ # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
455
+ # is even, and each pair in cutoff corresponds to passband.
456
+ cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
457
+
458
+ # `bands` is a 2-D array; each row gives the left and right edges of
459
+ # a passband.
460
+ bands = cutoff.reshape(-1, 2)
461
+
462
+ # Build up the coefficients.
463
+ alpha = 0.5 * (numtaps - 1)
464
+ m = np.arange(0, numtaps) - alpha
465
+ h = 0
466
+ for left, right in bands:
467
+ h += right * sinc(right * m)
468
+ h -= left * sinc(left * m)
469
+
470
+ # Get and apply the window function.
471
+ from .windows import get_window
472
+ win = get_window(window, numtaps, fftbins=False)
473
+ h *= win
474
+
475
+ # Now handle scaling if desired.
476
+ if scale:
477
+ # Get the first passband.
478
+ left, right = bands[0]
479
+ if left == 0:
480
+ scale_frequency = 0.0
481
+ elif right == 1:
482
+ scale_frequency = 1.0
483
+ else:
484
+ scale_frequency = 0.5 * (left + right)
485
+ c = np.cos(np.pi * m * scale_frequency)
486
+ s = np.sum(h * c)
487
+ h /= s
488
+
489
+ return h
490
+
491
+
492
+ # Original version of firwin2 from scipy ticket #457, submitted by "tash".
493
+ #
494
+ # Rewritten by Warren Weckesser, 2010.
495
+ @_deprecate_positional_args(version="1.14")
496
+ def firwin2(numtaps, freq, gain, *, nfreqs=None, window='hamming', nyq=_NoValue,
497
+ antisymmetric=False, fs=None):
498
+ """
499
+ FIR filter design using the window method.
500
+
501
+ From the given frequencies `freq` and corresponding gains `gain`,
502
+ this function constructs an FIR filter with linear phase and
503
+ (approximately) the given frequency response.
504
+
505
+ Parameters
506
+ ----------
507
+ numtaps : int
508
+ The number of taps in the FIR filter. `numtaps` must be less than
509
+ `nfreqs`.
510
+ freq : array_like, 1-D
511
+ The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
512
+ Nyquist. The Nyquist frequency is half `fs`.
513
+ The values in `freq` must be nondecreasing. A value can be repeated
514
+ once to implement a discontinuity. The first value in `freq` must
515
+ be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must
516
+ not be repeated.
517
+ gain : array_like
518
+ The filter gains at the frequency sampling points. Certain
519
+ constraints to gain values, depending on the filter type, are applied,
520
+ see Notes for details.
521
+ nfreqs : int, optional
522
+ The size of the interpolation mesh used to construct the filter.
523
+ For most efficient behavior, this should be a power of 2 plus 1
524
+ (e.g, 129, 257, etc). The default is one more than the smallest
525
+ power of 2 that is not less than `numtaps`. `nfreqs` must be greater
526
+ than `numtaps`.
527
+ window : string or (string, float) or float, or None, optional
528
+ Window function to use. Default is "hamming". See
529
+ `scipy.signal.get_window` for the complete list of possible values.
530
+ If None, no window function is applied.
531
+ nyq : float, optional, deprecated
532
+ This is the Nyquist frequency. Each frequency in `freq` must be
533
+ between 0 and `nyq`. Default is 1.
534
+
535
+ .. deprecated:: 1.0.0
536
+ `firwin2` keyword argument `nyq` is deprecated in favour of `fs` and
537
+ will be removed in SciPy 1.14.0.
538
+ antisymmetric : bool, optional
539
+ Whether resulting impulse response is symmetric/antisymmetric.
540
+ See Notes for more details.
541
+ fs : float, optional
542
+ The sampling frequency of the signal. Each frequency in `cutoff`
543
+ must be between 0 and ``fs/2``. Default is 2.
544
+
545
+ Returns
546
+ -------
547
+ taps : ndarray
548
+ The filter coefficients of the FIR filter, as a 1-D array of length
549
+ `numtaps`.
550
+
551
+ See Also
552
+ --------
553
+ firls
554
+ firwin
555
+ minimum_phase
556
+ remez
557
+
558
+ Notes
559
+ -----
560
+ From the given set of frequencies and gains, the desired response is
561
+ constructed in the frequency domain. The inverse FFT is applied to the
562
+ desired response to create the associated convolution kernel, and the
563
+ first `numtaps` coefficients of this kernel, scaled by `window`, are
564
+ returned.
565
+
566
+ The FIR filter will have linear phase. The type of filter is determined by
567
+ the value of `numtaps` and the `antisymmetric` flag.
568
+ There are four possible combinations:
569
+
570
+ - odd `numtaps`, `antisymmetric` is False, type I filter is produced
571
+ - even `numtaps`, `antisymmetric` is False, type II filter is produced
572
+ - odd `numtaps`, `antisymmetric` is True, type III filter is produced
573
+ - even `numtaps`, `antisymmetric` is True, type IV filter is produced
574
+
575
+ The magnitude response of all but type I filters is subject to the following
576
+ constraints:
577
+
578
+ - type II -- zero at the Nyquist frequency
579
+ - type III -- zero at zero and Nyquist frequencies
580
+ - type IV -- zero at zero frequency
581
+
582
+ .. versionadded:: 0.9.0
583
+
584
+ References
585
+ ----------
586
+ .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
587
+ Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
588
+ (See, for example, Section 7.4.)
589
+
590
+ .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
591
+ Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
592
+
593
+ Examples
594
+ --------
595
+ A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
596
+ that decreases linearly on [0.5, 1.0] from 1 to 0:
597
+
598
+ >>> from scipy import signal
599
+ >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
600
+ >>> print(taps[72:78])
601
+ [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
602
+
603
+ """
604
+ fs = _validate_fs(fs, allow_none=True)
605
+ nyq = 0.5 * _get_fs(fs, nyq)
606
+
607
+ if len(freq) != len(gain):
608
+ raise ValueError('freq and gain must be of same length.')
609
+
610
+ if nfreqs is not None and numtaps >= nfreqs:
611
+ raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
612
+ 'called with ntaps=%d and nfreqs=%s') %
613
+ (numtaps, nfreqs))
614
+
615
+ if freq[0] != 0 or freq[-1] != nyq:
616
+ raise ValueError('freq must start with 0 and end with fs/2.')
617
+ d = np.diff(freq)
618
+ if (d < 0).any():
619
+ raise ValueError('The values in freq must be nondecreasing.')
620
+ d2 = d[:-1] + d[1:]
621
+ if (d2 == 0).any():
622
+ raise ValueError('A value in freq must not occur more than twice.')
623
+ if freq[1] == 0:
624
+ raise ValueError('Value 0 must not be repeated in freq')
625
+ if freq[-2] == nyq:
626
+ raise ValueError('Value fs/2 must not be repeated in freq')
627
+
628
+ if antisymmetric:
629
+ if numtaps % 2 == 0:
630
+ ftype = 4
631
+ else:
632
+ ftype = 3
633
+ else:
634
+ if numtaps % 2 == 0:
635
+ ftype = 2
636
+ else:
637
+ ftype = 1
638
+
639
+ if ftype == 2 and gain[-1] != 0.0:
640
+ raise ValueError("A Type II filter must have zero gain at the "
641
+ "Nyquist frequency.")
642
+ elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
643
+ raise ValueError("A Type III filter must have zero gain at zero "
644
+ "and Nyquist frequencies.")
645
+ elif ftype == 4 and gain[0] != 0.0:
646
+ raise ValueError("A Type IV filter must have zero gain at zero "
647
+ "frequency.")
648
+
649
+ if nfreqs is None:
650
+ nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
651
+
652
+ if (d == 0).any():
653
+ # Tweak any repeated values in freq so that interp works.
654
+ freq = np.array(freq, copy=True)
655
+ eps = np.finfo(float).eps * nyq
656
+ for k in range(len(freq) - 1):
657
+ if freq[k] == freq[k + 1]:
658
+ freq[k] = freq[k] - eps
659
+ freq[k + 1] = freq[k + 1] + eps
660
+ # Check if freq is strictly increasing after tweak
661
+ d = np.diff(freq)
662
+ if (d <= 0).any():
663
+ raise ValueError("freq cannot contain numbers that are too close "
664
+ "(within eps * (fs/2): "
665
+ f"{eps}) to a repeated value")
666
+
667
+ # Linearly interpolate the desired response on a uniform mesh `x`.
668
+ x = np.linspace(0.0, nyq, nfreqs)
669
+ fx = np.interp(x, freq, gain)
670
+
671
+ # Adjust the phases of the coefficients so that the first `ntaps` of the
672
+ # inverse FFT are the desired filter coefficients.
673
+ shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
674
+ if ftype > 2:
675
+ shift *= 1j
676
+
677
+ fx2 = fx * shift
678
+
679
+ # Use irfft to compute the inverse FFT.
680
+ out_full = irfft(fx2)
681
+
682
+ if window is not None:
683
+ # Create the window to apply to the filter coefficients.
684
+ from .windows import get_window
685
+ wind = get_window(window, numtaps, fftbins=False)
686
+ else:
687
+ wind = 1
688
+
689
+ # Keep only the first `numtaps` coefficients in `out`, and multiply by
690
+ # the window.
691
+ out = out_full[:numtaps] * wind
692
+
693
+ if ftype == 3:
694
+ out[out.size // 2] = 0.0
695
+
696
+ return out
697
+
698
+
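A minimal sketch of the frequency-sampling construction described in the Notes above, assuming NumPy/SciPy are available; the tap count, mesh size, and band edges are illustrative choices, not values taken from this file.

import numpy as np
from scipy.fft import irfft
from scipy.signal import firwin2, get_window

numtaps, nfreqs = 101, 257          # illustrative sizes; nfreqs > numtaps
freq = [0.0, 0.5, 1.0]              # band edges, normalized so Nyquist == 1
gain = [1.0, 1.0, 0.0]              # desired gain at those edges

x = np.linspace(0.0, 1.0, nfreqs)                        # uniform frequency mesh
fx = np.interp(x, freq, gain)                            # interpolated desired response
shift = np.exp(-(numtaps - 1) / 2.0 * 1j * np.pi * x)    # linear-phase term
taps = irfft(fx * shift)[:numtaps] * get_window("hamming", numtaps, fftbins=False)

# The hand-rolled kernel should agree with the library routine for the same spec.
print(np.allclose(taps, firwin2(numtaps, freq, gain, nfreqs=nfreqs)))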
699
+ @_deprecate_positional_args(version="1.14")
700
+ def remez(numtaps, bands, desired, *, weight=None, Hz=_NoValue, type='bandpass',
701
+ maxiter=25, grid_density=16, fs=None):
702
+ """
703
+ Calculate the minimax optimal filter using the Remez exchange algorithm.
704
+
705
+ Calculate the filter-coefficients for the finite impulse response
706
+ (FIR) filter whose transfer function minimizes the maximum error
707
+ between the desired gain and the realized gain in the specified
708
+ frequency bands using the Remez exchange algorithm.
709
+
710
+ Parameters
711
+ ----------
712
+ numtaps : int
713
+ The desired number of taps in the filter. The number of taps is
714
+ the number of terms in the filter, or the filter order plus one.
715
+ bands : array_like
716
+ A monotonic sequence containing the band edges.
717
+ All elements must be non-negative and less than half the sampling
718
+ frequency as given by `fs`.
719
+ desired : array_like
720
+ A sequence half the size of bands containing the desired gain
721
+ in each of the specified bands.
722
+ weight : array_like, optional
723
+ A relative weighting to give to each band region. The length of
724
+ `weight` has to be half the length of `bands`.
725
+ Hz : scalar, optional, deprecated
726
+ The sampling frequency in Hz. Default is 1.
727
+
728
+ .. deprecated:: 1.0.0
729
+ `remez` keyword argument `Hz` is deprecated in favour of `fs` and
730
+ will be removed in SciPy 1.14.0.
731
+ type : {'bandpass', 'differentiator', 'hilbert'}, optional
732
+ The type of filter:
733
+
734
+ * 'bandpass' : flat response in bands. This is the default.
735
+
736
+ * 'differentiator' : frequency proportional response in bands.
737
+
738
+ * 'hilbert' : filter with odd symmetry, that is, type III
739
+ (for even order) or type IV (for odd order)
740
+ linear phase filters.
741
+
742
+ maxiter : int, optional
743
+ Maximum number of iterations of the algorithm. Default is 25.
744
+ grid_density : int, optional
745
+ Grid density. The dense grid used in `remez` is of size
746
+ ``(numtaps + 1) * grid_density``. Default is 16.
747
+ fs : float, optional
748
+ The sampling frequency of the signal. Default is 1.
749
+
750
+ Returns
751
+ -------
752
+ out : ndarray
753
+ A rank-1 array containing the coefficients of the optimal
754
+ (in a minimax sense) filter.
755
+
756
+ See Also
757
+ --------
758
+ firls
759
+ firwin
760
+ firwin2
761
+ minimum_phase
762
+
763
+ References
764
+ ----------
765
+ .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
766
+ design of optimum FIR linear phase digital filters",
767
+ IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
768
+ .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
769
+ Program for Designing Optimum FIR Linear Phase Digital
770
+ Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
771
+ pp. 506-525, 1973.
772
+
773
+ Examples
774
+ --------
775
+ In these examples, `remez` is used to design low-pass, high-pass,
776
+ band-pass and band-stop filters. The parameters that define each filter
777
+ are the filter order, the band boundaries, the transition widths of the
778
+ boundaries, the desired gains in each band, and the sampling frequency.
779
+
780
+ We'll use a sample frequency of 22050 Hz in all the examples. In each
781
+ example, the desired gain in each band is either 0 (for a stop band)
782
+ or 1 (for a pass band).
783
+
784
+ `freqz` is used to compute the frequency response of each filter, and
785
+ the utility function ``plot_response`` defined below is used to plot
786
+ the response.
787
+
788
+ >>> import numpy as np
789
+ >>> from scipy import signal
790
+ >>> import matplotlib.pyplot as plt
791
+
792
+ >>> fs = 22050 # Sample rate, Hz
793
+
794
+ >>> def plot_response(w, h, title):
795
+ ... "Utility function to plot response functions"
796
+ ... fig = plt.figure()
797
+ ... ax = fig.add_subplot(111)
798
+ ... ax.plot(w, 20*np.log10(np.abs(h)))
799
+ ... ax.set_ylim(-40, 5)
800
+ ... ax.grid(True)
801
+ ... ax.set_xlabel('Frequency (Hz)')
802
+ ... ax.set_ylabel('Gain (dB)')
803
+ ... ax.set_title(title)
804
+
805
+ The first example is a low-pass filter, with cutoff frequency 8 kHz.
806
+ The filter length is 325, and the transition width from pass to stop
807
+ is 100 Hz.
808
+
809
+ >>> cutoff = 8000.0 # Desired cutoff frequency, Hz
810
+ >>> trans_width = 100 # Width of transition from pass to stop, Hz
811
+ >>> numtaps = 325 # Size of the FIR filter.
812
+ >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs],
813
+ ... [1, 0], fs=fs)
814
+ >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
815
+ >>> plot_response(w, h, "Low-pass Filter")
816
+ >>> plt.show()
817
+
818
+ This example shows a high-pass filter:
819
+
820
+ >>> cutoff = 2000.0 # Desired cutoff frequency, Hz
821
+ >>> trans_width = 250 # Width of transition from pass to stop, Hz
822
+ >>> numtaps = 125 # Size of the FIR filter.
823
+ >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs],
824
+ ... [0, 1], fs=fs)
825
+ >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
826
+ >>> plot_response(w, h, "High-pass Filter")
827
+ >>> plt.show()
828
+
829
+ This example shows a band-pass filter with a pass-band from 2 kHz to
830
+ 5 kHz. The transition width is 260 Hz and the length of the filter
831
+ is 63, which is smaller than in the other examples:
832
+
833
+ >>> band = [2000, 5000] # Desired pass band, Hz
834
+ >>> trans_width = 260 # Width of transition from pass to stop, Hz
835
+ >>> numtaps = 63 # Size of the FIR filter.
836
+ >>> edges = [0, band[0] - trans_width, band[0], band[1],
837
+ ... band[1] + trans_width, 0.5*fs]
838
+ >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs)
839
+ >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
840
+ >>> plot_response(w, h, "Band-pass Filter")
841
+ >>> plt.show()
842
+
843
+ The low order leads to higher ripple and less steep transitions.
844
+
845
+ The next example shows a band-stop filter.
846
+
847
+ >>> band = [6000, 8000] # Desired stop band, Hz
848
+ >>> trans_width = 200 # Width of transition from pass to stop, Hz
849
+ >>> numtaps = 175 # Size of the FIR filter.
850
+ >>> edges = [0, band[0] - trans_width, band[0], band[1],
851
+ ... band[1] + trans_width, 0.5*fs]
852
+ >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs)
853
+ >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
854
+ >>> plot_response(w, h, "Band-stop Filter")
855
+ >>> plt.show()
856
+
857
+ """
858
+ fs = _validate_fs(fs, allow_none=True)
859
+ if Hz is _NoValue and fs is None:
860
+ fs = 1.0
861
+ elif Hz is not _NoValue:
862
+ if fs is not None:
863
+ raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
864
+ msg = ("'remez' keyword argument 'Hz' is deprecated in favour of 'fs'"
865
+ " and will be removed in SciPy 1.14.0.")
866
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
867
+ fs = Hz
868
+
869
+ # Convert type
870
+ try:
871
+ tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
872
+ except KeyError as e:
873
+ raise ValueError("Type must be 'bandpass', 'differentiator', "
874
+ "or 'hilbert'") from e
875
+
876
+ # Convert weight
877
+ if weight is None:
878
+ weight = [1] * len(desired)
879
+
880
+ bands = np.asarray(bands).copy()
881
+ return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
882
+ maxiter, grid_density)
883
+
884
+
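The docstring examples above cover the 'bandpass' designs; as a small hedged sketch, the 'hilbert' type yields an antisymmetric (type III/IV) impulse response, which can be checked directly. The band edges and tap count below are illustrative.

import numpy as np
from scipy.signal import remez

numtaps = 31                                   # odd -> type III linear phase
taps = remez(numtaps, [0.05, 0.45], [1.0], type='hilbert', fs=1.0)

# Antisymmetry h[n] == -h[N-1-n] is the defining property of type III/IV filters.
print(np.allclose(taps, -taps[::-1]))          # expected to print True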
885
+ @_deprecate_positional_args(version="1.14")
886
+ def firls(numtaps, bands, desired, *, weight=None, nyq=_NoValue, fs=None):
887
+ """
888
+ FIR filter design using least-squares error minimization.
889
+
890
+ Calculate the filter coefficients for the linear-phase finite
891
+ impulse response (FIR) filter which has the best approximation
892
+ to the desired frequency response described by `bands` and
893
+ `desired` in the least squares sense (i.e., the integral of the
894
+ weighted mean-squared error within the specified bands is
895
+ minimized).
896
+
897
+ Parameters
898
+ ----------
899
+ numtaps : int
900
+ The number of taps in the FIR filter. `numtaps` must be odd.
901
+ bands : array_like
902
+ A monotonic nondecreasing sequence containing the band edges in
903
+ Hz. All elements must be non-negative and less than or equal to
904
+ the Nyquist frequency given by `nyq`. The bands are specified as
905
+ frequency pairs, thus, if using a 1D array, its length must be
906
+ even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. Alternatively, the
907
+ bands can be specified as an nx2 sized 2D array, where n is the
908
+ number of bands, e.g., `np.array([[0, 1], [2, 3], [4, 5]])`.
909
+ desired : array_like
910
+ A sequence the same size as `bands` containing the desired gain
911
+ at the start and end point of each band.
912
+ weight : array_like, optional
913
+ A relative weighting to give to each band region when solving
914
+ the least squares problem. `weight` has to be half the size of
915
+ `bands`.
916
+ nyq : float, optional, deprecated
917
+ This is the Nyquist frequency. Each frequency in `bands` must be
918
+ between 0 and `nyq` (inclusive). Default is 1.
919
+
920
+ .. deprecated:: 1.0.0
921
+ `firls` keyword argument `nyq` is deprecated in favour of `fs` and
922
+ will be removed in SciPy 1.14.0.
923
+ fs : float, optional
924
+ The sampling frequency of the signal. Each frequency in `bands`
925
+ must be between 0 and ``fs/2`` (inclusive). Default is 2.
926
+
927
+ Returns
928
+ -------
929
+ coeffs : ndarray
930
+ Coefficients of the optimal (in a least squares sense) FIR filter.
931
+
932
+ See Also
933
+ --------
934
+ firwin
935
+ firwin2
936
+ minimum_phase
937
+ remez
938
+
939
+ Notes
940
+ -----
941
+ This implementation follows the algorithm given in [1]_.
942
+ As noted there, least squares design has multiple advantages:
943
+
944
+ 1. Optimal in a least-squares sense.
945
+ 2. Simple, non-iterative method.
946
+ 3. The general solution can be obtained by solving a linear
947
+ system of equations.
948
+ 4. Allows the use of a frequency dependent weighting function.
949
+
950
+ This function constructs a Type I linear phase FIR filter, which
951
+ contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
952
+
953
+ .. math:: coeffs(n) = coeffs(numtaps - 1 - n)
954
+
955
+ The odd number of coefficients and filter symmetry avoid boundary
956
+ conditions that could otherwise occur at the Nyquist and 0 frequencies
957
+ (e.g., for Type II, III, or IV variants).
958
+
959
+ .. versionadded:: 0.18
960
+
961
+ References
962
+ ----------
963
+ .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
964
+ OpenStax CNX. Aug 9, 2005.
965
+ http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
966
+
967
+ Examples
968
+ --------
969
+ We want to construct a band-pass filter. Note that the behavior in the
970
+ frequency ranges between our stop bands and pass bands is unspecified,
971
+ and thus may overshoot depending on the parameters of our filter:
972
+
973
+ >>> import numpy as np
974
+ >>> from scipy import signal
975
+ >>> import matplotlib.pyplot as plt
976
+ >>> fig, axs = plt.subplots(2)
977
+ >>> fs = 10.0 # Hz
978
+ >>> desired = (0, 0, 1, 1, 0, 0)
979
+ >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
980
+ ... fir_firls = signal.firls(73, bands, desired, fs=fs)
981
+ ... fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
982
+ ... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
983
+ ... hs = list()
984
+ ... ax = axs[bi]
985
+ ... for fir in (fir_firls, fir_remez, fir_firwin2):
986
+ ... freq, response = signal.freqz(fir)
987
+ ... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
988
+ ... for band, gains in zip(zip(bands[::2], bands[1::2]),
989
+ ... zip(desired[::2], desired[1::2])):
990
+ ... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
991
+ ... if bi == 0:
992
+ ... ax.legend(hs, ('firls', 'remez', 'firwin2'),
993
+ ... loc='lower center', frameon=False)
994
+ ... else:
995
+ ... ax.set_xlabel('Frequency (Hz)')
996
+ ... ax.grid(True)
997
+ ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
998
+ ...
999
+ >>> fig.tight_layout()
1000
+ >>> plt.show()
1001
+
1002
+ """
1003
+ fs = _validate_fs(fs, allow_none=True)
1004
+ nyq = 0.5 * _get_fs(fs, nyq)
1005
+
1006
+ numtaps = int(numtaps)
1007
+ if numtaps % 2 == 0 or numtaps < 1:
1008
+ raise ValueError("numtaps must be odd and >= 1")
1009
+ M = (numtaps-1) // 2
1010
+
1011
+ # normalize bands 0->1 and make it 2 columns
1012
+ nyq = float(nyq)
1013
+ if nyq <= 0:
1014
+ raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
1015
+ bands = np.asarray(bands).flatten() / nyq
1016
+ if len(bands) % 2 != 0:
1017
+ raise ValueError("bands must contain frequency pairs.")
1018
+ if (bands < 0).any() or (bands > 1).any():
1019
+ raise ValueError("bands must be between 0 and 1 relative to Nyquist")
1020
+ bands.shape = (-1, 2)
1021
+
1022
+ # check remaining params
1023
+ desired = np.asarray(desired).flatten()
1024
+ if bands.size != desired.size:
1025
+ raise ValueError("desired must have one entry per frequency, got {} "
1026
+ "gains for {} frequencies.".format(desired.size, bands.size))
1027
+ desired.shape = (-1, 2)
1028
+ if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
1029
+ raise ValueError("bands must be monotonically nondecreasing and have "
1030
+ "width > 0.")
1031
+ if (bands[:-1, 1] > bands[1:, 0]).any():
1032
+ raise ValueError("bands must not overlap.")
1033
+ if (desired < 0).any():
1034
+ raise ValueError("desired must be non-negative.")
1035
+ if weight is None:
1036
+ weight = np.ones(len(desired))
1037
+ weight = np.asarray(weight).flatten()
1038
+ if len(weight) != len(desired):
1039
+ raise ValueError("weight must be the same size as the number of "
1040
+ f"band pairs ({len(bands)}).")
1041
+ if (weight < 0).any():
1042
+ raise ValueError("weight must be non-negative.")
1043
+
1044
+ # Set up the linear matrix equation to be solved, Qa = b
1045
+
1046
+ # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
1047
+ # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
1048
+
1049
+ # We omit the factor of 0.5 above, instead adding it during coefficient
1050
+ # calculation.
1051
+
1052
+ # We also omit the 1/π from both Q and b equations, as they cancel
1053
+ # during solving.
1054
+
1055
+ # We have that:
1056
+ # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
1057
+ # Using our normalization ω=πf and with a constant weight W over each
1058
+ # interval f1->f2 we get:
1059
+ # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
1060
+ # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
1061
+ n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
1062
+ q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
1063
+
1064
+ # Now we assemble our sum of Toeplitz and Hankel
1065
+ Q1 = toeplitz(q[:M+1])
1066
+ Q2 = hankel(q[:M+1], q[M:])
1067
+ Q = Q1 + Q2
1068
+
1069
+ # Now for b(n) we have that:
1070
+ # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
1071
+ # Using our normalization ω=πf and with a constant weight W over each
1072
+ # interval and a linear term for D(ω) we get (over each f1->f2 interval):
1073
+ # b(n) = W ∫ (mf+c)cos(πnf)df
1074
+ # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
1075
+ # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
1076
+ n = n[:M + 1] # only need this many coefficients here
1077
+ # Choose m and c such that we are at the start and end weights
1078
+ m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
1079
+ c = desired[:, [0]] - bands[:, [0]] * m
1080
+ b = bands * (m*bands + c) * np.sinc(bands * n)
1081
+ # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
1082
+ b[0] -= m * bands * bands / 2.
1083
+ b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
1084
+ b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
1085
+
1086
+ # Now we can solve the equation
1087
+ try: # try the fast way
1088
+ with warnings.catch_warnings(record=True) as w:
1089
+ warnings.simplefilter('always')
1090
+ a = solve(Q, b, assume_a="pos", check_finite=False)
1091
+ for ww in w:
1092
+ if (ww.category == LinAlgWarning and
1093
+ str(ww.message).startswith('Ill-conditioned matrix')):
1094
+ raise LinAlgError(str(ww.message))
1095
+ except LinAlgError: # in case Q is rank deficient
1096
+ # This is faster than pinvh, even though we don't explicitly use
1097
+ # the symmetry here. gelsy was faster than gelsd and gelss in
1098
+ # some non-exhaustive tests.
1099
+ a = lstsq(Q, b, lapack_driver='gelsy')[0]
1100
+
1101
+ # make coefficients symmetric (linear phase)
1102
+ coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
1103
+ return coeffs
1104
+
1105
+
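A quick sketch of the type I symmetry stated in the Notes above, coeffs(n) == coeffs(numtaps - 1 - n); the design specification is illustrative only.

import numpy as np
from scipy.signal import firls

taps = firls(41, [0, 0.2, 0.3, 1.0], [1, 1, 0, 0], fs=2.0)   # odd numtaps
print(np.allclose(taps, taps[::-1]))   # should be True for a type I design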
1106
+ def _dhtm(mag):
1107
+ """Compute the modified 1-D discrete Hilbert transform
1108
+
1109
+ Parameters
1110
+ ----------
1111
+ mag : ndarray
1112
+ The magnitude spectrum. Should be 1-D with an even length, and
1113
+ preferably a fast length for FFT/IFFT.
1114
+ """
1115
+ # Adapted based on code by Niranjan Damera-Venkata,
1116
+ # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
1117
+ sig = np.zeros(len(mag))
1118
+ # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
1119
+ midpt = len(mag) // 2
1120
+ sig[1:midpt] = 1
1121
+ sig[midpt+1:] = -1
1122
+ # eventually if we want to support complex filters, we will need a
1123
+ # np.abs() on the mag inside the log, and should remove the .real
1124
+ recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
1125
+ return recon
1126
+
1127
+
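A small sketch of what `_dhtm` delivers, assuming it is imported from this (private) module: the reconstructed impulse response keeps the supplied magnitude spectrum while acquiring minimum phase. The example spectrum is an arbitrary smooth, strictly positive, even-symmetric choice.

import numpy as np
from scipy.fft import fft
from scipy.signal._fir_filter_design import _dhtm   # private helper, defined above

n = 256                                                   # even length, as required
mag = 1.0 + 0.5 * np.cos(2 * np.pi * np.arange(n) / n)    # positive, symmetric spectrum
h_min = _dhtm(mag)

# The FFT magnitude of the reconstruction should match the input spectrum.
print(np.allclose(np.abs(fft(h_min)), mag, atol=1e-6))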
1128
+ def minimum_phase(h, method='homomorphic', n_fft=None):
1129
+ """Convert a linear-phase FIR filter to minimum phase
1130
+
1131
+ Parameters
1132
+ ----------
1133
+ h : array
1134
+ Linear-phase FIR filter coefficients.
1135
+ method : {'hilbert', 'homomorphic'}
1136
+ The method to use:
1137
+
1138
+ 'homomorphic' (default)
1139
+ This method [4]_ [5]_ works best with filters with an
1140
+ odd number of taps, and the resulting minimum phase filter
1141
+ will have a magnitude response that approximates the square
1142
+ root of the original filter's magnitude response.
1143
+
1144
+ 'hilbert'
1145
+ This method [1]_ is designed to be used with equiripple
1146
+ filters (e.g., from `remez`) with unity or zero gain
1147
+ regions.
1148
+
1149
+ n_fft : int
1150
+ The number of points to use for the FFT. Should be at least a
1151
+ few times larger than the signal length (see Notes).
1152
+
1153
+ Returns
1154
+ -------
1155
+ h_minimum : array
1156
+ The minimum-phase version of the filter, with length
1157
+ ``(length(h) + 1) // 2``.
1158
+
1159
+ See Also
1160
+ --------
1161
+ firwin
1162
+ firwin2
1163
+ remez
1164
+
1165
+ Notes
1166
+ -----
1167
+ Both the Hilbert [1]_ and homomorphic [4]_ [5]_ methods require selection
1168
+ of an FFT length to estimate the complex cepstrum of the filter.
1169
+
1170
+ In the case of the Hilbert method, the deviation from the ideal
1171
+ spectrum ``epsilon`` is related to the number of stopband zeros
1172
+ ``n_stop`` and FFT length ``n_fft`` as::
1173
+
1174
+ epsilon = 2. * n_stop / n_fft
1175
+
1176
+ For example, with 100 stopband zeros and an FFT length of 2048,
1177
+ ``epsilon = 0.0976``. If we conservatively assume that the number of
1178
+ stopband zeros is one less than the filter length, we can take the FFT
1179
+ length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
1180
+
1181
+ n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
1182
+
1183
+ This gives reasonable results for both the Hilbert and homomorphic
1184
+ methods, and gives the value used when ``n_fft=None``.
1185
+
1186
+ Alternative implementations exist for creating minimum-phase filters,
1187
+ including zero inversion [2]_ and spectral factorization [3]_ [4]_.
1188
+ For more information, see:
1189
+
1190
+ http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
1191
+
1192
+ References
1193
+ ----------
1194
+ .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
1195
+ complex minimum phase digital FIR filters," Acoustics, Speech,
1196
+ and Signal Processing, 1999. Proceedings., 1999 IEEE International
1197
+ Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
1198
+ :doi:`10.1109/ICASSP.1999.756179`
1199
+ .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
1200
+ filters by direct factorization," Signal Processing,
1201
+ vol. 10, no. 4, pp. 369-383, Jun. 1986.
1202
+ .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
1203
+ Handbook for Digital Signal Processing, chapter 4,
1204
+ New York: Wiley-Interscience, 1993.
1205
+ .. [4] J. S. Lim, Advanced Topics in Signal Processing.
1206
+ Englewood Cliffs, N.J.: Prentice Hall, 1988.
1207
+ .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
1208
+ "Discrete-Time Signal Processing," 2nd edition.
1209
+ Upper Saddle River, N.J.: Prentice Hall, 1999.
1210
+
1211
+ Examples
1212
+ --------
1213
+ Create an optimal linear-phase filter, then convert it to minimum phase:
1214
+
1215
+ >>> import numpy as np
1216
+ >>> from scipy.signal import remez, minimum_phase, freqz, group_delay
1217
+ >>> import matplotlib.pyplot as plt
1218
+ >>> freq = [0, 0.2, 0.3, 1.0]
1219
+ >>> desired = [1, 0]
1220
+ >>> h_linear = remez(151, freq, desired, fs=2.)
1221
+
1222
+ Convert it to minimum phase:
1223
+
1224
+ >>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
1225
+ >>> h_min_hil = minimum_phase(h_linear, method='hilbert')
1226
+
1227
+ Compare the three filters:
1228
+
1229
+ >>> fig, axs = plt.subplots(4, figsize=(4, 8))
1230
+ >>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
1231
+ ... ('-', '-', '--'), ('k', 'r', 'c')):
1232
+ ... w, H = freqz(h)
1233
+ ... w, gd = group_delay((h, 1))
1234
+ ... w /= np.pi
1235
+ ... axs[0].plot(h, color=color, linestyle=style)
1236
+ ... axs[1].plot(w, np.abs(H), color=color, linestyle=style)
1237
+ ... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
1238
+ ... axs[3].plot(w, gd, color=color, linestyle=style)
1239
+ >>> for ax in axs:
1240
+ ... ax.grid(True, color='0.5')
1241
+ ... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
1242
+ >>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
1243
+ >>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
1244
+ >>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
1245
+ ... ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
1246
+ >>> axs[1].set(ylabel='Magnitude')
1247
+ >>> axs[2].set(ylabel='Magnitude (dB)')
1248
+ >>> axs[3].set(ylabel='Group delay')
1249
+ >>> plt.tight_layout()
1250
+
1251
+ """
1252
+ h = np.asarray(h)
1253
+ if np.iscomplexobj(h):
1254
+ raise ValueError('Complex filters not supported')
1255
+ if h.ndim != 1 or h.size <= 2:
1256
+ raise ValueError('h must be 1-D and at least 2 samples long')
1257
+ n_half = len(h) // 2
1258
+ if not np.allclose(h[-n_half:][::-1], h[:n_half]):
1259
+ warnings.warn('h does not appear to be symmetric, conversion may fail',
1260
+ RuntimeWarning, stacklevel=2)
1261
+ if not isinstance(method, str) or method not in \
1262
+ ('homomorphic', 'hilbert',):
1263
+ raise ValueError(f'method must be "homomorphic" or "hilbert", got {method!r}')
1264
+ if n_fft is None:
1265
+ n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
1266
+ n_fft = int(n_fft)
1267
+ if n_fft < len(h):
1268
+ raise ValueError('n_fft must be at least len(h)==%s' % len(h))
1269
+ if method == 'hilbert':
1270
+ w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
1271
+ H = np.real(fft(h, n_fft) * np.exp(1j * w))
1272
+ dp = max(H) - 1
1273
+ ds = 0 - min(H)
1274
+ S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
1275
+ H += ds
1276
+ H *= S
1277
+ H = np.sqrt(H, out=H)
1278
+ H += 1e-10 # ensure that the log does not explode
1279
+ h_minimum = _dhtm(H)
1280
+ else: # method == 'homomorphic'
1281
+ # zero-pad; calculate the DFT
1282
+ h_temp = np.abs(fft(h, n_fft))
1283
+ # take 0.25*log(|H|**2) = 0.5*log(|H|)
1284
+ h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
1285
+ np.log(h_temp, out=h_temp)
1286
+ h_temp *= 0.5
1287
+ # IDFT
1288
+ h_temp = ifft(h_temp).real
1289
+ # multiply pointwise by the homomorphic filter
1290
+ # lmin[n] = 2u[n] - d[n]
1291
+ win = np.zeros(n_fft)
1292
+ win[0] = 1
1293
+ stop = (len(h) + 1) // 2
1294
+ win[1:stop] = 2
1295
+ if len(h) % 2:
1296
+ win[stop] = 1
1297
+ h_temp *= win
1298
+ h_temp = ifft(np.exp(fft(h_temp)))
1299
+ h_minimum = h_temp.real
1300
+ n_out = n_half + len(h) % 2
1301
+ return h_minimum[:n_out]
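A short numeric sketch of the default ``n_fft`` rule quoted in the Notes of `minimum_phase`; the 151-tap filter length is an illustrative assumption.

import numpy as np

len_h = 151
n_stop = len_h - 1                                # conservative stopband-zero count
n_fft = 2 ** int(np.ceil(np.log2(2 * n_stop / 0.01)))
epsilon = 2.0 * n_stop / n_fft                    # deviation bound from the Notes
print(n_fft, epsilon)                             # 32768 and roughly 0.0092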
env-llmeval/lib/python3.10/site-packages/scipy/signal/_ltisys.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/_peak_finding.py ADDED
@@ -0,0 +1,1312 @@
1
+ """
2
+ Functions for identifying peaks in signals.
3
+ """
4
+ import math
5
+ import numpy as np
6
+
7
+ from scipy.signal._wavelets import _cwt, _ricker
8
+ from scipy.stats import scoreatpercentile
9
+
10
+ from ._peak_finding_utils import (
11
+ _local_maxima_1d,
12
+ _select_by_peak_distance,
13
+ _peak_prominences,
14
+ _peak_widths
15
+ )
16
+
17
+
18
+ __all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
19
+ 'peak_widths', 'find_peaks', 'find_peaks_cwt']
20
+
21
+
22
+ def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
23
+ """
24
+ Calculate the relative extrema of `data`.
25
+
26
+ Relative extrema are calculated by finding locations where
27
+ ``comparator(data[n], data[n+1:n+order+1])`` is True.
28
+
29
+ Parameters
30
+ ----------
31
+ data : ndarray
32
+ Array in which to find the relative extrema.
33
+ comparator : callable
34
+ Function to use to compare two data points.
35
+ Should take two arrays as arguments.
36
+ axis : int, optional
37
+ Axis over which to select from `data`. Default is 0.
38
+ order : int, optional
39
+ How many points on each side to use for the comparison
40
+ to consider ``comparator(n,n+x)`` to be True.
41
+ mode : str, optional
42
+ How the edges of the vector are treated. 'wrap' (wrap around) or
43
+ 'clip' (treat overflow as the same as the last (or first) element).
44
+ Default 'clip'. See numpy.take.
45
+
46
+ Returns
47
+ -------
48
+ extrema : ndarray
49
+ Boolean array of the same shape as `data` that is True at an extrema,
50
+ False otherwise.
51
+
52
+ See also
53
+ --------
54
+ argrelmax, argrelmin
55
+
56
+ Examples
57
+ --------
58
+ >>> import numpy as np
59
+ >>> from scipy.signal._peak_finding import _boolrelextrema
60
+ >>> testdata = np.array([1,2,3,2,1])
61
+ >>> _boolrelextrema(testdata, np.greater, axis=0)
62
+ array([False, False, True, False, False], dtype=bool)
63
+
64
+ """
65
+ if (int(order) != order) or (order < 1):
66
+ raise ValueError('Order must be an int >= 1')
67
+
68
+ datalen = data.shape[axis]
69
+ locs = np.arange(0, datalen)
70
+
71
+ results = np.ones(data.shape, dtype=bool)
72
+ main = data.take(locs, axis=axis, mode=mode)
73
+ for shift in range(1, order + 1):
74
+ plus = data.take(locs + shift, axis=axis, mode=mode)
75
+ minus = data.take(locs - shift, axis=axis, mode=mode)
76
+ results &= comparator(main, plus)
77
+ results &= comparator(main, minus)
78
+ if ~results.any():
79
+ return results
80
+ return results
81
+
82
+
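A brief sketch of how the `order` argument widens the comparison window used by `_boolrelextrema` and the `argrel*` wrappers below; the sample data are illustrative.

import numpy as np
from scipy.signal import argrelmax

x = np.array([1, 3, 2, 4, 2, 3, 1])
print(argrelmax(x, order=1)[0])   # [1 3 5]: each beats its direct neighbours
print(argrelmax(x, order=2)[0])   # [3]: must beat two neighbours on each side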
83
+ def argrelmin(data, axis=0, order=1, mode='clip'):
84
+ """
85
+ Calculate the relative minima of `data`.
86
+
87
+ Parameters
88
+ ----------
89
+ data : ndarray
90
+ Array in which to find the relative minima.
91
+ axis : int, optional
92
+ Axis over which to select from `data`. Default is 0.
93
+ order : int, optional
94
+ How many points on each side to use for the comparison
95
+ to consider ``comparator(n, n+x)`` to be True.
96
+ mode : str, optional
97
+ How the edges of the vector are treated.
98
+ Available options are 'wrap' (wrap around) or 'clip' (treat overflow
99
+ as the same as the last (or first) element).
100
+ Default 'clip'. See numpy.take.
101
+
102
+ Returns
103
+ -------
104
+ extrema : tuple of ndarrays
105
+ Indices of the minima in arrays of integers. ``extrema[k]`` is
106
+ the array of indices of axis `k` of `data`. Note that the
107
+ return value is a tuple even when `data` is 1-D.
108
+
109
+ See Also
110
+ --------
111
+ argrelextrema, argrelmax, find_peaks
112
+
113
+ Notes
114
+ -----
115
+ This function uses `argrelextrema` with np.less as comparator. Therefore, it
116
+ requires a strict inequality on both sides of a value to consider it a
117
+ minimum. This means flat minima (more than one sample wide) are not detected.
118
+ In case of 1-D `data` `find_peaks` can be used to detect all
119
+ local minima, including flat ones, by calling it with negated `data`.
120
+
121
+ .. versionadded:: 0.11.0
122
+
123
+ Examples
124
+ --------
125
+ >>> import numpy as np
126
+ >>> from scipy.signal import argrelmin
127
+ >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
128
+ >>> argrelmin(x)
129
+ (array([1, 5]),)
130
+ >>> y = np.array([[1, 2, 1, 2],
131
+ ... [2, 2, 0, 0],
132
+ ... [5, 3, 4, 4]])
133
+ ...
134
+ >>> argrelmin(y, axis=1)
135
+ (array([0, 2]), array([2, 1]))
136
+
137
+ """
138
+ return argrelextrema(data, np.less, axis, order, mode)
139
+
140
+
141
+ def argrelmax(data, axis=0, order=1, mode='clip'):
142
+ """
143
+ Calculate the relative maxima of `data`.
144
+
145
+ Parameters
146
+ ----------
147
+ data : ndarray
148
+ Array in which to find the relative maxima.
149
+ axis : int, optional
150
+ Axis over which to select from `data`. Default is 0.
151
+ order : int, optional
152
+ How many points on each side to use for the comparison
153
+ to consider ``comparator(n, n+x)`` to be True.
154
+ mode : str, optional
155
+ How the edges of the vector are treated.
156
+ Available options are 'wrap' (wrap around) or 'clip' (treat overflow
157
+ as the same as the last (or first) element).
158
+ Default 'clip'. See `numpy.take`.
159
+
160
+ Returns
161
+ -------
162
+ extrema : tuple of ndarrays
163
+ Indices of the maxima in arrays of integers. ``extrema[k]`` is
164
+ the array of indices of axis `k` of `data`. Note that the
165
+ return value is a tuple even when `data` is 1-D.
166
+
167
+ See Also
168
+ --------
169
+ argrelextrema, argrelmin, find_peaks
170
+
171
+ Notes
172
+ -----
173
+ This function uses `argrelextrema` with np.greater as comparator. Therefore,
174
+ it requires a strict inequality on both sides of a value to consider it a
175
+ maximum. This means flat maxima (more than one sample wide) are not detected.
176
+ In case of 1-D `data` `find_peaks` can be used to detect all
177
+ local maxima, including flat ones.
178
+
179
+ .. versionadded:: 0.11.0
180
+
181
+ Examples
182
+ --------
183
+ >>> import numpy as np
184
+ >>> from scipy.signal import argrelmax
185
+ >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
186
+ >>> argrelmax(x)
187
+ (array([3, 6]),)
188
+ >>> y = np.array([[1, 2, 1, 2],
189
+ ... [2, 2, 0, 0],
190
+ ... [5, 3, 4, 4]])
191
+ ...
192
+ >>> argrelmax(y, axis=1)
193
+ (array([0]), array([1]))
194
+ """
195
+ return argrelextrema(data, np.greater, axis, order, mode)
196
+
197
+
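A minimal sketch of the flat-maximum caveat mentioned in the Notes of `argrelmax`/`argrelmin`; the data are illustrative.

import numpy as np
from scipy.signal import argrelmax, find_peaks

x = np.array([0, 1, 2, 2, 1, 0])   # a two-sample-wide (flat) maximum
print(argrelmax(x)[0])             # empty: the strict comparison misses it
print(find_peaks(x)[0])            # [2]: the plateau midpoint is reported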
198
+ def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
199
+ """
200
+ Calculate the relative extrema of `data`.
201
+
202
+ Parameters
203
+ ----------
204
+ data : ndarray
205
+ Array in which to find the relative extrema.
206
+ comparator : callable
207
+ Function to use to compare two data points.
208
+ Should take two arrays as arguments.
209
+ axis : int, optional
210
+ Axis over which to select from `data`. Default is 0.
211
+ order : int, optional
212
+ How many points on each side to use for the comparison
213
+ to consider ``comparator(n, n+x)`` to be True.
214
+ mode : str, optional
215
+ How the edges of the vector are treated. 'wrap' (wrap around) or
216
+ 'clip' (treat overflow as the same as the last (or first) element).
217
+ Default is 'clip'. See `numpy.take`.
218
+
219
+ Returns
220
+ -------
221
+ extrema : tuple of ndarrays
222
+ Indices of the maxima in arrays of integers. ``extrema[k]`` is
223
+ the array of indices of axis `k` of `data`. Note that the
224
+ return value is a tuple even when `data` is 1-D.
225
+
226
+ See Also
227
+ --------
228
+ argrelmin, argrelmax
229
+
230
+ Notes
231
+ -----
232
+
233
+ .. versionadded:: 0.11.0
234
+
235
+ Examples
236
+ --------
237
+ >>> import numpy as np
238
+ >>> from scipy.signal import argrelextrema
239
+ >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
240
+ >>> argrelextrema(x, np.greater)
241
+ (array([3, 6]),)
242
+ >>> y = np.array([[1, 2, 1, 2],
243
+ ... [2, 2, 0, 0],
244
+ ... [5, 3, 4, 4]])
245
+ ...
246
+ >>> argrelextrema(y, np.less, axis=1)
247
+ (array([0, 2]), array([2, 1]))
248
+
249
+ """
250
+ results = _boolrelextrema(data, comparator,
251
+ axis, order, mode)
252
+ return np.nonzero(results)
253
+
254
+
255
+ def _arg_x_as_expected(value):
256
+ """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64').
257
+
258
+ Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x`
259
+ compatible with the signature of the wrapped Cython functions.
260
+
261
+ Returns
262
+ -------
263
+ value : ndarray
264
+ A 1-D C-contiguous array with dtype('float64').
265
+ """
266
+ value = np.asarray(value, order='C', dtype=np.float64)
267
+ if value.ndim != 1:
268
+ raise ValueError('`x` must be a 1-D array')
269
+ return value
270
+
271
+
272
+ def _arg_peaks_as_expected(value):
273
+ """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp').
274
+
275
+ Used in `peak_prominences` and `peak_widths` to make `peaks` compatible
276
+ with the signature of the wrapped Cython functions.
277
+
278
+ Returns
279
+ -------
280
+ value : ndarray
281
+ A 1-D C-contiguous array with dtype('intp').
282
+ """
283
+ value = np.asarray(value)
284
+ if value.size == 0:
285
+ # Empty arrays default to np.float64 but are valid input
286
+ value = np.array([], dtype=np.intp)
287
+ try:
288
+ # Safely convert to C-contiguous array of type np.intp
289
+ value = value.astype(np.intp, order='C', casting='safe',
290
+ subok=False, copy=False)
291
+ except TypeError as e:
292
+ raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e
293
+ if value.ndim != 1:
294
+ raise ValueError('`peaks` must be a 1-D array')
295
+ return value
296
+
297
+
298
+ def _arg_wlen_as_expected(value):
299
+ """Ensure argument `wlen` is of type `np.intp` and larger than 1.
300
+
301
+ Used in `peak_prominences` and `peak_widths`.
302
+
303
+ Returns
304
+ -------
305
+ value : np.intp
306
+ The original `value` rounded up to an integer or -1 if `value` was
307
+ None.
308
+ """
309
+ if value is None:
310
+ # _peak_prominences expects an intp; -1 signals that no value was
311
+ # supplied by the user
312
+ value = -1
313
+ elif 1 < value:
314
+ # Round up to a positive integer
315
+ if isinstance(value, float):
316
+ value = math.ceil(value)
317
+ value = np.intp(value)
318
+ else:
319
+ raise ValueError(f'`wlen` must be larger than 1, was {value}')
320
+ return value
321
+
322
+
323
+ def peak_prominences(x, peaks, wlen=None):
324
+ """
325
+ Calculate the prominence of each peak in a signal.
326
+
327
+ The prominence of a peak measures how much a peak stands out from the
328
+ surrounding baseline of the signal and is defined as the vertical distance
329
+ between the peak and its lowest contour line.
330
+
331
+ Parameters
332
+ ----------
333
+ x : sequence
334
+ A signal with peaks.
335
+ peaks : sequence
336
+ Indices of peaks in `x`.
337
+ wlen : int, optional
338
+ A window length in samples that optionally limits the evaluated area for
339
+ each peak to a subset of `x`. The peak is always placed in the middle of
340
+ the window; therefore, the given length is rounded up to the next odd
341
+ integer. This parameter can speed up the calculation (see Notes).
342
+
343
+ Returns
344
+ -------
345
+ prominences : ndarray
346
+ The calculated prominences for each peak in `peaks`.
347
+ left_bases, right_bases : ndarray
348
+ The peaks' bases as indices in `x` to the left and right of each peak.
349
+ The higher base of each pair is a peak's lowest contour line.
350
+
351
+ Raises
352
+ ------
353
+ ValueError
354
+ If a value in `peaks` is an invalid index for `x`.
355
+
356
+ Warns
357
+ -----
358
+ PeakPropertyWarning
359
+ For indices in `peaks` that don't point to valid local maxima in `x`,
360
+ the returned prominence will be 0 and this warning is raised. This
361
+ also happens if `wlen` is smaller than the plateau size of a peak.
362
+
363
+ Warnings
364
+ --------
365
+ This function may return unexpected results for data containing NaNs. To
366
+ avoid this, NaNs should either be removed or replaced.
367
+
368
+ See Also
369
+ --------
370
+ find_peaks
371
+ Find peaks inside a signal based on peak properties.
372
+ peak_widths
373
+ Calculate the width of peaks.
374
+
375
+ Notes
376
+ -----
377
+ Strategy to compute a peak's prominence:
378
+
379
+ 1. Extend a horizontal line from the current peak to the left and right
380
+ until the line either reaches the window border (see `wlen`) or
381
+ intersects the signal again at the slope of a higher peak. An
382
+ intersection with a peak of the same height is ignored.
383
+ 2. On each side find the minimal signal value within the interval defined
384
+ above. These points are the peak's bases.
385
+ 3. The higher one of the two bases marks the peak's lowest contour line. The
386
+ prominence can then be calculated as the vertical difference between the
387
+ peak's height itself and its lowest contour line.
388
+
389
+ Searching for the peak's bases can be slow for large `x` with periodic
390
+ behavior because large chunks or even the full signal need to be evaluated
391
+ for the first algorithmic step. This evaluation area can be limited with the
392
+ parameter `wlen` which restricts the algorithm to a window around the
393
+ current peak and can shorten the calculation time if the window length is
394
+ short in relation to `x`.
395
+ However, this may stop the algorithm from finding the true global contour
396
+ line if the peak's true bases are outside this window. Instead, a higher
397
+ contour line is found within the restricted window leading to a smaller
398
+ calculated prominence. In practice, this is only relevant for the highest set
399
+ of peaks in `x`. This behavior may even be used intentionally to calculate
400
+ "local" prominences.
401
+
402
+ .. versionadded:: 1.1.0
403
+
404
+ References
405
+ ----------
406
+ .. [1] Wikipedia Article for Topographic Prominence:
407
+ https://en.wikipedia.org/wiki/Topographic_prominence
408
+
409
+ Examples
410
+ --------
411
+ >>> import numpy as np
412
+ >>> from scipy.signal import find_peaks, peak_prominences
413
+ >>> import matplotlib.pyplot as plt
414
+
415
+ Create a test signal with two overlaid harmonics
416
+
417
+ >>> x = np.linspace(0, 6 * np.pi, 1000)
418
+ >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
419
+
420
+ Find all peaks and calculate prominences
421
+
422
+ >>> peaks, _ = find_peaks(x)
423
+ >>> prominences = peak_prominences(x, peaks)[0]
424
+ >>> prominences
425
+ array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 ,
426
+ 0.47822491, 2.48340261, 0.47822491])
427
+
428
+ Calculate the height of each peak's contour line and plot the results
429
+
430
+ >>> contour_heights = x[peaks] - prominences
431
+ >>> plt.plot(x)
432
+ >>> plt.plot(peaks, x[peaks], "x")
433
+ >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks])
434
+ >>> plt.show()
435
+
436
+ Let's evaluate a second example that demonstrates several edge cases for
437
+ one peak at index 5.
438
+
439
+ >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
440
+ >>> peaks = np.array([5])
441
+ >>> plt.plot(x)
442
+ >>> plt.plot(peaks, x[peaks], "x")
443
+ >>> plt.show()
444
+ >>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases)
445
+ (array([3.]), array([2]), array([6]))
446
+
447
+ Note how the peak at index 3 of the same height is not considered as a
448
+ border while searching for the left base. Instead, two minima at 0 and 2
449
+ are found in which case the one closer to the evaluated peak is always
450
+ chosen. On the right side, however, the base must be placed at 6 because the
451
+ higher peak represents the right border to the evaluated area.
452
+
453
+ >>> peak_prominences(x, peaks, wlen=3.1)
454
+ (array([2.]), array([4]), array([6]))
455
+
456
+ Here, we restricted the algorithm to a window from 3 to 7 (the length is 5
457
+ samples because `wlen` was rounded up to the next odd integer). Thus, the
458
+ only two candidates in the evaluated area are the two neighboring samples
459
+ and a smaller prominence is calculated.
460
+ """
461
+ x = _arg_x_as_expected(x)
462
+ peaks = _arg_peaks_as_expected(peaks)
463
+ wlen = _arg_wlen_as_expected(wlen)
464
+ return _peak_prominences(x, peaks, wlen)
465
+
466
+
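A plain-Python rendering of the three-step prominence strategy from the Notes above, offered as a hedged reference only (the shipped implementation is the compiled `_peak_prominences`); the signal is the one used in the docstring example.

import numpy as np
from scipy.signal import find_peaks, peak_prominences

def prominences_reference(x, peaks):
    out = np.empty(len(peaks))
    for k, p in enumerate(peaks):
        # Walk outwards until the signal rises above the peak's own height;
        # the minimum seen on each side is that side's base, and the higher
        # of the two bases is the lowest contour line.
        i, left_min = p, x[p]
        while i >= 0 and x[i] <= x[p]:
            left_min = min(left_min, x[i])
            i -= 1
        i, right_min = p, x[p]
        while i < len(x) and x[i] <= x[p]:
            right_min = min(right_min, x[i])
            i += 1
        out[k] = x[p] - max(left_min, right_min)
    return out

t = np.linspace(0, 6 * np.pi, 1000)
x = np.sin(t) + 0.6 * np.sin(2.6 * t)
peaks, _ = find_peaks(x)
print(np.allclose(prominences_reference(x, peaks), peak_prominences(x, peaks)[0]))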
467
+ def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
468
+ """
469
+ Calculate the width of each peak in a signal.
470
+
471
+ This function calculates the width of a peak in samples at a relative
472
+ distance to the peak's height and prominence.
473
+
474
+ Parameters
475
+ ----------
476
+ x : sequence
477
+ A signal with peaks.
478
+ peaks : sequence
479
+ Indices of peaks in `x`.
480
+ rel_height : float, optional
481
+ Chooses the relative height at which the peak width is measured as a
482
+ percentage of its prominence. 1.0 calculates the width of the peak at
483
+ its lowest contour line while 0.5 evaluates at half the prominence
484
+ height. Must be at least 0. See notes for further explanation.
485
+ prominence_data : tuple, optional
486
+ A tuple of three arrays matching the output of `peak_prominences` when
487
+ called with the same arguments `x` and `peaks`. These data are calculated
488
+ internally if not provided.
489
+ wlen : int, optional
490
+ A window length in samples passed to `peak_prominences` as an optional
491
+ argument for internal calculation of `prominence_data`. This argument
492
+ is ignored if `prominence_data` is given.
493
+
494
+ Returns
495
+ -------
496
+ widths : ndarray
497
+ The widths for each peak in samples.
498
+ width_heights : ndarray
499
+ The height of the contour lines at which the `widths` were evaluated.
500
+ left_ips, right_ips : ndarray
501
+ Interpolated positions of left and right intersection points of a
502
+ horizontal line at the respective evaluation height.
503
+
504
+ Raises
505
+ ------
506
+ ValueError
507
+ If `prominence_data` is supplied but doesn't satisfy the condition
508
+ ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
509
+ has the wrong dtype, is not C-contiguous or does not have the same
510
+ shape.
511
+
512
+ Warns
513
+ -----
514
+ PeakPropertyWarning
515
+ Raised if any calculated width is 0. This may stem from the supplied
516
+ `prominence_data` or if `rel_height` is set to 0.
517
+
518
+ Warnings
519
+ --------
520
+ This function may return unexpected results for data containing NaNs. To
521
+ avoid this, NaNs should either be removed or replaced.
522
+
523
+ See Also
524
+ --------
525
+ find_peaks
526
+ Find peaks inside a signal based on peak properties.
527
+ peak_prominences
528
+ Calculate the prominence of peaks.
529
+
530
+ Notes
531
+ -----
532
+ The basic algorithm to calculate a peak's width is as follows:
533
+
534
+ * Calculate the evaluation height :math:`h_{eval}` with the formula
535
+ :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
536
+ height of the peak itself, :math:`P` is the peak's prominence and
537
+ :math:`R` a positive ratio specified with the argument `rel_height`.
538
+ * Draw a horizontal line at the evaluation height to both sides, starting at
539
+ the peak's current vertical position until the lines either intersect a
540
+ slope, the signal border or cross the vertical position of the peak's
541
+ base (see `peak_prominences` for a definition). For the first case,
542
+ intersection with the signal, the true intersection point is estimated
543
+ with linear interpolation.
544
+ * Calculate the width as the horizontal distance between the chosen
545
+ endpoints on both sides. As a consequence of this the maximal possible
546
+ width for each peak is the horizontal distance between its bases.
547
+
548
+ As shown above, to calculate a peak's width, its prominence and bases must be
549
+ known. You can supply these yourself with the argument `prominence_data`.
550
+ Otherwise, they are internally calculated (see `peak_prominences`).
551
+
552
+ .. versionadded:: 1.1.0
553
+
554
+ Examples
555
+ --------
556
+ >>> import numpy as np
557
+ >>> from scipy.signal import chirp, find_peaks, peak_widths
558
+ >>> import matplotlib.pyplot as plt
559
+
560
+ Create a test signal with two overlaid harmonics
561
+
562
+ >>> x = np.linspace(0, 6 * np.pi, 1000)
563
+ >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
564
+
565
+ Find all peaks and calculate their widths at the relative height of 0.5
566
+ (contour line at half the prominence height) and 1 (at the lowest contour
567
+ line at full prominence height).
568
+
569
+ >>> peaks, _ = find_peaks(x)
570
+ >>> results_half = peak_widths(x, peaks, rel_height=0.5)
571
+ >>> results_half[0] # widths
572
+ array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081,
573
+ 35.46729324, 41.30429622, 181.93835853, 45.37078546])
574
+ >>> results_full = peak_widths(x, peaks, rel_height=1)
575
+ >>> results_full[0] # widths
576
+ array([181.9396084 , 72.99284945, 61.28657872, 373.84622694,
577
+ 61.78404617, 72.48822812, 253.09161876, 79.36860878])
578
+
579
+ Plot signal, peaks and contour lines at which the widths were calculated
580
+
581
+ >>> plt.plot(x)
582
+ >>> plt.plot(peaks, x[peaks], "x")
583
+ >>> plt.hlines(*results_half[1:], color="C2")
584
+ >>> plt.hlines(*results_full[1:], color="C3")
585
+ >>> plt.show()
586
+ """
587
+ x = _arg_x_as_expected(x)
588
+ peaks = _arg_peaks_as_expected(peaks)
589
+ if prominence_data is None:
590
+ # Calculate prominence if not supplied and use wlen if supplied.
591
+ wlen = _arg_wlen_as_expected(wlen)
592
+ prominence_data = _peak_prominences(x, peaks, wlen)
593
+ return _peak_widths(x, peaks, rel_height, *prominence_data)
594
+
595
+
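A short sketch of the evaluation-height formula from the Notes above, ``h_eval = h_peak - prominence * rel_height``; the signal and `rel_height` are illustrative.

import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths

t = np.linspace(0, 6 * np.pi, 1000)
x = np.sin(t) + 0.6 * np.sin(2.6 * t)
peaks, _ = find_peaks(x)
prom = peak_prominences(x, peaks)[0]
width_heights = peak_widths(x, peaks, rel_height=0.5)[1]

# The contour heights returned by peak_widths follow the formula directly.
print(np.allclose(width_heights, x[peaks] - 0.5 * prom))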
596
+ def _unpack_condition_args(interval, x, peaks):
597
+ """
598
+ Parse condition arguments for `find_peaks`.
599
+
600
+ Parameters
601
+ ----------
602
+ interval : number or ndarray or sequence
603
+ Either a number or ndarray or a 2-element sequence of the former. The
604
+ first value is always interpreted as `imin` and the second, if supplied,
605
+ as `imax`.
606
+ x : ndarray
607
+ The signal with `peaks`.
608
+ peaks : ndarray
609
+ An array with indices used to reduce `imin` and / or `imax` if those are
610
+ arrays.
611
+
612
+ Returns
613
+ -------
614
+ imin, imax : number or ndarray or None
615
+ Minimal and maximal value in `argument`.
616
+
617
+ Raises
618
+ ------
619
+ ValueError :
620
+ If interval border is given as array and its size does not match the size
621
+ of `x`.
622
+
623
+ Notes
624
+ -----
625
+
626
+ .. versionadded:: 1.1.0
627
+ """
628
+ try:
629
+ imin, imax = interval
630
+ except (TypeError, ValueError):
631
+ imin, imax = (interval, None)
632
+
633
+ # Reduce arrays if arrays
634
+ if isinstance(imin, np.ndarray):
635
+ if imin.size != x.size:
636
+ raise ValueError('array size of lower interval border must match x')
637
+ imin = imin[peaks]
638
+ if isinstance(imax, np.ndarray):
639
+ if imax.size != x.size:
640
+ raise ValueError('array size of upper interval border must match x')
641
+ imax = imax[peaks]
642
+
643
+ return imin, imax
644
+
645
+
646
+ def _select_by_property(peak_properties, pmin, pmax):
647
+ """
648
+ Evaluate where the generic property of peaks conforms to an interval.
649
+
650
+ Parameters
651
+ ----------
652
+ peak_properties : ndarray
653
+ An array with properties for each peak.
654
+ pmin : None or number or ndarray
655
+ Lower interval boundary for `peak_properties`. ``None`` is interpreted as
656
+ an open border.
657
+ pmax : None or number or ndarray
658
+ Upper interval boundary for `peak_properties`. ``None`` is interpreted as
659
+ an open border.
660
+
661
+ Returns
662
+ -------
663
+ keep : bool
664
+ A boolean mask evaluating to true where `peak_properties` conforms to the
665
+ interval.
666
+
667
+ See Also
668
+ --------
669
+ find_peaks
670
+
671
+ Notes
672
+ -----
673
+
674
+ .. versionadded:: 1.1.0
675
+ """
676
+ keep = np.ones(peak_properties.size, dtype=bool)
677
+ if pmin is not None:
678
+ keep &= (pmin <= peak_properties)
679
+ if pmax is not None:
680
+ keep &= (peak_properties <= pmax)
681
+ return keep
682
+
683
+
684
+ def _select_by_peak_threshold(x, peaks, tmin, tmax):
685
+ """
686
+ Evaluate which peaks fulfill the threshold condition.
687
+
688
+ Parameters
689
+ ----------
690
+ x : ndarray
691
+ A 1-D array which is indexable by `peaks`.
692
+ peaks : ndarray
693
+ Indices of peaks in `x`.
694
+ tmin, tmax : scalar or ndarray or None
695
+ Minimal and / or maximal required thresholds. If supplied as ndarrays
696
+ their size must match `peaks`. ``None`` is interpreted as an open
697
+ border.
698
+
699
+ Returns
700
+ -------
701
+ keep : bool
702
+ A boolean mask evaluating to true where `peaks` fulfill the threshold
703
+ condition.
704
+ left_thresholds, right_thresholds : ndarray
705
+ Array matching `peaks` containing the thresholds of each peak on
706
+ both sides.
707
+
708
+ Notes
709
+ -----
710
+
711
+ .. versionadded:: 1.1.0
712
+ """
713
+ # Stack thresholds on both sides to make min / max operations easier:
714
+ # tmin is compared with the smaller, and tmax with the greater threshold to
715
+ # each peak's side
716
+ stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
717
+ x[peaks] - x[peaks + 1]])
718
+ keep = np.ones(peaks.size, dtype=bool)
719
+ if tmin is not None:
720
+ min_thresholds = np.min(stacked_thresholds, axis=0)
721
+ keep &= (tmin <= min_thresholds)
722
+ if tmax is not None:
723
+ max_thresholds = np.max(stacked_thresholds, axis=0)
724
+ keep &= (max_thresholds <= tmax)
725
+
726
+ return keep, stacked_thresholds[0], stacked_thresholds[1]
727
+
728
+
729
+ def find_peaks(x, height=None, threshold=None, distance=None,
730
+ prominence=None, width=None, wlen=None, rel_height=0.5,
731
+ plateau_size=None):
732
+ """
733
+ Find peaks inside a signal based on peak properties.
734
+
735
+ This function takes a 1-D array and finds all local maxima by
736
+ simple comparison of neighboring values. Optionally, a subset of these
737
+ peaks can be selected by specifying conditions for a peak's properties.
738
+
739
+ Parameters
740
+ ----------
741
+ x : sequence
742
+ A signal with peaks.
743
+ height : number or ndarray or sequence, optional
744
+ Required height of peaks. Either a number, ``None``, an array matching
745
+ `x` or a 2-element sequence of the former. The first element is
746
+ always interpreted as the minimal and the second, if supplied, as the
747
+ maximal required height.
748
+ threshold : number or ndarray or sequence, optional
749
+ Required threshold of peaks, the vertical distance to its neighboring
750
+ samples. Either a number, ``None``, an array matching `x` or a
751
+ 2-element sequence of the former. The first element is always
752
+ interpreted as the minimal and the second, if supplied, as the maximal
753
+ required threshold.
754
+ distance : number, optional
755
+ Required minimal horizontal distance (>= 1) in samples between
756
+ neighbouring peaks. Smaller peaks are removed first until the condition
757
+ is fulfilled for all remaining peaks.
758
+ prominence : number or ndarray or sequence, optional
759
+ Required prominence of peaks. Either a number, ``None``, an array
760
+ matching `x` or a 2-element sequence of the former. The first
761
+ element is always interpreted as the minimal and the second, if
762
+ supplied, as the maximal required prominence.
763
+ width : number or ndarray or sequence, optional
764
+ Required width of peaks in samples. Either a number, ``None``, an array
765
+ matching `x` or a 2-element sequence of the former. The first
766
+ element is always interpreted as the minimal and the second, if
767
+ supplied, as the maximal required width.
768
+ wlen : int, optional
769
+ Used for calculation of the peaks' prominences, thus it is only used if
770
+ one of the arguments `prominence` or `width` is given. See argument
771
+ `wlen` in `peak_prominences` for a full description of its effects.
772
+ rel_height : float, optional
773
+ Used for calculation of the peaks' width, thus it is only used if `width`
774
+ is given. See argument `rel_height` in `peak_widths` for a full
775
+ description of its effects.
776
+ plateau_size : number or ndarray or sequence, optional
777
+ Required size of the flat top of peaks in samples. Either a number,
778
+ ``None``, an array matching `x` or a 2-element sequence of the former.
779
+ The first element is always interpreted as the minimal and the second,
780
+ if supplied as the maximal required plateau size.
781
+
782
+ .. versionadded:: 1.2.0
783
+
784
+ Returns
785
+ -------
786
+ peaks : ndarray
787
+ Indices of peaks in `x` that satisfy all given conditions.
788
+ properties : dict
789
+ A dictionary containing properties of the returned peaks which were
790
+ calculated as intermediate results during evaluation of the specified
791
+ conditions:
792
+
793
+ * 'peak_heights'
794
+ If `height` is given, the height of each peak in `x`.
795
+ * 'left_thresholds', 'right_thresholds'
796
+ If `threshold` is given, these keys contain a peak's vertical
797
+ distance to its neighbouring samples.
798
+ * 'prominences', 'right_bases', 'left_bases'
799
+ If `prominence` is given, these keys are accessible. See
800
+ `peak_prominences` for a description of their content.
801
+ * 'width_heights', 'left_ips', 'right_ips'
802
+ If `width` is given, these keys are accessible. See `peak_widths`
803
+ for a description of their content.
804
+ * 'plateau_sizes', 'left_edges', 'right_edges'
805
+ If `plateau_size` is given, these keys are accessible and contain
806
+ the indices of a peak's edges (edges are still part of the
807
+ plateau) and the calculated plateau sizes.
808
+
809
+ .. versionadded:: 1.2.0
810
+
811
+ To calculate and return properties without excluding peaks, provide the
812
+ open interval ``(None, None)`` as a value to the appropriate argument
813
+ (excluding `distance`).
814
+
815
+ Warns
816
+ -----
817
+ PeakPropertyWarning
818
+ Raised if a peak's properties have unexpected values (see
819
+ `peak_prominences` and `peak_widths`).
820
+
821
+ Warnings
822
+ --------
823
+ This function may return unexpected results for data containing NaNs. To
824
+ avoid this, NaNs should either be removed or replaced.
825
+
826
+ See Also
827
+ --------
828
+ find_peaks_cwt
829
+ Find peaks using the wavelet transformation.
830
+ peak_prominences
831
+ Directly calculate the prominence of peaks.
832
+ peak_widths
833
+ Directly calculate the width of peaks.
834
+
835
+ Notes
836
+ -----
837
+ In the context of this function, a peak or local maximum is defined as any
838
+ sample whose two direct neighbours have a smaller amplitude. For flat peaks
839
+ (more than one sample of equal amplitude wide) the index of the middle
840
+ sample is returned (rounded down in case the number of samples is even).
841
+ For noisy signals the peak locations can be off because the noise might
842
+ change the position of local maxima. In those cases consider smoothing the
843
+ signal before searching for peaks or use other peak finding and fitting
844
+ methods (like `find_peaks_cwt`).
845
+
846
+ Some additional comments on specifying conditions:
847
+
848
+ * Almost all conditions (excluding `distance`) can be given as half-open or
849
+ closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open
850
+ interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval
851
+ :math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified
852
+ as well, which returns the matching properties without exclusion of peaks.
853
+ * The border is always included in the interval used to select valid peaks.
854
+ * For several conditions the interval borders can be specified with
855
+ arrays matching `x` in shape which enables dynamic constraints based on
856
+ the sample position.
857
+ * The conditions are evaluated in the following order: `plateau_size`,
858
+ `height`, `threshold`, `distance`, `prominence`, `width`. In most cases
859
+ this order is the fastest one because faster operations are applied first
860
+ to reduce the number of peaks that need to be evaluated later.
861
+ * While indices in `peaks` are guaranteed to be at least `distance` samples
862
+ apart, edges of flat peaks may be closer than the allowed `distance`.
863
+ * Use `wlen` to reduce the time it takes to evaluate the conditions for
864
+ `prominence` or `width` if `x` is large or has many local maxima
865
+ (see `peak_prominences`).
866
+
867
+ .. versionadded:: 1.1.0
868
+
869
+ Examples
870
+ --------
871
+ To demonstrate this function's usage we use a signal `x` supplied with
872
+ SciPy (see `scipy.datasets.electrocardiogram`). Let's find all peaks (local
873
+ maxima) in `x` whose amplitude lies above 0.
874
+
875
+ >>> import numpy as np
876
+ >>> import matplotlib.pyplot as plt
877
+ >>> from scipy.datasets import electrocardiogram
878
+ >>> from scipy.signal import find_peaks
879
+ >>> x = electrocardiogram()[2000:4000]
880
+ >>> peaks, _ = find_peaks(x, height=0)
881
+ >>> plt.plot(x)
882
+ >>> plt.plot(peaks, x[peaks], "x")
883
+ >>> plt.plot(np.zeros_like(x), "--", color="gray")
884
+ >>> plt.show()
885
+
886
+ We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching
887
+ `x` in size to reflect a changing condition for different parts of the
888
+ signal.
889
+
890
+ >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
891
+ >>> peaks, _ = find_peaks(x, height=(-border, border))
892
+ >>> plt.plot(x)
893
+ >>> plt.plot(-border, "--", color="gray")
894
+ >>> plt.plot(border, ":", color="gray")
895
+ >>> plt.plot(peaks, x[peaks], "x")
896
+ >>> plt.show()
897
+
898
+ Another useful condition for periodic signals can be given with the
899
+ `distance` argument. In this case, we can easily select the positions of
900
+ QRS complexes within the electrocardiogram (ECG) by demanding a distance of
901
+ at least 150 samples.
902
+
903
+ >>> peaks, _ = find_peaks(x, distance=150)
904
+ >>> np.diff(peaks)
905
+ array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
906
+ >>> plt.plot(x)
907
+ >>> plt.plot(peaks, x[peaks], "x")
908
+ >>> plt.show()
909
+
910
+ Especially for noisy signals peaks can be easily grouped by their
911
+ prominence (see `peak_prominences`). E.g., we can select all peaks except
912
+ for the mentioned QRS complexes by limiting the allowed prominence to 0.6.
913
+
914
+ >>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
915
+ >>> properties["prominences"].max()
916
+ 0.5049999999999999
917
+ >>> plt.plot(x)
918
+ >>> plt.plot(peaks, x[peaks], "x")
919
+ >>> plt.show()
920
+
921
+ And, finally, let's examine a different section of the ECG which contains
922
+ beat forms of different shape. To select only the atypical heart beats, we
923
+ combine two conditions: a minimal prominence of 1 and width of at least 20
924
+ samples.
925
+
926
+ >>> x = electrocardiogram()[17000:18000]
927
+ >>> peaks, properties = find_peaks(x, prominence=1, width=20)
928
+ >>> properties["prominences"], properties["widths"]
929
+ (array([1.495, 2.3 ]), array([36.93773946, 39.32723577]))
930
+ >>> plt.plot(x)
931
+ >>> plt.plot(peaks, x[peaks], "x")
932
+ >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
933
+ ... ymax = x[peaks], color = "C1")
934
+ >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
935
+ ... xmax=properties["right_ips"], color = "C1")
936
+ >>> plt.show()
937
+ """
938
+ # _argmaxima1d expects array of dtype 'float64'
939
+ x = _arg_x_as_expected(x)
940
+ if distance is not None and distance < 1:
941
+ raise ValueError('`distance` must be greater or equal to 1')
942
+
943
+ peaks, left_edges, right_edges = _local_maxima_1d(x)
944
+ properties = {}
945
+
946
+ if plateau_size is not None:
947
+ # Evaluate plateau size
948
+ plateau_sizes = right_edges - left_edges + 1
949
+ pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
950
+ keep = _select_by_property(plateau_sizes, pmin, pmax)
951
+ peaks = peaks[keep]
952
+ properties["plateau_sizes"] = plateau_sizes
953
+ properties["left_edges"] = left_edges
954
+ properties["right_edges"] = right_edges
955
+ properties = {key: array[keep] for key, array in properties.items()}
956
+
957
+ if height is not None:
958
+ # Evaluate height condition
959
+ peak_heights = x[peaks]
960
+ hmin, hmax = _unpack_condition_args(height, x, peaks)
961
+ keep = _select_by_property(peak_heights, hmin, hmax)
962
+ peaks = peaks[keep]
963
+ properties["peak_heights"] = peak_heights
964
+ properties = {key: array[keep] for key, array in properties.items()}
965
+
966
+ if threshold is not None:
967
+ # Evaluate threshold condition
968
+ tmin, tmax = _unpack_condition_args(threshold, x, peaks)
969
+ keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
970
+ x, peaks, tmin, tmax)
971
+ peaks = peaks[keep]
972
+ properties["left_thresholds"] = left_thresholds
973
+ properties["right_thresholds"] = right_thresholds
974
+ properties = {key: array[keep] for key, array in properties.items()}
975
+
976
+ if distance is not None:
977
+ # Evaluate distance condition
978
+ keep = _select_by_peak_distance(peaks, x[peaks], distance)
979
+ peaks = peaks[keep]
980
+ properties = {key: array[keep] for key, array in properties.items()}
981
+
982
+ if prominence is not None or width is not None:
983
+ # Calculate prominence (required for both conditions)
984
+ wlen = _arg_wlen_as_expected(wlen)
985
+ properties.update(zip(
986
+ ['prominences', 'left_bases', 'right_bases'],
987
+ _peak_prominences(x, peaks, wlen=wlen)
988
+ ))
989
+
990
+ if prominence is not None:
991
+ # Evaluate prominence condition
992
+ pmin, pmax = _unpack_condition_args(prominence, x, peaks)
993
+ keep = _select_by_property(properties['prominences'], pmin, pmax)
994
+ peaks = peaks[keep]
995
+ properties = {key: array[keep] for key, array in properties.items()}
996
+
997
+ if width is not None:
998
+ # Calculate widths
999
+ properties.update(zip(
1000
+ ['widths', 'width_heights', 'left_ips', 'right_ips'],
1001
+ _peak_widths(x, peaks, rel_height, properties['prominences'],
1002
+ properties['left_bases'], properties['right_bases'])
1003
+ ))
1004
+ # Evaluate width condition
1005
+ wmin, wmax = _unpack_condition_args(width, x, peaks)
1006
+ keep = _select_by_property(properties['widths'], wmin, wmax)
1007
+ peaks = peaks[keep]
1008
+ properties = {key: array[keep] for key, array in properties.items()}
1009
+
1010
+ return peaks, properties
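As a smaller self-contained check of `find_peaks` that does not need `scipy.datasets` (illustrative only, with made-up data):

>>> import numpy as np
>>> from scipy.signal import find_peaks
>>> x = np.array([0., 1., 0., 2., 0., 3., 0.])
>>> peaks, props = find_peaks(x, height=1.5)
>>> peaks
array([3, 5])
>>> props["peak_heights"]
array([2., 3.])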
1011
+
1012
+
1013
+ def _identify_ridge_lines(matr, max_distances, gap_thresh):
1014
+ """
1015
+ Identify ridges in the 2-D matrix.
1016
+
1017
+ Expect that the width of the wavelet feature increases with increasing row
1018
+ number.
1019
+
1020
+ Parameters
1021
+ ----------
1022
+ matr : 2-D ndarray
1023
+ Matrix in which to identify ridge lines.
1024
+ max_distances : 1-D sequence
1025
+ At each row, a ridge line is only connected
1026
+ if the relative max at row[n] is within
1027
+ `max_distances`[n] from the relative max at row[n+1].
1028
+ gap_thresh : int
1029
+ If a relative maximum is not found within `max_distances`,
1030
+ there will be a gap. A ridge line is discontinued if
1031
+ there are more than `gap_thresh` points without connecting
1032
+ a new relative maximum.
1033
+
1034
+ Returns
1035
+ -------
1036
+ ridge_lines : tuple
1037
+ Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
1038
+ ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
1039
+ found. Each ridge-line will be sorted by row (increasing), but the
1040
+ order of the ridge lines is not specified.
1041
+
1042
+ References
1043
+ ----------
1044
+ .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
1045
+ :doi:`10.1093/bioinformatics/btl355`
1046
+
1047
+ Examples
1048
+ --------
1049
+ >>> import numpy as np
1050
+ >>> from scipy.signal._peak_finding import _identify_ridge_lines
1051
+ >>> rng = np.random.default_rng()
1052
+ >>> data = rng.random((5,5))
1053
+ >>> max_dist = 3
1054
+ >>> max_distances = np.full(20, max_dist)
1055
+ >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1)
1056
+
1057
+ Notes
1058
+ -----
1059
+ This function is intended to be used in conjunction with `cwt`
1060
+ as part of `find_peaks_cwt`.
1061
+
1062
+ """
1063
+ if len(max_distances) < matr.shape[0]:
1064
+ raise ValueError('Max_distances must have at least as many rows '
1065
+ 'as matr')
1066
+
1067
+ all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
1068
+ # Highest row for which there are any relative maxima
1069
+ has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
1070
+ if len(has_relmax) == 0:
1071
+ return []
1072
+ start_row = has_relmax[-1]
1073
+ # Each ridge line is a 3-tuple:
1074
+ # rows, cols, gap number
1075
+ ridge_lines = [[[start_row],
1076
+ [col],
1077
+ 0] for col in np.nonzero(all_max_cols[start_row])[0]]
1078
+ final_lines = []
1079
+ rows = np.arange(start_row - 1, -1, -1)
1080
+ cols = np.arange(0, matr.shape[1])
1081
+ for row in rows:
1082
+ this_max_cols = cols[all_max_cols[row]]
1083
+
1084
+ # Increment gap number of each line,
1085
+ # set it to zero later if appropriate
1086
+ for line in ridge_lines:
1087
+ line[2] += 1
1088
+
1089
+ # XXX These should always be all_max_cols[row]
1090
+ # But the order might be different. Might be an efficiency gain
1091
+ # to make sure the order is the same and avoid this iteration
1092
+ prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
1093
+ # Look through every relative maximum found at current row
1094
+ # Attempt to connect them with existing ridge lines.
1095
+ for ind, col in enumerate(this_max_cols):
1096
+ # If there is a previous ridge line within
1097
+ # the max_distance to connect to, do so.
1098
+ # Otherwise start a new one.
1099
+ line = None
1100
+ if len(prev_ridge_cols) > 0:
1101
+ diffs = np.abs(col - prev_ridge_cols)
1102
+ closest = np.argmin(diffs)
1103
+ if diffs[closest] <= max_distances[row]:
1104
+ line = ridge_lines[closest]
1105
+ if line is not None:
1106
+ # Found a point close enough, extend current ridge line
1107
+ line[1].append(col)
1108
+ line[0].append(row)
1109
+ line[2] = 0
1110
+ else:
1111
+ new_line = [[row],
1112
+ [col],
1113
+ 0]
1114
+ ridge_lines.append(new_line)
1115
+
1116
+ # Remove the ridge lines with gap_number too high
1117
+ # XXX Modifying a list while iterating over it.
1118
+ # Should be safe, since we iterate backwards, but
1119
+ # still tacky.
1120
+ for ind in range(len(ridge_lines) - 1, -1, -1):
1121
+ line = ridge_lines[ind]
1122
+ if line[2] > gap_thresh:
1123
+ final_lines.append(line)
1124
+ del ridge_lines[ind]
1125
+
1126
+ out_lines = []
1127
+ for line in (final_lines + ridge_lines):
1128
+ sortargs = np.array(np.argsort(line[0]))
1129
+ rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
1130
+ rows[sortargs] = line[0]
1131
+ cols[sortargs] = line[1]
1132
+ out_lines.append([rows, cols])
1133
+
1134
+ return out_lines
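To make the three-element ridge-line bookkeeping above concrete, a minimal sketch (illustrative only) of how a line is extended when a relative maximum is found within `max_distances`, and how its gap counter is reset:

>>> line = [[4], [10], 0]        # rows, cols, gap counter
>>> line[2] += 1                 # a new row is visited; assume no match yet
>>> line[0].append(3); line[1].append(11); line[2] = 0   # match found: extend and reset gap
>>> line
[[4, 3], [10, 11], 0]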
1135
+
1136
+
1137
+ def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
1138
+ min_snr=1, noise_perc=10):
1139
+ """
1140
+ Filter ridge lines according to prescribed criteria. Intended
1141
+ to be used for finding relative maxima.
1142
+
1143
+ Parameters
1144
+ ----------
1145
+ cwt : 2-D ndarray
1146
+ Continuous wavelet transform from which the `ridge_lines` were defined.
1147
+ ridge_lines : 1-D sequence
1148
+ Each element should contain 2 sequences, the rows and columns
1149
+ of the ridge line (respectively).
1150
+ window_size : int, optional
1151
+ Size of window to use to calculate noise floor.
1152
+ Default is ``cwt.shape[1] / 20``.
1153
+ min_length : int, optional
1154
+ Minimum length a ridge line needs to be acceptable.
1155
+ Default is ``cwt.shape[0] / 4``, i.e., one-fourth the number of widths.
1156
+ min_snr : float, optional
1157
+ Minimum SNR ratio. Default 1. The signal is the value of
1158
+ the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
1159
+ noise is the `noise_perc`\\ th percentile of datapoints contained within a
1160
+ window of `window_size` around ``cwt[0, loc]``.
1161
+ noise_perc : float, optional
1162
+ When calculating the noise floor, percentile of data points
1163
+ examined below which to consider noise. Calculated using
1164
+ scipy.stats.scoreatpercentile.
1165
+
1166
+ References
1167
+ ----------
1168
+ .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
1169
+ :doi:`10.1093/bioinformatics/btl355`
1170
+
1171
+ """
1172
+ num_points = cwt.shape[1]
1173
+ if min_length is None:
1174
+ min_length = np.ceil(cwt.shape[0] / 4)
1175
+ if window_size is None:
1176
+ window_size = np.ceil(num_points / 20)
1177
+
1178
+ window_size = int(window_size)
1179
+ hf_window, odd = divmod(window_size, 2)
1180
+
1181
+ # Filter based on SNR
1182
+ row_one = cwt[0, :]
1183
+ noises = np.empty_like(row_one)
1184
+ for ind, val in enumerate(row_one):
1185
+ window_start = max(ind - hf_window, 0)
1186
+ window_end = min(ind + hf_window + odd, num_points)
1187
+ noises[ind] = scoreatpercentile(row_one[window_start:window_end],
1188
+ per=noise_perc)
1189
+
1190
+ def filt_func(line):
1191
+ if len(line[0]) < min_length:
1192
+ return False
1193
+ snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
1194
+ if snr < min_snr:
1195
+ return False
1196
+ return True
1197
+
1198
+ return list(filter(filt_func, ridge_lines))
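A rough sketch of the SNR criterion used by `filt_func` above (illustrative only; `np.percentile` stands in for the windowed `scoreatpercentile` noise estimate):

>>> import numpy as np
>>> cwt_row0 = np.array([0.1, 0.2, 5.0, 0.2, 0.1])
>>> noise = np.percentile(cwt_row0, 10)
>>> bool(abs(cwt_row0[2] / noise) >= 1)   # min_snr check for a ridge starting at column 2
True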
1199
+
1200
+
1201
+ def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
1202
+ gap_thresh=None, min_length=None,
1203
+ min_snr=1, noise_perc=10, window_size=None):
1204
+ """
1205
+ Find peaks in a 1-D array with wavelet transformation.
1206
+
1207
+ The general approach is to smooth `vector` by convolving it with
1208
+ `wavelet(width)` for each width in `widths`. Relative maxima which
1209
+ appear at enough length scales, and with sufficiently high SNR, are
1210
+ accepted.
1211
+
1212
+ Parameters
1213
+ ----------
1214
+ vector : ndarray
1215
+ 1-D array in which to find the peaks.
1216
+ widths : float or sequence
1217
+ Single width or 1-D array-like of widths to use for calculating
1218
+ the CWT matrix. In general,
1219
+ this range should cover the expected width of peaks of interest.
1220
+ wavelet : callable, optional
1221
+ Should take two parameters and return a 1-D array to convolve
1222
+ with `vector`. The first parameter determines the number of points
1223
+ of the returned wavelet array, the second parameter is the scale
1224
+ (`width`) of the wavelet. Should be normalized and symmetric.
1225
+ Default is the ricker wavelet.
1226
+ max_distances : ndarray, optional
1227
+ At each row, a ridge line is only connected if the relative max at
1228
+ row[n] is within ``max_distances[n]`` from the relative max at
1229
+ ``row[n+1]``. Default value is ``widths/4``.
1230
+ gap_thresh : float, optional
1231
+ If a relative maximum is not found within `max_distances`,
1232
+ there will be a gap. A ridge line is discontinued if there are more
1233
+ than `gap_thresh` points without connecting a new relative maximum.
1234
+ Default is the first value of the `widths` array, i.e., ``widths[0]``.
1235
+ min_length : int, optional
1236
+ Minimum length a ridge line needs to be acceptable.
1237
+ Default is ``cwt.shape[0] / 4``, i.e., one-fourth the number of widths.
1238
+ min_snr : float, optional
1239
+ Minimum SNR ratio. Default 1. The signal is the value of the cwt matrix
1240
+ at the shortest length scale (``cwt[0, loc]``); the noise is the `noise_perc`-th
1241
+ percentile of datapoints within a window of `window_size` around ``cwt[0, loc]``.
1242
+ noise_perc : float, optional
1243
+ When calculating the noise floor, percentile of data points
1244
+ examined below which to consider noise. Calculated using
1245
+ `stats.scoreatpercentile`. Default is 10.
1246
+ window_size : int, optional
1247
+ Size of window to use to calculate noise floor.
1248
+ Default is ``cwt.shape[1] / 20``.
1249
+
1250
+ Returns
1251
+ -------
1252
+ peaks_indices : ndarray
1253
+ Indices of the locations in the `vector` where peaks were found.
1254
+ The list is sorted.
1255
+
1256
+ See Also
1257
+ --------
1258
+ cwt
1259
+ Continuous wavelet transform.
1260
+ find_peaks
1261
+ Find peaks inside a signal based on peak properties.
1262
+
1263
+ Notes
1264
+ -----
1265
+ This approach was designed for finding sharp peaks among noisy data,
1266
+ however with proper parameter selection it should function well for
1267
+ different peak shapes.
1268
+
1269
+ The algorithm is as follows:
1270
+ 1. Perform a continuous wavelet transform on `vector`, for the supplied
1271
+ `widths`. This is a convolution of `vector` with `wavelet(width)` for
1272
+ each width in `widths`. See `cwt`.
1273
+ 2. Identify "ridge lines" in the cwt matrix. These are relative maxima
1274
+ at each row, connected across adjacent rows. See `_identify_ridge_lines`.
1275
+ 3. Filter the ridge lines using `_filter_ridge_lines`.
1276
+
1277
+ .. versionadded:: 0.11.0
1278
+
1279
+ References
1280
+ ----------
1281
+ .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
1282
+ :doi:`10.1093/bioinformatics/btl355`
1283
+
1284
+ Examples
1285
+ --------
1286
+ >>> import numpy as np
1287
+ >>> from scipy import signal
1288
+ >>> xs = np.arange(0, np.pi, 0.05)
1289
+ >>> data = np.sin(xs)
1290
+ >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
1291
+ >>> peakind, xs[peakind], data[peakind]
1292
+ ([32], array([ 1.6]), array([ 0.9995736]))
1293
+
1294
+ """
1295
+ widths = np.atleast_1d(np.asarray(widths))
1296
+
1297
+ if gap_thresh is None:
1298
+ gap_thresh = np.ceil(widths[0])
1299
+ if max_distances is None:
1300
+ max_distances = widths / 4.0
1301
+ if wavelet is None:
1302
+ wavelet = _ricker
1303
+
1304
+ cwt_dat = _cwt(vector, wavelet, widths)
1305
+ ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
1306
+ filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
1307
+ window_size=window_size, min_snr=min_snr,
1308
+ noise_perc=noise_perc)
1309
+ max_locs = np.asarray([x[1][0] for x in filtered])
1310
+ max_locs.sort()
1311
+
1312
+ return max_locs
env-llmeval/lib/python3.10/site-packages/scipy/signal/_savitzky_golay.py ADDED
@@ -0,0 +1,357 @@
1
+ import numpy as np
2
+ from scipy.linalg import lstsq
3
+ from scipy._lib._util import float_factorial
4
+ from scipy.ndimage import convolve1d
5
+ from ._arraytools import axis_slice
6
+
7
+
8
+ def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
9
+ use="conv"):
10
+ """Compute the coefficients for a 1-D Savitzky-Golay FIR filter.
11
+
12
+ Parameters
13
+ ----------
14
+ window_length : int
15
+ The length of the filter window (i.e., the number of coefficients).
16
+ polyorder : int
17
+ The order of the polynomial used to fit the samples.
18
+ `polyorder` must be less than `window_length`.
19
+ deriv : int, optional
20
+ The order of the derivative to compute. This must be a
21
+ nonnegative integer. The default is 0, which means to filter
22
+ the data without differentiating.
23
+ delta : float, optional
24
+ The spacing of the samples to which the filter will be applied.
25
+ This is only used if deriv > 0.
26
+ pos : int or None, optional
27
+ If pos is not None, it specifies evaluation position within the
28
+ window. The default is the middle of the window.
29
+ use : str, optional
30
+ Either 'conv' or 'dot'. This argument chooses the order of the
31
+ coefficients. The default is 'conv', which means that the
32
+ coefficients are ordered to be used in a convolution. With
33
+ use='dot', the order is reversed, so the filter is applied by
34
+ dotting the coefficients with the data set.
35
+
36
+ Returns
37
+ -------
38
+ coeffs : 1-D ndarray
39
+ The filter coefficients.
40
+
41
+ See Also
42
+ --------
43
+ savgol_filter
44
+
45
+ Notes
46
+ -----
47
+ .. versionadded:: 0.14.0
48
+
49
+ References
50
+ ----------
51
+ A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
52
+ Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
53
+ pp 1627-1639.
54
+ Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and
55
+ differentiation filter for even number data. Signal Process.
56
+ 85, 7 (July 2005), 1429-1434.
57
+
58
+ Examples
59
+ --------
60
+ >>> import numpy as np
61
+ >>> from scipy.signal import savgol_coeffs
62
+ >>> savgol_coeffs(5, 2)
63
+ array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429])
64
+ >>> savgol_coeffs(5, 2, deriv=1)
65
+ array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01,
66
+ -2.00000000e-01])
67
+
68
+ Note that use='dot' simply reverses the coefficients.
69
+
70
+ >>> savgol_coeffs(5, 2, pos=3)
71
+ array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714])
72
+ >>> savgol_coeffs(5, 2, pos=3, use='dot')
73
+ array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286])
74
+ >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot')
75
+ array([0.45, -0.85, -0.65, 1.05])
76
+
77
+ `x` contains data from the parabola x = t**2, sampled at
78
+ t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the
79
+ derivative at the last position. When dotted with `x` the result should
80
+ be 6.
81
+
82
+ >>> x = np.array([1, 0, 1, 4, 9])
83
+ >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')
84
+ >>> c.dot(x)
85
+ 6.0
86
+ """
87
+
88
+ # An alternative method for finding the coefficients when deriv=0 is
89
+ # t = np.arange(window_length)
90
+ # unit = (t == pos).astype(int)
91
+ # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t)
92
+ # The method implemented here is faster.
93
+
94
+ # To recreate the table of sample coefficients shown in the chapter on
95
+ # the Savitzky-Golay filter in the Numerical Recipes book, use
96
+ # window_length = nL + nR + 1
97
+ # pos = nL + 1
98
+ # c = savgol_coeffs(window_length, M, pos=pos, use='dot')
99
+
100
+ if polyorder >= window_length:
101
+ raise ValueError("polyorder must be less than window_length.")
102
+
103
+ halflen, rem = divmod(window_length, 2)
104
+
105
+ if pos is None:
106
+ if rem == 0:
107
+ pos = halflen - 0.5
108
+ else:
109
+ pos = halflen
110
+
111
+ if not (0 <= pos < window_length):
112
+ raise ValueError("pos must be nonnegative and less than "
113
+ "window_length.")
114
+
115
+ if use not in ['conv', 'dot']:
116
+ raise ValueError("`use` must be 'conv' or 'dot'")
117
+
118
+ if deriv > polyorder:
119
+ coeffs = np.zeros(window_length)
120
+ return coeffs
121
+
122
+ # Form the design matrix A. The columns of A are powers of the integers
123
+ # from -pos to window_length - pos - 1. The powers (i.e., rows) range
124
+ # from 0 to polyorder. (That is, A is a Vandermonde matrix, but not
125
+ # necessarily square.)
126
+ x = np.arange(-pos, window_length - pos, dtype=float)
127
+
128
+ if use == "conv":
129
+ # Reverse so that result can be used in a convolution.
130
+ x = x[::-1]
131
+
132
+ order = np.arange(polyorder + 1).reshape(-1, 1)
133
+ A = x ** order
134
+
135
+ # y determines which order derivative is returned.
136
+ y = np.zeros(polyorder + 1)
137
+ # The coefficient assigned to y[deriv] scales the result to take into
138
+ # account the order of the derivative and the sample spacing.
139
+ y[deriv] = float_factorial(deriv) / (delta ** deriv)
140
+
141
+ # Find the least-squares solution of A*c = y
142
+ coeffs, _, _, _ = lstsq(A, y)
143
+
144
+ return coeffs
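One quick property check (illustrative, not part of the SciPy source): for ``deriv=0`` the returned smoothing coefficients reproduce constants exactly, so they sum to 1.

>>> import numpy as np
>>> from scipy.signal import savgol_coeffs
>>> bool(np.isclose(savgol_coeffs(7, 3).sum(), 1.0))
True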
145
+
146
+
147
+ def _polyder(p, m):
148
+ """Differentiate polynomials represented with coefficients.
149
+
150
+ p must be a 1-D or 2-D array. In the 2-D case, each column gives
151
+ the coefficients of a polynomial; the first row holds the coefficients
152
+ associated with the highest power. m must be a nonnegative integer.
153
+ (numpy.polyder doesn't handle the 2-D case.)
154
+ """
155
+
156
+ if m == 0:
157
+ result = p
158
+ else:
159
+ n = len(p)
160
+ if n <= m:
161
+ result = np.zeros_like(p[:1, ...])
162
+ else:
163
+ dp = p[:-m].copy()
164
+ for k in range(m):
165
+ rng = np.arange(n - k - 1, m - k - 1, -1)
166
+ dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
167
+ result = dp
168
+ return result
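A minimal sketch of the 1-D case handled above (illustrative only): one pass of the loop multiplies the leading coefficients by the descending powers, which is ordinary polynomial differentiation.

>>> import numpy as np
>>> p = np.array([3., 2., 1.])               # 3*x**2 + 2*x + 1
>>> p[:-1] * np.arange(len(p) - 1, 0, -1)    # first derivative: 6*x + 2
array([6., 2.])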
169
+
170
+
171
+ def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
172
+ axis, polyorder, deriv, delta, y):
173
+ """
174
+ Given an N-d array `x` and the specification of a slice of `x` from
175
+ `window_start` to `window_stop` along `axis`, create an interpolating
176
+ polynomial of each 1-D slice, and evaluate that polynomial in the slice
177
+ from `interp_start` to `interp_stop`. Put the result into the
178
+ corresponding slice of `y`.
179
+ """
180
+
181
+ # Get the edge into a (window_length, -1) array.
182
+ x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
183
+ if axis == 0 or axis == -x.ndim:
184
+ xx_edge = x_edge
185
+ swapped = False
186
+ else:
187
+ xx_edge = x_edge.swapaxes(axis, 0)
188
+ swapped = True
189
+ xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
190
+
191
+ # Fit the edges. poly_coeffs has shape (polyorder + 1, -1),
192
+ # where '-1' is the same as in xx_edge.
193
+ poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
194
+ xx_edge, polyorder)
195
+
196
+ if deriv > 0:
197
+ poly_coeffs = _polyder(poly_coeffs, deriv)
198
+
199
+ # Compute the interpolated values for the edge.
200
+ i = np.arange(interp_start - window_start, interp_stop - window_start)
201
+ values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
202
+
203
+ # Now put the values into the appropriate slice of y.
204
+ # First reshape values to match y.
205
+ shp = list(y.shape)
206
+ shp[0], shp[axis] = shp[axis], shp[0]
207
+ values = values.reshape(interp_stop - interp_start, *shp[1:])
208
+ if swapped:
209
+ values = values.swapaxes(0, axis)
210
+ # Get a view of the data to be replaced by values.
211
+ y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
212
+ y_edge[...] = values
213
+
214
+
215
+ def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
216
+ """
217
+ Use polynomial interpolation of x at the low and high ends of the axis
218
+ to fill in the halflen values in y.
219
+
220
+ This function just calls _fit_edge twice, once for each end of the axis.
221
+ """
222
+ halflen = window_length // 2
223
+ _fit_edge(x, 0, window_length, 0, halflen, axis,
224
+ polyorder, deriv, delta, y)
225
+ n = x.shape[axis]
226
+ _fit_edge(x, n - window_length, n, n - halflen, n, axis,
227
+ polyorder, deriv, delta, y)
228
+
229
+
230
+ def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
231
+ axis=-1, mode='interp', cval=0.0):
232
+ """ Apply a Savitzky-Golay filter to an array.
233
+
234
+ This is a 1-D filter. If `x` has dimension greater than 1, `axis`
235
+ determines the axis along which the filter is applied.
236
+
237
+ Parameters
238
+ ----------
239
+ x : array_like
240
+ The data to be filtered. If `x` is not a single or double precision
241
+ floating point array, it will be converted to type ``numpy.float64``
242
+ before filtering.
243
+ window_length : int
244
+ The length of the filter window (i.e., the number of coefficients).
245
+ If `mode` is 'interp', `window_length` must be less than or equal
246
+ to the size of `x`.
247
+ polyorder : int
248
+ The order of the polynomial used to fit the samples.
249
+ `polyorder` must be less than `window_length`.
250
+ deriv : int, optional
251
+ The order of the derivative to compute. This must be a
252
+ nonnegative integer. The default is 0, which means to filter
253
+ the data without differentiating.
254
+ delta : float, optional
255
+ The spacing of the samples to which the filter will be applied.
256
+ This is only used if deriv > 0. Default is 1.0.
257
+ axis : int, optional
258
+ The axis of the array `x` along which the filter is to be applied.
259
+ Default is -1.
260
+ mode : str, optional
261
+ Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This
262
+ determines the type of extension to use for the padded signal to
263
+ which the filter is applied. When `mode` is 'constant', the padding
264
+ value is given by `cval`. See the Notes for more details on 'mirror',
265
+ 'constant', 'wrap', and 'nearest'.
266
+ When the 'interp' mode is selected (the default), no extension
267
+ is used. Instead, a degree `polyorder` polynomial is fit to the
268
+ last `window_length` values of the edges, and this polynomial is
269
+ used to evaluate the last `window_length // 2` output values.
270
+ cval : scalar, optional
271
+ Value to fill past the edges of the input if `mode` is 'constant'.
272
+ Default is 0.0.
273
+
274
+ Returns
275
+ -------
276
+ y : ndarray, same shape as `x`
277
+ The filtered data.
278
+
279
+ See Also
280
+ --------
281
+ savgol_coeffs
282
+
283
+ Notes
284
+ -----
285
+ Details on the `mode` options:
286
+
287
+ 'mirror':
288
+ Repeats the values at the edges in reverse order. The value
289
+ closest to the edge is not included.
290
+ 'nearest':
291
+ The extension contains the nearest input value.
292
+ 'constant':
293
+ The extension contains the value given by the `cval` argument.
294
+ 'wrap':
295
+ The extension contains the values from the other end of the array.
296
+
297
+ For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and
298
+ `window_length` is 7, the following shows the extended data for
299
+ the various `mode` options (assuming `cval` is 0)::
300
+
301
+ mode | Ext | Input | Ext
302
+ -----------+---------+------------------------+---------
303
+ 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5
304
+ 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8
305
+ 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0
306
+ 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3
307
+
308
+ .. versionadded:: 0.14.0
309
+
310
+ Examples
311
+ --------
312
+ >>> import numpy as np
313
+ >>> from scipy.signal import savgol_filter
314
+ >>> np.set_printoptions(precision=2) # For compact display.
315
+ >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])
316
+
317
+ Filter with a window length of 5 and a degree 2 polynomial. Use
318
+ the defaults for all other parameters.
319
+
320
+ >>> savgol_filter(x, 5, 2)
321
+ array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ])
322
+
323
+ Note that the last five values in x are samples of a parabola, so
324
+ when mode='interp' (the default) is used with polyorder=2, the last
325
+ three values are unchanged. Compare that to, for example,
326
+ `mode='nearest'`:
327
+
328
+ >>> savgol_filter(x, 5, 2, mode='nearest')
329
+ array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97])
330
+
331
+ """
332
+ if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
333
+ raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
334
+ "'wrap' or 'interp'.")
335
+
336
+ x = np.asarray(x)
337
+ # Ensure that x is either single or double precision floating point.
338
+ if x.dtype != np.float64 and x.dtype != np.float32:
339
+ x = x.astype(np.float64)
340
+
341
+ coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
342
+
343
+ if mode == "interp":
344
+ if window_length > x.shape[axis]:
345
+ raise ValueError("If mode is 'interp', window_length must be less "
346
+ "than or equal to the size of x.")
347
+
348
+ # Do not pad. Instead, for the elements within `window_length // 2`
349
+ # of the ends of the sequence, use the polynomial that is fitted to
350
+ # the last `window_length` elements.
351
+ y = convolve1d(x, coeffs, axis=axis, mode="constant")
352
+ _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
353
+ else:
354
+ # Any mode other than 'interp' is passed on to ndimage.convolve1d.
355
+ y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)
356
+
357
+ return y
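A small sanity check (illustrative only): with ``deriv=1`` and the correct `delta`, filtering an exact quadratic with a degree-2 polynomial recovers its derivative everywhere, including the 'interp' edges.

>>> import numpy as np
>>> from scipy.signal import savgol_filter
>>> t = np.linspace(0, 1, 11)
>>> y = t**2
>>> np.allclose(savgol_filter(y, 5, 2, deriv=1, delta=t[1] - t[0]), 2 * t)
True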
env-llmeval/lib/python3.10/site-packages/scipy/signal/_spline.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (85.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/_upfirdn.py ADDED
@@ -0,0 +1,216 @@
1
+ # Code adapted from "upfirdn" python library with permission:
2
+ #
3
+ # Copyright (c) 2009, Motorola, Inc
4
+ #
5
+ # All Rights Reserved.
6
+ #
7
+ # Redistribution and use in source and binary forms, with or without
8
+ # modification, are permitted provided that the following conditions are
9
+ # met:
10
+ #
11
+ # * Redistributions of source code must retain the above copyright notice,
12
+ # this list of conditions and the following disclaimer.
13
+ #
14
+ # * Redistributions in binary form must reproduce the above copyright
15
+ # notice, this list of conditions and the following disclaimer in the
16
+ # documentation and/or other materials provided with the distribution.
17
+ #
18
+ # * Neither the name of Motorola nor the names of its contributors may be
19
+ # used to endorse or promote products derived from this software without
20
+ # specific prior written permission.
21
+ #
22
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23
+ # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24
+ # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25
+ # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27
+ # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28
+ # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29
+ # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30
+ # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
+
34
+ import numpy as np
35
+
36
+ from ._upfirdn_apply import _output_len, _apply, mode_enum
37
+
38
+ __all__ = ['upfirdn', '_output_len']
39
+
40
+ _upfirdn_modes = [
41
+ 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',
42
+ 'antisymmetric', 'antireflect', 'line',
43
+ ]
44
+
45
+
46
+ def _pad_h(h, up):
47
+ """Store coefficients in a transposed, flipped arrangement.
48
+
49
+ For example, suppose the up-sampling rate `up` is 3, and the
50
+ input number of coefficients is 10, represented as h[0], ..., h[9].
51
+
52
+ Then the internal buffer will look like this::
53
+
54
+ h[9], h[6], h[3], h[0], // flipped phase 0 coefs
55
+ 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)
56
+ 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)
57
+
58
+ """
59
+ h_padlen = len(h) + (-len(h) % up)
60
+ h_full = np.zeros(h_padlen, h.dtype)
61
+ h_full[:len(h)] = h
62
+ h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
63
+ return h_full
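The reshape/transpose/flip in `_pad_h` can be checked against the docstring table (illustrative sketch, not part of the patch):

>>> import numpy as np
>>> h = np.arange(10, dtype=float)           # h[0] .. h[9]
>>> up = 3
>>> h_full = np.zeros(len(h) + (-len(h) % up))
>>> h_full[:len(h)] = h
>>> h_full.reshape(-1, up).T[:, ::-1]        # one row per phase, flipped
array([[9., 6., 3., 0.],
       [0., 7., 4., 1.],
       [0., 8., 5., 2.]])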
64
+
65
+
66
+ def _check_mode(mode):
67
+ mode = mode.lower()
68
+ enum = mode_enum(mode)
69
+ return enum
70
+
71
+
72
+ class _UpFIRDn:
73
+ """Helper for resampling."""
74
+
75
+ def __init__(self, h, x_dtype, up, down):
76
+ h = np.asarray(h)
77
+ if h.ndim != 1 or h.size == 0:
78
+ raise ValueError('h must be 1-D with non-zero length')
79
+ self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
80
+ h = np.asarray(h, self._output_type)
81
+ self._up = int(up)
82
+ self._down = int(down)
83
+ if self._up < 1 or self._down < 1:
84
+ raise ValueError('Both up and down must be >= 1')
85
+ # This both transposes, and "flips" each phase for filtering
86
+ self._h_trans_flip = _pad_h(h, self._up)
87
+ self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
88
+ self._h_len_orig = len(h)
89
+
90
+ def apply_filter(self, x, axis=-1, mode='constant', cval=0):
91
+ """Apply the prepared filter to the specified axis of N-D signal x."""
92
+ output_len = _output_len(self._h_len_orig, x.shape[axis],
93
+ self._up, self._down)
94
+ # Explicit use of np.int64 for output_shape dtype avoids OverflowError
95
+ # when allocating large array on platforms where intp is 32 bits.
96
+ output_shape = np.asarray(x.shape, dtype=np.int64)
97
+ output_shape[axis] = output_len
98
+ out = np.zeros(output_shape, dtype=self._output_type, order='C')
99
+ axis = axis % x.ndim
100
+ mode = _check_mode(mode)
101
+ _apply(np.asarray(x, self._output_type),
102
+ self._h_trans_flip, out,
103
+ self._up, self._down, axis, mode, cval)
104
+ return out
105
+
106
+
107
+ def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
108
+ """Upsample, FIR filter, and downsample.
109
+
110
+ Parameters
111
+ ----------
112
+ h : array_like
113
+ 1-D FIR (finite-impulse response) filter coefficients.
114
+ x : array_like
115
+ Input signal array.
116
+ up : int, optional
117
+ Upsampling rate. Default is 1.
118
+ down : int, optional
119
+ Downsampling rate. Default is 1.
120
+ axis : int, optional
121
+ The axis of the input data array along which to apply the
122
+ linear filter. The filter is applied to each subarray along
123
+ this axis. Default is -1.
124
+ mode : str, optional
125
+ The signal extension mode to use. The set
126
+ ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
127
+ modes provided by `numpy.pad`. ``"smooth"`` implements a smooth
128
+ extension by extending based on the slope of the last 2 points at each
129
+ end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
130
+ anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
131
+ `"line"` extends the signal based on a linear trend defined by the
132
+ first and last points along the ``axis``.
133
+
134
+ .. versionadded:: 1.4.0
135
+ cval : float, optional
136
+ The constant value to use when ``mode == "constant"``.
137
+
138
+ .. versionadded:: 1.4.0
139
+
140
+ Returns
141
+ -------
142
+ y : ndarray
143
+ The output signal array. Dimensions will be the same as `x` except
144
+ for along `axis`, which will change size according to the `h`,
145
+ `up`, and `down` parameters.
146
+
147
+ Notes
148
+ -----
149
+ The algorithm is an implementation of the block diagram shown on page 129
150
+ of the Vaidyanathan text [1]_ (Figure 4.3-8d).
151
+
152
+ The direct approach of upsampling by factor of P with zero insertion,
153
+ FIR filtering of length ``N``, and downsampling by factor of Q is
154
+ O(N*Q) per output sample. The polyphase implementation used here is
155
+ O(N/P).
156
+
157
+ .. versionadded:: 0.18
158
+
159
+ References
160
+ ----------
161
+ .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
162
+ Prentice Hall, 1993.
163
+
164
+ Examples
165
+ --------
166
+ Simple operations:
167
+
168
+ >>> import numpy as np
169
+ >>> from scipy.signal import upfirdn
170
+ >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
171
+ array([ 1., 2., 3., 2., 1.])
172
+ >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
173
+ array([ 1., 0., 0., 2., 0., 0., 3.])
174
+ >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
175
+ array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
176
+ >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
177
+ array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5])
178
+ >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
179
+ array([ 0., 3., 6., 9.])
180
+ >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
181
+ array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5])
182
+
183
+ Apply a single filter to multiple signals:
184
+
185
+ >>> x = np.reshape(np.arange(8), (4, 2))
186
+ >>> x
187
+ array([[0, 1],
188
+ [2, 3],
189
+ [4, 5],
190
+ [6, 7]])
191
+
192
+ Apply along the last dimension of ``x``:
193
+
194
+ >>> h = [1, 1]
195
+ >>> upfirdn(h, x, 2)
196
+ array([[ 0., 0., 1., 1.],
197
+ [ 2., 2., 3., 3.],
198
+ [ 4., 4., 5., 5.],
199
+ [ 6., 6., 7., 7.]])
200
+
201
+ Apply along the 0th dimension of ``x``:
202
+
203
+ >>> upfirdn(h, x, 2, axis=0)
204
+ array([[ 0., 1.],
205
+ [ 0., 1.],
206
+ [ 2., 3.],
207
+ [ 2., 3.],
208
+ [ 4., 5.],
209
+ [ 4., 5.],
210
+ [ 6., 7.],
211
+ [ 6., 7.]])
212
+ """
213
+ x = np.asarray(x)
214
+ ufd = _UpFIRDn(h, x.dtype, up, down)
215
+ # This is equivalent to (but faster than) using np.apply_along_axis
216
+ return ufd.apply_filter(x, axis, mode, cval)
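As a quick cross-check (illustrative only): with ``up=1`` and ``down=1``, `upfirdn` reduces to plain FIR filtering, i.e. a full convolution.

>>> import numpy as np
>>> from scipy.signal import upfirdn
>>> x = np.arange(5.)
>>> h = np.array([1., 2., 1.])
>>> np.allclose(upfirdn(h, x), np.convolve(h, x))
True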
env-llmeval/lib/python3.10/site-packages/scipy/signal/_wavelets.py ADDED
@@ -0,0 +1,556 @@
1
+ import warnings
2
+
3
+ import numpy as np
4
+ from scipy.linalg import eig
5
+ from scipy.special import comb
6
+ from scipy.signal import convolve
7
+
8
+ __all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
9
+
10
+
11
+ _msg = """scipy.signal.%s is deprecated in SciPy 1.12 and will be removed
12
+ in SciPy 1.15. We recommend using PyWavelets instead.
13
+ """
14
+
15
+
16
+ def daub(p):
17
+ """
18
+ The coefficients for the FIR low-pass filter producing Daubechies wavelets.
19
+
20
+ .. deprecated:: 1.12.0
21
+
22
+ scipy.signal.daub is deprecated in SciPy 1.12 and will be removed
23
+ in SciPy 1.15. We recommend using PyWavelets instead.
24
+
25
+ p>=1 gives the order of the zero at f=1/2.
26
+ There are 2p filter coefficients.
27
+
28
+ Parameters
29
+ ----------
30
+ p : int
31
+ Order of the zero at f=1/2, can have values from 1 to 34.
32
+
33
+ Returns
34
+ -------
35
+ daub : ndarray
36
+ The 2p filter coefficients of the low-pass filter.
37
+
38
+ """
39
+ warnings.warn(_msg % 'daub', DeprecationWarning, stacklevel=2)
40
+
41
+ sqrt = np.sqrt
42
+ if p < 1:
43
+ raise ValueError("p must be at least 1.")
44
+ if p == 1:
45
+ c = 1 / sqrt(2)
46
+ return np.array([c, c])
47
+ elif p == 2:
48
+ f = sqrt(2) / 8
49
+ c = sqrt(3)
50
+ return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
51
+ elif p == 3:
52
+ tmp = 12 * sqrt(10)
53
+ z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
54
+ z1c = np.conj(z1)
55
+ f = sqrt(2) / 8
56
+ d0 = np.real((1 - z1) * (1 - z1c))
57
+ a0 = np.real(z1 * z1c)
58
+ a1 = 2 * np.real(z1)
59
+ return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
60
+ a0 - 3 * a1 + 3, 3 - a1, 1])
61
+ elif p < 35:
62
+ # construct polynomial and factor it
63
+ if p < 35:
64
+ P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
65
+ yj = np.roots(P)
66
+ else: # try different polynomial --- needs work
67
+ P = [comb(p - 1 + k, k, exact=1) / 4.0**k
68
+ for k in range(p)][::-1]
69
+ yj = np.roots(P) / 4
70
+ # for each root, compute two z roots, select the one with |z|>1
71
+ # Build up final polynomial
72
+ c = np.poly1d([1, 1])**p
73
+ q = np.poly1d([1])
74
+ for k in range(p - 1):
75
+ yval = yj[k]
76
+ part = 2 * sqrt(yval * (yval - 1))
77
+ const = 1 - 2 * yval
78
+ z1 = const + part
79
+ if (abs(z1)) < 1:
80
+ z1 = const - part
81
+ q = q * [1, -z1]
82
+
83
+ q = c * np.real(q)
84
+ # Normalize result
85
+ q = q / np.sum(q) * sqrt(2)
86
+ return q.c[::-1]
87
+ else:
88
+ raise ValueError("Polynomial factorization does not work "
89
+ "well for p too large.")
90
+
91
+
92
+ def qmf(hk):
93
+ """
94
+ Return high-pass qmf filter from low-pass
95
+
96
+ .. deprecated:: 1.12.0
97
+
98
+ scipy.signal.qmf is deprecated in SciPy 1.12 and will be removed
99
+ in SciPy 1.15. We recommend using PyWavelets instead.
100
+
101
+ Parameters
102
+ ----------
103
+ hk : array_like
104
+ Coefficients of the low-pass filter.
105
+
106
+ Returns
107
+ -------
108
+ array_like
109
+ High-pass filter coefficients.
110
+
111
+ """
112
+ warnings.warn(_msg % 'qmf', DeprecationWarning, stacklevel=2)
113
+
114
+ N = len(hk) - 1
115
+ asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
116
+ return hk[::-1] * np.array(asgn)
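The alternating-sign construction above can be sketched directly (illustrative only): reversing the low-pass taps and flipping the sign of every other coefficient yields the high-pass (quadrature mirror) filter.

>>> import numpy as np
>>> hk = np.array([1., 2., 3., 4.])
>>> asgn = [(-1) ** k for k in range(len(hk))]   # same pattern as the dict lookup above
>>> hk[::-1] * np.array(asgn)
array([ 4., -3.,  2., -1.])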
117
+
118
+
119
+ def cascade(hk, J=7):
120
+ """
121
+ Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
122
+
123
+ .. deprecated:: 1.12.0
124
+
125
+ scipy.signal.cascade is deprecated in SciPy 1.12 and will be removed
126
+ in SciPy 1.15. We recommend using PyWavelets instead.
127
+
128
+ Parameters
129
+ ----------
130
+ hk : array_like
131
+ Coefficients of low-pass filter.
132
+ J : int, optional
133
+ Values will be computed at grid points ``K/2**J``. Default is 7.
134
+
135
+ Returns
136
+ -------
137
+ x : ndarray
138
+ The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
139
+ ``len(hk) = len(gk) = N+1``.
140
+ phi : ndarray
141
+ The scaling function ``phi(x)`` at `x`:
142
+ ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
143
+ psi : ndarray, optional
144
+ The wavelet function ``psi(x)`` at `x`:
145
+ ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
146
+ The high-pass coefficients ``gk`` are derived from `hk` via `qmf`.
147
+
148
+ Notes
149
+ -----
150
+ The algorithm uses the vector cascade algorithm described by Strang and
151
+ Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
152
+ and slices for quick reuse. Then inserts vectors into final vector at the
153
+ end.
154
+
155
+ """
156
+ warnings.warn(_msg % 'cascade', DeprecationWarning, stacklevel=2)
157
+
158
+ N = len(hk) - 1
159
+
160
+ if (J > 30 - np.log2(N + 1)):
161
+ raise ValueError("Too many levels.")
162
+ if (J < 1):
163
+ raise ValueError("Too few levels.")
164
+
165
+ # construct matrices needed
166
+ nn, kk = np.ogrid[:N, :N]
167
+ s2 = np.sqrt(2)
168
+ # append a zero so that take works
169
+ thk = np.r_[hk, 0]
170
+ gk = qmf(hk)
171
+ tgk = np.r_[gk, 0]
172
+
173
+ indx1 = np.clip(2 * nn - kk, -1, N + 1)
174
+ indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
175
+ m = np.empty((2, 2, N, N), 'd')
176
+ m[0, 0] = np.take(thk, indx1, 0)
177
+ m[0, 1] = np.take(thk, indx2, 0)
178
+ m[1, 0] = np.take(tgk, indx1, 0)
179
+ m[1, 1] = np.take(tgk, indx2, 0)
180
+ m *= s2
181
+
182
+ # construct the grid of points
183
+ x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
184
+ phi = 0 * x
185
+
186
+ psi = 0 * x
187
+
188
+ # find phi0, and phi1
189
+ lam, v = eig(m[0, 0])
190
+ ind = np.argmin(np.absolute(lam - 1))
191
+ # a dictionary with a binary representation of the
192
+ # evaluation points x < 1 -- i.e. position is 0.xxxx
193
+ v = np.real(v[:, ind])
194
+ # need scaling function to integrate to 1 so find
195
+ # eigenvector normalized to sum(v,axis=0)=1
196
+ sm = np.sum(v)
197
+ if sm < 0: # need scaling function to integrate to 1
198
+ v = -v
199
+ sm = -sm
200
+ bitdic = {'0': v / sm}
201
+ bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
202
+ step = 1 << J
203
+ phi[::step] = bitdic['0']
204
+ phi[(1 << (J - 1))::step] = bitdic['1']
205
+ psi[::step] = np.dot(m[1, 0], bitdic['0'])
206
+ psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
207
+ # descend down the levels inserting more and more values
208
+ # into bitdic -- store the values in the correct location once we
209
+ # have computed them -- stored in the dictionary
210
+ # for quicker use later.
211
+ prevkeys = ['1']
212
+ for level in range(2, J + 1):
213
+ newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
214
+ fac = 1 << (J - level)
215
+ for key in newkeys:
216
+ # convert key to number
217
+ num = 0
218
+ for pos in range(level):
219
+ if key[pos] == '1':
220
+ num += (1 << (level - 1 - pos))
221
+ pastphi = bitdic[key[1:]]
222
+ ii = int(key[0])
223
+ temp = np.dot(m[0, ii], pastphi)
224
+ bitdic[key] = temp
225
+ phi[num * fac::step] = temp
226
+ psi[num * fac::step] = np.dot(m[1, ii], pastphi)
227
+ prevkeys = newkeys
228
+
229
+ return x, phi, psi
230
+
231
+
232
+ def morlet(M, w=5.0, s=1.0, complete=True):
233
+ """
234
+ Complex Morlet wavelet.
235
+
236
+ .. deprecated:: 1.12.0
237
+
238
+ scipy.signal.morlet is deprecated in SciPy 1.12 and will be removed
239
+ in SciPy 1.15. We recommend using PyWavelets instead.
240
+
241
+ Parameters
242
+ ----------
243
+ M : int
244
+ Length of the wavelet.
245
+ w : float, optional
246
+ Omega0. Default is 5
247
+ s : float, optional
248
+ Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
249
+ complete : bool, optional
250
+ Whether to use the complete or the standard version.
251
+
252
+ Returns
253
+ -------
254
+ morlet : (M,) ndarray
255
+
256
+ See Also
257
+ --------
258
+ morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
259
+ scipy.signal.gausspulse
260
+
261
+ Notes
262
+ -----
263
+ The standard version::
264
+
265
+ pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
266
+
267
+ This commonly used wavelet is often referred to simply as the
268
+ Morlet wavelet. Note that this simplified version can cause
269
+ admissibility problems at low values of `w`.
270
+
271
+ The complete version::
272
+
273
+ pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
274
+
275
+ This version has a correction
276
+ term to improve admissibility. For `w` greater than 5, the
277
+ correction term is negligible.
278
+
279
+ Note that the energy of the return wavelet is not normalised
280
+ according to `s`.
281
+
282
+ The fundamental frequency of this wavelet in Hz is given
283
+ by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
284
+
285
+ Note: This function was created before `cwt` and is not compatible
286
+ with it.
287
+
288
+ Examples
289
+ --------
290
+ >>> from scipy import signal
291
+ >>> import matplotlib.pyplot as plt
292
+
293
+ >>> M = 100
294
+ >>> s = 4.0
295
+ >>> w = 2.0
296
+ >>> wavelet = signal.morlet(M, w=w, s=s)
297
+ >>> plt.plot(wavelet.real, label="real")
298
+ >>> plt.plot(wavelet.imag, label="imag")
299
+ >>> plt.legend()
300
+ >>> plt.show()
301
+
302
+ """
303
+ warnings.warn(_msg % 'morlet', DeprecationWarning, stacklevel=2)
304
+
305
+ x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
306
+ output = np.exp(1j * w * x)
307
+
308
+ if complete:
309
+ output -= np.exp(-0.5 * (w**2))
310
+
311
+ output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
312
+
313
+ return output
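Two of the claims in the docstring above are easy to check numerically: the size of the admissibility correction term ``exp(-0.5*w**2)``, and the fundamental-frequency formula ``f = 2*s*w*r / M``. The sketch below is an editor's addition with arbitrary example values (``M``, ``w``, ``s`` and the sampling rate ``r`` are not taken from the source); it rebuilds the standard wavelet exactly as in the function body and reads the peak off an FFT:

    import numpy as np

    M, w, s, r = 512, 5.0, 1.0, 1.0     # example values; r is the sampling rate
    x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
    wav = np.pi**(-0.25) * np.exp(1j * w * x) * np.exp(-0.5 * x**2)

    print(np.exp(-0.5 * w**2))          # ~3.7e-06: negligible correction at w = 5

    freqs = np.fft.fftfreq(M, d=1.0 / r)
    peak = freqs[np.argmax(np.abs(np.fft.fft(wav)))]
    print(peak, 2 * s * w * r / M)      # both ~0.0195 Hz for these values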
314
+
315
+
316
+ def ricker(points, a):
317
+ """
318
+ Return a Ricker wavelet, also known as the "Mexican hat wavelet".
319
+
320
+ .. deprecated:: 1.12.0
321
+
322
+ scipy.signal.ricker is deprecated in SciPy 1.12 and will be removed
323
+ in SciPy 1.15. We recommend using PyWavelets instead.
324
+
325
+ It models the function:
326
+
327
+ ``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
328
+
329
+ where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
330
+
331
+ Parameters
332
+ ----------
333
+ points : int
334
+ Number of points in `vector`.
335
+ Will be centered around 0.
336
+ a : scalar
337
+ Width parameter of the wavelet.
338
+
339
+ Returns
340
+ -------
341
+ vector : (N,) ndarray
342
+ Array of length `points` in the shape of a Ricker curve.
343
+
344
+ Examples
345
+ --------
346
+ >>> from scipy import signal
347
+ >>> import matplotlib.pyplot as plt
348
+
349
+ >>> points = 100
350
+ >>> a = 4.0
351
+ >>> vec2 = signal.ricker(points, a)
352
+ >>> print(len(vec2))
353
+ 100
354
+ >>> plt.plot(vec2)
355
+ >>> plt.show()
356
+
357
+ """
358
+ warnings.warn(_msg % 'ricker', DeprecationWarning, stacklevel=2)
359
+ return _ricker(points, a)
360
+
361
+
362
+ def _ricker(points, a):
363
+ A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
364
+ wsq = a**2
365
+ vec = np.arange(0, points) - (points - 1.0) / 2
366
+ xsq = vec**2
367
+ mod = (1 - xsq / wsq)
368
+ gauss = np.exp(-xsq / (2 * wsq))
369
+ total = A * mod * gauss
370
+ return total
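The deprecation note above points users to PyWavelets; for reference, the Ricker / Mexican-hat shape is available there as the ``'mexh'`` continuous wavelet. A minimal migration sketch (editor's addition; it assumes PyWavelets is installed, and the amplitude normalisation and width convention differ from ``_ricker``, so compare shapes rather than raw values):

    import pywt  # assumes PyWavelets is installed

    # Sample the Mexican-hat wavelet shape from PyWavelets.
    psi, x = pywt.ContinuousWavelet('mexh').wavefun(level=10)
    print(len(psi), float(x.min()), float(x.max()))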
371
+
372
+
373
+ def morlet2(M, s, w=5):
374
+ """
375
+ Complex Morlet wavelet, designed to work with `cwt`.
376
+
377
+ .. deprecated:: 1.12.0
378
+
379
+ scipy.signal.morlet2 is deprecated in SciPy 1.12 and will be removed
380
+ in SciPy 1.15. We recommend using PyWavelets instead.
381
+
382
+ Returns the complete version of the Morlet wavelet, normalised
383
+ according to `s`::
384
+
385
+ exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
386
+
387
+ Parameters
388
+ ----------
389
+ M : int
390
+ Length of the wavelet.
391
+ s : float
392
+ Width parameter of the wavelet.
393
+ w : float, optional
394
+ Omega0. Default is 5.
395
+
396
+ Returns
397
+ -------
398
+ morlet : (M,) ndarray
399
+
400
+ See Also
401
+ --------
402
+ morlet : Implementation of Morlet wavelet, incompatible with `cwt`
403
+
404
+ Notes
405
+ -----
406
+
407
+ .. versionadded:: 1.4.0
408
+
409
+ This function was designed to work with `cwt`. Because `morlet2`
410
+ returns an array of complex numbers, the `dtype` argument of `cwt`
411
+ should be set to `complex128` for best results.
412
+
413
+ Note the difference in implementation with `morlet`.
414
+ The fundamental frequency of this wavelet in Hz is given by::
415
+
416
+ f = w*fs / (2*s*np.pi)
417
+
418
+ where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
419
+ Similarly, we can obtain the wavelet width parameter for a given ``f``::
420
+
421
+ s = w*fs / (2*f*np.pi)
422
+
423
+ Examples
424
+ --------
425
+ >>> import numpy as np
426
+ >>> from scipy import signal
427
+ >>> import matplotlib.pyplot as plt
428
+
429
+ >>> M = 100
430
+ >>> s = 4.0
431
+ >>> w = 2.0
432
+ >>> wavelet = signal.morlet2(M, s, w)
433
+ >>> plt.plot(abs(wavelet))
434
+ >>> plt.show()
435
+
436
+ This example shows basic use of `morlet2` with `cwt` in time-frequency
437
+ analysis:
438
+
439
+ >>> t, dt = np.linspace(0, 1, 200, retstep=True)
440
+ >>> fs = 1/dt
441
+ >>> w = 6.
442
+ >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
443
+ >>> freq = np.linspace(1, fs/2, 100)
444
+ >>> widths = w*fs / (2*freq*np.pi)
445
+ >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
446
+ >>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
447
+ >>> plt.show()
448
+
449
+ """
450
+ warnings.warn(_msg % 'morlet2', DeprecationWarning, stacklevel=2)
451
+
452
+ x = np.arange(0, M) - (M - 1.0) / 2
453
+ x = x / s
454
+ wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
455
+ output = np.sqrt(1/s) * wavelet
456
+ return output
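The two frequency/width relations quoted in the Notes above are inverses of one another; the following round-trip check (editor's sketch, with arbitrary example numbers for ``w``, ``fs`` and the target frequency) makes that explicit:

    import numpy as np

    w, fs = 6.0, 200.0                   # example omega0 and sampling rate
    f = 25.0                             # target fundamental frequency in Hz

    s = w * fs / (2 * f * np.pi)         # width that places the wavelet at f
    f_back = w * fs / (2 * s * np.pi)    # invert the relation again

    print(s, f_back)                     # f_back comes back as 25.0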
457
+
458
+
459
+ def cwt(data, wavelet, widths, dtype=None, **kwargs):
460
+ """
461
+ Continuous wavelet transform.
462
+
463
+ .. deprecated:: 1.12.0
464
+
465
+ scipy.signal.cwt is deprecated in SciPy 1.12 and will be removed
466
+ in SciPy 1.15. We recommend using PyWavelets instead.
467
+
468
+ Performs a continuous wavelet transform on `data`,
469
+ using the `wavelet` function. A CWT performs a convolution
470
+ with `data` using the `wavelet` function, which is characterized
471
+ by a width parameter and length parameter. The `wavelet` function
472
+ is allowed to be complex.
473
+
474
+ Parameters
475
+ ----------
476
+ data : (N,) ndarray
477
+ Data on which to perform the transform.
478
+ wavelet : function
479
+ Wavelet function, which should take 2 arguments.
480
+ The first argument is the number of points that the returned vector
481
+ will have (len(wavelet(length,width)) == length).
482
+ The second is a width parameter, defining the size of the wavelet
483
+ (e.g., the standard deviation of a Gaussian). See `ricker`, which
484
+ satisfies these requirements.
485
+ widths : (M,) sequence
486
+ Widths to use for transform.
487
+ dtype : data-type, optional
488
+ The desired data type of output. Defaults to ``float64`` if the
489
+ output of `wavelet` is real and ``complex128`` if it is complex.
490
+
491
+ .. versionadded:: 1.4.0
492
+
493
+ kwargs
494
+ Keyword arguments passed to wavelet function.
495
+
496
+ .. versionadded:: 1.4.0
497
+
498
+ Returns
499
+ -------
500
+ cwt : (M, N) ndarray
501
+ Will have shape of (len(widths), len(data)).
502
+
503
+ Notes
504
+ -----
505
+
506
+ .. versionadded:: 1.4.0
507
+
508
+ For non-symmetric, complex-valued wavelets, the input signal is convolved
509
+ with the time-reversed complex conjugate of the wavelet data [1]_.
510
+
511
+ ::
512
+
513
+ length = min(10 * width[ii], len(data))
514
+ cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
515
+ **kwargs))[::-1], mode='same')
516
+
517
+ References
518
+ ----------
519
+ .. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
520
+ Academic Press, 2009.
521
+
522
+ Examples
523
+ --------
524
+ >>> import numpy as np
525
+ >>> from scipy import signal
526
+ >>> import matplotlib.pyplot as plt
527
+ >>> t = np.linspace(-1, 1, 200, endpoint=False)
528
+ >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
529
+ >>> widths = np.arange(1, 31)
530
+ >>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
531
+
532
+ .. note:: For plotting the CWT matrix, it is advisable to flip the y-axis
533
+
534
+ >>> cwtmatr_yflip = np.flipud(cwtmatr)
535
+ >>> plt.imshow(cwtmatr_yflip, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
536
+ ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
537
+ >>> plt.show()
538
+ """
539
+ warnings.warn(_msg % 'cwt', DeprecationWarning, stacklevel=2)
540
+ return _cwt(data, wavelet, widths, dtype, **kwargs)
541
+
542
+
543
+ def _cwt(data, wavelet, widths, dtype=None, **kwargs):
544
+ # Determine output type
545
+ if dtype is None:
546
+ if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
547
+ dtype = np.complex128
548
+ else:
549
+ dtype = np.float64
550
+
551
+ output = np.empty((len(widths), len(data)), dtype=dtype)
552
+ for ind, width in enumerate(widths):
553
+ N = np.min([10 * width, len(data)])
554
+ wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
555
+ output[ind] = convolve(data, wavelet_data, mode='same')
556
+ return output
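Since every deprecation notice in this module recommends PyWavelets, a minimal migration sketch for the continuous wavelet transform may be useful here. It is an editor's addition and assumes PyWavelets is installed; ``'morl'`` is PyWavelets' real-valued Morlet, the test signal and scales are illustrative, and the scales are not a drop-in replacement for the ``ricker`` widths used in the docstring example:

    import numpy as np
    import pywt  # assumes PyWavelets is installed

    t, dt = np.linspace(-1, 1, 200, endpoint=False, retstep=True)
    sig = np.cos(2 * np.pi * 7 * t)                 # simple test signal

    scales = np.arange(1, 31)
    coefs, freqs = pywt.cwt(sig, scales, 'morl', sampling_period=dt)
    print(coefs.shape)                              # (30, 200): (len(scales), len(sig))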
env-llmeval/lib/python3.10/site-packages/scipy/signal/bsplines.py ADDED
@@ -0,0 +1,23 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'spline_filter', 'gauss_spline',
9
+ 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval',
10
+ 'zeros_like', 'array', 'arctan2',
11
+ 'tan', 'arange', 'floor', 'exp', 'greater', 'add',
12
+ 'cspline2d', 'sepfir2d'
13
+ ]
14
+
15
+
16
+ def __dir__():
17
+ return __all__
18
+
19
+
20
+ def __getattr__(name):
21
+ return _sub_module_deprecation(sub_package="signal", module="bsplines",
22
+ private_modules=["_bsplines"], all=__all__,
23
+ attribute=name)
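These shim modules all follow the same pattern: the module-level ``__getattr__`` hands any lookup to ``_sub_module_deprecation``, which emits a ``DeprecationWarning`` and then resolves the name against the real implementation. A small sketch of the effect (editor's addition; it assumes a SciPy version in which these shims are active, such as the 1.12 sources shown here):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Deprecated import path; resolved through the shim's __getattr__.
        from scipy.signal.bsplines import gauss_spline

    print(any(issubclass(w.category, DeprecationWarning) for w in caught))  # True
    print(gauss_spline)   # the gauss_spline function, despite the deprecated path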
env-llmeval/lib/python3.10/site-packages/scipy/signal/filter_design.py ADDED
@@ -0,0 +1,34 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
9
+ 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
10
+ 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
11
+ 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
12
+ 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
13
+ 'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
14
+ 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
15
+ 'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
16
+ 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
17
+ 'gammatone', 'iircomb',
18
+ 'atleast_1d', 'poly', 'polyval', 'roots', 'resize', 'absolute',
19
+ 'tan', 'log10', 'arcsinh', 'exp', 'arccosh',
20
+ 'ceil', 'conjugate', 'append', 'prod', 'full', 'array', 'mintypecode',
21
+ 'npp_polyval', 'polyvalfromroots', 'optimize', 'sp_fft', 'comb',
22
+ 'float_factorial', 'abs', 'maxflat', 'yulewalk',
23
+ 'EPSILON', 'filter_dict', 'band_dict', 'bessel_norms'
24
+ ]
25
+
26
+
27
+ def __dir__():
28
+ return __all__
29
+
30
+
31
+ def __getattr__(name):
32
+ return _sub_module_deprecation(sub_package="signal", module="filter_design",
33
+ private_modules=["_filter_design"], all=__all__,
34
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/signal/fir_filter_design.py ADDED
@@ -0,0 +1,22 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'kaiser_beta', 'kaiser_atten', 'kaiserord',
9
+ 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase',
10
+ 'ceil', 'log', 'irfft', 'fft', 'ifft', 'sinc', 'toeplitz',
11
+ 'hankel', 'solve', 'LinAlgError', 'LinAlgWarning', 'lstsq'
12
+ ]
13
+
14
+
15
+ def __dir__():
16
+ return __all__
17
+
18
+
19
+ def __getattr__(name):
20
+ return _sub_module_deprecation(sub_package="signal", module="fir_filter_design",
21
+ private_modules=["_fir_filter_design"], all=__all__,
22
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/signal/signaltools.py ADDED
@@ -0,0 +1,29 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.signal` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+ __all__ = [ # noqa: F822
8
+ 'correlate', 'correlation_lags', 'correlate2d',
9
+ 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
10
+ 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
11
+ 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
12
+ 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
13
+ 'residuez', 'resample', 'resample_poly', 'detrend',
14
+ 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
15
+ 'filtfilt', 'decimate', 'vectorstrength',
16
+ 'timeit', 'cKDTree', 'dlti', 'upfirdn', 'linalg',
17
+ 'sp_fft', 'lambertw', 'get_window', 'axis_slice', 'axis_reverse',
18
+ 'odd_ext', 'even_ext', 'const_ext', 'cheby1', 'firwin'
19
+ ]
20
+
21
+
22
+ def __dir__():
23
+ return __all__
24
+
25
+
26
+ def __getattr__(name):
27
+ return _sub_module_deprecation(sub_package="signal", module="signaltools",
28
+ private_modules=["_signaltools"], all=__all__,
29
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc ADDED
Binary file (4.43 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc ADDED
Binary file (118 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc ADDED
Binary file (1.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc ADDED
Binary file (9.31 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_short_time_fft.cpython-310.pyc ADDED
Binary file (29.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc ADDED
Binary file (117 kB). View file