applied-ai-018 commited on
Commit
d31891b
·
verified ·
1 Parent(s): 627f14a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/cluster/__init__.py +31 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/cluster/hierarchy.py +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py +145 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py +202 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py +1225 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py +421 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/cluster/vq.py +835 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__init__.py +103 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_basic.py +428 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_helper.py +115 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py +598 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/helper.py +19 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py +22 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py +873 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py +54 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py +31 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py +380 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py +815 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/io/mmio.py +20 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__init__.py +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/cluster/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================
3
+ Clustering package (:mod:`scipy.cluster`)
4
+ =========================================
5
+
6
+ .. currentmodule:: scipy.cluster
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ cluster.vq
12
+ cluster.hierarchy
13
+
14
+ Clustering algorithms are useful in information theory, target detection,
15
+ communications, compression, and other areas. The `vq` module only
16
+ supports vector quantization and the k-means algorithms.
17
+
18
+ The `hierarchy` module provides functions for hierarchical and
19
+ agglomerative clustering. Its features include generating hierarchical
20
+ clusters from distance matrices,
21
+ calculating statistics on clusters, cutting linkages
22
+ to generate flat clusters, and visualizing clusters with dendrograms.
23
+
24
+ """
25
+ __all__ = ['vq', 'hierarchy']
26
+
27
+ from . import vq, hierarchy
28
+
29
+ from scipy._lib._testutils import PytestTester
30
+ test = PytestTester(__name__)
31
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc ADDED
Binary file (131 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc ADDED
Binary file (28.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (423 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (356 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (128 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/hierarchy.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc ADDED
Binary file (4.68 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc ADDED
Binary file (6.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy import array
2
+
3
+
4
+ Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
5
+ [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
6
+ [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
7
+ [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
8
+ [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
9
+ [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
10
+ [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
11
+ [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
12
+ [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
13
+ [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
14
+ [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
15
+ [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
16
+ [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
17
+ [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
18
+ [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
19
+ [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
20
+ [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
21
+ [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
22
+ [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
23
+ [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
24
+ [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
25
+ [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
26
+ [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
27
+ [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
28
+ [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
29
+ [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
30
+ [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
31
+ [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
32
+ [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
33
+ [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
34
+
35
+ ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
36
+ 564., 138., 219., 869., 669.])
37
+
38
+ linkage_ytdist_single = array([[2., 5., 138., 2.],
39
+ [3., 4., 219., 2.],
40
+ [0., 7., 255., 3.],
41
+ [1., 8., 268., 4.],
42
+ [6., 9., 295., 6.]])
43
+
44
+ linkage_ytdist_complete = array([[2., 5., 138., 2.],
45
+ [3., 4., 219., 2.],
46
+ [1., 6., 400., 3.],
47
+ [0., 7., 412., 3.],
48
+ [8., 9., 996., 6.]])
49
+
50
+ linkage_ytdist_average = array([[2., 5., 138., 2.],
51
+ [3., 4., 219., 2.],
52
+ [0., 7., 333.5, 3.],
53
+ [1., 6., 347.5, 3.],
54
+ [8., 9., 680.77777778, 6.]])
55
+
56
+ linkage_ytdist_weighted = array([[2., 5., 138., 2.],
57
+ [3., 4., 219., 2.],
58
+ [0., 7., 333.5, 3.],
59
+ [1., 6., 347.5, 3.],
60
+ [8., 9., 670.125, 6.]])
61
+
62
+ # the optimal leaf ordering of linkage_ytdist_single
63
+ linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
64
+ [4., 3., 219., 2.],
65
+ [7., 0., 255., 3.],
66
+ [1., 8., 268., 4.],
67
+ [6., 9., 295., 6.]])
68
+
69
+ X = array([[1.43054825, -7.5693489],
70
+ [6.95887839, 6.82293382],
71
+ [2.87137846, -9.68248579],
72
+ [7.87974764, -6.05485803],
73
+ [8.24018364, -6.09495602],
74
+ [7.39020262, 8.54004355]])
75
+
76
+ linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
77
+ [1., 5., 1.77045373, 2.],
78
+ [0., 2., 2.55760419, 2.],
79
+ [6., 8., 6.43614494, 4.],
80
+ [7., 9., 15.17363237, 6.]])
81
+
82
+ linkage_X_median = array([[3., 4., 0.36265956, 2.],
83
+ [1., 5., 1.77045373, 2.],
84
+ [0., 2., 2.55760419, 2.],
85
+ [6., 8., 6.43614494, 4.],
86
+ [7., 9., 15.17363237, 6.]])
87
+
88
+ linkage_X_ward = array([[3., 4., 0.36265956, 2.],
89
+ [1., 5., 1.77045373, 2.],
90
+ [0., 2., 2.55760419, 2.],
91
+ [6., 8., 9.10208346, 4.],
92
+ [7., 9., 24.7784379, 6.]])
93
+
94
+ # the optimal leaf ordering of linkage_X_ward
95
+ linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
96
+ [5., 1., 1.77045373, 2.],
97
+ [2., 0., 2.55760419, 2.],
98
+ [6., 8., 9.10208346, 4.],
99
+ [7., 9., 24.7784379, 6.]])
100
+
101
+ inconsistent_ytdist = {
102
+ 1: array([[138., 0., 1., 0.],
103
+ [219., 0., 1., 0.],
104
+ [255., 0., 1., 0.],
105
+ [268., 0., 1., 0.],
106
+ [295., 0., 1., 0.]]),
107
+ 2: array([[138., 0., 1., 0.],
108
+ [219., 0., 1., 0.],
109
+ [237., 25.45584412, 2., 0.70710678],
110
+ [261.5, 9.19238816, 2., 0.70710678],
111
+ [233.66666667, 83.9424406, 3., 0.7306594]]),
112
+ 3: array([[138., 0., 1., 0.],
113
+ [219., 0., 1., 0.],
114
+ [237., 25.45584412, 2., 0.70710678],
115
+ [247.33333333, 25.38372182, 3., 0.81417007],
116
+ [239., 69.36377537, 4., 0.80733783]]),
117
+ 4: array([[138., 0., 1., 0.],
118
+ [219., 0., 1., 0.],
119
+ [237., 25.45584412, 2., 0.70710678],
120
+ [247.33333333, 25.38372182, 3., 0.81417007],
121
+ [235., 60.73302232, 5., 0.98793042]])}
122
+
123
+ fcluster_inconsistent = {
124
+ 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
125
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
126
+ 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
127
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
128
+ 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
129
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
130
+
131
+ fcluster_distance = {
132
+ 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
133
+ 1, 1, 1, 2, 1, 1, 1, 1, 1]),
134
+ 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
135
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
136
+ 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
138
+
139
+ fcluster_maxclust = {
140
+ 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
141
+ 1, 1, 1, 3, 1, 1, 1, 1, 2]),
142
+ 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
143
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
144
+ 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
145
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from pytest import raises as assert_raises
3
+ import numpy as np
4
+ from scipy.cluster.hierarchy import DisjointSet
5
+ import string
6
+
7
+
8
+ def generate_random_token():
9
+ k = len(string.ascii_letters)
10
+ tokens = list(np.arange(k, dtype=int))
11
+ tokens += list(np.arange(k, dtype=float))
12
+ tokens += list(string.ascii_letters)
13
+ tokens += [None for i in range(k)]
14
+ tokens = np.array(tokens, dtype=object)
15
+ rng = np.random.RandomState(seed=0)
16
+
17
+ while 1:
18
+ size = rng.randint(1, 3)
19
+ element = rng.choice(tokens, size)
20
+ if size == 1:
21
+ yield element[0]
22
+ else:
23
+ yield tuple(element)
24
+
25
+
26
+ def get_elements(n):
27
+ # dict is deterministic without difficulty of comparing numpy ints
28
+ elements = {}
29
+ for element in generate_random_token():
30
+ if element not in elements:
31
+ elements[element] = len(elements)
32
+ if len(elements) >= n:
33
+ break
34
+ return list(elements.keys())
35
+
36
+
37
+ def test_init():
38
+ n = 10
39
+ elements = get_elements(n)
40
+ dis = DisjointSet(elements)
41
+ assert dis.n_subsets == n
42
+ assert list(dis) == elements
43
+
44
+
45
+ def test_len():
46
+ n = 10
47
+ elements = get_elements(n)
48
+ dis = DisjointSet(elements)
49
+ assert len(dis) == n
50
+
51
+ dis.add("dummy")
52
+ assert len(dis) == n + 1
53
+
54
+
55
+ @pytest.mark.parametrize("n", [10, 100])
56
+ def test_contains(n):
57
+ elements = get_elements(n)
58
+ dis = DisjointSet(elements)
59
+ for x in elements:
60
+ assert x in dis
61
+
62
+ assert "dummy" not in dis
63
+
64
+
65
+ @pytest.mark.parametrize("n", [10, 100])
66
+ def test_add(n):
67
+ elements = get_elements(n)
68
+ dis1 = DisjointSet(elements)
69
+
70
+ dis2 = DisjointSet()
71
+ for i, x in enumerate(elements):
72
+ dis2.add(x)
73
+ assert len(dis2) == i + 1
74
+
75
+ # test idempotency by adding element again
76
+ dis2.add(x)
77
+ assert len(dis2) == i + 1
78
+
79
+ assert list(dis1) == list(dis2)
80
+
81
+
82
+ def test_element_not_present():
83
+ elements = get_elements(n=10)
84
+ dis = DisjointSet(elements)
85
+
86
+ with assert_raises(KeyError):
87
+ dis["dummy"]
88
+
89
+ with assert_raises(KeyError):
90
+ dis.merge(elements[0], "dummy")
91
+
92
+ with assert_raises(KeyError):
93
+ dis.connected(elements[0], "dummy")
94
+
95
+
96
+ @pytest.mark.parametrize("direction", ["forwards", "backwards"])
97
+ @pytest.mark.parametrize("n", [10, 100])
98
+ def test_linear_union_sequence(n, direction):
99
+ elements = get_elements(n)
100
+ dis = DisjointSet(elements)
101
+ assert elements == list(dis)
102
+
103
+ indices = list(range(n - 1))
104
+ if direction == "backwards":
105
+ indices = indices[::-1]
106
+
107
+ for it, i in enumerate(indices):
108
+ assert not dis.connected(elements[i], elements[i + 1])
109
+ assert dis.merge(elements[i], elements[i + 1])
110
+ assert dis.connected(elements[i], elements[i + 1])
111
+ assert dis.n_subsets == n - 1 - it
112
+
113
+ roots = [dis[i] for i in elements]
114
+ if direction == "forwards":
115
+ assert all(elements[0] == r for r in roots)
116
+ else:
117
+ assert all(elements[-2] == r for r in roots)
118
+ assert not dis.merge(elements[0], elements[-1])
119
+
120
+
121
+ @pytest.mark.parametrize("n", [10, 100])
122
+ def test_self_unions(n):
123
+ elements = get_elements(n)
124
+ dis = DisjointSet(elements)
125
+
126
+ for x in elements:
127
+ assert dis.connected(x, x)
128
+ assert not dis.merge(x, x)
129
+ assert dis.connected(x, x)
130
+ assert dis.n_subsets == len(elements)
131
+
132
+ assert elements == list(dis)
133
+ roots = [dis[x] for x in elements]
134
+ assert elements == roots
135
+
136
+
137
+ @pytest.mark.parametrize("order", ["ab", "ba"])
138
+ @pytest.mark.parametrize("n", [10, 100])
139
+ def test_equal_size_ordering(n, order):
140
+ elements = get_elements(n)
141
+ dis = DisjointSet(elements)
142
+
143
+ rng = np.random.RandomState(seed=0)
144
+ indices = np.arange(n)
145
+ rng.shuffle(indices)
146
+
147
+ for i in range(0, len(indices), 2):
148
+ a, b = elements[indices[i]], elements[indices[i + 1]]
149
+ if order == "ab":
150
+ assert dis.merge(a, b)
151
+ else:
152
+ assert dis.merge(b, a)
153
+
154
+ expected = elements[min(indices[i], indices[i + 1])]
155
+ assert dis[a] == expected
156
+ assert dis[b] == expected
157
+
158
+
159
+ @pytest.mark.parametrize("kmax", [5, 10])
160
+ def test_binary_tree(kmax):
161
+ n = 2**kmax
162
+ elements = get_elements(n)
163
+ dis = DisjointSet(elements)
164
+ rng = np.random.RandomState(seed=0)
165
+
166
+ for k in 2**np.arange(kmax):
167
+ for i in range(0, n, 2 * k):
168
+ r1, r2 = rng.randint(0, k, size=2)
169
+ a, b = elements[i + r1], elements[i + k + r2]
170
+ assert not dis.connected(a, b)
171
+ assert dis.merge(a, b)
172
+ assert dis.connected(a, b)
173
+
174
+ assert elements == list(dis)
175
+ roots = [dis[i] for i in elements]
176
+ expected_indices = np.arange(n) - np.arange(n) % (2 * k)
177
+ expected = [elements[i] for i in expected_indices]
178
+ assert roots == expected
179
+
180
+
181
+ @pytest.mark.parametrize("n", [10, 100])
182
+ def test_subsets(n):
183
+ elements = get_elements(n)
184
+ dis = DisjointSet(elements)
185
+
186
+ rng = np.random.RandomState(seed=0)
187
+ for i, j in rng.randint(0, n, (n, 2)):
188
+ x = elements[i]
189
+ y = elements[j]
190
+
191
+ expected = {element for element in dis if {dis[element]} == {dis[x]}}
192
+ assert dis.subset_size(x) == len(dis.subset(x))
193
+ assert expected == dis.subset(x)
194
+
195
+ expected = {dis[element]: set() for element in dis}
196
+ for element in dis:
197
+ expected[dis[element]].add(element)
198
+ expected = list(expected.values())
199
+ assert expected == dis.subsets()
200
+
201
+ dis.merge(x, y)
202
+ assert dis.subset(x) == dis.subset(y)
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py ADDED
@@ -0,0 +1,1225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Author: Damian Eads
3
+ # Date: April 17, 2008
4
+ #
5
+ # Copyright (C) 2008 Damian Eads
6
+ #
7
+ # Redistribution and use in source and binary forms, with or without
8
+ # modification, are permitted provided that the following conditions
9
+ # are met:
10
+ #
11
+ # 1. Redistributions of source code must retain the above copyright
12
+ # notice, this list of conditions and the following disclaimer.
13
+ #
14
+ # 2. Redistributions in binary form must reproduce the above
15
+ # copyright notice, this list of conditions and the following
16
+ # disclaimer in the documentation and/or other materials provided
17
+ # with the distribution.
18
+ #
19
+ # 3. The name of the author may not be used to endorse or promote
20
+ # products derived from this software without specific prior
21
+ # written permission.
22
+ #
23
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
24
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
27
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
+ import numpy as np
35
+ from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
36
+ import pytest
37
+ from pytest import raises as assert_raises
38
+
39
+ import scipy.cluster.hierarchy
40
+ from scipy.cluster.hierarchy import (
41
+ ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
42
+ num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
43
+ is_isomorphic, single, leaders,
44
+ correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
45
+ is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
46
+ set_link_color_palette, cut_tree, optimal_leaf_ordering,
47
+ _order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
48
+ from scipy.spatial.distance import pdist
49
+ from scipy.cluster._hierarchy import Heap
50
+ from scipy.conftest import array_api_compatible
51
+ from scipy._lib._array_api import xp_assert_close, xp_assert_equal
52
+
53
+ from . import hierarchy_test_data
54
+
55
+
56
+ # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
57
+ # check if it's available
58
+ try:
59
+ import matplotlib
60
+ # and set the backend to be Agg (no gui)
61
+ matplotlib.use('Agg')
62
+ # before importing pyplot
63
+ import matplotlib.pyplot as plt
64
+ have_matplotlib = True
65
+ except Exception:
66
+ have_matplotlib = False
67
+
68
+
69
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
70
+ skip_if_array_api = pytest.mark.skip_if_array_api
71
+
72
+
73
+ class TestLinkage:
74
+
75
+ @skip_if_array_api(cpu_only=True)
76
+ def test_linkage_non_finite_elements_in_distance_matrix(self, xp):
77
+ # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
78
+ # Exception expected.
79
+ y = xp.zeros((6,))
80
+ y[0] = xp.nan
81
+ assert_raises(ValueError, linkage, y)
82
+
83
+ @skip_if_array_api(cpu_only=True)
84
+ def test_linkage_empty_distance_matrix(self, xp):
85
+ # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
86
+ y = xp.zeros((0,))
87
+ assert_raises(ValueError, linkage, y)
88
+
89
+ @skip_if_array_api(cpu_only=True)
90
+ def test_linkage_tdist(self, xp):
91
+ for method in ['single', 'complete', 'average', 'weighted']:
92
+ self.check_linkage_tdist(method, xp)
93
+
94
+ def check_linkage_tdist(self, method, xp):
95
+ # Tests linkage(Y, method) on the tdist data set.
96
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method)
97
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
98
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
99
+
100
+ @skip_if_array_api(cpu_only=True)
101
+ def test_linkage_X(self, xp):
102
+ for method in ['centroid', 'median', 'ward']:
103
+ self.check_linkage_q(method, xp)
104
+
105
+ def check_linkage_q(self, method, xp):
106
+ # Tests linkage(Y, method) on the Q data set.
107
+ Z = linkage(xp.asarray(hierarchy_test_data.X), method)
108
+ expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
109
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
110
+
111
+ y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
112
+ metric="euclidean")
113
+ Z = linkage(xp.asarray(y), method)
114
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
115
+
116
+ @skip_if_array_api(cpu_only=True)
117
+ def test_compare_with_trivial(self, xp):
118
+ rng = np.random.RandomState(0)
119
+ n = 20
120
+ X = rng.rand(n, 2)
121
+ d = pdist(X)
122
+
123
+ for method, code in _LINKAGE_METHODS.items():
124
+ Z_trivial = _hierarchy.linkage(d, n, code)
125
+ Z = linkage(xp.asarray(d), method)
126
+ xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15)
127
+
128
+ @skip_if_array_api(cpu_only=True)
129
+ def test_optimal_leaf_ordering(self, xp):
130
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True)
131
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
132
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
133
+
134
+
135
+ @skip_if_array_api(cpu_only=True)
136
+ class TestLinkageTies:
137
+
138
+ _expectations = {
139
+ 'single': np.array([[0, 1, 1.41421356, 2],
140
+ [2, 3, 1.41421356, 3]]),
141
+ 'complete': np.array([[0, 1, 1.41421356, 2],
142
+ [2, 3, 2.82842712, 3]]),
143
+ 'average': np.array([[0, 1, 1.41421356, 2],
144
+ [2, 3, 2.12132034, 3]]),
145
+ 'weighted': np.array([[0, 1, 1.41421356, 2],
146
+ [2, 3, 2.12132034, 3]]),
147
+ 'centroid': np.array([[0, 1, 1.41421356, 2],
148
+ [2, 3, 2.12132034, 3]]),
149
+ 'median': np.array([[0, 1, 1.41421356, 2],
150
+ [2, 3, 2.12132034, 3]]),
151
+ 'ward': np.array([[0, 1, 1.41421356, 2],
152
+ [2, 3, 2.44948974, 3]]),
153
+ }
154
+
155
+ def test_linkage_ties(self, xp):
156
+ for method in ['single', 'complete', 'average', 'weighted',
157
+ 'centroid', 'median', 'ward']:
158
+ self.check_linkage_ties(method, xp)
159
+
160
+ def check_linkage_ties(self, method, xp):
161
+ X = xp.asarray([[-1, -1], [0, 0], [1, 1]])
162
+ Z = linkage(X, method=method)
163
+ expectedZ = self._expectations[method]
164
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
165
+
166
+
167
+ @skip_if_array_api(cpu_only=True)
168
+ class TestInconsistent:
169
+
170
+ def test_inconsistent_tdist(self, xp):
171
+ for depth in hierarchy_test_data.inconsistent_ytdist:
172
+ self.check_inconsistent_tdist(depth, xp)
173
+
174
+ def check_inconsistent_tdist(self, depth, xp):
175
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
176
+ xp_assert_close(inconsistent(Z, depth),
177
+ xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth]))
178
+
179
+
180
+ @skip_if_array_api(cpu_only=True)
181
+ class TestCopheneticDistance:
182
+
183
+ def test_linkage_cophenet_tdist_Z(self, xp):
184
+ # Tests cophenet(Z) on tdist data set.
185
+ expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
186
+ 295, 138, 219, 295, 295])
187
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
188
+ M = cophenet(Z)
189
+ xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10)
190
+
191
+ def test_linkage_cophenet_tdist_Z_Y(self, xp):
192
+ # Tests cophenet(Z, Y) on tdist data set.
193
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
194
+ (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist))
195
+ expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
196
+ 295, 138, 219, 295, 295], dtype=xp.float64)
197
+ expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()]
198
+ xp_assert_close(c, expectedc, atol=1e-10)
199
+ xp_assert_close(M, expectedM, atol=1e-10)
200
+
201
+
202
+ class TestMLabLinkageConversion:
203
+
204
+ def test_mlab_linkage_conversion_empty(self, xp):
205
+ # Tests from/to_mlab_linkage on empty linkage array.
206
+ X = xp.asarray([], dtype=xp.float64)
207
+ xp_assert_equal(from_mlab_linkage(X), X)
208
+ xp_assert_equal(to_mlab_linkage(X), X)
209
+
210
+ @skip_if_array_api(cpu_only=True)
211
+ def test_mlab_linkage_conversion_single_row(self, xp):
212
+ # Tests from/to_mlab_linkage on linkage array with single row.
213
+ Z = xp.asarray([[0., 1., 3., 2.]])
214
+ Zm = xp.asarray([[1, 2, 3]])
215
+ xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64),
216
+ rtol=1e-15)
217
+ xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
218
+ rtol=1e-15)
219
+
220
+ @skip_if_array_api(cpu_only=True)
221
+ def test_mlab_linkage_conversion_multiple_rows(self, xp):
222
+ # Tests from/to_mlab_linkage on linkage array with multiple rows.
223
+ Zm = xp.asarray([[3, 6, 138], [4, 5, 219],
224
+ [1, 8, 255], [2, 9, 268], [7, 10, 295]])
225
+ Z = xp.asarray([[2., 5., 138., 2.],
226
+ [3., 4., 219., 2.],
227
+ [0., 7., 255., 3.],
228
+ [1., 8., 268., 4.],
229
+ [6., 9., 295., 6.]],
230
+ dtype=xp.float64)
231
+ xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15)
232
+ xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
233
+ rtol=1e-15)
234
+
235
+
236
+ @skip_if_array_api(cpu_only=True)
237
+ class TestFcluster:
238
+
239
+ def test_fclusterdata(self, xp):
240
+ for t in hierarchy_test_data.fcluster_inconsistent:
241
+ self.check_fclusterdata(t, 'inconsistent', xp)
242
+ for t in hierarchy_test_data.fcluster_distance:
243
+ self.check_fclusterdata(t, 'distance', xp)
244
+ for t in hierarchy_test_data.fcluster_maxclust:
245
+ self.check_fclusterdata(t, 'maxclust', xp)
246
+
247
+ def check_fclusterdata(self, t, criterion, xp):
248
+ # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set
249
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
250
+ X = xp.asarray(hierarchy_test_data.Q_X)
251
+ T = fclusterdata(X, criterion=criterion, t=t)
252
+ assert_(is_isomorphic(T, expectedT))
253
+
254
+ def test_fcluster(self, xp):
255
+ for t in hierarchy_test_data.fcluster_inconsistent:
256
+ self.check_fcluster(t, 'inconsistent', xp)
257
+ for t in hierarchy_test_data.fcluster_distance:
258
+ self.check_fcluster(t, 'distance', xp)
259
+ for t in hierarchy_test_data.fcluster_maxclust:
260
+ self.check_fcluster(t, 'maxclust', xp)
261
+
262
+ def check_fcluster(self, t, criterion, xp):
263
+ # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
264
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
265
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
266
+ T = fcluster(Z, criterion=criterion, t=t)
267
+ assert_(is_isomorphic(T, expectedT))
268
+
269
+ def test_fcluster_monocrit(self, xp):
270
+ for t in hierarchy_test_data.fcluster_distance:
271
+ self.check_fcluster_monocrit(t, xp)
272
+ for t in hierarchy_test_data.fcluster_maxclust:
273
+ self.check_fcluster_maxclust_monocrit(t, xp)
274
+
275
+ def check_fcluster_monocrit(self, t, xp):
276
+ expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t])
277
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
278
+ T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
279
+ assert_(is_isomorphic(T, expectedT))
280
+
281
+ def check_fcluster_maxclust_monocrit(self, t, xp):
282
+ expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t])
283
+ Z = single(xp.asarray(hierarchy_test_data.Q_X))
284
+ T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
285
+ assert_(is_isomorphic(T, expectedT))
286
+
287
+
288
+ @skip_if_array_api(cpu_only=True)
289
+ class TestLeaders:
290
+
291
+ def test_leaders_single(self, xp):
292
+ # Tests leaders using a flat clustering generated by single linkage.
293
+ X = hierarchy_test_data.Q_X
294
+ Y = pdist(X)
295
+ Y = xp.asarray(Y)
296
+ Z = linkage(Y)
297
+ T = fcluster(Z, criterion='maxclust', t=3)
298
+ Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1]))
299
+ T = xp.asarray(T, dtype=xp.int32)
300
+ L = leaders(Z, T)
301
+ assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15)
302
+
303
+
304
+ @skip_if_array_api(np_only=True,
305
+ reasons=['`is_isomorphic` only supports NumPy backend'])
306
+ class TestIsIsomorphic:
307
+
308
+ @skip_if_array_api(np_only=True,
309
+ reasons=['array-likes only supported for NumPy backend'])
310
+ def test_array_like(self, xp):
311
+ assert is_isomorphic([1, 1, 1], [2, 2, 2])
312
+ assert is_isomorphic([], [])
313
+
314
+ def test_is_isomorphic_1(self, xp):
315
+ # Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
316
+ a = xp.asarray([1, 1, 1])
317
+ b = xp.asarray([2, 2, 2])
318
+ assert is_isomorphic(a, b)
319
+ assert is_isomorphic(b, a)
320
+
321
+ def test_is_isomorphic_2(self, xp):
322
+ # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
323
+ a = xp.asarray([1, 7, 1])
324
+ b = xp.asarray([2, 3, 2])
325
+ assert is_isomorphic(a, b)
326
+ assert is_isomorphic(b, a)
327
+
328
+ def test_is_isomorphic_3(self, xp):
329
+ # Tests is_isomorphic on test case #3 (no flat clusters)
330
+ a = xp.asarray([])
331
+ b = xp.asarray([])
332
+ assert is_isomorphic(a, b)
333
+
334
+ def test_is_isomorphic_4A(self, xp):
335
+ # Tests is_isomorphic on test case #4A
336
+ # (3 flat clusters, different labelings, isomorphic)
337
+ a = xp.asarray([1, 2, 3])
338
+ b = xp.asarray([1, 3, 2])
339
+ assert is_isomorphic(a, b)
340
+ assert is_isomorphic(b, a)
341
+
342
+ def test_is_isomorphic_4B(self, xp):
343
+ # Tests is_isomorphic on test case #4B
344
+ # (3 flat clusters, different labelings, nonisomorphic)
345
+ a = xp.asarray([1, 2, 3, 3])
346
+ b = xp.asarray([1, 3, 2, 3])
347
+ assert is_isomorphic(a, b) is False
348
+ assert is_isomorphic(b, a) is False
349
+
350
+ def test_is_isomorphic_4C(self, xp):
351
+ # Tests is_isomorphic on test case #4C
352
+ # (3 flat clusters, different labelings, isomorphic)
353
+ a = xp.asarray([7, 2, 3])
354
+ b = xp.asarray([6, 3, 2])
355
+ assert is_isomorphic(a, b)
356
+ assert is_isomorphic(b, a)
357
+
358
+ def test_is_isomorphic_5(self, xp):
359
+ # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
360
+ # clusters, random permutation of the labeling).
361
+ for nc in [2, 3, 5]:
362
+ self.help_is_isomorphic_randperm(1000, nc, xp=xp)
363
+
364
+ def test_is_isomorphic_6(self, xp):
365
+ # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
366
+ # clusters, random permutation of the labeling, slightly
367
+ # nonisomorphic.)
368
+ for nc in [2, 3, 5]:
369
+ self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp)
370
+
371
+ def test_is_isomorphic_7(self, xp):
372
+ # Regression test for gh-6271
373
+ a = xp.asarray([1, 2, 3])
374
+ b = xp.asarray([1, 1, 1])
375
+ assert not is_isomorphic(a, b)
376
+
377
+ def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0,
378
+ *, xp):
379
+ for k in range(3):
380
+ a = (np.random.rand(nobs) * nclusters).astype(int)
381
+ b = np.zeros(a.size, dtype=int)
382
+ P = np.random.permutation(nclusters)
383
+ for i in range(0, a.shape[0]):
384
+ b[i] = P[a[i]]
385
+ if noniso:
386
+ Q = np.random.permutation(nobs)
387
+ b[Q[0:nerrors]] += 1
388
+ b[Q[0:nerrors]] %= nclusters
389
+ a = xp.asarray(a)
390
+ b = xp.asarray(b)
391
+ assert is_isomorphic(a, b) == (not noniso)
392
+ assert is_isomorphic(b, a) == (not noniso)
393
+
394
+
395
+ @skip_if_array_api(cpu_only=True)
396
+ class TestIsValidLinkage:
397
+
398
+ def test_is_valid_linkage_various_size(self, xp):
399
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
400
+ (1, 4, True), (2, 4, True)]:
401
+ self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp)
402
+
403
+ def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp):
404
+ # Tests is_valid_linkage(Z) with linkage matrices of various sizes
405
+ Z = xp.asarray([[0, 1, 3.0, 2, 5],
406
+ [3, 2, 4.0, 3, 3]], dtype=xp.float64)
407
+ Z = Z[:nrow, :ncol]
408
+ assert_(is_valid_linkage(Z) == valid)
409
+ if not valid:
410
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
411
+
412
+ def test_is_valid_linkage_int_type(self, xp):
413
+ # Tests is_valid_linkage(Z) with integer type.
414
+ Z = xp.asarray([[0, 1, 3.0, 2],
415
+ [3, 2, 4.0, 3]], dtype=xp.int64)
416
+ assert_(is_valid_linkage(Z) is False)
417
+ assert_raises(TypeError, is_valid_linkage, Z, throw=True)
418
+
419
+ def test_is_valid_linkage_empty(self, xp):
420
+ # Tests is_valid_linkage(Z) with empty linkage.
421
+ Z = xp.zeros((0, 4), dtype=xp.float64)
422
+ assert_(is_valid_linkage(Z) is False)
423
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
424
+
425
+ def test_is_valid_linkage_4_and_up(self, xp):
426
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
427
+ # sizes 4 and 15 (step size 3).
428
+ for i in range(4, 15, 3):
429
+ y = np.random.rand(i*(i-1)//2)
430
+ y = xp.asarray(y)
431
+ Z = linkage(y)
432
+ assert_(is_valid_linkage(Z) is True)
433
+
434
+ def test_is_valid_linkage_4_and_up_neg_index_left(self, xp):
435
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
436
+ # sizes 4 and 15 (step size 3) with negative indices (left).
437
+ for i in range(4, 15, 3):
438
+ y = np.random.rand(i*(i-1)//2)
439
+ y = xp.asarray(y)
440
+ Z = linkage(y)
441
+ Z[i//2,0] = -2
442
+ assert_(is_valid_linkage(Z) is False)
443
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
444
+
445
+ def test_is_valid_linkage_4_and_up_neg_index_right(self, xp):
446
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
447
+ # sizes 4 and 15 (step size 3) with negative indices (right).
448
+ for i in range(4, 15, 3):
449
+ y = np.random.rand(i*(i-1)//2)
450
+ y = xp.asarray(y)
451
+ Z = linkage(y)
452
+ Z[i//2,1] = -2
453
+ assert_(is_valid_linkage(Z) is False)
454
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
455
+
456
+ def test_is_valid_linkage_4_and_up_neg_dist(self, xp):
457
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
458
+ # sizes 4 and 15 (step size 3) with negative distances.
459
+ for i in range(4, 15, 3):
460
+ y = np.random.rand(i*(i-1)//2)
461
+ y = xp.asarray(y)
462
+ Z = linkage(y)
463
+ Z[i//2,2] = -0.5
464
+ assert_(is_valid_linkage(Z) is False)
465
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
466
+
467
+ def test_is_valid_linkage_4_and_up_neg_counts(self, xp):
468
+ # Tests is_valid_linkage(Z) on linkage on observation sets between
469
+ # sizes 4 and 15 (step size 3) with negative counts.
470
+ for i in range(4, 15, 3):
471
+ y = np.random.rand(i*(i-1)//2)
472
+ y = xp.asarray(y)
473
+ Z = linkage(y)
474
+ Z[i//2,3] = -2
475
+ assert_(is_valid_linkage(Z) is False)
476
+ assert_raises(ValueError, is_valid_linkage, Z, throw=True)
477
+
478
+
479
+ @skip_if_array_api(cpu_only=True)
480
+ class TestIsValidInconsistent:
481
+
482
+ def test_is_valid_im_int_type(self, xp):
483
+ # Tests is_valid_im(R) with integer type.
484
+ R = xp.asarray([[0, 1, 3.0, 2],
485
+ [3, 2, 4.0, 3]], dtype=xp.int64)
486
+ assert_(is_valid_im(R) is False)
487
+ assert_raises(TypeError, is_valid_im, R, throw=True)
488
+
489
+ def test_is_valid_im_various_size(self, xp):
490
+ for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
491
+ (1, 4, True), (2, 4, True)]:
492
+ self.check_is_valid_im_various_size(nrow, ncol, valid, xp)
493
+
494
+ def check_is_valid_im_various_size(self, nrow, ncol, valid, xp):
495
+ # Tests is_valid_im(R) with linkage matrices of various sizes
496
+ R = xp.asarray([[0, 1, 3.0, 2, 5],
497
+ [3, 2, 4.0, 3, 3]], dtype=xp.float64)
498
+ R = R[:nrow, :ncol]
499
+ assert_(is_valid_im(R) == valid)
500
+ if not valid:
501
+ assert_raises(ValueError, is_valid_im, R, throw=True)
502
+
503
+ def test_is_valid_im_empty(self, xp):
504
+ # Tests is_valid_im(R) with empty inconsistency matrix.
505
+ R = xp.zeros((0, 4), dtype=xp.float64)
506
+ assert_(is_valid_im(R) is False)
507
+ assert_raises(ValueError, is_valid_im, R, throw=True)
508
+
509
+ def test_is_valid_im_4_and_up(self, xp):
510
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
511
+ # (step size 3).
512
+ for i in range(4, 15, 3):
513
+ y = np.random.rand(i*(i-1)//2)
514
+ y = xp.asarray(y)
515
+ Z = linkage(y)
516
+ R = inconsistent(Z)
517
+ assert_(is_valid_im(R) is True)
518
+
519
+ def test_is_valid_im_4_and_up_neg_index_left(self, xp):
520
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
521
+ # (step size 3) with negative link height means.
522
+ for i in range(4, 15, 3):
523
+ y = np.random.rand(i*(i-1)//2)
524
+ y = xp.asarray(y)
525
+ Z = linkage(y)
526
+ R = inconsistent(Z)
527
+ R[i//2,0] = -2.0
528
+ assert_(is_valid_im(R) is False)
529
+ assert_raises(ValueError, is_valid_im, R, throw=True)
530
+
531
+ def test_is_valid_im_4_and_up_neg_index_right(self, xp):
532
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
533
+ # (step size 3) with negative link height standard deviations.
534
+ for i in range(4, 15, 3):
535
+ y = np.random.rand(i*(i-1)//2)
536
+ y = xp.asarray(y)
537
+ Z = linkage(y)
538
+ R = inconsistent(Z)
539
+ R[i//2,1] = -2.0
540
+ assert_(is_valid_im(R) is False)
541
+ assert_raises(ValueError, is_valid_im, R, throw=True)
542
+
543
+ def test_is_valid_im_4_and_up_neg_dist(self, xp):
544
+ # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
545
+ # (step size 3) with negative link counts.
546
+ for i in range(4, 15, 3):
547
+ y = np.random.rand(i*(i-1)//2)
548
+ y = xp.asarray(y)
549
+ Z = linkage(y)
550
+ R = inconsistent(Z)
551
+ R[i//2,2] = -0.5
552
+ assert_(is_valid_im(R) is False)
553
+ assert_raises(ValueError, is_valid_im, R, throw=True)
554
+
555
+
556
+ class TestNumObsLinkage:
557
+
558
+ @skip_if_array_api(cpu_only=True)
559
+ def test_num_obs_linkage_empty(self, xp):
560
+ # Tests num_obs_linkage(Z) with empty linkage.
561
+ Z = xp.zeros((0, 4), dtype=xp.float64)
562
+ assert_raises(ValueError, num_obs_linkage, Z)
563
+
564
+ def test_num_obs_linkage_1x4(self, xp):
565
+ # Tests num_obs_linkage(Z) on linkage over 2 observations.
566
+ Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
567
+ assert_equal(num_obs_linkage(Z), 2)
568
+
569
+ def test_num_obs_linkage_2x4(self, xp):
570
+ # Tests num_obs_linkage(Z) on linkage over 3 observations.
571
+ Z = xp.asarray([[0, 1, 3.0, 2],
572
+ [3, 2, 4.0, 3]], dtype=xp.float64)
573
+ assert_equal(num_obs_linkage(Z), 3)
574
+
575
+ @skip_if_array_api(cpu_only=True)
576
+ def test_num_obs_linkage_4_and_up(self, xp):
577
+ # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
578
+ # 4 and 15 (step size 3).
579
+ for i in range(4, 15, 3):
580
+ y = np.random.rand(i*(i-1)//2)
581
+ y = xp.asarray(y)
582
+ Z = linkage(y)
583
+ assert_equal(num_obs_linkage(Z), i)
584
+
585
+
586
+ @skip_if_array_api(cpu_only=True)
587
+ class TestLeavesList:
588
+
589
+ def test_leaves_list_1x4(self, xp):
590
+ # Tests leaves_list(Z) on a 1x4 linkage.
591
+ Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
592
+ to_tree(Z)
593
+ assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15)
594
+
595
+ def test_leaves_list_2x4(self, xp):
596
+ # Tests leaves_list(Z) on a 2x4 linkage.
597
+ Z = xp.asarray([[0, 1, 3.0, 2],
598
+ [3, 2, 4.0, 3]], dtype=xp.float64)
599
+ to_tree(Z)
600
+ assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15)
601
+
602
+ def test_leaves_list_Q(self, xp):
603
+ for method in ['single', 'complete', 'average', 'weighted', 'centroid',
604
+ 'median', 'ward']:
605
+ self.check_leaves_list_Q(method, xp)
606
+
607
+ def check_leaves_list_Q(self, method, xp):
608
+ # Tests leaves_list(Z) on the Q data set
609
+ X = xp.asarray(hierarchy_test_data.Q_X)
610
+ Z = linkage(X, method)
611
+ node = to_tree(Z)
612
+ assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15)
613
+
614
+ def test_Q_subtree_pre_order(self, xp):
615
+ # Tests that pre_order() works when called on sub-trees.
616
+ X = xp.asarray(hierarchy_test_data.Q_X)
617
+ Z = linkage(X, 'single')
618
+ node = to_tree(Z)
619
+ assert_allclose(node.pre_order(), (node.get_left().pre_order()
620
+ + node.get_right().pre_order()),
621
+ rtol=1e-15)
622
+
623
+
624
+ @skip_if_array_api(cpu_only=True)
625
+ class TestCorrespond:
626
+
627
+ def test_correspond_empty(self, xp):
628
+ # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
629
+ y = xp.zeros((0,), dtype=xp.float64)
630
+ Z = xp.zeros((0,4), dtype=xp.float64)
631
+ assert_raises(ValueError, correspond, Z, y)
632
+
633
+ def test_correspond_2_and_up(self, xp):
634
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
635
+ # different sizes.
636
+ for i in range(2, 4):
637
+ y = np.random.rand(i*(i-1)//2)
638
+ y = xp.asarray(y)
639
+ Z = linkage(y)
640
+ assert_(correspond(Z, y))
641
+ for i in range(4, 15, 3):
642
+ y = np.random.rand(i*(i-1)//2)
643
+ y = xp.asarray(y)
644
+ Z = linkage(y)
645
+ assert_(correspond(Z, y))
646
+
647
+ def test_correspond_4_and_up(self, xp):
648
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
649
+ # different sizes. Correspondence should be false.
650
+ for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
651
+ list(zip(list(range(3, 5)), list(range(2, 4))))):
652
+ y = np.random.rand(i*(i-1)//2)
653
+ y2 = np.random.rand(j*(j-1)//2)
654
+ y = xp.asarray(y)
655
+ y2 = xp.asarray(y2)
656
+ Z = linkage(y)
657
+ Z2 = linkage(y2)
658
+ assert not correspond(Z, y2)
659
+ assert not correspond(Z2, y)
660
+
661
+ def test_correspond_4_and_up_2(self, xp):
662
+ # Tests correspond(Z, y) on linkage and CDMs over observation sets of
663
+ # different sizes. Correspondence should be false.
664
+ for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
665
+ list(zip(list(range(2, 7)), list(range(16, 21))))):
666
+ y = np.random.rand(i*(i-1)//2)
667
+ y2 = np.random.rand(j*(j-1)//2)
668
+ y = xp.asarray(y)
669
+ y2 = xp.asarray(y2)
670
+ Z = linkage(y)
671
+ Z2 = linkage(y2)
672
+ assert not correspond(Z, y2)
673
+ assert not correspond(Z2, y)
674
+
675
+ def test_num_obs_linkage_multi_matrix(self, xp):
676
+ # Tests num_obs_linkage with observation matrices of multiple sizes.
677
+ for n in range(2, 10):
678
+ X = np.random.rand(n, 4)
679
+ Y = pdist(X)
680
+ Y = xp.asarray(Y)
681
+ Z = linkage(Y)
682
+ assert_equal(num_obs_linkage(Z), n)
683
+
684
+
685
+ @skip_if_array_api(cpu_only=True)
686
+ class TestIsMonotonic:
687
+
688
+ def test_is_monotonic_empty(self, xp):
689
+ # Tests is_monotonic(Z) on an empty linkage.
690
+ Z = xp.zeros((0, 4), dtype=xp.float64)
691
+ assert_raises(ValueError, is_monotonic, Z)
692
+
693
+ def test_is_monotonic_1x4(self, xp):
694
+ # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
695
+ Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64)
696
+ assert is_monotonic(Z)
697
+
698
+ def test_is_monotonic_2x4_T(self, xp):
699
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
700
+ Z = xp.asarray([[0, 1, 0.3, 2],
701
+ [2, 3, 0.4, 3]], dtype=xp.float64)
702
+ assert is_monotonic(Z)
703
+
704
+ def test_is_monotonic_2x4_F(self, xp):
705
+ # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
706
+ Z = xp.asarray([[0, 1, 0.4, 2],
707
+ [2, 3, 0.3, 3]], dtype=xp.float64)
708
+ assert not is_monotonic(Z)
709
+
710
+ def test_is_monotonic_3x4_T(self, xp):
711
+ # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
712
+ Z = xp.asarray([[0, 1, 0.3, 2],
713
+ [2, 3, 0.4, 2],
714
+ [4, 5, 0.6, 4]], dtype=xp.float64)
715
+ assert is_monotonic(Z)
716
+
717
+ def test_is_monotonic_3x4_F1(self, xp):
718
+ # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
719
+ Z = xp.asarray([[0, 1, 0.3, 2],
720
+ [2, 3, 0.2, 2],
721
+ [4, 5, 0.6, 4]], dtype=xp.float64)
722
+ assert not is_monotonic(Z)
723
+
724
+ def test_is_monotonic_3x4_F2(self, xp):
725
+ # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
726
+ Z = xp.asarray([[0, 1, 0.8, 2],
727
+ [2, 3, 0.4, 2],
728
+ [4, 5, 0.6, 4]], dtype=xp.float64)
729
+ assert not is_monotonic(Z)
730
+
731
+ def test_is_monotonic_3x4_F3(self, xp):
732
+ # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
733
+ Z = xp.asarray([[0, 1, 0.3, 2],
734
+ [2, 3, 0.4, 2],
735
+ [4, 5, 0.2, 4]], dtype=xp.float64)
736
+ assert not is_monotonic(Z)
737
+
738
+ def test_is_monotonic_tdist_linkage1(self, xp):
739
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
740
+ # tdist data set. Expecting True.
741
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
742
+ assert is_monotonic(Z)
743
+
744
+ def test_is_monotonic_tdist_linkage2(self, xp):
745
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
746
+ # tdist data set. Perturbing. Expecting False.
747
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
748
+ Z[2,2] = 0.0
749
+ assert not is_monotonic(Z)
750
+
751
+ def test_is_monotonic_Q_linkage(self, xp):
752
+ # Tests is_monotonic(Z) on clustering generated by single linkage on
753
+ # Q data set. Expecting True.
754
+ X = xp.asarray(hierarchy_test_data.Q_X)
755
+ Z = linkage(X, 'single')
756
+ assert is_monotonic(Z)
757
+
758
+
759
+ @skip_if_array_api(cpu_only=True)
760
+ class TestMaxDists:
761
+
762
+ def test_maxdists_empty_linkage(self, xp):
763
+ # Tests maxdists(Z) on empty linkage. Expecting exception.
764
+ Z = xp.zeros((0, 4), dtype=xp.float64)
765
+ assert_raises(ValueError, maxdists, Z)
766
+
767
+ def test_maxdists_one_cluster_linkage(self, xp):
768
+ # Tests maxdists(Z) on linkage with one cluster.
769
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
770
+ MD = maxdists(Z)
771
+ expectedMD = calculate_maximum_distances(Z, xp)
772
+ xp_assert_close(MD, expectedMD, atol=1e-15)
773
+
774
+ def test_maxdists_Q_linkage(self, xp):
775
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
776
+ self.check_maxdists_Q_linkage(method, xp)
777
+
778
+ def check_maxdists_Q_linkage(self, method, xp):
779
+ # Tests maxdists(Z) on the Q data set
780
+ X = xp.asarray(hierarchy_test_data.Q_X)
781
+ Z = linkage(X, method)
782
+ MD = maxdists(Z)
783
+ expectedMD = calculate_maximum_distances(Z, xp)
784
+ xp_assert_close(MD, expectedMD, atol=1e-15)
785
+
786
+
787
+ class TestMaxInconsts:
788
+
789
+ @skip_if_array_api(cpu_only=True)
790
+ def test_maxinconsts_empty_linkage(self, xp):
791
+ # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
792
+ Z = xp.zeros((0, 4), dtype=xp.float64)
793
+ R = xp.zeros((0, 4), dtype=xp.float64)
794
+ assert_raises(ValueError, maxinconsts, Z, R)
795
+
796
+ def test_maxinconsts_difrow_linkage(self, xp):
797
+ # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
798
+ # different numbers of clusters. Expecting exception.
799
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
800
+ R = np.random.rand(2, 4)
801
+ R = xp.asarray(R)
802
+ assert_raises(ValueError, maxinconsts, Z, R)
803
+
804
+ @skip_if_array_api(cpu_only=True)
805
+ def test_maxinconsts_one_cluster_linkage(self, xp):
806
+ # Tests maxinconsts(Z, R) on linkage with one cluster.
807
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
808
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
809
+ MD = maxinconsts(Z, R)
810
+ expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
811
+ xp_assert_close(MD, expectedMD, atol=1e-15)
812
+
813
+ @skip_if_array_api(cpu_only=True)
814
+ def test_maxinconsts_Q_linkage(self, xp):
815
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
816
+ self.check_maxinconsts_Q_linkage(method, xp)
817
+
818
+ def check_maxinconsts_Q_linkage(self, method, xp):
819
+ # Tests maxinconsts(Z, R) on the Q data set
820
+ X = xp.asarray(hierarchy_test_data.Q_X)
821
+ Z = linkage(X, method)
822
+ R = inconsistent(Z)
823
+ MD = maxinconsts(Z, R)
824
+ expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp)
825
+ xp_assert_close(MD, expectedMD, atol=1e-15)
826
+
827
+
828
+ class TestMaxRStat:
829
+
830
+ def test_maxRstat_invalid_index(self, xp):
831
+ for i in [3.3, -1, 4]:
832
+ self.check_maxRstat_invalid_index(i, xp)
833
+
834
+ def check_maxRstat_invalid_index(self, i, xp):
835
+ # Tests maxRstat(Z, R, i). Expecting exception.
836
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
837
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
838
+ if isinstance(i, int):
839
+ assert_raises(ValueError, maxRstat, Z, R, i)
840
+ else:
841
+ assert_raises(TypeError, maxRstat, Z, R, i)
842
+
843
+ @skip_if_array_api(cpu_only=True)
844
+ def test_maxRstat_empty_linkage(self, xp):
845
+ for i in range(4):
846
+ self.check_maxRstat_empty_linkage(i, xp)
847
+
848
+ def check_maxRstat_empty_linkage(self, i, xp):
849
+ # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
850
+ Z = xp.zeros((0, 4), dtype=xp.float64)
851
+ R = xp.zeros((0, 4), dtype=xp.float64)
852
+ assert_raises(ValueError, maxRstat, Z, R, i)
853
+
854
+ def test_maxRstat_difrow_linkage(self, xp):
855
+ for i in range(4):
856
+ self.check_maxRstat_difrow_linkage(i, xp)
857
+
858
+ def check_maxRstat_difrow_linkage(self, i, xp):
859
+ # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
860
+ # different numbers of clusters. Expecting exception.
861
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
862
+ R = np.random.rand(2, 4)
863
+ R = xp.asarray(R)
864
+ assert_raises(ValueError, maxRstat, Z, R, i)
865
+
866
+ @skip_if_array_api(cpu_only=True)
867
+ def test_maxRstat_one_cluster_linkage(self, xp):
868
+ for i in range(4):
869
+ self.check_maxRstat_one_cluster_linkage(i, xp)
870
+
871
+ def check_maxRstat_one_cluster_linkage(self, i, xp):
872
+ # Tests maxRstat(Z, R, i) on linkage with one cluster.
873
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
874
+ R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
875
+ MD = maxRstat(Z, R, 1)
876
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp)
877
+ xp_assert_close(MD, expectedMD, atol=1e-15)
878
+
879
+ @skip_if_array_api(cpu_only=True)
880
+ def test_maxRstat_Q_linkage(self, xp):
881
+ for method in ['single', 'complete', 'ward', 'centroid', 'median']:
882
+ for i in range(4):
883
+ self.check_maxRstat_Q_linkage(method, i, xp)
884
+
885
+ def check_maxRstat_Q_linkage(self, method, i, xp):
886
+ # Tests maxRstat(Z, R, i) on the Q data set
887
+ X = xp.asarray(hierarchy_test_data.Q_X)
888
+ Z = linkage(X, method)
889
+ R = inconsistent(Z)
890
+ MD = maxRstat(Z, R, 1)
891
+ expectedMD = calculate_maximum_inconsistencies(Z, R, 1, xp)
892
+ xp_assert_close(MD, expectedMD, atol=1e-15)
893
+
894
+
895
+ @skip_if_array_api(cpu_only=True)
896
+ class TestDendrogram:
897
+
898
+ def test_dendrogram_single_linkage_tdist(self, xp):
899
+ # Tests dendrogram calculation on single linkage of the tdist data set.
900
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
901
+ R = dendrogram(Z, no_plot=True)
902
+ leaves = R["leaves"]
903
+ assert_equal(leaves, [2, 5, 1, 0, 3, 4])
904
+
905
+ def test_valid_orientation(self, xp):
906
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
907
+ assert_raises(ValueError, dendrogram, Z, orientation="foo")
908
+
909
+ def test_labels_as_array_or_list(self, xp):
910
+ # test for gh-12418
911
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
912
+ labels = xp.asarray([1, 3, 2, 6, 4, 5])
913
+ result1 = dendrogram(Z, labels=labels, no_plot=True)
914
+ result2 = dendrogram(Z, labels=list(labels), no_plot=True)
915
+ assert result1 == result2
916
+
917
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
918
+ def test_valid_label_size(self, xp):
919
+ link = xp.asarray([
920
+ [0, 1, 1.0, 4],
921
+ [2, 3, 1.0, 5],
922
+ [4, 5, 2.0, 6],
923
+ ])
924
+ plt.figure()
925
+ with pytest.raises(ValueError) as exc_info:
926
+ dendrogram(link, labels=list(range(100)))
927
+ assert "Dimensions of Z and labels must be consistent."\
928
+ in str(exc_info.value)
929
+
930
+ with pytest.raises(
931
+ ValueError,
932
+ match="Dimensions of Z and labels must be consistent."):
933
+ dendrogram(link, labels=[])
934
+
935
+ plt.close()
936
+
937
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
938
+ def test_dendrogram_plot(self, xp):
939
+ for orientation in ['top', 'bottom', 'left', 'right']:
940
+ self.check_dendrogram_plot(orientation, xp)
941
+
942
+ def check_dendrogram_plot(self, orientation, xp):
943
+ # Tests dendrogram plotting.
944
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
945
+ expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
946
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
947
+ [0.0, 219.0, 219.0, 0.0],
948
+ [0.0, 255.0, 255.0, 219.0],
949
+ [0.0, 268.0, 268.0, 255.0],
950
+ [138.0, 295.0, 295.0, 268.0]],
951
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
952
+ [45.0, 45.0, 55.0, 55.0],
953
+ [35.0, 35.0, 50.0, 50.0],
954
+ [25.0, 25.0, 42.5, 42.5],
955
+ [10.0, 10.0, 33.75, 33.75]],
956
+ 'ivl': ['2', '5', '1', '0', '3', '4'],
957
+ 'leaves': [2, 5, 1, 0, 3, 4],
958
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
959
+ }
960
+
961
+ fig = plt.figure()
962
+ ax = fig.add_subplot(221)
963
+
964
+ # test that dendrogram accepts ax keyword
965
+ R1 = dendrogram(Z, ax=ax, orientation=orientation)
966
+ R1['dcoord'] = np.asarray(R1['dcoord'])
967
+ assert_equal(R1, expected)
968
+
969
+ # test that dendrogram accepts and handle the leaf_font_size and
970
+ # leaf_rotation keywords
971
+ dendrogram(Z, ax=ax, orientation=orientation,
972
+ leaf_font_size=20, leaf_rotation=90)
973
+ testlabel = (
974
+ ax.get_xticklabels()[0]
975
+ if orientation in ['top', 'bottom']
976
+ else ax.get_yticklabels()[0]
977
+ )
978
+ assert_equal(testlabel.get_rotation(), 90)
979
+ assert_equal(testlabel.get_size(), 20)
980
+ dendrogram(Z, ax=ax, orientation=orientation,
981
+ leaf_rotation=90)
982
+ testlabel = (
983
+ ax.get_xticklabels()[0]
984
+ if orientation in ['top', 'bottom']
985
+ else ax.get_yticklabels()[0]
986
+ )
987
+ assert_equal(testlabel.get_rotation(), 90)
988
+ dendrogram(Z, ax=ax, orientation=orientation,
989
+ leaf_font_size=20)
990
+ testlabel = (
991
+ ax.get_xticklabels()[0]
992
+ if orientation in ['top', 'bottom']
993
+ else ax.get_yticklabels()[0]
994
+ )
995
+ assert_equal(testlabel.get_size(), 20)
996
+ plt.close()
997
+
998
+ # test plotting to gca (will import pylab)
999
+ R2 = dendrogram(Z, orientation=orientation)
1000
+ plt.close()
1001
+ R2['dcoord'] = np.asarray(R2['dcoord'])
1002
+ assert_equal(R2, expected)
1003
+
1004
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
1005
+ def test_dendrogram_truncate_mode(self, xp):
1006
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
1007
+
1008
+ R = dendrogram(Z, 2, 'lastp', show_contracted=True)
1009
+ plt.close()
1010
+ R['dcoord'] = np.asarray(R['dcoord'])
1011
+ assert_equal(R, {'color_list': ['C0'],
1012
+ 'dcoord': [[0.0, 295.0, 295.0, 0.0]],
1013
+ 'icoord': [[5.0, 5.0, 15.0, 15.0]],
1014
+ 'ivl': ['(2)', '(4)'],
1015
+ 'leaves': [6, 9],
1016
+ 'leaves_color_list': ['C0', 'C0'],
1017
+ })
1018
+
1019
+ R = dendrogram(Z, 2, 'mtica', show_contracted=True)
1020
+ plt.close()
1021
+ R['dcoord'] = np.asarray(R['dcoord'])
1022
+ assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
1023
+ 'dcoord': [[0.0, 138.0, 138.0, 0.0],
1024
+ [0.0, 255.0, 255.0, 0.0],
1025
+ [0.0, 268.0, 268.0, 255.0],
1026
+ [138.0, 295.0, 295.0, 268.0]],
1027
+ 'icoord': [[5.0, 5.0, 15.0, 15.0],
1028
+ [35.0, 35.0, 45.0, 45.0],
1029
+ [25.0, 25.0, 40.0, 40.0],
1030
+ [10.0, 10.0, 32.5, 32.5]],
1031
+ 'ivl': ['2', '5', '1', '0', '(2)'],
1032
+ 'leaves': [2, 5, 1, 0, 7],
1033
+ 'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
1034
+ })
1035
+
1036
+ def test_dendrogram_colors(self, xp):
1037
+ # Tests dendrogram plots with alternate colors
1038
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
1039
+
1040
+ set_link_color_palette(['c', 'm', 'y', 'k'])
1041
+ R = dendrogram(Z, no_plot=True,
1042
+ above_threshold_color='g', color_threshold=250)
1043
+ set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
1044
+
1045
+ color_list = R['color_list']
1046
+ assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
1047
+
1048
+ # reset color palette (global list)
1049
+ set_link_color_palette(None)
1050
+
1051
+ def test_dendrogram_leaf_colors_zero_dist(self, xp):
1052
+ # tests that the colors of leafs are correct for tree
1053
+ # with two identical points
1054
+ x = xp.asarray([[1, 0, 0],
1055
+ [0, 0, 1],
1056
+ [0, 2, 0],
1057
+ [0, 0, 1],
1058
+ [0, 1, 0],
1059
+ [0, 1, 0]])
1060
+ z = linkage(x, "single")
1061
+ d = dendrogram(z, no_plot=True)
1062
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
1063
+ colors = d["leaves_color_list"]
1064
+ assert_equal(colors, exp_colors)
1065
+
1066
+ def test_dendrogram_leaf_colors(self, xp):
1067
+ # tests that the colors are correct for a tree
1068
+ # with two near points ((0, 0, 1.1) and (0, 0, 1))
1069
+ x = xp.asarray([[1, 0, 0],
1070
+ [0, 0, 1.1],
1071
+ [0, 2, 0],
1072
+ [0, 0, 1],
1073
+ [0, 1, 0],
1074
+ [0, 1, 0]])
1075
+ z = linkage(x, "single")
1076
+ d = dendrogram(z, no_plot=True)
1077
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
1078
+ colors = d["leaves_color_list"]
1079
+ assert_equal(colors, exp_colors)
1080
+
1081
+
1082
+ def calculate_maximum_distances(Z, xp):
1083
+ # Used for testing correctness of maxdists.
1084
+ n = Z.shape[0] + 1
1085
+ B = xp.zeros((n-1,), dtype=Z.dtype)
1086
+ q = xp.zeros((3,))
1087
+ for i in range(0, n - 1):
1088
+ q[:] = 0.0
1089
+ left = Z[i, 0]
1090
+ right = Z[i, 1]
1091
+ if left >= n:
1092
+ q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
1093
+ if right >= n:
1094
+ q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
1095
+ q[2] = Z[i, 2]
1096
+ B[i] = xp.max(q)
1097
+ return B
1098
+
1099
+
1100
+ def calculate_maximum_inconsistencies(Z, R, k=3, xp=np):
1101
+ # Used for testing correctness of maxinconsts.
1102
+ n = Z.shape[0] + 1
1103
+ dtype = xp.result_type(Z, R)
1104
+ B = xp.zeros((n-1,), dtype=dtype)
1105
+ q = xp.zeros((3,))
1106
+ for i in range(0, n - 1):
1107
+ q[:] = 0.0
1108
+ left = Z[i, 0]
1109
+ right = Z[i, 1]
1110
+ if left >= n:
1111
+ q[0] = B[xp.asarray(left, dtype=xp.int64) - n]
1112
+ if right >= n:
1113
+ q[1] = B[xp.asarray(right, dtype=xp.int64) - n]
1114
+ q[2] = R[i, k]
1115
+ B[i] = xp.max(q)
1116
+ return B
1117
+
1118
+
1119
+ @skip_if_array_api(cpu_only=True)
1120
+ def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp):
1121
+ assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]]))
1122
+
1123
+
1124
+ def test_euclidean_linkage_value_error(xp):
1125
+ for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
1126
+ assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]),
1127
+ method=method, metric='cityblock')
1128
+
1129
+
1130
+ @skip_if_array_api(cpu_only=True)
1131
+ def test_2x2_linkage(xp):
1132
+ Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean')
1133
+ Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean')
1134
+ xp_assert_close(Z1, Z2, rtol=1e-15)
1135
+
1136
+
1137
+ @skip_if_array_api(cpu_only=True)
1138
+ def test_node_compare(xp):
1139
+ np.random.seed(23)
1140
+ nobs = 50
1141
+ X = np.random.randn(nobs, 4)
1142
+ X = xp.asarray(X)
1143
+ Z = scipy.cluster.hierarchy.ward(X)
1144
+ tree = to_tree(Z)
1145
+ assert_(tree > tree.get_left())
1146
+ assert_(tree.get_right() > tree.get_left())
1147
+ assert_(tree.get_right() == tree.get_right())
1148
+ assert_(tree.get_right() != tree.get_left())
1149
+
1150
+
1151
+ @skip_if_array_api(np_only=True, reasons=['`cut_tree` uses non-standard indexing'])
1152
+ def test_cut_tree(xp):
1153
+ np.random.seed(23)
1154
+ nobs = 50
1155
+ X = np.random.randn(nobs, 4)
1156
+ X = xp.asarray(X)
1157
+ Z = scipy.cluster.hierarchy.ward(X)
1158
+ cutree = cut_tree(Z)
1159
+
1160
+ # cutree.dtype varies between int32 and int64 over platforms
1161
+ xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False)
1162
+ xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False)
1163
+ assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1))
1164
+
1165
+ xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15)
1166
+ xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15)
1167
+ xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15)
1168
+
1169
+ nodes = _order_cluster_tree(Z)
1170
+ heights = xp.asarray([node.dist for node in nodes])
1171
+
1172
+ xp_assert_close(cutree[:, np.searchsorted(heights, [5])],
1173
+ cut_tree(Z, height=5), rtol=1e-15)
1174
+ xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])],
1175
+ cut_tree(Z, height=[5, 10]), rtol=1e-15)
1176
+ xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])],
1177
+ cut_tree(Z, height=[10, 5]), rtol=1e-15)
1178
+
1179
+
1180
+ @skip_if_array_api(cpu_only=True)
1181
+ def test_optimal_leaf_ordering(xp):
1182
+ # test with the distance vector y
1183
+ Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)),
1184
+ xp.asarray(hierarchy_test_data.ytdist))
1185
+ expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
1186
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
1187
+
1188
+ # test with the observation matrix X
1189
+ Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'),
1190
+ xp.asarray(hierarchy_test_data.X))
1191
+ expectedZ = hierarchy_test_data.linkage_X_ward_olo
1192
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
1193
+
1194
+
1195
+ @skip_if_array_api(np_only=True, reasons=['`Heap` only supports NumPy backend'])
1196
+ def test_Heap(xp):
1197
+ values = xp.asarray([2, -1, 0, -1.5, 3])
1198
+ heap = Heap(values)
1199
+
1200
+ pair = heap.get_min()
1201
+ assert_equal(pair['key'], 3)
1202
+ assert_equal(pair['value'], -1.5)
1203
+
1204
+ heap.remove_min()
1205
+ pair = heap.get_min()
1206
+ assert_equal(pair['key'], 1)
1207
+ assert_equal(pair['value'], -1)
1208
+
1209
+ heap.change_value(1, 2.5)
1210
+ pair = heap.get_min()
1211
+ assert_equal(pair['key'], 2)
1212
+ assert_equal(pair['value'], 0)
1213
+
1214
+ heap.remove_min()
1215
+ heap.remove_min()
1216
+
1217
+ heap.change_value(1, 10)
1218
+ pair = heap.get_min()
1219
+ assert_equal(pair['key'], 4)
1220
+ assert_equal(pair['value'], 3)
1221
+
1222
+ heap.remove_min()
1223
+ pair = heap.get_min()
1224
+ assert_equal(pair['key'], 1)
1225
+ assert_equal(pair['value'], 10)
env-llmeval/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import sys
3
+ from copy import deepcopy
4
+
5
+ import numpy as np
6
+ from numpy.testing import (
7
+ assert_array_equal, assert_equal, assert_, suppress_warnings
8
+ )
9
+ import pytest
10
+ from pytest import raises as assert_raises
11
+
12
+ from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
13
+ ClusterError, _krandinit)
14
+ from scipy.cluster import _vq
15
+ from scipy.conftest import array_api_compatible
16
+ from scipy.sparse._sputils import matrix
17
+
18
+ from scipy._lib._array_api import (
19
+ SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal
20
+ )
21
+
22
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")]
23
+ skip_if_array_api = pytest.mark.skip_if_array_api
24
+
25
+ TESTDATA_2D = np.array([
26
+ -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
27
+ -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
28
+ 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
29
+ -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
30
+ -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
31
+ -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
32
+ 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
33
+ -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
34
+ -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
35
+ -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
36
+ 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
37
+ -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
38
+ 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
39
+ -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
40
+ 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
41
+ -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
42
+ 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
43
+ 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
44
+ -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
45
+ 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
46
+ -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
47
+ -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
48
+ -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
49
+ 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
50
+ -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
51
+ 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
52
+ 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
53
+ -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
54
+ 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
55
+ 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
56
+ -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
57
+ -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
58
+ 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
59
+ -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
60
+ -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
61
+ -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
62
+ 2.11]).reshape((200, 2))
63
+
64
+
65
+ # Global data
66
+ X = np.array([[3.0, 3], [4, 3], [4, 2],
67
+ [9, 2], [5, 1], [6, 2], [9, 4],
68
+ [5, 2], [5, 4], [7, 4], [6, 5]])
69
+
70
+ CODET1 = np.array([[3.0000, 3.0000],
71
+ [6.2000, 4.0000],
72
+ [5.8000, 1.8000]])
73
+
74
+ CODET2 = np.array([[11.0/3, 8.0/3],
75
+ [6.7500, 4.2500],
76
+ [6.2500, 1.7500]])
77
+
78
+ LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
79
+
80
+
81
+ class TestWhiten:
82
+
83
+ def test_whiten(self, xp):
84
+ desired = xp.asarray([[5.08738849, 2.97091878],
85
+ [3.19909255, 0.69660580],
86
+ [4.51041982, 0.02640918],
87
+ [4.38567074, 0.95120889],
88
+ [2.32191480, 1.63195503]])
89
+
90
+ obs = xp.asarray([[0.98744510, 0.82766775],
91
+ [0.62093317, 0.19406729],
92
+ [0.87545741, 0.00735733],
93
+ [0.85124403, 0.26499712],
94
+ [0.45067590, 0.45464607]])
95
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
96
+
97
+ def test_whiten_zero_std(self, xp):
98
+ desired = xp.asarray([[0., 1.0, 2.86666544],
99
+ [0., 1.0, 1.32460034],
100
+ [0., 1.0, 3.74382172]])
101
+
102
+ obs = xp.asarray([[0., 1., 0.74109533],
103
+ [0., 1., 0.34243798],
104
+ [0., 1., 0.96785929]])
105
+ with warnings.catch_warnings(record=True) as w:
106
+ warnings.simplefilter('always')
107
+
108
+ xp_assert_close(whiten(obs), desired, rtol=1e-5)
109
+
110
+ assert_equal(len(w), 1)
111
+ assert_(issubclass(w[-1].category, RuntimeWarning))
112
+
113
+ def test_whiten_not_finite(self, xp):
114
+ for bad_value in xp.nan, xp.inf, -xp.inf:
115
+ obs = xp.asarray([[0.98744510, bad_value],
116
+ [0.62093317, 0.19406729],
117
+ [0.87545741, 0.00735733],
118
+ [0.85124403, 0.26499712],
119
+ [0.45067590, 0.45464607]])
120
+ assert_raises(ValueError, whiten, obs)
121
+
122
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
123
+ reason='`np.matrix` unsupported in array API mode')
124
+ def test_whiten_not_finite_matrix(self, xp):
125
+ for bad_value in np.nan, np.inf, -np.inf:
126
+ obs = matrix([[0.98744510, bad_value],
127
+ [0.62093317, 0.19406729],
128
+ [0.87545741, 0.00735733],
129
+ [0.85124403, 0.26499712],
130
+ [0.45067590, 0.45464607]])
131
+ assert_raises(ValueError, whiten, obs)
132
+
133
+
134
+ class TestVq:
135
+
136
+ @skip_if_array_api(cpu_only=True)
137
+ def test_py_vq(self, xp):
138
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
139
+ # label1.dtype varies between int32 and int64 over platforms
140
+ label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0]
141
+ xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64),
142
+ check_dtype=False)
143
+
144
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
145
+ reason='`np.matrix` unsupported in array API mode')
146
+ def test_py_vq_matrix(self, xp):
147
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
148
+ # label1.dtype varies between int32 and int64 over platforms
149
+ label1 = py_vq(matrix(X), matrix(initc))[0]
150
+ assert_array_equal(label1, LABEL1)
151
+
152
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
153
+ def test_vq(self, xp):
154
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
155
+ label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc))
156
+ assert_array_equal(label1, LABEL1)
157
+ _, _ = vq(xp.asarray(X), xp.asarray(initc))
158
+
159
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
160
+ reason='`np.matrix` unsupported in array API mode')
161
+ def test_vq_matrix(self, xp):
162
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
163
+ label1, _ = _vq.vq(matrix(X), matrix(initc))
164
+ assert_array_equal(label1, LABEL1)
165
+ _, _ = vq(matrix(X), matrix(initc))
166
+
167
+ @skip_if_array_api(cpu_only=True)
168
+ def test_vq_1d(self, xp):
169
+ # Test special rank 1 vq algo, python implementation.
170
+ data = X[:, 0]
171
+ initc = data[:3]
172
+ a, b = _vq.vq(data, initc)
173
+ data = xp.asarray(data)
174
+ initc = xp.asarray(initc)
175
+ ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
176
+ # ta.dtype varies between int32 and int64 over platforms
177
+ xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False)
178
+ xp_assert_equal(tb, xp.asarray(b))
179
+
180
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
181
+ def test__vq_sametype(self, xp):
182
+ a = xp.asarray([1.0, 2.0], dtype=xp.float64)
183
+ b = a.astype(xp.float32)
184
+ assert_raises(TypeError, _vq.vq, a, b)
185
+
186
+ @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend'])
187
+ def test__vq_invalid_type(self, xp):
188
+ a = xp.asarray([1, 2], dtype=int)
189
+ assert_raises(TypeError, _vq.vq, a, a)
190
+
191
+ @skip_if_array_api(cpu_only=True)
192
+ def test_vq_large_nfeat(self, xp):
193
+ X = np.random.rand(20, 20)
194
+ code_book = np.random.rand(3, 20)
195
+
196
+ codes0, dis0 = _vq.vq(X, code_book)
197
+ codes1, dis1 = py_vq(
198
+ xp.asarray(X), xp.asarray(code_book)
199
+ )
200
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
201
+ # codes1.dtype varies between int32 and int64 over platforms
202
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
203
+
204
+ X = X.astype(np.float32)
205
+ code_book = code_book.astype(np.float32)
206
+
207
+ codes0, dis0 = _vq.vq(X, code_book)
208
+ codes1, dis1 = py_vq(
209
+ xp.asarray(X), xp.asarray(code_book)
210
+ )
211
+ xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5)
212
+ # codes1.dtype varies between int32 and int64 over platforms
213
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
214
+
215
+ @skip_if_array_api(cpu_only=True)
216
+ def test_vq_large_features(self, xp):
217
+ X = np.random.rand(10, 5) * 1000000
218
+ code_book = np.random.rand(2, 5) * 1000000
219
+
220
+ codes0, dis0 = _vq.vq(X, code_book)
221
+ codes1, dis1 = py_vq(
222
+ xp.asarray(X), xp.asarray(code_book)
223
+ )
224
+ xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
225
+ # codes1.dtype varies between int32 and int64 over platforms
226
+ xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
227
+
228
+
229
+ # Whole class skipped on GPU for now;
230
+ # once pdist/cdist are hooked up for CuPy, more tests will work
231
+ @skip_if_array_api(cpu_only=True)
232
+ class TestKMean:
233
+
234
+ def test_large_features(self, xp):
235
+ # Generate a data set with large values, and run kmeans on it to
236
+ # (regression for 1077).
237
+ d = 300
238
+ n = 100
239
+
240
+ m1 = np.random.randn(d)
241
+ m2 = np.random.randn(d)
242
+ x = 10000 * np.random.randn(n, d) - 20000 * m1
243
+ y = 10000 * np.random.randn(n, d) + 20000 * m2
244
+
245
+ data = np.empty((x.shape[0] + y.shape[0], d), np.float64)
246
+ data[:x.shape[0]] = x
247
+ data[x.shape[0]:] = y
248
+
249
+ kmeans(xp.asarray(data), 2)
250
+
251
+ def test_kmeans_simple(self, xp):
252
+ np.random.seed(54321)
253
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
254
+ code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0]
255
+ xp_assert_close(code1, xp.asarray(CODET2))
256
+
257
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
258
+ reason='`np.matrix` unsupported in array API mode')
259
+ def test_kmeans_simple_matrix(self, xp):
260
+ np.random.seed(54321)
261
+ initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
262
+ code1 = kmeans(matrix(X), matrix(initc), iter=1)[0]
263
+ xp_assert_close(code1, CODET2)
264
+
265
+ def test_kmeans_lost_cluster(self, xp):
266
+ # This will cause kmeans to have a cluster with no points.
267
+ data = xp.asarray(TESTDATA_2D)
268
+ initk = xp.asarray([[-1.8127404, -0.67128041],
269
+ [2.04621601, 0.07401111],
270
+ [-2.31149087, -0.05160469]])
271
+
272
+ kmeans(data, initk)
273
+ with suppress_warnings() as sup:
274
+ sup.filter(UserWarning,
275
+ "One of the clusters is empty. Re-run kmeans with a "
276
+ "different initialization")
277
+ kmeans2(data, initk, missing='warn')
278
+
279
+ assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
280
+
281
+ def test_kmeans2_simple(self, xp):
282
+ np.random.seed(12345678)
283
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
284
+ arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
285
+ for tp in arrays:
286
+ code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
287
+ code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
288
+
289
+ xp_assert_close(code1, xp.asarray(CODET1))
290
+ xp_assert_close(code2, xp.asarray(CODET2))
291
+
292
+ @pytest.mark.skipif(SCIPY_ARRAY_API,
293
+ reason='`np.matrix` unsupported in array API mode')
294
+ def test_kmeans2_simple_matrix(self, xp):
295
+ np.random.seed(12345678)
296
+ initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
297
+ code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0]
298
+ code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0]
299
+
300
+ xp_assert_close(code1, CODET1)
301
+ xp_assert_close(code2, CODET2)
302
+
303
+ def test_kmeans2_rank1(self, xp):
304
+ data = xp.asarray(TESTDATA_2D)
305
+ data1 = data[:, 0]
306
+
307
+ initc = data1[:3]
308
+ code = copy(initc, xp=xp)
309
+ kmeans2(data1, code, iter=1)[0]
310
+ kmeans2(data1, code, iter=2)[0]
311
+
312
+ def test_kmeans2_rank1_2(self, xp):
313
+ data = xp.asarray(TESTDATA_2D)
314
+ data1 = data[:, 0]
315
+ kmeans2(data1, 2, iter=1)
316
+
317
+ def test_kmeans2_high_dim(self, xp):
318
+ # test kmeans2 when the number of dimensions exceeds the number
319
+ # of input points
320
+ data = xp.asarray(TESTDATA_2D)
321
+ data = xp.reshape(data, (20, 20))[:10, :]
322
+ kmeans2(data, 2)
323
+
324
+ def test_kmeans2_init(self, xp):
325
+ np.random.seed(12345)
326
+ data = xp.asarray(TESTDATA_2D)
327
+ k = 3
328
+
329
+ kmeans2(data, k, minit='points')
330
+ kmeans2(data[:, 1], k, minit='points') # special case (1-D)
331
+
332
+ kmeans2(data, k, minit='++')
333
+ kmeans2(data[:, 1], k, minit='++') # special case (1-D)
334
+
335
+ # minit='random' can give warnings, filter those
336
+ with suppress_warnings() as sup:
337
+ sup.filter(message="One of the clusters is empty. Re-run.")
338
+ kmeans2(data, k, minit='random')
339
+ kmeans2(data[:, 1], k, minit='random') # special case (1-D)
340
+
341
+ @pytest.mark.skipif(sys.platform == 'win32',
342
+ reason='Fails with MemoryError in Wine.')
343
+ def test_krandinit(self, xp):
344
+ data = xp.asarray(TESTDATA_2D)
345
+ datas = [xp.reshape(data, (200, 2)),
346
+ xp.reshape(data, (20, 20))[:10, :]]
347
+ k = int(1e6)
348
+ for data in datas:
349
+ rng = np.random.default_rng(1234)
350
+ init = _krandinit(data, k, rng, xp)
351
+ orig_cov = cov(data.T)
352
+ init_cov = cov(init.T)
353
+ xp_assert_close(orig_cov, init_cov, atol=1e-2)
354
+
355
+ def test_kmeans2_empty(self, xp):
356
+ # Regression test for gh-1032.
357
+ assert_raises(ValueError, kmeans2, xp.asarray([]), 2)
358
+
359
+ def test_kmeans_0k(self, xp):
360
+ # Regression test for gh-1073: fail when k arg is 0.
361
+ assert_raises(ValueError, kmeans, xp.asarray(X), 0)
362
+ assert_raises(ValueError, kmeans2, xp.asarray(X), 0)
363
+ assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([]))
364
+
365
+ def test_kmeans_large_thres(self, xp):
366
+ # Regression test for gh-1774
367
+ x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64)
368
+ res = kmeans(x, 1, thresh=1e16)
369
+ xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64))
370
+ xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()])
371
+
372
+ def test_kmeans2_kpp_low_dim(self, xp):
373
+ # Regression test for gh-11462
374
+ prev_res = xp.asarray([[-1.95266667, 0.898],
375
+ [-3.153375, 3.3945]], dtype=xp.float64)
376
+ np.random.seed(42)
377
+ res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++')
378
+ xp_assert_close(res, prev_res)
379
+
380
+ def test_kmeans2_kpp_high_dim(self, xp):
381
+ # Regression test for gh-11462
382
+ n_dim = 100
383
+ size = 10
384
+ centers = np.vstack([5 * np.ones(n_dim),
385
+ -5 * np.ones(n_dim)])
386
+ np.random.seed(42)
387
+ data = np.vstack([
388
+ np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
389
+ np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
390
+ ])
391
+
392
+ data = xp.asarray(data)
393
+ res, _ = kmeans2(data, 2, minit='++')
394
+ xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers)))
395
+
396
+ def test_kmeans_diff_convergence(self, xp):
397
+ # Regression test for gh-8727
398
+ obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64)
399
+ res = kmeans(obs, xp.asarray([-3., 0.99]))
400
+ xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64))
401
+ xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()])
402
+
403
+ def test_kmeans_and_kmeans2_random_seed(self, xp):
404
+
405
+ seed_list = [
406
+ 1234, np.random.RandomState(1234), np.random.default_rng(1234)
407
+ ]
408
+
409
+ for seed in seed_list:
410
+ seed1 = deepcopy(seed)
411
+ seed2 = deepcopy(seed)
412
+ data = xp.asarray(TESTDATA_2D)
413
+ # test for kmeans
414
+ res1, _ = kmeans(data, 2, seed=seed1)
415
+ res2, _ = kmeans(data, 2, seed=seed2)
416
+ xp_assert_close(res1, res2, xp=xp) # should be same results
417
+ # test for kmeans2
418
+ for minit in ["random", "points", "++"]:
419
+ res1, _ = kmeans2(data, 2, minit=minit, seed=seed1)
420
+ res2, _ = kmeans2(data, 2, minit=minit, seed=seed2)
421
+ xp_assert_close(res1, res2, xp=xp) # should be same results
env-llmeval/lib/python3.10/site-packages/scipy/cluster/vq.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
3
+ ====================================================================
4
+
5
+ Provides routines for k-means clustering, generating code books
6
+ from k-means models and quantizing vectors by comparing them with
7
+ centroids in a code book.
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ whiten -- Normalize a group of observations so each feature has unit variance
13
+ vq -- Calculate code book membership of a set of observation vectors
14
+ kmeans -- Perform k-means on a set of observation vectors forming k clusters
15
+ kmeans2 -- A different implementation of k-means with more methods
16
+ -- for initializing centroids
17
+
18
+ Background information
19
+ ----------------------
20
+ The k-means algorithm takes as input the number of clusters to
21
+ generate, k, and a set of observation vectors to cluster. It
22
+ returns a set of centroids, one for each of the k clusters. An
23
+ observation vector is classified with the cluster number or
24
+ centroid index of the centroid closest to it.
25
+
26
+ A vector v belongs to cluster i if it is closer to centroid i than
27
+ any other centroid. If v belongs to i, we say centroid i is the
28
+ dominating centroid of v. The k-means algorithm tries to
29
+ minimize distortion, which is defined as the sum of the squared distances
30
+ between each observation vector and its dominating centroid.
31
+ The minimization is achieved by iteratively reclassifying
32
+ the observations into clusters and recalculating the centroids until
33
+ a configuration is reached in which the centroids are stable. One can
34
+ also define a maximum number of iterations.
35
+
36
+ Since vector quantization is a natural application for k-means,
37
+ information theory terminology is often used. The centroid index
38
+ or cluster index is also referred to as a "code" and the table
39
+ mapping codes to centroids and, vice versa, is often referred to as a
40
+ "code book". The result of k-means, a set of centroids, can be
41
+ used to quantize vectors. Quantization aims to find an encoding of
42
+ vectors that reduces the expected distortion.
43
+
44
+ All routines expect obs to be an M by N array, where the rows are
45
+ the observation vectors. The codebook is a k by N array, where the
46
+ ith row is the centroid of code word i. The observation vectors
47
+ and centroids have the same feature dimension.
48
+
49
+ As an example, suppose we wish to compress a 24-bit color image
50
+ (each pixel is represented by one byte for red, one for blue, and
51
+ one for green) before sending it over the web. By using a smaller
52
+ 8-bit encoding, we can reduce the amount of data by two
53
+ thirds. Ideally, the colors for each of the 256 possible 8-bit
54
+ encoding values should be chosen to minimize distortion of the
55
+ color. Running k-means with k=256 generates a code book of 256
56
+ codes, which fills up all possible 8-bit sequences. Instead of
57
+ sending a 3-byte value for each pixel, the 8-bit centroid index
58
+ (or code word) of the dominating centroid is transmitted. The code
59
+ book is also sent over the wire so each 8-bit code can be
60
+ translated back to a 24-bit pixel value representation. If the
61
+ image of interest was of an ocean, we would expect many 24-bit
62
+ blues to be represented by 8-bit codes. If it was an image of a
63
+ human face, more flesh-tone colors would be represented in the
64
+ code book.
65
+
66
+ """
67
+ import warnings
68
+ import numpy as np
69
+ from collections import deque
70
+ from scipy._lib._array_api import (
71
+ _asarray, array_namespace, size, atleast_nd, copy, cov
72
+ )
73
+ from scipy._lib._util import check_random_state, rng_integers
74
+ from scipy.spatial.distance import cdist
75
+
76
+ from . import _vq
77
+
78
+ __docformat__ = 'restructuredtext'
79
+
80
+ __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
81
+
82
+
83
+ class ClusterError(Exception):
84
+ pass
85
+
86
+
87
+ def whiten(obs, check_finite=True):
88
+ """
89
+ Normalize a group of observations on a per feature basis.
90
+
91
+ Before running k-means, it is beneficial to rescale each feature
92
+ dimension of the observation set by its standard deviation (i.e. "whiten"
93
+ it - as in "white noise" where each frequency has equal power).
94
+ Each feature is divided by its standard deviation across all observations
95
+ to give it unit variance.
96
+
97
+ Parameters
98
+ ----------
99
+ obs : ndarray
100
+ Each row of the array is an observation. The
101
+ columns are the features seen during each observation.
102
+
103
+ >>> # f0 f1 f2
104
+ >>> obs = [[ 1., 1., 1.], #o0
105
+ ... [ 2., 2., 2.], #o1
106
+ ... [ 3., 3., 3.], #o2
107
+ ... [ 4., 4., 4.]] #o3
108
+
109
+ check_finite : bool, optional
110
+ Whether to check that the input matrices contain only finite numbers.
111
+ Disabling may give a performance gain, but may result in problems
112
+ (crashes, non-termination) if the inputs do contain infinities or NaNs.
113
+ Default: True
114
+
115
+ Returns
116
+ -------
117
+ result : ndarray
118
+ Contains the values in `obs` scaled by the standard deviation
119
+ of each column.
120
+
121
+ Examples
122
+ --------
123
+ >>> import numpy as np
124
+ >>> from scipy.cluster.vq import whiten
125
+ >>> features = np.array([[1.9, 2.3, 1.7],
126
+ ... [1.5, 2.5, 2.2],
127
+ ... [0.8, 0.6, 1.7,]])
128
+ >>> whiten(features)
129
+ array([[ 4.17944278, 2.69811351, 7.21248917],
130
+ [ 3.29956009, 2.93273208, 9.33380951],
131
+ [ 1.75976538, 0.7038557 , 7.21248917]])
132
+
133
+ """
134
+ xp = array_namespace(obs)
135
+ obs = _asarray(obs, check_finite=check_finite, xp=xp)
136
+ std_dev = xp.std(obs, axis=0)
137
+ zero_std_mask = std_dev == 0
138
+ if xp.any(zero_std_mask):
139
+ std_dev[zero_std_mask] = 1.0
140
+ warnings.warn("Some columns have standard deviation zero. "
141
+ "The values of these columns will not change.",
142
+ RuntimeWarning, stacklevel=2)
143
+ return obs / std_dev
144
+
145
+
146
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Each observation vector in the 'M' by 'N' `obs` array is compared
    with the centroids in the code book and given the code of the
    closest centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the `whiten` function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the `whiten` function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.vq import vq
    >>> code_book = np.array([[1., 1., 1.],
    ...                       [2., 2., 2.]])
    >>> features = np.array([[1.9, 2.3, 1.7],
    ...                      [1.5, 2.5, 2.2],
    ...                      [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))

    """
    xp = array_namespace(obs, code_book)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
    common_dtype = xp.result_type(obs, code_book)

    if not xp.isdtype(common_dtype, kind='real floating'):
        # Non-float input: fall back to the pure-Python implementation.
        # The inputs were already validated above, so skip the finite check.
        return py_vq(obs, code_book, check_finite=False)

    # The accelerated C routine only understands NumPy arrays, so cast to
    # the common dtype, convert to NumPy, run it, and convert the results
    # back to the caller's array namespace.
    obs_np = np.asarray(xp.astype(obs, common_dtype, copy=False))
    book_np = np.asarray(xp.astype(code_book, common_dtype, copy=False))
    codes, dists = _vq.vq(obs_np, book_np)
    return xp.asarray(codes), xp.asarray(dists)
218
+
219
+
220
def py_vq(obs, code_book, check_finite=True):
    """ Python version of vq algorithm.

    Computes the Euclidean distance between every observation and every
    entry of the code book, then assigns each observation the index of
    its nearest code.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format than obs. Should have same number of
        features (e.g., columns) than obs.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    mind_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    This function is slower than the C version but works for all input
    types; it is used as a fallback when the C routine cannot handle the
    input dtype.

    """
    xp = array_namespace(obs, code_book)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)

    if obs.ndim != code_book.ndim:
        raise ValueError("Observation and code_book should have the same rank")

    if obs.ndim == 1:
        # Promote 1-D inputs to single-feature column form so cdist
        # treats each scalar as one observation.
        obs = obs[:, xp.newaxis]
        code_book = code_book[:, xp.newaxis]

    # Once `cdist` has array API support, this `xp.asarray` call can be removed
    pairwise = xp.asarray(cdist(obs, code_book))
    nearest = xp.argmin(pairwise, axis=1)
    nearest_dist = xp.min(pairwise, axis=1)
    return nearest, nearest_dist
273
+
274
+
275
def _kmeans(obs, guess, thresh=1e-5, xp=None):
    """ "raw" version of k-means.

    Repeatedly assigns observations to their nearest code and recomputes
    each code as the centroid of its members, stopping once the average
    distortion changes by no more than `thresh` between iterations.

    Parameters
    ----------
    obs : ndarray
        Each row is one observation; expected to be pre-whitened.
    guess : ndarray
        Initial code book, one centroid per row.
    thresh : float, optional
        Convergence threshold on the change in average distortion.
    xp : array namespace, optional
        Array API namespace of `obs`/`guess`; defaults to NumPy.

    Returns
    -------
    code_book
        The lowest distortion codebook found.
    avg_dist
        The average distance a observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means

    Examples
    --------
    Note: not whitened in this example.

    >>> import numpy as np
    >>> from scipy.cluster.vq import _kmeans
    >>> features = np.array([[ 1.9, 2.3],
    ...                      [ 1.5, 2.5],
    ...                      [ 0.8, 0.6],
    ...                      [ 0.4, 1.8],
    ...                      [ 1.0, 1.0]])
    >>> book = np.array((features[0], features[2]))
    >>> _kmeans(features, book)
    (array([[ 1.7       ,  2.4       ],
           [ 0.73333333,  1.13333333]]), 0.40563916697728591)

    """
    xp = np if xp is None else xp
    code_book = guess
    diff = xp.inf
    # Keep only the last two average distortions; their difference is the
    # convergence measure.
    prev_avg_dists = deque([diff], maxlen=2)
    while diff > thresh:
        # compute membership and distances between obs and code_book
        obs_code, distort = vq(obs, code_book, check_finite=False)
        prev_avg_dists.append(xp.mean(distort, axis=-1))
        # recalc code_book as centroids of associated obs
        # _vq.update_cluster_means is a NumPy-only C routine, so round-trip
        # the arrays through NumPy and back to the caller's namespace.
        obs = np.asarray(obs)
        obs_code = np.asarray(obs_code)
        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
                                                          code_book.shape[0])
        obs = xp.asarray(obs)
        obs_code = xp.asarray(obs_code)
        code_book = xp.asarray(code_book)
        has_members = xp.asarray(has_members)
        # Drop centroids that ended up with no assigned observations.
        code_book = code_book[has_members]
        diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])

    return code_book, prev_avg_dists[1]
328
+
329
+
330
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
           *, seed=None):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the classification of the observations
    into clusters and updates the cluster centroids until the position of
    the centroids is stable over successive iterations. Stability is
    measured by comparing the absolute change in the average Euclidean
    distance between the observations and their corresponding centroids
    against a threshold. This yields a code book mapping centroids to
    codes and vice versa.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector; the columns
        are the features seen during each observation. The features must
        be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate (initial centroids are then
        chosen by randomly selecting observations), or a k by N array
        specifying the initial k centroids explicitly.
    iter : int, optional
        The number of times to run k-means with fresh random starts,
        returning the codebook with the lowest distortion. Ignored when
        initial centroids are specified with an array. This is *not* the
        number of iterations of the k-means algorithm itself.
    thresh : float, optional
        Terminates a k-means run if the change in distortion since the
        last iteration is less than or equal to this threshold.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        Seed for initializing the pseudo-random number generator. An
        existing ``Generator``/``RandomState`` instance is used as-is; an
        int seeds a new ``RandomState``; None (the default) uses the
        ``numpy.random.RandomState`` singleton.

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids representing the lowest distortion
        seen, not necessarily the globally minimal distortion. The number
        of centroids may be lower than ``k_or_guess`` because centroids
        assigned to no observations are removed during iterations.
    distortion : float
        The mean (non-squared) Euclidean distance between the observations
        passed and the centroids generated. Note the difference to the
        standard definition of distortion in the context of the k-means
        algorithm, which is the sum of the squared distances.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering
        with more methods for generating initial centroids but without
        using a distortion change threshold as a stopping criterion.
    whiten : must be called prior to passing an observation matrix
        to kmeans.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.vq import kmeans, whiten
    >>> features = np.array([[1.9, 2.3], [1.5, 2.5], [0.8, 0.6],
    ...                      [0.4, 1.8], [1.0, 1.0]])
    >>> whitened = whiten(features)
    >>> codebook, distortion = kmeans(whitened, 2)

    """
    # A scalar k does not contribute to namespace detection.
    if isinstance(k_or_guess, int):
        xp = array_namespace(obs)
    else:
        xp = array_namespace(obs, k_or_guess)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
    if iter < 1:
        raise ValueError("iter must be at least 1, got %s" % iter)

    # An array-valued `k_or_guess` is the initial code book: run once.
    if size(guess) != 1:
        if size(guess) < 1:
            raise ValueError("Asked for 0 clusters. Initial book was %s" %
                             guess)
        return _kmeans(obs, guess, thresh=thresh, xp=xp)

    # k_or_guess is a scalar; verify that it is a positive integer.
    n_clusters = int(guess)
    if n_clusters != guess:
        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
    if n_clusters < 1:
        raise ValueError("Asked for %d clusters." % n_clusters)

    rng = check_random_state(seed)

    # Run `iter` independent restarts and keep the lowest-distortion book.
    best_dist = xp.inf
    for _ in range(iter):
        start_book = _kpoints(obs, n_clusters, rng, xp)
        book, dist = _kmeans(obs, start_book, thresh=thresh, xp=xp)
        if dist < best_dist:
            best_book, best_dist = book, dist
    return best_book, best_dist
500
+
501
+
502
+ def _kpoints(data, k, rng, xp):
503
+ """Pick k points at random in data (one row = one observation).
504
+
505
+ Parameters
506
+ ----------
507
+ data : ndarray
508
+ Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
509
+ dimensional data, rank 2 multidimensional data, in which case one
510
+ row is one observation.
511
+ k : int
512
+ Number of samples to generate.
513
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
514
+ Random number generator.
515
+
516
+ Returns
517
+ -------
518
+ x : ndarray
519
+ A 'k' by 'N' containing the initial centroids
520
+
521
+ """
522
+ idx = rng.choice(data.shape[0], size=int(k), replace=False)
523
+ # convert to array with default integer dtype (avoids numpy#25607)
524
+ idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype)
525
+ return xp.take(data, idx, axis=0)
526
+
527
+
528
def _krandinit(data, k, rng, xp):
    """Returns k samples of a random variable whose parameters depend on data.

    More precisely, it returns k observations sampled from a Gaussian random
    variable whose mean and covariances are the ones estimated from the data.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
        data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.
    xp : array namespace
        Array API namespace of `data`.

    Returns
    -------
    x : ndarray
        A 'k' by 'N' containing the initial centroids

    """
    mu = xp.mean(data, axis=0)
    # rng.standard_normal expects a NumPy-compatible size argument
    k = np.asarray(k)

    if data.ndim == 1:
        # 1-D data: scale standard-normal samples by the scalar std dev.
        _cov = cov(data)
        x = rng.standard_normal(size=k)
        x = xp.asarray(x)
        x *= xp.sqrt(_cov)
    elif data.shape[1] > data.shape[0]:
        # initialize when the covariance matrix is rank deficient
        _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
        x = rng.standard_normal(size=(k, size(s)))
        x = xp.asarray(x)
        sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
        x = x @ sVh
    else:
        _cov = atleast_nd(cov(data.T), ndim=2)

        # k rows, d cols (one row = one obs)
        # Generate k sample of a random variable ~ Gaussian(mu, cov)
        x = rng.standard_normal(size=(k, size(mu)))
        x = xp.asarray(x)
        # Color the samples with the Cholesky factor of the covariance.
        x = x @ xp.linalg.cholesky(_cov).T

    x += mu
    return x
577
+
578
+
579
def _kpp(data, k, rng, xp):
    """ Picks k points in the data based on the kmeans++ method.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
        data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.
    xp : array namespace
        Array API namespace of `data`.

    Returns
    -------
    init : ndarray
        A 'k' by 'N' containing the initial centroids.

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
       on Discrete Algorithms, 2007.
    """

    ndim = len(data.shape)
    if ndim == 1:
        # Treat 1-D data as single-feature observations.
        data = data[:, None]

    dims = data.shape[1]

    init = xp.empty((int(k), dims))

    for i in range(k):
        if i == 0:
            # First centroid is chosen uniformly at random.
            init[i, :] = data[rng_integers(rng, data.shape[0]), :]

        else:
            # Each later centroid is drawn with probability proportional
            # to the squared distance to the nearest centroid so far.
            D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
            probs = D2/D2.sum()
            cumprobs = probs.cumsum()
            r = rng.uniform()
            # np.searchsorted needs a NumPy array
            cumprobs = np.asarray(cumprobs)
            init[i, :] = data[np.searchsorted(cumprobs, r), :]

    if ndim == 1:
        # Restore the 1-D shape of the input.
        init = init[:, 0]
    return init
628
+
629
+
630
# Map of `minit` strings accepted by `kmeans2` to initialization routines.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
631
+
632
+
633
+ def _missing_warn():
634
+ """Print a warning when called."""
635
+ warnings.warn("One of the clusters is empty. "
636
+ "Re-run kmeans with a different initialization.",
637
+ stacklevel=3)
638
+
639
+
640
def _missing_raise():
    """Raise a ClusterError because a cluster has lost all of its members."""
    message = ("One of the clusters is empty. "
               "Re-run kmeans with a different initialization.")
    raise ClusterError(message)
644
+
645
+
646
# Map of `missing` strings accepted by `kmeans2` to empty-cluster handlers.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
647
+
648
+
649
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True, *, seed=None):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids. Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' 1-D observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization. Available methods are 'random',
        'points', '++' and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        '++': choose k observations accordingly to the kmeans++ method
        (careful seeding)

        'matrix': interpret the k parameter as a k by M (or length k
        array for 1-D data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters. Available methods are
        'warn' and 'raise':

        'warn': give a warning and continue.

        'raise': raise an ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        Seed for initializing the pseudo-random number generator.
        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        The default is None.

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        ith observation is closest to.

    See Also
    --------
    kmeans

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
       on Discrete Algorithms, 2007.

    Examples
    --------
    >>> from scipy.cluster.vq import kmeans2
    >>> import numpy as np

    Create z, an array with shape (100, 2) containing a mixture of samples
    from three multivariate normal distributions.

    >>> rng = np.random.default_rng()
    >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
    >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
    >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
    >>> z = np.concatenate((a, b, c))
    >>> rng.shuffle(z)

    Compute three clusters.

    >>> centroid, label = kmeans2(z, 3, minit='points')
    >>> centroid
    array([[ 2.22274463, -0.61666946],  # may vary
           [ 0.54069047,  5.86541444],
           [ 6.73846769,  4.01991898]])

    How many points are in each cluster?

    >>> counts = np.bincount(label)
    >>> counts
    array([29, 51, 20])  # may vary

    """
    if int(iter) < 1:
        raise ValueError("Invalid iter (%s), "
                         "must be a positive integer." % iter)
    try:
        miss_meth = _valid_miss_meth[missing]
    except KeyError as e:
        raise ValueError(f"Unknown missing method {missing!r}") from e

    # A scalar k does not contribute to namespace detection.
    if isinstance(k, int):
        xp = array_namespace(data)
    else:
        xp = array_namespace(data, k)
    data = _asarray(data, xp=xp, check_finite=check_finite)
    code_book = copy(k, xp=xp)
    if data.ndim == 1:
        d = 1
    elif data.ndim == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if size(data) < 1 or size(code_book) < 1:
        raise ValueError("Empty input is not supported.")

    # If k is not a single value, it should be compatible with data's shape
    if minit == 'matrix' or size(code_book) > 1:
        # `k` was given as an explicit initial code book.
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        nc = code_book.shape[0]
        if data.ndim > 1 and code_book.shape[1] != d:
            raise ValueError("k array doesn't match data dimension")
    else:
        # `k` is a count: validate it, then build the initial code book
        # with the requested initialization method.
        nc = int(code_book)

        if nc < 1:
            raise ValueError("Cannot ask kmeans2 for %d clusters"
                             " (k was %s)" % (nc, code_book))
        elif nc != code_book:
            warnings.warn("k was not an integer, was converted.", stacklevel=2)

        try:
            init_meth = _valid_init_meth[minit]
        except KeyError as e:
            raise ValueError(f"Unknown init method {minit!r}") from e
        else:
            rng = check_random_state(seed)
            code_book = init_meth(data, code_book, rng, xp)

    # _vq.update_cluster_means is a NumPy-only C routine; convert once
    # before the loop and back to `xp` at the end.
    data = np.asarray(data)
    code_book = np.asarray(code_book)
    for i in range(iter):
        # Compute the nearest neighbor for each obs using the current code book
        label = vq(data, code_book, check_finite=check_finite)[0]
        # Update the code book by computing centroids
        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            miss_meth()
            # Set the empty clusters to their previous positions
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return xp.asarray(code_book), xp.asarray(label)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__init__.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================================
3
+ Legacy discrete Fourier transforms (:mod:`scipy.fftpack`)
4
+ =========================================================
5
+
6
+ .. legacy::
7
+
8
+ New code should use :mod:`scipy.fft`.
9
+
10
+ Fast Fourier Transforms (FFTs)
11
+ ==============================
12
+
13
+ .. autosummary::
14
+ :toctree: generated/
15
+
16
+ fft - Fast (discrete) Fourier Transform (FFT)
17
+ ifft - Inverse FFT
18
+ fft2 - 2-D FFT
19
+ ifft2 - 2-D inverse FFT
20
+ fftn - N-D FFT
21
+ ifftn - N-D inverse FFT
22
+ rfft - FFT of strictly real-valued sequence
23
+ irfft - Inverse of rfft
24
+ dct - Discrete cosine transform
25
+ idct - Inverse discrete cosine transform
26
+ dctn - N-D Discrete cosine transform
27
+ idctn - N-D Inverse discrete cosine transform
28
+ dst - Discrete sine transform
29
+ idst - Inverse discrete sine transform
30
+ dstn - N-D Discrete sine transform
31
+ idstn - N-D Inverse discrete sine transform
32
+
33
+ Differential and pseudo-differential operators
34
+ ==============================================
35
+
36
+ .. autosummary::
37
+ :toctree: generated/
38
+
39
+ diff - Differentiation and integration of periodic sequences
40
+ tilbert - Tilbert transform: cs_diff(x,h,h)
41
+ itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
42
+ hilbert - Hilbert transform: cs_diff(x,inf,inf)
43
+ ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
44
+ cs_diff - cosh/sinh pseudo-derivative of periodic sequences
45
+ sc_diff - sinh/cosh pseudo-derivative of periodic sequences
46
+ ss_diff - sinh/sinh pseudo-derivative of periodic sequences
47
+ cc_diff - cosh/cosh pseudo-derivative of periodic sequences
48
+ shift - Shift periodic sequences
49
+
50
+ Helper functions
51
+ ================
52
+
53
+ .. autosummary::
54
+ :toctree: generated/
55
+
56
+ fftshift - Shift the zero-frequency component to the center of the spectrum
57
+ ifftshift - The inverse of `fftshift`
58
+ fftfreq - Return the Discrete Fourier Transform sample frequencies
59
+ rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
60
+ next_fast_len - Find the optimal length to zero-pad an FFT for speed
61
+
62
+ Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions
63
+ exposed by ``fftpack``; importing them from ``numpy`` should be preferred.
64
+
65
+ Convolutions (:mod:`scipy.fftpack.convolve`)
66
+ ============================================
67
+
68
+ .. module:: scipy.fftpack.convolve
69
+
70
+ .. autosummary::
71
+ :toctree: generated/
72
+
73
+ convolve
74
+ convolve_z
75
+ init_convolution_kernel
76
+ destroy_convolve_cache
77
+
78
+ """
79
+
80
+
81
+ __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
82
+ 'fft2','ifft2',
83
+ 'diff',
84
+ 'tilbert','itilbert','hilbert','ihilbert',
85
+ 'sc_diff','cs_diff','cc_diff','ss_diff',
86
+ 'shift',
87
+ 'fftfreq', 'rfftfreq',
88
+ 'fftshift', 'ifftshift',
89
+ 'next_fast_len',
90
+ 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
91
+ ]
92
+
93
+ from ._basic import *
94
+ from ._pseudo_diffs import *
95
+ from ._helper import *
96
+ from ._realtransforms import *
97
+
98
+ # Deprecated namespaces, to be removed in v2.0.0
99
+ from . import basic, helper, pseudo_diffs, realtransforms
100
+
101
+ from scipy._lib._testutils import PytestTester
102
+ test = PytestTester(__name__)
103
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_basic.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_helper.cpython-310.pyc ADDED
Binary file (3.59 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc ADDED
Binary file (19.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/basic.cpython-310.pyc ADDED
Binary file (638 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/helper.cpython-310.pyc ADDED
Binary file (646 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc ADDED
Binary file (720 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc ADDED
Binary file (663 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_basic.py ADDED
@@ -0,0 +1,428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Discrete Fourier Transforms - _basic.py
3
+ """
4
+ # Created by Pearu Peterson, August,September 2002
5
+ __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
6
+ 'fft2','ifft2']
7
+
8
+ from scipy.fft import _pocketfft
9
+ from ._helper import _good_shape
10
+
11
+
12
def fft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return discrete Fourier transform of real or complex sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)``, where

    ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.

    Parameters
    ----------
    x : array_like
        Array to Fourier transform.
    n : int, optional
        Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the fft's are computed; the default is over the
        last axis (i.e., ``axis=-1``).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    z : complex ndarray
        The transformed values, packed in "standard" order::

            [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)]        if n is even
            [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd

        ``z[0]`` holds the zero-frequency term, ``z[1:n/2]`` the
        positive-frequency terms, and ``z[n/2:]`` the negative-frequency
        terms in order of decreasingly negative frequency; use `fftshift`
        to center the zero-frequency component.

    See Also
    --------
    ifft : Inverse FFT
    rfft : FFT of a real sequence

    Notes
    -----
    Both single and double precision routines are implemented. Half
    precision inputs will be converted to single precision and
    non-floating-point inputs to double precision; long-double precision
    inputs are not supported.

    This function is most efficient when `n` is a power of two, and least
    efficient when `n` is prime.

    If ``x`` is real-valued, then ``z[j] == z[n-j].conjugate()`` and a
    "real FFT" algorithm is automatically used, roughly halving the
    computation time; `rfft` outputs only half of this symmetrical
    spectrum, and `dct` can halve the work again for symmetrical data.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import fft, ifft
    >>> x = np.arange(5)
    >>> np.allclose(fft(ifft(x)), x, atol=1e-15)  # within numerical accuracy.
    True

    """
    # Delegate to the scipy.fft pocketfft backend; norm=None selects the
    # default (unscaled) convention that fftpack has always used.
    return _pocketfft.fft(x, n, axis=axis, norm=None, overwrite_x=overwrite_x)
89
+
90
+
91
+ def ifft(x, n=None, axis=-1, overwrite_x=False):
92
+ """
93
+ Return discrete inverse Fourier transform of real or complex sequence.
94
+
95
+ The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
96
+
97
+ ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
98
+
99
+ Parameters
100
+ ----------
101
+ x : array_like
102
+ Transformed data to invert.
103
+ n : int, optional
104
+ Length of the inverse Fourier transform. If ``n < x.shape[axis]``,
105
+ `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded.
106
+ The default results in ``n = x.shape[axis]``.
107
+ axis : int, optional
108
+ Axis along which the ifft's are computed; the default is over the
109
+ last axis (i.e., ``axis=-1``).
110
+ overwrite_x : bool, optional
111
+ If True, the contents of `x` can be destroyed; the default is False.
112
+
113
+ Returns
114
+ -------
115
+ ifft : ndarray of floats
116
+ The inverse discrete Fourier transform.
117
+
118
+ See Also
119
+ --------
120
+ fft : Forward FFT
121
+
122
+ Notes
123
+ -----
124
+ Both single and double precision routines are implemented. Half precision
125
+ inputs will be converted to single precision. Non-floating-point inputs
126
+ will be converted to double precision. Long-double precision inputs are
127
+ not supported.
128
+
129
+ This function is most efficient when `n` is a power of two, and least
130
+ efficient when `n` is prime.
131
+
132
+ If the data type of `x` is real, a "real IFFT" algorithm is automatically
133
+ used, which roughly halves the computation time.
134
+
135
+ Examples
136
+ --------
137
+ >>> from scipy.fftpack import fft, ifft
138
+ >>> import numpy as np
139
+ >>> x = np.arange(5)
140
+ >>> np.allclose(ifft(fft(x)), x, atol=1e-15) # within numerical accuracy.
141
+ True
142
+
143
+ """
144
+ return _pocketfft.ifft(x, n, axis, None, overwrite_x)
145
+
146
+
147
+ def rfft(x, n=None, axis=-1, overwrite_x=False):
148
+ """
149
+ Discrete Fourier transform of a real sequence.
150
+
151
+ Parameters
152
+ ----------
153
+ x : array_like, real-valued
154
+ The data to transform.
155
+ n : int, optional
156
+ Defines the length of the Fourier transform. If `n` is not specified
157
+ (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``,
158
+ `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
159
+ axis : int, optional
160
+ The axis along which the transform is applied. The default is the
161
+ last axis.
162
+ overwrite_x : bool, optional
163
+ If set to true, the contents of `x` can be overwritten. Default is
164
+ False.
165
+
166
+ Returns
167
+ -------
168
+ z : real ndarray
169
+ The returned real array contains::
170
+
171
+ [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even
172
+ [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd
173
+
174
+ where::
175
+
176
+ y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
177
+ j = 0..n-1
178
+
179
+ See Also
180
+ --------
181
+ fft, irfft, scipy.fft.rfft
182
+
183
+ Notes
184
+ -----
185
+ Within numerical accuracy, ``y == rfft(irfft(y))``.
186
+
187
+ Both single and double precision routines are implemented. Half precision
188
+ inputs will be converted to single precision. Non-floating-point inputs
189
+ will be converted to double precision. Long-double precision inputs are
190
+ not supported.
191
+
192
+ To get an output with a complex datatype, consider using the newer
193
+ function `scipy.fft.rfft`.
194
+
195
+ Examples
196
+ --------
197
+ >>> from scipy.fftpack import fft, rfft
198
+ >>> a = [9, -9, 1, 3]
199
+ >>> fft(a)
200
+ array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j])
201
+ >>> rfft(a)
202
+ array([ 4., 8., 12., 16.])
203
+
204
+ """
205
+ return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)
206
+
207
+
208
+ def irfft(x, n=None, axis=-1, overwrite_x=False):
209
+ """
210
+ Return inverse discrete Fourier transform of real sequence x.
211
+
212
+ The contents of `x` are interpreted as the output of the `rfft`
213
+ function.
214
+
215
+ Parameters
216
+ ----------
217
+ x : array_like
218
+ Transformed data to invert.
219
+ n : int, optional
220
+ Length of the inverse Fourier transform.
221
+ If n < x.shape[axis], x is truncated.
222
+ If n > x.shape[axis], x is zero-padded.
223
+ The default results in n = x.shape[axis].
224
+ axis : int, optional
225
+ Axis along which the ifft's are computed; the default is over
226
+ the last axis (i.e., axis=-1).
227
+ overwrite_x : bool, optional
228
+ If True, the contents of `x` can be destroyed; the default is False.
229
+
230
+ Returns
231
+ -------
232
+ irfft : ndarray of floats
233
+ The inverse discrete Fourier transform.
234
+
235
+ See Also
236
+ --------
237
+ rfft, ifft, scipy.fft.irfft
238
+
239
+ Notes
240
+ -----
241
+ The returned real array contains::
242
+
243
+ [y(0),y(1),...,y(n-1)]
244
+
245
+ where for n is even::
246
+
247
+ y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
248
+ * exp(sqrt(-1)*j*k* 2*pi/n)
249
+ + c.c. + x[0] + (-1)**(j) x[n-1])
250
+
251
+ and for n is odd::
252
+
253
+ y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
254
+ * exp(sqrt(-1)*j*k* 2*pi/n)
255
+ + c.c. + x[0])
256
+
257
+ c.c. denotes complex conjugate of preceding expression.
258
+
259
+ For details on input parameters, see `rfft`.
260
+
261
+ To process (conjugate-symmetric) frequency-domain data with a complex
262
+ datatype, consider using the newer function `scipy.fft.irfft`.
263
+
264
+ Examples
265
+ --------
266
+ >>> from scipy.fftpack import rfft, irfft
267
+ >>> a = [1.0, 2.0, 3.0, 4.0, 5.0]
268
+ >>> irfft(a)
269
+ array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473])
270
+ >>> irfft(rfft(a))
271
+ array([1., 2., 3., 4., 5.])
272
+
273
+ """
274
+ return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x)
275
+
276
+
277
+ def fftn(x, shape=None, axes=None, overwrite_x=False):
278
+ """
279
+ Return multidimensional discrete Fourier transform.
280
+
281
+ The returned array contains::
282
+
283
+ y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
284
+ x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
285
+
286
+ where d = len(x.shape) and n = x.shape.
287
+
288
+ Parameters
289
+ ----------
290
+ x : array_like
291
+ The (N-D) array to transform.
292
+ shape : int or array_like of ints or None, optional
293
+ The shape of the result. If both `shape` and `axes` (see below) are
294
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
295
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
296
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
297
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
298
+ length ``shape[i]``.
299
+ If any element of `shape` is -1, the size of the corresponding
300
+ dimension of `x` is used.
301
+ axes : int or array_like of ints or None, optional
302
+ The axes of `x` (`y` if `shape` is not None) along which the
303
+ transform is applied.
304
+ The default is over all axes.
305
+ overwrite_x : bool, optional
306
+ If True, the contents of `x` can be destroyed. Default is False.
307
+
308
+ Returns
309
+ -------
310
+ y : complex-valued N-D NumPy array
311
+ The (N-D) DFT of the input array.
312
+
313
+ See Also
314
+ --------
315
+ ifftn
316
+
317
+ Notes
318
+ -----
319
+ If ``x`` is real-valued, then
320
+ ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``.
321
+
322
+ Both single and double precision routines are implemented. Half precision
323
+ inputs will be converted to single precision. Non-floating-point inputs
324
+ will be converted to double precision. Long-double precision inputs are
325
+ not supported.
326
+
327
+ Examples
328
+ --------
329
+ >>> import numpy as np
330
+ >>> from scipy.fftpack import fftn, ifftn
331
+ >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
332
+ >>> np.allclose(y, fftn(ifftn(y)))
333
+ True
334
+
335
+ """
336
+ shape = _good_shape(x, shape, axes)
337
+ return _pocketfft.fftn(x, shape, axes, None, overwrite_x)
338
+
339
+
340
+ def ifftn(x, shape=None, axes=None, overwrite_x=False):
341
+ """
342
+ Return inverse multidimensional discrete Fourier transform.
343
+
344
+ The sequence can be of an arbitrary type.
345
+
346
+ The returned array contains::
347
+
348
+ y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
349
+ x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
350
+
351
+ where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
352
+
353
+ For description of parameters see `fftn`.
354
+
355
+ See Also
356
+ --------
357
+ fftn : for detailed information.
358
+
359
+ Examples
360
+ --------
361
+ >>> from scipy.fftpack import fftn, ifftn
362
+ >>> import numpy as np
363
+ >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
364
+ >>> np.allclose(y, ifftn(fftn(y)))
365
+ True
366
+
367
+ """
368
+ shape = _good_shape(x, shape, axes)
369
+ return _pocketfft.ifftn(x, shape, axes, None, overwrite_x)
370
+
371
+
372
+ def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
373
+ """
374
+ 2-D discrete Fourier transform.
375
+
376
+ Return the 2-D discrete Fourier transform of the 2-D argument
377
+ `x`.
378
+
379
+ See Also
380
+ --------
381
+ fftn : for detailed information.
382
+
383
+ Examples
384
+ --------
385
+ >>> import numpy as np
386
+ >>> from scipy.fftpack import fft2, ifft2
387
+ >>> y = np.mgrid[:5, :5][0]
388
+ >>> y
389
+ array([[0, 0, 0, 0, 0],
390
+ [1, 1, 1, 1, 1],
391
+ [2, 2, 2, 2, 2],
392
+ [3, 3, 3, 3, 3],
393
+ [4, 4, 4, 4, 4]])
394
+ >>> np.allclose(y, ifft2(fft2(y)))
395
+ True
396
+ """
397
+ return fftn(x,shape,axes,overwrite_x)
398
+
399
+
400
+ def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
401
+ """
402
+ 2-D discrete inverse Fourier transform of real or complex sequence.
403
+
404
+ Return inverse 2-D discrete Fourier transform of
405
+ arbitrary type sequence x.
406
+
407
+ See `ifft` for more information.
408
+
409
+ See Also
410
+ --------
411
+ fft2, ifft
412
+
413
+ Examples
414
+ --------
415
+ >>> import numpy as np
416
+ >>> from scipy.fftpack import fft2, ifft2
417
+ >>> y = np.mgrid[:5, :5][0]
418
+ >>> y
419
+ array([[0, 0, 0, 0, 0],
420
+ [1, 1, 1, 1, 1],
421
+ [2, 2, 2, 2, 2],
422
+ [3, 3, 3, 3, 3],
423
+ [4, 4, 4, 4, 4]])
424
+ >>> np.allclose(y, fft2(ifft2(y)))
425
+ True
426
+
427
+ """
428
+ return ifftn(x,shape,axes,overwrite_x)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_helper.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+
3
+ import numpy as np
4
+ from numpy.fft import fftshift, ifftshift, fftfreq
5
+
6
+ import scipy.fft._pocketfft.helper as _helper
7
+
8
+ __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']
9
+
10
+
11
+ def rfftfreq(n, d=1.0):
12
+ """DFT sample frequencies (for usage with rfft, irfft).
13
+
14
+ The returned float array contains the frequency bins in
15
+ cycles/unit (with zero at the start) given a window length `n` and a
16
+ sample spacing `d`::
17
+
18
+ f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even
19
+ f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd
20
+
21
+ Parameters
22
+ ----------
23
+ n : int
24
+ Window length.
25
+ d : scalar, optional
26
+ Sample spacing. Default is 1.
27
+
28
+ Returns
29
+ -------
30
+ out : ndarray
31
+ The array of length `n`, containing the sample frequencies.
32
+
33
+ Examples
34
+ --------
35
+ >>> import numpy as np
36
+ >>> from scipy import fftpack
37
+ >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
38
+ >>> sig_fft = fftpack.rfft(sig)
39
+ >>> n = sig_fft.size
40
+ >>> timestep = 0.1
41
+ >>> freq = fftpack.rfftfreq(n, d=timestep)
42
+ >>> freq
43
+ array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ])
44
+
45
+ """
46
+ n = operator.index(n)
47
+ if n < 0:
48
+ raise ValueError("n = %s is not valid. "
49
+ "n must be a nonnegative integer." % n)
50
+
51
+ return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d)
52
+
53
+
54
+ def next_fast_len(target):
55
+ """
56
+ Find the next fast size of input data to `fft`, for zero-padding, etc.
57
+
58
+ SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
59
+ returns the next composite of the prime factors 2, 3, and 5 which is
60
+ greater than or equal to `target`. (These are also known as 5-smooth
61
+ numbers, regular numbers, or Hamming numbers.)
62
+
63
+ Parameters
64
+ ----------
65
+ target : int
66
+ Length to start searching from. Must be a positive integer.
67
+
68
+ Returns
69
+ -------
70
+ out : int
71
+ The first 5-smooth number greater than or equal to `target`.
72
+
73
+ Notes
74
+ -----
75
+ .. versionadded:: 0.18.0
76
+
77
+ Examples
78
+ --------
79
+ On a particular machine, an FFT of prime length takes 133 ms:
80
+
81
+ >>> from scipy import fftpack
82
+ >>> import numpy as np
83
+ >>> rng = np.random.default_rng()
84
+ >>> min_len = 10007 # prime length is worst case for speed
85
+ >>> a = rng.standard_normal(min_len)
86
+ >>> b = fftpack.fft(a)
87
+
88
+ Zero-padding to the next 5-smooth length reduces computation time to
89
+ 211 us, a speedup of 630 times:
90
+
91
+ >>> fftpack.next_fast_len(min_len)
92
+ 10125
93
+ >>> b = fftpack.fft(a, 10125)
94
+
95
+ Rounding up to the next power of 2 is not optimal, taking 367 us to
96
+ compute, 1.7 times as long as the 5-smooth size:
97
+
98
+ >>> b = fftpack.fft(a, 16384)
99
+
100
+ """
101
+ # Real transforms use regular sizes so this is backwards compatible
102
+ return _helper.good_size(target, True)
103
+
104
+
105
+ def _good_shape(x, shape, axes):
106
+ """Ensure that shape argument is valid for scipy.fftpack
107
+
108
+ scipy.fftpack does not support len(shape) < x.ndim when axes is not given.
109
+ """
110
+ if shape is not None and axes is None:
111
+ shape = _helper._iterable_of_int(shape, 'shape')
112
+ if len(shape) != np.ndim(x):
113
+ raise ValueError("when given, axes and shape arguments"
114
+ " have to be of the same length")
115
+ return shape
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/_realtransforms.py ADDED
@@ -0,0 +1,598 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Real spectrum transforms (DCT, DST, MDCT)
3
+ """
4
+
5
+ __all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
6
+
7
+ from scipy.fft import _pocketfft
8
+ from ._helper import _good_shape
9
+
10
+ _inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4}
11
+
12
+
13
+ def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
14
+ """
15
+ Return multidimensional Discrete Cosine Transform along the specified axes.
16
+
17
+ Parameters
18
+ ----------
19
+ x : array_like
20
+ The input array.
21
+ type : {1, 2, 3, 4}, optional
22
+ Type of the DCT (see Notes). Default type is 2.
23
+ shape : int or array_like of ints or None, optional
24
+ The shape of the result. If both `shape` and `axes` (see below) are
25
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
26
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
27
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
28
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
29
+ length ``shape[i]``.
30
+ If any element of `shape` is -1, the size of the corresponding
31
+ dimension of `x` is used.
32
+ axes : int or array_like of ints or None, optional
33
+ Axes along which the DCT is computed.
34
+ The default is over all axes.
35
+ norm : {None, 'ortho'}, optional
36
+ Normalization mode (see Notes). Default is None.
37
+ overwrite_x : bool, optional
38
+ If True, the contents of `x` can be destroyed; the default is False.
39
+
40
+ Returns
41
+ -------
42
+ y : ndarray of real
43
+ The transformed input array.
44
+
45
+ See Also
46
+ --------
47
+ idctn : Inverse multidimensional DCT
48
+
49
+ Notes
50
+ -----
51
+ For full details of the DCT types and normalization modes, as well as
52
+ references, see `dct`.
53
+
54
+ Examples
55
+ --------
56
+ >>> import numpy as np
57
+ >>> from scipy.fftpack import dctn, idctn
58
+ >>> rng = np.random.default_rng()
59
+ >>> y = rng.standard_normal((16, 16))
60
+ >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
61
+ True
62
+
63
+ """
64
+ shape = _good_shape(x, shape, axes)
65
+ return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)
66
+
67
+
68
+ def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
69
+ """
70
+ Return multidimensional Discrete Cosine Transform along the specified axes.
71
+
72
+ Parameters
73
+ ----------
74
+ x : array_like
75
+ The input array.
76
+ type : {1, 2, 3, 4}, optional
77
+ Type of the DCT (see Notes). Default type is 2.
78
+ shape : int or array_like of ints or None, optional
79
+ The shape of the result. If both `shape` and `axes` (see below) are
80
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
81
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
82
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
83
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
84
+ length ``shape[i]``.
85
+ If any element of `shape` is -1, the size of the corresponding
86
+ dimension of `x` is used.
87
+ axes : int or array_like of ints or None, optional
88
+ Axes along which the IDCT is computed.
89
+ The default is over all axes.
90
+ norm : {None, 'ortho'}, optional
91
+ Normalization mode (see Notes). Default is None.
92
+ overwrite_x : bool, optional
93
+ If True, the contents of `x` can be destroyed; the default is False.
94
+
95
+ Returns
96
+ -------
97
+ y : ndarray of real
98
+ The transformed input array.
99
+
100
+ See Also
101
+ --------
102
+ dctn : multidimensional DCT
103
+
104
+ Notes
105
+ -----
106
+ For full details of the IDCT types and normalization modes, as well as
107
+ references, see `idct`.
108
+
109
+ Examples
110
+ --------
111
+ >>> import numpy as np
112
+ >>> from scipy.fftpack import dctn, idctn
113
+ >>> rng = np.random.default_rng()
114
+ >>> y = rng.standard_normal((16, 16))
115
+ >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
116
+ True
117
+
118
+ """
119
+ type = _inverse_typemap[type]
120
+ shape = _good_shape(x, shape, axes)
121
+ return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)
122
+
123
+
124
+ def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
125
+ """
126
+ Return multidimensional Discrete Sine Transform along the specified axes.
127
+
128
+ Parameters
129
+ ----------
130
+ x : array_like
131
+ The input array.
132
+ type : {1, 2, 3, 4}, optional
133
+ Type of the DST (see Notes). Default type is 2.
134
+ shape : int or array_like of ints or None, optional
135
+ The shape of the result. If both `shape` and `axes` (see below) are
136
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
137
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
138
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
139
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
140
+ length ``shape[i]``.
141
+ If any element of `shape` is -1, the size of the corresponding
142
+ dimension of `x` is used.
143
+ axes : int or array_like of ints or None, optional
144
+ Axes along which the DCT is computed.
145
+ The default is over all axes.
146
+ norm : {None, 'ortho'}, optional
147
+ Normalization mode (see Notes). Default is None.
148
+ overwrite_x : bool, optional
149
+ If True, the contents of `x` can be destroyed; the default is False.
150
+
151
+ Returns
152
+ -------
153
+ y : ndarray of real
154
+ The transformed input array.
155
+
156
+ See Also
157
+ --------
158
+ idstn : Inverse multidimensional DST
159
+
160
+ Notes
161
+ -----
162
+ For full details of the DST types and normalization modes, as well as
163
+ references, see `dst`.
164
+
165
+ Examples
166
+ --------
167
+ >>> import numpy as np
168
+ >>> from scipy.fftpack import dstn, idstn
169
+ >>> rng = np.random.default_rng()
170
+ >>> y = rng.standard_normal((16, 16))
171
+ >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
172
+ True
173
+
174
+ """
175
+ shape = _good_shape(x, shape, axes)
176
+ return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
177
+
178
+
179
+ def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
180
+ """
181
+ Return multidimensional Discrete Sine Transform along the specified axes.
182
+
183
+ Parameters
184
+ ----------
185
+ x : array_like
186
+ The input array.
187
+ type : {1, 2, 3, 4}, optional
188
+ Type of the DST (see Notes). Default type is 2.
189
+ shape : int or array_like of ints or None, optional
190
+ The shape of the result. If both `shape` and `axes` (see below) are
191
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
192
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
193
+ If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
194
+ If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
195
+ length ``shape[i]``.
196
+ If any element of `shape` is -1, the size of the corresponding
197
+ dimension of `x` is used.
198
+ axes : int or array_like of ints or None, optional
199
+ Axes along which the IDST is computed.
200
+ The default is over all axes.
201
+ norm : {None, 'ortho'}, optional
202
+ Normalization mode (see Notes). Default is None.
203
+ overwrite_x : bool, optional
204
+ If True, the contents of `x` can be destroyed; the default is False.
205
+
206
+ Returns
207
+ -------
208
+ y : ndarray of real
209
+ The transformed input array.
210
+
211
+ See Also
212
+ --------
213
+ dstn : multidimensional DST
214
+
215
+ Notes
216
+ -----
217
+ For full details of the IDST types and normalization modes, as well as
218
+ references, see `idst`.
219
+
220
+ Examples
221
+ --------
222
+ >>> import numpy as np
223
+ >>> from scipy.fftpack import dstn, idstn
224
+ >>> rng = np.random.default_rng()
225
+ >>> y = rng.standard_normal((16, 16))
226
+ >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
227
+ True
228
+
229
+ """
230
+ type = _inverse_typemap[type]
231
+ shape = _good_shape(x, shape, axes)
232
+ return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
233
+
234
+
235
+ def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
236
+ r"""
237
+ Return the Discrete Cosine Transform of arbitrary type sequence x.
238
+
239
+ Parameters
240
+ ----------
241
+ x : array_like
242
+ The input array.
243
+ type : {1, 2, 3, 4}, optional
244
+ Type of the DCT (see Notes). Default type is 2.
245
+ n : int, optional
246
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
247
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
248
+ default results in ``n = x.shape[axis]``.
249
+ axis : int, optional
250
+ Axis along which the dct is computed; the default is over the
251
+ last axis (i.e., ``axis=-1``).
252
+ norm : {None, 'ortho'}, optional
253
+ Normalization mode (see Notes). Default is None.
254
+ overwrite_x : bool, optional
255
+ If True, the contents of `x` can be destroyed; the default is False.
256
+
257
+ Returns
258
+ -------
259
+ y : ndarray of real
260
+ The transformed input array.
261
+
262
+ See Also
263
+ --------
264
+ idct : Inverse DCT
265
+
266
+ Notes
267
+ -----
268
+ For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
269
+ MATLAB ``dct(x)``.
270
+
271
+ There are, theoretically, 8 types of the DCT, only the first 4 types are
272
+ implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the'
273
+ Inverse DCT generally refers to DCT type 3.
274
+
275
+ **Type I**
276
+
277
+ There are several definitions of the DCT-I; we use the following
278
+ (for ``norm=None``)
279
+
280
+ .. math::
281
+
282
+ y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
283
+ \frac{\pi k n}{N-1} \right)
284
+
285
+ If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a scaling
286
+ factor of :math:`\sqrt{2}`, and ``y[k]`` is multiplied by a scaling factor
287
+ ``f``
288
+
289
+ .. math::
290
+
291
+ f = \begin{cases}
292
+ \frac{1}{2}\sqrt{\frac{1}{N-1}} & \text{if }k=0\text{ or }N-1, \\
293
+ \frac{1}{2}\sqrt{\frac{2}{N-1}} & \text{otherwise} \end{cases}
294
+
295
+ .. versionadded:: 1.2.0
296
+ Orthonormalization in DCT-I.
297
+
298
+ .. note::
299
+ The DCT-I is only supported for input size > 1.
300
+
301
+ **Type II**
302
+
303
+ There are several definitions of the DCT-II; we use the following
304
+ (for ``norm=None``)
305
+
306
+ .. math::
307
+
308
+ y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)
309
+
310
+ If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
311
+
312
+ .. math::
313
+ f = \begin{cases}
314
+ \sqrt{\frac{1}{4N}} & \text{if }k=0, \\
315
+ \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
316
+
317
+ which makes the corresponding matrix of coefficients orthonormal
318
+ (``O @ O.T = np.eye(N)``).
319
+
320
+ **Type III**
321
+
322
+ There are several definitions, we use the following (for ``norm=None``)
323
+
324
+ .. math::
325
+
326
+ y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)
327
+
328
+ or, for ``norm='ortho'``
329
+
330
+ .. math::
331
+
332
+ y_k = \frac{x_0}{\sqrt{N}} + \sqrt{\frac{2}{N}} \sum_{n=1}^{N-1} x_n
333
+ \cos\left(\frac{\pi(2k+1)n}{2N}\right)
334
+
335
+ The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
336
+ to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
337
+ the orthonormalized DCT-II.
338
+
339
+ **Type IV**
340
+
341
+ There are several definitions of the DCT-IV; we use the following
342
+ (for ``norm=None``)
343
+
344
+ .. math::
345
+
346
+ y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)
347
+
348
+ If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
349
+
350
+ .. math::
351
+
352
+ f = \frac{1}{\sqrt{2N}}
353
+
354
+ .. versionadded:: 1.2.0
355
+ Support for DCT-IV.
356
+
357
+ References
358
+ ----------
359
+ .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
360
+ Makhoul, `IEEE Transactions on acoustics, speech and signal
361
+ processing` vol. 28(1), pp. 27-34,
362
+ :doi:`10.1109/TASSP.1980.1163351` (1980).
363
+ .. [2] Wikipedia, "Discrete cosine transform",
364
+ https://en.wikipedia.org/wiki/Discrete_cosine_transform
365
+
366
+ Examples
367
+ --------
368
+ The Type 1 DCT is equivalent to the FFT (though faster) for real,
369
+ even-symmetrical inputs. The output is also real and even-symmetrical.
370
+ Half of the FFT input is used to generate half of the FFT output:
371
+
372
+ >>> from scipy.fftpack import fft, dct
373
+ >>> import numpy as np
374
+ >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
375
+ array([ 30., -8., 6., -2., 6., -8.])
376
+ >>> dct(np.array([4., 3., 5., 10.]), 1)
377
+ array([ 30., -8., 6., -2.])
378
+
379
+ """
380
+ return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
381
+
382
+
383
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform.  If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded.  The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; default is the last axis
        (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idct : ndarray of real
        The transformed input array.

    See Also
    --------
    dct : Forward DCT

    Notes
    -----
    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
    MATLAB ``idct(x)``.

    'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.

    IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type
    3, and IDCT of type 3 is the DCT of type 2. IDCT of type 4 is the DCT
    of type 4. For the definition of these types, see `dct`.

    Examples
    --------
    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
    inputs. The output is also real and even-symmetrical. Half of the IFFT
    input is used to generate half of the IFFT output:

    >>> from scipy.fftpack import ifft, idct
    >>> import numpy as np
    >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real
    array([ 4., 3., 5., 10., 5., 3.])
    >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6
    array([ 4., 3., 5., 10.])

    """
    # Each inverse DCT is itself a (forward) DCT of a complementary type, so
    # translate the requested type and delegate to the forward implementation.
    return _pocketfft.dct(x, _inverse_typemap[type], n, axis, norm,
                          overwrite_x)
441
+
442
+
443
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    r"""
    Return the Discrete Sine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform.  If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded.  The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dst is computed; default is the last axis
        (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    dst : ndarray of reals
        The transformed input array.

    See Also
    --------
    idst : Inverse DST

    Notes
    -----
    For a single dimension array ``x``.

    There are, theoretically, 8 types of the DST for different combinations of
    even/odd boundary conditions and boundary off sets [1]_, only the first
    4 types are implemented in scipy.

    **Type I**

    There are several definitions of the DST-I; we use the following
    for ``norm=None``. DST-I assumes the input is odd around `n=-1` and `n=N`.

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)

    Note that the DST-I is only supported for input size > 1.
    The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
    The orthonormalized DST-I is exactly its own inverse.

    **Type II**

    There are several definitions of the DST-II; we use the following for
    ``norm=None``. DST-II assumes the input is odd around `n=-1/2` and
    `n=N-1/2`; the output is odd around :math:`k=-1` and even around `k=N-1`

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)

    if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``

    .. math::

        f = \begin{cases}
        \sqrt{\frac{1}{4N}} & \text{if }k = 0, \\
        \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}

    **Type III**

    There are several definitions of the DST-III, we use the following (for
    ``norm=None``). DST-III assumes the input is odd around `n=-1` and even
    around `n=N-1`

    .. math::

        y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
            \frac{\pi(2k+1)(n+1)}{2N}\right)

    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
    to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the
    orthonormalized DST-II.

    .. versionadded:: 0.11.0

    **Type IV**

    There are several definitions of the DST-IV, we use the following (for
    ``norm=None``). DST-IV assumes the input is odd around `n=-0.5` and even
    around `n=N-0.5`

    .. math::

        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)

    The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The
    orthonormalized DST-IV is exactly its own inverse.

    .. versionadded:: 1.2.0
       Support for DST-IV.

    References
    ----------
    .. [1] Wikipedia, "Discrete sine transform",
           https://en.wikipedia.org/wiki/Discrete_sine_transform

    """
    # Forward DST types map one-to-one onto the pocketfft backend, so no
    # type translation is needed here (contrast with `idst`).
    return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
553
+
554
+
555
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform.  If ``n < x.shape[axis]``, `x` is
        truncated; if ``n > x.shape[axis]``, `x` is zero-padded.  The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idst is computed; default is the last axis
        (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.

    Returns
    -------
    idst : ndarray of real
        The transformed input array.

    See Also
    --------
    dst : Forward DST

    Notes
    -----
    'The' IDST is the IDST of type 2, which is the same as DST of type 3.

    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
    3, and IDST of type 3 is the DST of type 2. For the definition of these
    types, see `dst`.

    .. versionadded:: 0.11.0

    """
    # Each inverse DST is a forward DST of a complementary type; translate
    # the requested type and delegate to the forward implementation.
    return _pocketfft.dst(x, _inverse_typemap[type], n, axis, norm,
                          overwrite_x)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (273 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/helper.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Public names historically exposed by `scipy.fftpack.helper`; access to any
# of them is forwarded (with a DeprecationWarning) to the private module.
__all__ = [  # noqa: F822
    'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'
]


def __dir__():
    # Advertise only the deprecated public API to dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): resolve `name` from the private
    # `_helper` module while warning that this path is deprecated.
    return _sub_module_deprecation(sub_package="fftpack", module="helper",
                                   private_modules=["_helper"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/pseudo_diffs.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Public names historically exposed by `scipy.fftpack.pseudo_diffs`; access
# to any of them is forwarded (with a DeprecationWarning) to the private
# module.
__all__ = [  # noqa: F822
    'diff',
    'tilbert', 'itilbert', 'hilbert', 'ihilbert',
    'cs_diff', 'cc_diff', 'sc_diff', 'ss_diff',
    'shift', 'iscomplexobj', 'convolve'
]


def __dir__():
    # Advertise only the deprecated public API to dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): resolve `name` from the private
    # `_pseudo_diffs` module while warning that this path is deprecated.
    return _sub_module_deprecation(sub_package="fftpack", module="pseudo_diffs",
                                   private_modules=["_pseudo_diffs"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc ADDED
Binary file (28.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc ADDED
Binary file (2.22 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_basic.py ADDED
@@ -0,0 +1,873 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by Pearu Peterson, September 2002
2
+
3
+ from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
4
+ assert_array_almost_equal_nulp, assert_array_less)
5
+ import pytest
6
+ from pytest import raises as assert_raises
7
+ from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
8
+
9
+ from numpy import (arange, array, asarray, zeros, dot, exp, pi,
10
+ swapaxes, double, cdouble)
11
+ import numpy as np
12
+ import numpy.fft
13
+ from numpy.random import rand
14
+
15
# "large" composite numbers supported by FFTPACK
LARGE_COMPOSITE_SIZES = [
    2**13,
    2**5 * 3**5,
    2**3 * 3**3 * 5**2,
]
# small highly-composite transform lengths (fast FFTPACK code paths)
SMALL_COMPOSITE_SIZES = [
    2,
    2*3*5,
    2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
    2011
]
# small prime transform length (exercises the slow generic code path)
SMALL_PRIME_SIZES = [
    29
]
33
+
34
+
35
def _assert_close_in_norm(x, y, rtol, size, rdt):
    """Assert ``y`` is close to ``x`` in relative Euclidean norm.

    ``size`` and ``rdt`` only decorate the failure message.
    """
    msg = f"size: {size} rdt: {rdt}"
    error = np.linalg.norm(x - y)
    bound = rtol * np.linalg.norm(x)
    assert_array_less(error, bound, msg)
39
+
40
+
41
def random(size):
    """Return uniform samples in [0, 1) with shape given by sequence *size*."""
    shape = tuple(size)
    return rand(*shape)
43
+
44
+
45
def direct_dft(x):
    """Naive O(n^2) reference DFT of a 1-D sequence (matches fft())."""
    x = asarray(x)
    n = len(x)
    # Base angular phases for one sample step; row k uses k * phase.
    phase = -arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * phase), x)
    return out
53
+
54
+
55
def direct_idft(x):
    """Naive O(n^2) reference inverse DFT of a 1-D sequence (matches ifft())."""
    x = asarray(x)
    n = len(x)
    # Conjugate phases relative to direct_dft, plus the 1/n normalization.
    phase = arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * phase), x) / n
    return out
63
+
64
+
65
def direct_dftn(x):
    """Reference n-D DFT: apply the 1-D FFT along every axis in turn."""
    x = asarray(x)
    for ax in range(x.ndim):
        x = fft(x, axis=ax)
    return x
70
+
71
+
72
def direct_idftn(x):
    """Reference n-D inverse DFT: apply the 1-D IFFT along every axis."""
    x = asarray(x)
    for ax in range(x.ndim):
        x = ifft(x, axis=ax)
    return x
77
+
78
+
79
def direct_rdft(x):
    """Reference real FFT in FFTPACK's packed layout.

    Layout: ``[Re X0, Re X1, Im X1, Re X2, Im X2, ...]``; for even ``n``
    the final entry is the purely real Nyquist term.
    """
    x = asarray(x)
    n = len(x)
    phase = -arange(n) * (2j * pi / n)
    packed = zeros(n, dtype=double)
    for k in range(n // 2 + 1):
        yk = dot(exp(k * phase), x)
        if k == 0:
            # DC term is real for real input; store only its real part.
            packed[0] = yk.real
        else:
            packed[2 * k - 1] = yk.real
            if 2 * k < n:
                packed[2 * k] = yk.imag
    return packed
93
+
94
+
95
def direct_irdft(x):
    """Inverse of `direct_rdft`: unpack FFTPACK's packed real-FFT layout
    into a full Hermitian spectrum, then take the real part of its IDFT."""
    x = asarray(x)
    n = len(x)
    spectrum = zeros(n, dtype=cdouble)
    spectrum[0] = x[0]
    for k in range(1, n // 2 + 1):
        if 2 * k < n:
            # Regular bin: rebuild the complex value and mirror its
            # conjugate into the upper half of the spectrum.
            spectrum[k] = x[2 * k - 1] + 1j * x[2 * k]
            spectrum[n - k] = x[2 * k - 1] - 1j * x[2 * k]
        else:
            # Even n: the Nyquist bin is real and is its own mirror.
            spectrum[k] = x[2 * k - 1]
    return direct_idft(spectrum).real
109
+
110
+
111
class _TestFFTBase:
    """Shared fft() tests; subclasses set self.cdt/self.rdt to concrete
    complex/real dtypes."""
    def setup_method(self):
        self.cdt = None
        self.rdt = None
        np.random.seed(1234)

    def test_definition(self):
        # fft() must match the naive O(n^2) reference DFT and keep dtype.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
        y = fft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))

    def test_n_argument_real(self):
        # Real rows + explicit n: output promotes to the complex dtype.
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))

    def _test_n_argument_complex(self):
        # NOTE(review): leading underscore keeps this out of pytest
        # collection — presumably disabled on purpose.
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))

    def test_invalid_sizes(self):
        # Empty input and negative n must both be rejected.
        assert_raises(ValueError, fft, [])
        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
148
+
149
class TestDoubleFFT(_TestFFTBase):
    """Run the shared fft() tests in double precision."""
    def setup_method(self):
        self.cdt = np.complex128
        self.rdt = np.float64
153
+
154
+
155
class TestSingleFFT(_TestFFTBase):
    """Run the shared fft() tests in single precision."""
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32

    reason = ("single-precision FFT implementation is partially disabled, "
              "until accuracy issues with large prime powers are resolved")

    @pytest.mark.xfail(run=False, reason=reason)
    def test_notice(self):
        # Placeholder xfail to surface the single-precision caveat above.
        pass
166
+
167
+
168
class TestFloat16FFT:
    """float16 input is upcast: fft() must return complex64 results."""

    def test_1_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft(x1, n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (4, ))
        # Compare against the reference DFT of the float32-upcast input.
        assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))

    def test_n_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        x2 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft([x1, x2], n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (2, 4))
        assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
        assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
185
+
186
+
187
class _TestIFFTBase:
    """Shared ifft() tests; subclasses set self.cdt/self.rdt dtypes."""
    def setup_method(self):
        np.random.seed(1234)

    def test_definition(self):
        # ifft() must match the naive reference inverse DFT and keep dtype.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y,y1)

        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))

    def test_definition_real(self):
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)

        x = np.array([1,2,3,4,5], dtype=self.rdt)
        # NOTE(review): this re-checks the *previous* y's dtype — `y` was
        # not recomputed from the new x; likely a stale/redundant assert.
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))

    def test_random_complex(self):
        # Round trips ifft(fft(x)) and fft(ifft(x)) must recover x.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) + 1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)

    def test_random_real(self):
        # Real input round trips still yield the complex dtype.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)

    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        # NOTE(review): if self.rdt is neither float32 nor float64, `rtol`
        # stays unbound (NameError below); subclasses only use these two.
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10

        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)

            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)

    def test_invalid_sizes(self):
        # Empty input and negative n must both be rejected.
        assert_raises(ValueError, ifft, [])
        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
257
+
258
+
259
class TestDoubleIFFT(_TestIFFTBase):
    """Run the shared ifft() tests in double precision."""
    def setup_method(self):
        self.cdt = np.complex128
        self.rdt = np.float64
263
+
264
+
265
class TestSingleIFFT(_TestIFFTBase):
    """Run the shared ifft() tests in single precision."""
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
269
+
270
+
271
class _TestRFFTBase:
    """Shared rfft() tests; subclasses set self.cdt/self.rdt dtypes."""
    def setup_method(self):
        np.random.seed(1234)

    def test_definition(self):
        # rfft() output (FFTPACK packed layout) must match the reference
        # for both even- and odd-length input, and stay in the real dtype.
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            assert_equal(y.dtype, self.rdt)

    def test_invalid_sizes(self):
        assert_raises(ValueError, rfft, [])
        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)

    # See gh-5790
    class MockSeries:
        """Duck-typed array wrapper (pandas-Series-like) that forwards
        attribute access to the underlying ndarray."""
        def __init__(self, data):
            self.data = np.asarray(data)

        def __getattr__(self, item):
            try:
                return getattr(self.data, item)
            except AttributeError as e:
                raise AttributeError("'MockSeries' object "
                                     f"has no attribute '{item}'") from e

    def test_non_ndarray_with_dtype(self):
        x = np.array([1., 2., 3., 4., 5.])
        xs = _TestRFFTBase.MockSeries(x)

        expected = [1, 2, 3, 4, 5]
        rfft(xs)

        # Data should not have been overwritten
        assert_equal(x, expected)
        assert_equal(xs.data, expected)

    def test_complex_input(self):
        # rfft() is real-input only; complex input must raise TypeError.
        assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64))
312
+
313
+
314
class TestRFFTDouble(_TestRFFTBase):
    """Run the shared rfft() tests in double precision."""
    def setup_method(self):
        self.cdt = np.complex128
        self.rdt = np.float64
318
+
319
+
320
class TestRFFTSingle(_TestRFFTBase):
    """Run the shared rfft() tests in single precision."""
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
324
+
325
+
326
class _TestIRFFTBase:
    """Shared irfft() tests; subclasses set self.cdt/self.rdt and the
    comparison precision self.ndec."""
    def setup_method(self):
        np.random.seed(1234)

    def test_definition(self):
        # x* are packed real spectra; x*_1 are the equivalent full complex
        # spectra, so irfft(x) must also match ifft(x_1).
        x1 = [1,2,3,4,1,2,3,4]
        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
        x2 = [1,2,3,4,1,2,3,4,5]
        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]

        def _test(x, xr):
            y = irfft(np.array(x, dtype=self.rdt))
            y1 = direct_irdft(x)
            assert_equal(y.dtype, self.rdt)
            assert_array_almost_equal(y,y1, decimal=self.ndec)
            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)

        _test(x1, x1_1)
        _test(x2, x2_1)

    def test_random_real(self):
        # Round trips irfft(rfft(x)) and rfft(irfft(x)) must recover x.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = irfft(rfft(x))
            y2 = rfft(irfft(x))
            assert_equal(y1.dtype, self.rdt)
            assert_equal(y2.dtype, self.rdt)
            assert_array_almost_equal(y1, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
            assert_array_almost_equal(y2, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)

    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        # NOTE(review): `rtol` stays unbound for dtypes other than
        # float32/float64; subclasses only use those two.
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10

        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = irfft(rfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = rfft(irfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)

    def test_invalid_sizes(self):
        assert_raises(ValueError, irfft, [])
        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)

    def test_complex_input(self):
        # irfft() takes a packed real spectrum; complex input must raise.
        assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
379
+
380
+
381
+ # self.ndec is bogus; we should have a assert_array_approx_equal for number of
382
+ # significant digits
383
+
384
class TestIRFFTDouble(_TestIRFFTBase):
    """Run the shared irfft() tests in double precision."""
    def setup_method(self):
        self.cdt = np.complex128
        self.rdt = np.float64
        self.ndec = 14
389
+
390
+
391
class TestIRFFTSingle(_TestIRFFTBase):
    """Run the shared irfft() tests in single precision."""
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
        self.ndec = 5
396
+
397
+
398
class Testfft2:
    """Tests for the 2-D transform fft2()."""
    def setup_method(self):
        np.random.seed(1234)

    def test_regression_244(self):
        """FFT returns wrong result with axes parameter."""
        # fftn (and hence fft2) used to break when both axes and shape were
        # used
        x = numpy.ones((4, 4, 2))
        y = fft2(x, shape=(8, 8), axes=(-3, -2))
        y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
        assert_array_almost_equal(y, y_r)

    def test_invalid_sizes(self):
        # Empty planes and negative shape entries must be rejected.
        assert_raises(ValueError, fft2, [[]])
        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
414
+
415
+
416
class TestFftnSingle:
    """fftn() precision tests: float32/float16 inputs must give complex64
    output that stays within a NULP bound of the float64 result."""
    def setup_method(self):
        np.random.seed(1234)

    def test_definition(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float32))
        assert_(y.dtype == np.complex64,
                msg="double precision output with single precision")

        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)

    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_size_accuracy_small(self, size):
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)

    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_size_accuracy_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)

    def test_definition_float16(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float16))
        assert_equal(y.dtype, np.complex64)
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)

    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_float16_input_small(self, size):
        # float16 input needs a much looser NULP bound than float32.
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 5e5)

    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_float16_input_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)

        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2e6)
475
+
476
+
477
class TestFftn:
    """Tests for the n-D transform fftn(): definition, `axes`/`shape`
    argument handling, and error cases."""
    def setup_method(self):
        np.random.seed(1234)

    def test_definition(self):
        # fftn() must match applying the 1-D FFT along every axis.
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(x)
        assert_array_almost_equal(y, direct_dftn(x))

        x = random((20, 26))
        assert_array_almost_equal(fftn(x), direct_dftn(x))

        x = random((5, 4, 3, 20))
        assert_array_almost_equal(fftn(x), direct_dftn(x))

    def test_axes_argument(self):
        # plane == ji_plane, x== kji_space
        # The *_plane/*_space literals below are hand-written transposes of
        # the same 3x3x3 cube, so every axes permutation can be checked
        # against an explicitly re-ordered copy of the data.
        plane1 = [[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]]
        plane2 = [[10, 11, 12],
                  [13, 14, 15],
                  [16, 17, 18]]
        plane3 = [[19, 20, 21],
                  [22, 23, 24],
                  [25, 26, 27]]
        ki_plane1 = [[1, 2, 3],
                     [10, 11, 12],
                     [19, 20, 21]]
        ki_plane2 = [[4, 5, 6],
                     [13, 14, 15],
                     [22, 23, 24]]
        ki_plane3 = [[7, 8, 9],
                     [16, 17, 18],
                     [25, 26, 27]]
        jk_plane1 = [[1, 10, 19],
                     [4, 13, 22],
                     [7, 16, 25]]
        jk_plane2 = [[2, 11, 20],
                     [5, 14, 23],
                     [8, 17, 26]]
        jk_plane3 = [[3, 12, 21],
                     [6, 15, 24],
                     [9, 18, 27]]
        kj_plane1 = [[1, 4, 7],
                     [10, 13, 16], [19, 22, 25]]
        kj_plane2 = [[2, 5, 8],
                     [11, 14, 17], [20, 23, 26]]
        kj_plane3 = [[3, 6, 9],
                     [12, 15, 18], [21, 24, 27]]
        ij_plane1 = [[1, 4, 7],
                     [2, 5, 8],
                     [3, 6, 9]]
        ij_plane2 = [[10, 13, 16],
                     [11, 14, 17],
                     [12, 15, 18]]
        ij_plane3 = [[19, 22, 25],
                     [20, 23, 26],
                     [21, 24, 27]]
        ik_plane1 = [[1, 10, 19],
                     [2, 11, 20],
                     [3, 12, 21]]
        ik_plane2 = [[4, 13, 22],
                     [5, 14, 23],
                     [6, 15, 24]]
        ik_plane3 = [[7, 16, 25],
                     [8, 17, 26],
                     [9, 18, 27]]
        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
        jik_space = [ik_plane1, ik_plane2, ik_plane3]
        jki_space = [ki_plane1, ki_plane2, ki_plane3]
        kij_space = [ij_plane1, ij_plane2, ij_plane3]
        x = array([plane1, plane2, plane3])

        # Full 3-D transforms over equivalent axes orderings.
        assert_array_almost_equal(fftn(x),
                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
        y = fftn(x, axes=(2, 1, 0))  # ijk_space
        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
        y = fftn(x, axes=(2, 0, 1))  # ikj_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
                                  fftn(ikj_space))
        y = fftn(x, axes=(1, 2, 0))  # jik_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
                                  fftn(jik_space))
        y = fftn(x, axes=(1, 0, 2))  # jki_space
        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
        y = fftn(x, axes=(0, 2, 1))  # kij_space
        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))

        # 2-D transforms over each pair of axes.
        y = fftn(x, axes=(-2, -1))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])

        y = fftn(x, axes=(1, 2))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])

        y = fftn(x, axes=(-3, -2))  # kj_plane
        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])

        y = fftn(x, axes=(-3, -1))  # ki_plane
        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])

        y = fftn(x, axes=(-1, -2))  # ij_plane
        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))

        y = fftn(x, axes=(-1, -3))  # ik_plane
        assert_array_almost_equal(fftn(ik_plane1),
                                  swapaxes(y[:, 0, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane2),
                                  swapaxes(y[:, 1, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane3),
                                  swapaxes(y[:, 2, :], -1, -2))

        y = fftn(x, axes=(-2, -3))  # jk_plane
        assert_array_almost_equal(fftn(jk_plane1),
                                  swapaxes(y[:, :, 0], -1, -2))
        assert_array_almost_equal(fftn(jk_plane2),
                                  swapaxes(y[:, :, 1], -1, -2))
        assert_array_almost_equal(fftn(jk_plane3),
                                  swapaxes(y[:, :, 2], -1, -2))

        # 1-D transforms along each axis.
        y = fftn(x, axes=(-1,))  # i_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
        y = fftn(x, axes=(-2,))  # j_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
        y = fftn(x, axes=(0,))  # k_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])

        # Empty axes tuple: transform nothing, return the input unchanged.
        y = fftn(x, axes=())  # point
        assert_array_almost_equal(y, x)

    def test_shape_argument(self):
        # `shape` zero-pads (or truncates) each axis before transforming.
        small_x = [[1, 2, 3],
                   [4, 5, 6]]
        large_x1 = [[1, 2, 3, 0],
                    [4, 5, 6, 0],
                    [0, 0, 0, 0],
                    [0, 0, 0, 0]]

        y = fftn(small_x, shape=(4, 4))
        assert_array_almost_equal(y, fftn(large_x1))

        y = fftn(small_x, shape=(3, 4))
        assert_array_almost_equal(y, fftn(large_x1[:-1]))

    def test_shape_axes_argument(self):
        small_x = [[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]]
        large_x1 = array([[1, 2, 3, 0],
                          [4, 5, 6, 0],
                          [7, 8, 9, 0],
                          [0, 0, 0, 0]])
        y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, shape=(4, 4), axes=(-1, -2))

        assert_array_almost_equal(y, swapaxes(
            fftn(swapaxes(large_x1, -1, -2)), -1, -2))

    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))

        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))

        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4, 4, 2))
        y = fftn(x, axes=(-3, -2), shape=(8, 8))
        assert_array_almost_equal(y,
                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))

    def test_shape_argument_more(self):
        # Mismatched shape/axes lengths must raise with a clear message.
        x = zeros((4, 4, 2))
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fftn(x, shape=(8, 8, 2, 1))

    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            fftn([[]])

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            fftn([[1, 1], [2, 2]], (4, -3))
691
+
692
+
693
class TestIfftn:
    """Tests for the n-D inverse transform ifftn()."""
    # Placeholders; the parametrized tests supply concrete dtypes.
    dtype = None
    cdtype = None

    def setup_method(self):
        np.random.seed(1234)

    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        # ifftn() must match the naive axis-by-axis reference IDFT within
        # a NULP bound, and promote real input to the complex dtype.
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = ifftn(x)
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)

        x = random((20, 26))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)

        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)

    @pytest.mark.parametrize('maxnlp', [2000, 3500])
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random_complex(self, maxnlp, size):
        # Round trips ifftn(fftn(x)) and fftn(ifftn(x)) must recover x.
        x = random([size, size]) + 1j*random([size, size])
        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)

    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            ifftn([[]])

        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            ifftn([[1, 1], [2, 2]], (4, -3))
734
+
735
+
736
+ class FakeArray:
737
+ def __init__(self, data):
738
+ self._data = data
739
+ self.__array_interface__ = data.__array_interface__
740
+
741
+
742
+ class FakeArray2:
743
+ def __init__(self, data):
744
+ self._data = data
745
+
746
+ def __array__(self, dtype=None, copy=None):
747
+ return self._data
748
+
749
+
750
+ class TestOverwrite:
751
+ """Check input overwrite behavior of the FFT functions."""
752
+
753
+ real_dtypes = (np.float32, np.float64)
754
+ dtypes = real_dtypes + (np.complex64, np.complex128)
755
+ fftsizes = [8, 16, 32]
756
+
757
+ def _check(self, x, routine, fftsize, axis, overwrite_x):
758
+ x2 = x.copy()
759
+ for fake in [lambda x: x, FakeArray, FakeArray2]:
760
+ routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
761
+
762
+ sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
763
+ routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
764
+ if not overwrite_x:
765
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
766
+
767
+ def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
768
+ fftsize, overwrite_x):
769
+ np.random.seed(1234)
770
+ if np.issubdtype(dtype, np.complexfloating):
771
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
772
+ else:
773
+ data = np.random.randn(*shape)
774
+ data = data.astype(dtype)
775
+
776
+ self._check(data, routine, fftsize, axis,
777
+ overwrite_x=overwrite_x)
778
+
779
+ @pytest.mark.parametrize('dtype', dtypes)
780
+ @pytest.mark.parametrize('fftsize', fftsizes)
781
+ @pytest.mark.parametrize('overwrite_x', [True, False])
782
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
783
+ ((16, 2), 0),
784
+ ((2, 16), 1)])
785
+ def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
786
+ overwritable = (np.complex128, np.complex64)
787
+ self._check_1d(fft, dtype, shape, axes, overwritable,
788
+ fftsize, overwrite_x)
789
+ self._check_1d(ifft, dtype, shape, axes, overwritable,
790
+ fftsize, overwrite_x)
791
+
792
+ @pytest.mark.parametrize('dtype', real_dtypes)
793
+ @pytest.mark.parametrize('fftsize', fftsizes)
794
+ @pytest.mark.parametrize('overwrite_x', [True, False])
795
+ @pytest.mark.parametrize('shape,axes', [((16,), -1),
796
+ ((16, 2), 0),
797
+ ((2, 16), 1)])
798
+ def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
799
+ overwritable = self.real_dtypes
800
+ self._check_1d(irfft, dtype, shape, axes, overwritable,
801
+ fftsize, overwrite_x)
802
+ self._check_1d(rfft, dtype, shape, axes, overwritable,
803
+ fftsize, overwrite_x)
804
+
805
+ def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
806
+ overwrite_x):
807
+ np.random.seed(1234)
808
+ if np.issubdtype(dtype, np.complexfloating):
809
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
810
+ else:
811
+ data = np.random.randn(*shape)
812
+ data = data.astype(dtype)
813
+
814
+ def fftshape_iter(shp):
815
+ if len(shp) <= 0:
816
+ yield ()
817
+ else:
818
+ for j in (shp[0]//2, shp[0], shp[0]*2):
819
+ for rest in fftshape_iter(shp[1:]):
820
+ yield (j,) + rest
821
+
822
+ if axes is None:
823
+ part_shape = shape
824
+ else:
825
+ part_shape = tuple(np.take(shape, axes))
826
+
827
+ for fftshape in fftshape_iter(part_shape):
828
+ self._check(data, routine, fftshape, axes,
829
+ overwrite_x=overwrite_x)
830
+ if data.ndim > 1:
831
+ self._check(data.T, routine, fftshape, axes,
832
+ overwrite_x=overwrite_x)
833
+
834
+ @pytest.mark.parametrize('dtype', dtypes)
835
+ @pytest.mark.parametrize('overwrite_x', [True, False])
836
+ @pytest.mark.parametrize('shape,axes', [((16,), None),
837
+ ((16,), (0,)),
838
+ ((16, 2), (0,)),
839
+ ((2, 16), (1,)),
840
+ ((8, 16), None),
841
+ ((8, 16), (0, 1)),
842
+ ((8, 16, 2), (0, 1)),
843
+ ((8, 16, 2), (1, 2)),
844
+ ((8, 16, 2), (0,)),
845
+ ((8, 16, 2), (1,)),
846
+ ((8, 16, 2), (2,)),
847
+ ((8, 16, 2), None),
848
+ ((8, 16, 2), (0, 1, 2))])
849
+ def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
850
+ overwritable = (np.complex128, np.complex64)
851
+ self._check_nd_one(fftn, dtype, shape, axes, overwritable,
852
+ overwrite_x)
853
+ self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
854
+ overwrite_x)
855
+
856
+
857
+ @pytest.mark.parametrize('func', [fftn, ifftn, fft2])
858
+ def test_shape_axes_ndarray(func):
859
+ # Test fftn and ifftn work with NumPy arrays for shape and axes arguments
860
+ # Regression test for gh-13342
861
+ a = np.random.rand(10, 10)
862
+
863
+ expect = func(a, shape=(5, 5))
864
+ actual = func(a, shape=np.array([5, 5]))
865
+ assert_equal(expect, actual)
866
+
867
+ expect = func(a, axes=(-1,))
868
+ actual = func(a, axes=np.array([-1,]))
869
+ assert_equal(expect, actual)
870
+
871
+ expect = func(a, shape=(4, 7), axes=(1, 0))
872
+ actual = func(a, shape=np.array([4, 7]), axes=np.array([1, 0]))
873
+ assert_equal(expect, actual)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_helper.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by Pearu Peterson, September 2002
2
+
3
+ __usage__ = """
4
+ Build fftpack:
5
+ python setup_fftpack.py build
6
+ Run tests if scipy is installed:
7
+ python -c 'import scipy;scipy.fftpack.test(<level>)'
8
+ Run tests if fftpack is not installed:
9
+ python tests/test_helper.py [<level>]
10
+ """
11
+
12
+ from numpy.testing import assert_array_almost_equal
13
+ from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq
14
+
15
+ from numpy import pi, random
16
+
17
+ class TestFFTShift:
18
+
19
+ def test_definition(self):
20
+ x = [0,1,2,3,4,-4,-3,-2,-1]
21
+ y = [-4,-3,-2,-1,0,1,2,3,4]
22
+ assert_array_almost_equal(fftshift(x),y)
23
+ assert_array_almost_equal(ifftshift(y),x)
24
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
25
+ y = [-5,-4,-3,-2,-1,0,1,2,3,4]
26
+ assert_array_almost_equal(fftshift(x),y)
27
+ assert_array_almost_equal(ifftshift(y),x)
28
+
29
+ def test_inverse(self):
30
+ for n in [1,4,9,100,211]:
31
+ x = random.random((n,))
32
+ assert_array_almost_equal(ifftshift(fftshift(x)),x)
33
+
34
+
35
+ class TestFFTFreq:
36
+
37
+ def test_definition(self):
38
+ x = [0,1,2,3,4,-4,-3,-2,-1]
39
+ assert_array_almost_equal(9*fftfreq(9),x)
40
+ assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
41
+ x = [0,1,2,3,4,-5,-4,-3,-2,-1]
42
+ assert_array_almost_equal(10*fftfreq(10),x)
43
+ assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
44
+
45
+
46
+ class TestRFFTFreq:
47
+
48
+ def test_definition(self):
49
+ x = [0,1,1,2,2,3,3,4,4]
50
+ assert_array_almost_equal(9*rfftfreq(9),x)
51
+ assert_array_almost_equal(9*pi*rfftfreq(9,pi),x)
52
+ x = [0,1,1,2,2,3,3,4,4,5]
53
+ assert_array_almost_equal(10*rfftfreq(10),x)
54
+ assert_array_almost_equal(10*pi*rfftfreq(10,pi),x)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_import.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test possibility of patching fftpack with pyfftw.
2
+
3
+ No module source outside of scipy.fftpack should contain an import of
4
+ the form `from scipy.fftpack import ...`, so that a simple replacement
5
+ of scipy.fftpack by the corresponding fftw interface completely swaps
6
+ the two FFT implementations.
7
+
8
+ Because this simply inspects source files, we only need to run the test
9
+ on one version of Python.
10
+ """
11
+
12
+
13
+ from pathlib import Path
14
+ import re
15
+ import tokenize
16
+ from numpy.testing import assert_
17
+ import scipy
18
+
19
+ class TestFFTPackImport:
20
+ def test_fftpack_import(self):
21
+ base = Path(scipy.__file__).parent
22
+ regexp = r"\s*from.+\.fftpack import .*\n"
23
+ for path in base.rglob("*.py"):
24
+ if base / "fftpack" in path.parents:
25
+ continue
26
+ # use tokenize to auto-detect encoding on systems where no
27
+ # default encoding is defined (e.g., LANG='C')
28
+ with tokenize.open(str(path)) as file:
29
+ assert_(all(not re.fullmatch(regexp, line)
30
+ for line in file),
31
+ f"{path} contains an import from fftpack")
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_pseudo_diffs.py ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by Pearu Peterson, September 2002
2
+
3
+ __usage__ = """
4
+ Build fftpack:
5
+ python setup_fftpack.py build
6
+ Run tests if scipy is installed:
7
+ python -c 'import scipy;scipy.fftpack.test(<level>)'
8
+ Run tests if fftpack is not installed:
9
+ python tests/test_pseudo_diffs.py [<level>]
10
+ """
11
+
12
+ from numpy.testing import (assert_equal, assert_almost_equal,
13
+ assert_array_almost_equal)
14
+ from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
15
+ ihilbert, shift, fftfreq, cs_diff, sc_diff,
16
+ ss_diff, cc_diff)
17
+
18
+ import numpy as np
19
+ from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
20
+ from numpy.random import random
21
+
22
+
23
+ def direct_diff(x,k=1,period=None):
24
+ fx = fft(x)
25
+ n = len(fx)
26
+ if period is None:
27
+ period = 2*pi
28
+ w = fftfreq(n)*2j*pi/period*n
29
+ if k < 0:
30
+ w = 1 / w**k
31
+ w[0] = 0.0
32
+ else:
33
+ w = w**k
34
+ if n > 2000:
35
+ w[250:n-250] = 0.0
36
+ return ifft(w*fx).real
37
+
38
+
39
+ def direct_tilbert(x,h=1,period=None):
40
+ fx = fft(x)
41
+ n = len(fx)
42
+ if period is None:
43
+ period = 2*pi
44
+ w = fftfreq(n)*h*2*pi/period*n
45
+ w[0] = 1
46
+ w = 1j/tanh(w)
47
+ w[0] = 0j
48
+ return ifft(w*fx)
49
+
50
+
51
+ def direct_itilbert(x,h=1,period=None):
52
+ fx = fft(x)
53
+ n = len(fx)
54
+ if period is None:
55
+ period = 2*pi
56
+ w = fftfreq(n)*h*2*pi/period*n
57
+ w = -1j*tanh(w)
58
+ return ifft(w*fx)
59
+
60
+
61
+ def direct_hilbert(x):
62
+ fx = fft(x)
63
+ n = len(fx)
64
+ w = fftfreq(n)*n
65
+ w = 1j*sign(w)
66
+ return ifft(w*fx)
67
+
68
+
69
+ def direct_ihilbert(x):
70
+ return -direct_hilbert(x)
71
+
72
+
73
+ def direct_shift(x,a,period=None):
74
+ n = len(x)
75
+ if period is None:
76
+ k = fftfreq(n)*1j*n
77
+ else:
78
+ k = fftfreq(n)*2j*pi/period*n
79
+ return ifft(fft(x)*exp(k*a)).real
80
+
81
+
82
+ class TestDiff:
83
+
84
+ def test_definition(self):
85
+ for n in [16,17,64,127,32]:
86
+ x = arange(n)*2*pi/n
87
+ assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
88
+ assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
89
+ assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
90
+ assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
91
+ assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
92
+ assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
93
+ assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
94
+ assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
95
+ assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
96
+ assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
97
+ assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
98
+ assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
99
+ assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
100
+ assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
101
+ for k in range(5):
102
+ assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
103
+ assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))
104
+
105
+ def test_period(self):
106
+ for n in [17,64]:
107
+ x = arange(n)/float(n)
108
+ assert_array_almost_equal(diff(sin(2*pi*x),period=1),
109
+ 2*pi*cos(2*pi*x))
110
+ assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
111
+ -(2*pi)**3*cos(2*pi*x))
112
+
113
+ def test_sin(self):
114
+ for n in [32,64,77]:
115
+ x = arange(n)*2*pi/n
116
+ assert_array_almost_equal(diff(sin(x)),cos(x))
117
+ assert_array_almost_equal(diff(cos(x)),-sin(x))
118
+ assert_array_almost_equal(diff(sin(x),2),-sin(x))
119
+ assert_array_almost_equal(diff(sin(x),4),sin(x))
120
+ assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
121
+ assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))
122
+
123
+ def test_expr(self):
124
+ for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
125
+ x = arange(n)*2*pi/n
126
+ f = sin(x)*cos(4*x)+exp(sin(3*x))
127
+ df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
128
+ ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
129
+ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
130
+ d1 = diff(f)
131
+ assert_array_almost_equal(d1,df)
132
+ assert_array_almost_equal(diff(df),ddf)
133
+ assert_array_almost_equal(diff(f,2),ddf)
134
+ assert_array_almost_equal(diff(ddf,-1),df)
135
+
136
+ def test_expr_large(self):
137
+ for n in [2048,4096]:
138
+ x = arange(n)*2*pi/n
139
+ f = sin(x)*cos(4*x)+exp(sin(3*x))
140
+ df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
141
+ ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
142
+ - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
143
+ assert_array_almost_equal(diff(f),df)
144
+ assert_array_almost_equal(diff(df),ddf)
145
+ assert_array_almost_equal(diff(ddf,-1),df)
146
+ assert_array_almost_equal(diff(f,2),ddf)
147
+
148
+ def test_int(self):
149
+ n = 64
150
+ x = arange(n)*2*pi/n
151
+ assert_array_almost_equal(diff(sin(x),-1),-cos(x))
152
+ assert_array_almost_equal(diff(sin(x),-2),-sin(x))
153
+ assert_array_almost_equal(diff(sin(x),-4),sin(x))
154
+ assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))
155
+
156
+ def test_random_even(self):
157
+ for k in [0,2,4,6]:
158
+ for n in [60,32,64,56,55]:
159
+ f = random((n,))
160
+ af = sum(f,axis=0)/n
161
+ f = f-af
162
+ # zeroing Nyquist mode:
163
+ f = diff(diff(f,1),-1)
164
+ assert_almost_equal(sum(f,axis=0),0.0)
165
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
166
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
167
+
168
+ def test_random_odd(self):
169
+ for k in [0,1,2,3,4,5,6]:
170
+ for n in [33,65,55]:
171
+ f = random((n,))
172
+ af = sum(f,axis=0)/n
173
+ f = f-af
174
+ assert_almost_equal(sum(f,axis=0),0.0)
175
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
176
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
177
+
178
+ def test_zero_nyquist(self):
179
+ for k in [0,1,2,3,4,5,6]:
180
+ for n in [32,33,64,56,55]:
181
+ f = random((n,))
182
+ af = sum(f,axis=0)/n
183
+ f = f-af
184
+ # zeroing Nyquist mode:
185
+ f = diff(diff(f,1),-1)
186
+ assert_almost_equal(sum(f,axis=0),0.0)
187
+ assert_array_almost_equal(diff(diff(f,k),-k),f)
188
+ assert_array_almost_equal(diff(diff(f,-k),k),f)
189
+
190
+
191
+ class TestTilbert:
192
+
193
+ def test_definition(self):
194
+ for h in [0.1,0.5,1,5.5,10]:
195
+ for n in [16,17,64,127]:
196
+ x = arange(n)*2*pi/n
197
+ y = tilbert(sin(x),h)
198
+ y1 = direct_tilbert(sin(x),h)
199
+ assert_array_almost_equal(y,y1)
200
+ assert_array_almost_equal(tilbert(sin(x),h),
201
+ direct_tilbert(sin(x),h))
202
+ assert_array_almost_equal(tilbert(sin(2*x),h),
203
+ direct_tilbert(sin(2*x),h))
204
+
205
+ def test_random_even(self):
206
+ for h in [0.1,0.5,1,5.5,10]:
207
+ for n in [32,64,56]:
208
+ f = random((n,))
209
+ af = sum(f,axis=0)/n
210
+ f = f-af
211
+ assert_almost_equal(sum(f,axis=0),0.0)
212
+ assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)
213
+
214
+ def test_random_odd(self):
215
+ for h in [0.1,0.5,1,5.5,10]:
216
+ for n in [33,65,55]:
217
+ f = random((n,))
218
+ af = sum(f,axis=0)/n
219
+ f = f-af
220
+ assert_almost_equal(sum(f,axis=0),0.0)
221
+ assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
222
+ assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
223
+
224
+
225
+ class TestITilbert:
226
+
227
+ def test_definition(self):
228
+ for h in [0.1,0.5,1,5.5,10]:
229
+ for n in [16,17,64,127]:
230
+ x = arange(n)*2*pi/n
231
+ y = itilbert(sin(x),h)
232
+ y1 = direct_itilbert(sin(x),h)
233
+ assert_array_almost_equal(y,y1)
234
+ assert_array_almost_equal(itilbert(sin(x),h),
235
+ direct_itilbert(sin(x),h))
236
+ assert_array_almost_equal(itilbert(sin(2*x),h),
237
+ direct_itilbert(sin(2*x),h))
238
+
239
+
240
+ class TestHilbert:
241
+
242
+ def test_definition(self):
243
+ for n in [16,17,64,127]:
244
+ x = arange(n)*2*pi/n
245
+ y = hilbert(sin(x))
246
+ y1 = direct_hilbert(sin(x))
247
+ assert_array_almost_equal(y,y1)
248
+ assert_array_almost_equal(hilbert(sin(2*x)),
249
+ direct_hilbert(sin(2*x)))
250
+
251
+ def test_tilbert_relation(self):
252
+ for n in [16,17,64,127]:
253
+ x = arange(n)*2*pi/n
254
+ f = sin(x)+cos(2*x)*sin(x)
255
+ y = hilbert(f)
256
+ y1 = direct_hilbert(f)
257
+ assert_array_almost_equal(y,y1)
258
+ y2 = tilbert(f,h=10)
259
+ assert_array_almost_equal(y,y2)
260
+
261
+ def test_random_odd(self):
262
+ for n in [33,65,55]:
263
+ f = random((n,))
264
+ af = sum(f,axis=0)/n
265
+ f = f-af
266
+ assert_almost_equal(sum(f,axis=0),0.0)
267
+ assert_array_almost_equal(ihilbert(hilbert(f)),f)
268
+ assert_array_almost_equal(hilbert(ihilbert(f)),f)
269
+
270
+ def test_random_even(self):
271
+ for n in [32,64,56]:
272
+ f = random((n,))
273
+ af = sum(f,axis=0)/n
274
+ f = f-af
275
+ # zeroing Nyquist mode:
276
+ f = diff(diff(f,1),-1)
277
+ assert_almost_equal(sum(f,axis=0),0.0)
278
+ assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
279
+ assert_array_almost_equal(hilbert(ihilbert(f)),f)
280
+
281
+
282
+ class TestIHilbert:
283
+
284
+ def test_definition(self):
285
+ for n in [16,17,64,127]:
286
+ x = arange(n)*2*pi/n
287
+ y = ihilbert(sin(x))
288
+ y1 = direct_ihilbert(sin(x))
289
+ assert_array_almost_equal(y,y1)
290
+ assert_array_almost_equal(ihilbert(sin(2*x)),
291
+ direct_ihilbert(sin(2*x)))
292
+
293
+ def test_itilbert_relation(self):
294
+ for n in [16,17,64,127]:
295
+ x = arange(n)*2*pi/n
296
+ f = sin(x)+cos(2*x)*sin(x)
297
+ y = ihilbert(f)
298
+ y1 = direct_ihilbert(f)
299
+ assert_array_almost_equal(y,y1)
300
+ y2 = itilbert(f,h=10)
301
+ assert_array_almost_equal(y,y2)
302
+
303
+
304
+ class TestShift:
305
+
306
+ def test_definition(self):
307
+ for n in [18,17,64,127,32,2048,256]:
308
+ x = arange(n)*2*pi/n
309
+ for a in [0.1,3]:
310
+ assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))
311
+ assert_array_almost_equal(shift(sin(x),a),sin(x+a))
312
+ assert_array_almost_equal(shift(cos(x),a),cos(x+a))
313
+ assert_array_almost_equal(shift(cos(2*x)+sin(x),a),
314
+ cos(2*(x+a))+sin(x+a))
315
+ assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))
316
+ assert_array_almost_equal(shift(sin(x),2*pi),sin(x))
317
+ assert_array_almost_equal(shift(sin(x),pi),-sin(x))
318
+ assert_array_almost_equal(shift(sin(x),pi/2),cos(x))
319
+
320
+
321
+ class TestOverwrite:
322
+ """Check input overwrite behavior """
323
+
324
+ real_dtypes = (np.float32, np.float64)
325
+ dtypes = real_dtypes + (np.complex64, np.complex128)
326
+
327
+ def _check(self, x, routine, *args, **kwargs):
328
+ x2 = x.copy()
329
+ routine(x2, *args, **kwargs)
330
+ sig = routine.__name__
331
+ if args:
332
+ sig += repr(args)
333
+ if kwargs:
334
+ sig += repr(kwargs)
335
+ assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
336
+
337
+ def _check_1d(self, routine, dtype, shape, *args, **kwargs):
338
+ np.random.seed(1234)
339
+ if np.issubdtype(dtype, np.complexfloating):
340
+ data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
341
+ else:
342
+ data = np.random.randn(*shape)
343
+ data = data.astype(dtype)
344
+ self._check(data, routine, *args, **kwargs)
345
+
346
+ def test_diff(self):
347
+ for dtype in self.dtypes:
348
+ self._check_1d(diff, dtype, (16,))
349
+
350
+ def test_tilbert(self):
351
+ for dtype in self.dtypes:
352
+ self._check_1d(tilbert, dtype, (16,), 1.6)
353
+
354
+ def test_itilbert(self):
355
+ for dtype in self.dtypes:
356
+ self._check_1d(itilbert, dtype, (16,), 1.6)
357
+
358
+ def test_hilbert(self):
359
+ for dtype in self.dtypes:
360
+ self._check_1d(hilbert, dtype, (16,))
361
+
362
+ def test_cs_diff(self):
363
+ for dtype in self.dtypes:
364
+ self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)
365
+
366
+ def test_sc_diff(self):
367
+ for dtype in self.dtypes:
368
+ self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)
369
+
370
+ def test_ss_diff(self):
371
+ for dtype in self.dtypes:
372
+ self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)
373
+
374
+ def test_cc_diff(self):
375
+ for dtype in self.dtypes:
376
+ self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)
377
+
378
+ def test_shift(self):
379
+ for dtype in self.dtypes:
380
+ self._check_1d(shift, dtype, (16,), 1.0)
env-llmeval/lib/python3.10/site-packages/scipy/fftpack/tests/test_real_transforms.py ADDED
@@ -0,0 +1,815 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from os.path import join, dirname
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_almost_equal, assert_equal
5
+ import pytest
6
+ from pytest import raises as assert_raises
7
+
8
+ from scipy.fftpack._realtransforms import (
9
+ dct, idct, dst, idst, dctn, idctn, dstn, idstn)
10
+
11
+ # Matlab reference data
12
+ MDATA = np.load(join(dirname(__file__), 'test.npz'))
13
+ X = [MDATA['x%d' % i] for i in range(8)]
14
+ Y = [MDATA['y%d' % i] for i in range(8)]
15
+
16
+ # FFTW reference data: the data are organized as follows:
17
+ # * SIZES is an array containing all available sizes
18
+ # * for every type (1, 2, 3, 4) and every size, the array dct_type_size
19
+ # contains the output of the DCT applied to the input np.linspace(0, size-1,
20
+ # size)
21
+ FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
22
+ FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
23
+ FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
24
+
25
+
26
+ def fftw_dct_ref(type, size, dt):
27
+ x = np.linspace(0, size-1, size).astype(dt)
28
+ dt = np.result_type(np.float32, dt)
29
+ if dt == np.float64:
30
+ data = FFTWDATA_DOUBLE
31
+ elif dt == np.float32:
32
+ data = FFTWDATA_SINGLE
33
+ else:
34
+ raise ValueError()
35
+ y = (data['dct_%d_%d' % (type, size)]).astype(dt)
36
+ return x, y, dt
37
+
38
+
39
+ def fftw_dst_ref(type, size, dt):
40
+ x = np.linspace(0, size-1, size).astype(dt)
41
+ dt = np.result_type(np.float32, dt)
42
+ if dt == np.float64:
43
+ data = FFTWDATA_DOUBLE
44
+ elif dt == np.float32:
45
+ data = FFTWDATA_SINGLE
46
+ else:
47
+ raise ValueError()
48
+ y = (data['dst_%d_%d' % (type, size)]).astype(dt)
49
+ return x, y, dt
50
+
51
+
52
+ def dct_2d_ref(x, **kwargs):
53
+ """Calculate reference values for testing dct2."""
54
+ x = np.array(x, copy=True)
55
+ for row in range(x.shape[0]):
56
+ x[row, :] = dct(x[row, :], **kwargs)
57
+ for col in range(x.shape[1]):
58
+ x[:, col] = dct(x[:, col], **kwargs)
59
+ return x
60
+
61
+
62
+ def idct_2d_ref(x, **kwargs):
63
+ """Calculate reference values for testing idct2."""
64
+ x = np.array(x, copy=True)
65
+ for row in range(x.shape[0]):
66
+ x[row, :] = idct(x[row, :], **kwargs)
67
+ for col in range(x.shape[1]):
68
+ x[:, col] = idct(x[:, col], **kwargs)
69
+ return x
70
+
71
+
72
+ def dst_2d_ref(x, **kwargs):
73
+ """Calculate reference values for testing dst2."""
74
+ x = np.array(x, copy=True)
75
+ for row in range(x.shape[0]):
76
+ x[row, :] = dst(x[row, :], **kwargs)
77
+ for col in range(x.shape[1]):
78
+ x[:, col] = dst(x[:, col], **kwargs)
79
+ return x
80
+
81
+
82
+ def idst_2d_ref(x, **kwargs):
83
+ """Calculate reference values for testing idst2."""
84
+ x = np.array(x, copy=True)
85
+ for row in range(x.shape[0]):
86
+ x[row, :] = idst(x[row, :], **kwargs)
87
+ for col in range(x.shape[1]):
88
+ x[:, col] = idst(x[:, col], **kwargs)
89
+ return x
90
+
91
+
92
+ def naive_dct1(x, norm=None):
93
+ """Calculate textbook definition version of DCT-I."""
94
+ x = np.array(x, copy=True)
95
+ N = len(x)
96
+ M = N-1
97
+ y = np.zeros(N)
98
+ m0, m = 1, 2
99
+ if norm == 'ortho':
100
+ m0 = np.sqrt(1.0/M)
101
+ m = np.sqrt(2.0/M)
102
+ for k in range(N):
103
+ for n in range(1, N-1):
104
+ y[k] += m*x[n]*np.cos(np.pi*n*k/M)
105
+ y[k] += m0 * x[0]
106
+ y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1)
107
+ if norm == 'ortho':
108
+ y[0] *= 1/np.sqrt(2)
109
+ y[N-1] *= 1/np.sqrt(2)
110
+ return y
111
+
112
+
113
+ def naive_dst1(x, norm=None):
114
+ """Calculate textbook definition version of DST-I."""
115
+ x = np.array(x, copy=True)
116
+ N = len(x)
117
+ M = N+1
118
+ y = np.zeros(N)
119
+ for k in range(N):
120
+ for n in range(N):
121
+ y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M)
122
+ if norm == 'ortho':
123
+ y *= np.sqrt(0.5/M)
124
+ return y
125
+
126
+
127
+ def naive_dct4(x, norm=None):
128
+ """Calculate textbook definition version of DCT-IV."""
129
+ x = np.array(x, copy=True)
130
+ N = len(x)
131
+ y = np.zeros(N)
132
+ for k in range(N):
133
+ for n in range(N):
134
+ y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N))
135
+ if norm == 'ortho':
136
+ y *= np.sqrt(2.0/N)
137
+ else:
138
+ y *= 2
139
+ return y
140
+
141
+
142
+ def naive_dst4(x, norm=None):
143
+ """Calculate textbook definition version of DST-IV."""
144
+ x = np.array(x, copy=True)
145
+ N = len(x)
146
+ y = np.zeros(N)
147
+ for k in range(N):
148
+ for n in range(N):
149
+ y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N))
150
+ if norm == 'ortho':
151
+ y *= np.sqrt(2.0/N)
152
+ else:
153
+ y *= 2
154
+ return y
155
+
156
+
157
+ class TestComplex:
158
+ def test_dct_complex64(self):
159
+ y = dct(1j*np.arange(5, dtype=np.complex64))
160
+ x = 1j*dct(np.arange(5))
161
+ assert_array_almost_equal(x, y)
162
+
163
+ def test_dct_complex(self):
164
+ y = dct(np.arange(5)*1j)
165
+ x = 1j*dct(np.arange(5))
166
+ assert_array_almost_equal(x, y)
167
+
168
+ def test_idct_complex(self):
169
+ y = idct(np.arange(5)*1j)
170
+ x = 1j*idct(np.arange(5))
171
+ assert_array_almost_equal(x, y)
172
+
173
+ def test_dst_complex64(self):
174
+ y = dst(np.arange(5, dtype=np.complex64)*1j)
175
+ x = 1j*dst(np.arange(5))
176
+ assert_array_almost_equal(x, y)
177
+
178
+ def test_dst_complex(self):
179
+ y = dst(np.arange(5)*1j)
180
+ x = 1j*dst(np.arange(5))
181
+ assert_array_almost_equal(x, y)
182
+
183
+ def test_idst_complex(self):
184
+ y = idst(np.arange(5)*1j)
185
+ x = 1j*idst(np.arange(5))
186
+ assert_array_almost_equal(x, y)
187
+
188
+
189
+ class _TestDCTBase:
190
+ def setup_method(self):
191
+ self.rdt = None
192
+ self.dec = 14
193
+ self.type = None
194
+
195
+ def test_definition(self):
196
+ for i in FFTWDATA_SIZES:
197
+ x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
198
+ y = dct(x, type=self.type)
199
+ assert_equal(y.dtype, dt)
200
+ # XXX: we divide by np.max(y) because the tests fail otherwise. We
201
+ # should really use something like assert_array_approx_equal. The
202
+ # difference is due to fftw using a better algorithm w.r.t error
203
+ # propagation compared to the ones from fftpack.
204
+ assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
205
+ err_msg="Size %d failed" % i)
206
+
207
+ def test_axis(self):
208
+ nt = 2
209
+ for i in [7, 8, 9, 16, 32, 64]:
210
+ x = np.random.randn(nt, i)
211
+ y = dct(x, type=self.type)
212
+ for j in range(nt):
213
+ assert_array_almost_equal(y[j], dct(x[j], type=self.type),
214
+ decimal=self.dec)
215
+
216
+ x = x.T
217
+ y = dct(x, axis=0, type=self.type)
218
+ for j in range(nt):
219
+ assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
220
+ decimal=self.dec)
221
+
222
+
223
+ class _TestDCTIBase(_TestDCTBase):
224
+ def test_definition_ortho(self):
225
+ # Test orthornomal mode.
226
+ dt = np.result_type(np.float32, self.rdt)
227
+ for xr in X:
228
+ x = np.array(xr, dtype=self.rdt)
229
+ y = dct(x, norm='ortho', type=1)
230
+ y2 = naive_dct1(x, norm='ortho')
231
+ assert_equal(y.dtype, dt)
232
+ assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
233
+
234
+ class _TestDCTIIBase(_TestDCTBase):
235
+ def test_definition_matlab(self):
236
+ # Test correspondence with MATLAB (orthornomal mode).
237
+ dt = np.result_type(np.float32, self.rdt)
238
+ for xr, yr in zip(X, Y):
239
+ x = np.array(xr, dtype=dt)
240
+ y = dct(x, norm="ortho", type=2)
241
+ assert_equal(y.dtype, dt)
242
+ assert_array_almost_equal(y, yr, decimal=self.dec)
243
+
244
+
245
+ class _TestDCTIIIBase(_TestDCTBase):
246
+ def test_definition_ortho(self):
247
+ # Test orthornomal mode.
248
+ dt = np.result_type(np.float32, self.rdt)
249
+ for xr in X:
250
+ x = np.array(xr, dtype=self.rdt)
251
+ y = dct(x, norm='ortho', type=2)
252
+ xi = dct(y, norm="ortho", type=3)
253
+ assert_equal(xi.dtype, dt)
254
+ assert_array_almost_equal(xi, x, decimal=self.dec)
255
+
256
class _TestDCTIVBase(_TestDCTBase):
    def test_definition_ortho(self):
        """DCT-IV in orthonormal mode must match the naive reference."""
        expected_dtype = np.result_type(np.float32, self.rdt)
        for sample in X:
            vec = np.array(sample, dtype=self.rdt)
            got = dct(vec, norm='ortho', type=4)
            ref = naive_dct4(vec, norm='ortho')
            assert_equal(got.dtype, expected_dtype)
            # Compare after normalizing both sides by the same factor.
            scale = np.max(got)
            assert_array_almost_equal(got / scale, ref / scale,
                                      decimal=self.dec)
268
class TestDCTIDouble(_TestDCTIBase):
    """DCT-I, float64 input, 10-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 10
        self.rdt = np.float64
275
class TestDCTIFloat(_TestDCTIBase):
    """DCT-I, float32 input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = np.float32
282
class TestDCTIInt(_TestDCTIBase):
    """DCT-I, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 5
        self.rdt = int
289
class TestDCTIIDouble(_TestDCTIIBase):
    """DCT-II, float64 input, 10-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 10
        self.rdt = np.float64
296
class TestDCTIIFloat(_TestDCTIIBase):
    """DCT-II, float32 input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 5
        self.rdt = np.float32
303
class TestDCTIIInt(_TestDCTIIBase):
    """DCT-II, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 5
        self.rdt = int
310
class TestDCTIIIDouble(_TestDCTIIIBase):
    """DCT-III, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 14
        self.rdt = np.float64
317
class TestDCTIIIFloat(_TestDCTIIIBase):
    """DCT-III, float32 input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 5
        self.rdt = np.float32
324
class TestDCTIIIInt(_TestDCTIIIBase):
    """DCT-III, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 5
        self.rdt = int
331
class TestDCTIVDouble(_TestDCTIVBase):
    """DCT-IV, float64 input, 12-decimal tolerance.

    Fix: ``self.type`` was 3, so the base class's FFTW-reference test
    exercised DCT-III even though this class derives from
    ``_TestDCTIVBase``; the IDCT-IV classes correctly use 4.
    """

    def setup_method(self):
        self.rdt = np.float64
        self.dec = 12
        self.type = 4  # was 3 — copy/paste error from the DCT-III classes
338
class TestDCTIVFloat(_TestDCTIVBase):
    """DCT-IV, float32 input, 5-decimal tolerance.

    Fix: ``self.type`` was 3 (DCT-III) despite the DCT-IV base class;
    corrected to 4 to match the transform the class name promises.
    """

    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 4  # was 3 — copy/paste error from the DCT-III classes
345
class TestDCTIVInt(_TestDCTIVBase):
    """DCT-IV, integer input, 5-decimal tolerance.

    Fix: ``self.type`` was 3 (DCT-III) despite the DCT-IV base class;
    corrected to 4 to match the transform the class name promises.
    """

    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4  # was 3 — copy/paste error from the DCT-III classes
352
class _TestIDCTBase:
    def setup_method(self):
        # Subclasses fill these in: input dtype, tolerance, DCT type.
        self.rdt = None
        self.dec = 14
        self.type = None

    def test_definition(self):
        """idct of the FFTW reference output must recover the input."""
        for size in FFTWDATA_SIZES:
            ref_in, ref_out, expected_dtype = fftw_dct_ref(self.type, size,
                                                           self.rdt)
            rec = idct(ref_out, type=self.type)
            # Undo the unnormalized-transform scaling: DCT-I scales by
            # 2*(N-1), the other types by 2*N.
            if self.type == 1:
                rec /= 2 * (size - 1)
            else:
                rec /= 2 * size
            assert_equal(rec.dtype, expected_dtype)
            # XXX: normalize both sides by the same factor; fftw's error
            # propagation is better than fftpack's, so a raw comparison
            # fails. Something like assert_array_approx_equal would be
            # better here.
            scale = np.max(rec)
            assert_array_almost_equal(rec / scale, ref_in / scale,
                                      decimal=self.dec,
                                      err_msg="Size %d failed" % size)
375
class TestIDCTIDouble(_TestIDCTBase):
    """IDCT-I, float64 input, 10-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 10
        self.rdt = np.float64
382
class TestIDCTIFloat(_TestIDCTBase):
    """IDCT-I, float32 input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = np.float32
389
class TestIDCTIInt(_TestIDCTBase):
    """IDCT-I, integer input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = int
396
class TestIDCTIIDouble(_TestIDCTBase):
    """IDCT-II, float64 input, 10-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 10
        self.rdt = np.float64
403
class TestIDCTIIFloat(_TestIDCTBase):
    """IDCT-II, float32 input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 5
        self.rdt = np.float32
410
class TestIDCTIIInt(_TestIDCTBase):
    """IDCT-II, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 5
        self.rdt = int
417
class TestIDCTIIIDouble(_TestIDCTBase):
    """IDCT-III, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 14
        self.rdt = np.float64
424
class TestIDCTIIIFloat(_TestIDCTBase):
    """IDCT-III, float32 input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 5
        self.rdt = np.float32
431
class TestIDCTIIIInt(_TestIDCTBase):
    """IDCT-III, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 5
        self.rdt = int
437
class TestIDCTIVDouble(_TestIDCTBase):
    """IDCT-IV, float64 input, 12-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 12
        self.rdt = np.float64
444
class TestIDCTIVFloat(_TestIDCTBase):
    """IDCT-IV, float32 input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 5
        self.rdt = np.float32
451
class TestIDCTIVInt(_TestIDCTBase):
    """IDCT-IV, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 5
        self.rdt = int
457
class _TestDSTBase:
    def setup_method(self):
        # Subclasses fill these in: input dtype, tolerance, DST type.
        self.rdt = None
        self.dec = None
        self.type = None

    def test_definition(self):
        """dst output must match the FFTW reference tables."""
        for size in FFTWDATA_SIZES:
            ref_in, ref_out, expected_dtype = fftw_dst_ref(self.type, size,
                                                           self.rdt)
            got = dst(ref_in, type=self.type)
            assert_equal(got.dtype, expected_dtype)
            # XXX: normalize both sides by the same factor; fftw's error
            # propagation is better than fftpack's, so a raw comparison
            # fails. Something like assert_array_approx_equal would be
            # better here.
            scale = np.max(got)
            assert_array_almost_equal(got / scale, ref_out / scale,
                                      decimal=self.dec,
                                      err_msg="Size %d failed" % size)
476
class _TestDSTIBase(_TestDSTBase):
    def test_definition_ortho(self):
        """DST-I in orthonormal mode must match the naive reference."""
        expected_dtype = np.result_type(np.float32, self.rdt)
        for sample in X:
            vec = np.array(sample, dtype=self.rdt)
            got = dst(vec, norm='ortho', type=1)
            ref = naive_dst1(vec, norm='ortho')
            assert_equal(got.dtype, expected_dtype)
            # Compare after normalizing both sides by the same factor.
            scale = np.max(got)
            assert_array_almost_equal(got / scale, ref / scale,
                                      decimal=self.dec)
487
class _TestDSTIVBase(_TestDSTBase):
    def test_definition_ortho(self):
        """DST-IV in orthonormal mode must match the naive reference."""
        expected_dtype = np.result_type(np.float32, self.rdt)
        for sample in X:
            vec = np.array(sample, dtype=self.rdt)
            got = dst(vec, norm='ortho', type=4)
            ref = naive_dst4(vec, norm='ortho')
            assert_equal(got.dtype, expected_dtype)
            assert_array_almost_equal(got, ref, decimal=self.dec)
498
class TestDSTIDouble(_TestDSTIBase):
    """DST-I, float64 input, 12-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 12
        self.rdt = np.float64
505
class TestDSTIFloat(_TestDSTIBase):
    """DST-I, float32 input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = np.float32
512
class TestDSTIInt(_TestDSTIBase):
    """DST-I, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 5
        self.rdt = int
519
class TestDSTIIDouble(_TestDSTBase):
    """DST-II, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 14
        self.rdt = np.float64
526
class TestDSTIIFloat(_TestDSTBase):
    """DST-II, float32 input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 6
        self.rdt = np.float32
533
class TestDSTIIInt(_TestDSTBase):
    """DST-II, integer input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 6
        self.rdt = int
540
class TestDSTIIIDouble(_TestDSTBase):
    """DST-III, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 14
        self.rdt = np.float64
547
class TestDSTIIIFloat(_TestDSTBase):
    """DST-III, float32 input, 7-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 7
        self.rdt = np.float32
554
class TestDSTIIIInt(_TestDSTBase):
    """DST-III, integer input, 7-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 7
        self.rdt = int
561
class TestDSTIVDouble(_TestDSTIVBase):
    """DST-IV, float64 input, 12-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 12
        self.rdt = np.float64
568
class TestDSTIVFloat(_TestDSTIVBase):
    """DST-IV, float32 input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 4
        self.rdt = np.float32
575
class TestDSTIVInt(_TestDSTIVBase):
    """DST-IV, integer input, 5-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 5
        self.rdt = int
582
class _TestIDSTBase:
    def setup_method(self):
        # Subclasses fill these in: input dtype, tolerance, DST type.
        self.rdt = None
        self.dec = None
        self.type = None

    def test_definition(self):
        """idst of the FFTW reference output must recover the input."""
        for size in FFTWDATA_SIZES:
            ref_in, ref_out, expected_dtype = fftw_dst_ref(self.type, size,
                                                           self.rdt)
            rec = idst(ref_out, type=self.type)
            # Undo the unnormalized-transform scaling: DST-I scales by
            # 2*(N+1), the other types by 2*N.
            if self.type == 1:
                rec /= 2 * (size + 1)
            else:
                rec /= 2 * size
            assert_equal(rec.dtype, expected_dtype)
            # XXX: normalize both sides by the same factor; fftw's error
            # propagation is better than fftpack's, so a raw comparison
            # fails. Something like assert_array_approx_equal would be
            # better here.
            scale = np.max(rec)
            assert_array_almost_equal(rec / scale, ref_in / scale,
                                      decimal=self.dec,
                                      err_msg="Size %d failed" % size)
605
class TestIDSTIDouble(_TestIDSTBase):
    """IDST-I, float64 input, 12-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 12
        self.rdt = np.float64
612
class TestIDSTIFloat(_TestIDSTBase):
    """IDST-I, float32 input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = np.float32
619
class TestIDSTIInt(_TestIDSTBase):
    """IDST-I, integer input, 4-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 1
        self.dec = 4
        self.rdt = int
626
class TestIDSTIIDouble(_TestIDSTBase):
    """IDST-II, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 14
        self.rdt = np.float64
633
class TestIDSTIIFloat(_TestIDSTBase):
    """IDST-II, float32 input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 6
        self.rdt = np.float32
640
class TestIDSTIIInt(_TestIDSTBase):
    """IDST-II, integer input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 2
        self.dec = 6
        self.rdt = int
647
class TestIDSTIIIDouble(_TestIDSTBase):
    """IDST-III, float64 input, 14-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 14
        self.rdt = np.float64
654
class TestIDSTIIIFloat(_TestIDSTBase):
    """IDST-III, float32 input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 6
        self.rdt = np.float32
661
class TestIDSTIIIInt(_TestIDSTBase):
    """IDST-III, integer input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 3
        self.dec = 6
        self.rdt = int
668
class TestIDSTIVDouble(_TestIDSTBase):
    """IDST-IV, float64 input, 12-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 12
        self.rdt = np.float64
675
class TestIDSTIVFloat(_TestIDSTBase):
    """IDST-IV, float32 input, 6-decimal tolerance."""

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 6
        self.rdt = np.float32
682
class TestIDSTIVnt(_TestIDSTBase):
    """IDST-IV, integer input, 6-decimal tolerance.

    NOTE(review): the class name looks like a typo for ``TestIDSTIVInt``;
    kept as-is to avoid changing the public test identifier.
    """

    def setup_method(self):
        # transform type / tolerance / input dtype
        self.type = 4
        self.dec = 6
        self.rdt = int
689
class TestOverwrite:
    """Verify that overwrite_x=False never clobbers the caller's array."""

    real_dtypes = [np.float32, np.float64]

    def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
        # Run the routine on a copy; when overwriting was not requested
        # the copy must come back untouched.
        working = x.copy()
        routine(working, type, fftsize, axis, norm, overwrite_x=overwrite_x)

        sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
        if not overwrite_x:
            assert_equal(working, x, err_msg="spurious overwrite in %s" % sig)

    def _check_1d(self, routine, dtype, shape, axis):
        # Deterministic random input of the requested dtype/shape.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)

        # Sweep every transform type / overwrite flag / norm combination.
        for type in [1, 2, 3, 4]:
            for overwrite_x in [True, False]:
                for norm in [None, 'ortho']:
                    self._check(data, routine, type, None, axis, norm,
                                overwrite_x)

    def test_dct(self):
        for dtype in self.real_dtypes:
            self._check_1d(dct, dtype, (16,), -1)
            self._check_1d(dct, dtype, (16, 2), 0)
            self._check_1d(dct, dtype, (2, 16), 1)

    def test_idct(self):
        for dtype in self.real_dtypes:
            self._check_1d(idct, dtype, (16,), -1)
            self._check_1d(idct, dtype, (16, 2), 0)
            self._check_1d(idct, dtype, (2, 16), 1)

    def test_dst(self):
        for dtype in self.real_dtypes:
            self._check_1d(dst, dtype, (16,), -1)
            self._check_1d(dst, dtype, (16, 2), 0)
            self._check_1d(dst, dtype, (2, 16), 1)

    def test_idst(self):
        for dtype in self.real_dtypes:
            self._check_1d(idst, dtype, (16,), -1)
            self._check_1d(idst, dtype, (16, 2), 0)
            self._check_1d(idst, dtype, (2, 16), 1)
742
class Test_DCTN_IDCTN:
    """N-dimensional DCT/DST round-trip and reference-comparison tests.

    Class-level fixtures are shared by all parametrized cases: a fixed
    RandomState keeps the 32x16 test matrix deterministic across runs.
    """
    dec = 14                          # decimals for the shape=None round trip
    dct_type = [1, 2, 3, 4]
    norms = [None, 'ortho']
    rstate = np.random.RandomState(1234)
    shape = (32, 16)
    data = rstate.randn(*shape)

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [None,
                                      1, (1,), [1],
                                      0, (0,), [0],
                                      (0, 1), [0, 1],
                                      (-2, -1), [-2, -1]])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', ['ortho'])
    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
        # inverse(forward(x)) must recover x for every axes spelling
        # (scalar, tuple, list, negative indices).
        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
        assert_array_almost_equal(self.data, tmp, decimal=12)

    @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
                                                       (dstn, dst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
                                  dct_type, norm):
        # dctn/dstn over all axes must match the 2-D reference built from
        # successive 1-D transforms.
        y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
        y2 = fforward_ref(self.data, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)

    @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
                                                       (idstn, idst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', [None, 'ortho'])
    def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
                                   dct_type, norm):
        # Same comparison for the inverse transforms, fed with a forward
        # dctn of the fixture data.
        fdata = dctn(self.data, type=dct_type, norm=norm)
        y1 = finverse(fdata, type=dct_type, norm=norm)
        y2 = finverse_ref(fdata, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    def test_axes_and_shape(self, fforward, finverse):
        # Mismatched axes/shape lengths must raise a ValueError.
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=(0, 1))

        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=None)

        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape, axes=0)

    @pytest.mark.parametrize('fforward', [dctn, dstn])
    def test_shape(self, fforward):
        # shape= pads/truncates the transform to the requested size.
        tmp = fforward(self.data, shape=(128, 128), axes=None)
        assert_equal(tmp.shape, (128, 128))

    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [1, (1,), [1],
                                      0, (0,), [0]])
    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
        # shape=None with explicit axes must still round-trip.
        tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
        tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
env-llmeval/lib/python3.10/site-packages/scipy/io/mmio.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'mminfo', 'mmread', 'mmwrite', 'MMFile',
    'coo_matrix', 'asstr'
]


def __dir__():
    # Advertise only the deprecated public names to dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily resolve `scipy.io.mmio.<name>` through the shared deprecation
    # shim, which presumably warns and forwards to scipy.io._mmio — the
    # shim's exact behavior is defined in scipy._lib.deprecation.
    return _sub_module_deprecation(sub_package="io", module="mmio",
                                   private_modules=["_mmio"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/io/tests/data/array_float32_2d.sav ADDED
Binary file (3.19 kB). View file