Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py +13 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt +21 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt +294 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt +42 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py +102 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py +66 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py +2189 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_fourier.py +151 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py +1327 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py +1409 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py +0 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py +77 -0
- venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py +65 -0
- venv/lib/python3.10/site-packages/scipy/stats/__init__.py +643 -0
- venv/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py +642 -0
- venv/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/stats/_bws_test.py +177 -0
- venv/lib/python3.10/site-packages/scipy/stats/_censored_data.py +459 -0
- venv/lib/python3.10/site-packages/scipy/stats/_common.py +5 -0
- venv/lib/python3.10/site-packages/scipy/stats/_constants.py +39 -0
- venv/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py +0 -0
ckpts/universal/global_step40/zero/16.attention.query_key_value.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ac468ceabd93f514aa7d003d1dbab05b8e68035900d5d25ef6938c4616ea959c
|
3 |
+
size 50332843
|
ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4da93b9372f8c88f63165119a244af2e9becd0ac17f694915d6905196348d603
|
3 |
+
size 33555612
|
ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4313de6920348f98073a99238c9a87ab3cb78baba2bd66d265f5e6b76c80a5b5
|
3 |
+
size 33555627
|
ckpts/universal/global_step40/zero/17.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b3089e77b53620c3658b64c419e0ef3fc307e45c68fdaff018ade8e4c68bd36d
|
3 |
+
size 33555533
|
ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:66912cee30aeef84c558b59578dbd7376a05e35377acd70a85a2d8e02035c4ce
|
3 |
+
size 9293
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.82 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc
ADDED
Binary file (52.3 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc
ADDED
Binary file (8.73 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc
ADDED
Binary file (28.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc
ADDED
Binary file (47.6 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc
ADDED
Binary file (83.2 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc
ADDED
Binary file (8.33 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc
ADDED
Binary file (2.84 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc
ADDED
Binary file (979 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc
ADDED
Binary file (666 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc
ADDED
Binary file (735 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc
ADDED
Binary file (822 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc
ADDED
Binary file (979 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
import numpy
|
3 |
+
|
4 |
+
# list of numarray data types
|
5 |
+
integer_types: list[type] = [
|
6 |
+
numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
|
7 |
+
numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
|
8 |
+
|
9 |
+
float_types: list[type] = [numpy.float32, numpy.float64]
|
10 |
+
|
11 |
+
complex_types: list[type] = [numpy.complex64, numpy.complex128]
|
12 |
+
|
13 |
+
types: list[type] = integer_types + float_types
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (562 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc
ADDED
Binary file (4.14 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc
ADDED
Binary file (1.9 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc
ADDED
Binary file (63.9 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc
ADDED
Binary file (5.23 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc
ADDED
Binary file (46.8 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc
ADDED
Binary file (36.2 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc
ADDED
Binary file (60 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc
ADDED
Binary file (1.88 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc
ADDED
Binary file (2.01 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
1 1 1 1 1 1 1
|
2 |
+
1 1 1 1 1 1 1
|
3 |
+
1 1 1 1 1 1 1
|
4 |
+
1 1 1 1 1 1 1
|
5 |
+
1 1 1 1 1 1 1
|
6 |
+
1 1 1 1 1 1 1
|
7 |
+
1 1 1 1 1 1 1
|
8 |
+
1 1 1 0 1 1 1
|
9 |
+
1 1 0 0 0 1 1
|
10 |
+
1 0 1 0 1 0 1
|
11 |
+
0 0 0 1 0 0 0
|
12 |
+
1 0 1 0 1 0 1
|
13 |
+
1 1 0 0 0 1 1
|
14 |
+
1 1 1 0 1 1 1
|
15 |
+
1 0 1 1 1 0 1
|
16 |
+
0 0 0 1 0 0 0
|
17 |
+
1 0 0 1 0 0 1
|
18 |
+
1 1 1 1 1 1 1
|
19 |
+
1 0 0 1 0 0 1
|
20 |
+
0 0 0 1 0 0 0
|
21 |
+
1 0 1 1 1 0 1
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt
ADDED
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
1 1 1 1 1 1 1
|
2 |
+
1 1 1 1 1 1 1
|
3 |
+
1 1 1 1 1 1 1
|
4 |
+
1 1 1 1 1 1 1
|
5 |
+
1 1 1 1 1 1 1
|
6 |
+
1 1 1 1 1 1 1
|
7 |
+
1 1 1 1 1 1 1
|
8 |
+
1 1 1 1 1 1 1
|
9 |
+
1 1 1 1 1 1 1
|
10 |
+
1 1 1 1 1 1 1
|
11 |
+
1 1 1 1 1 1 1
|
12 |
+
1 1 1 1 1 1 1
|
13 |
+
1 1 1 1 1 1 1
|
14 |
+
1 1 1 1 1 1 1
|
15 |
+
1 1 1 1 1 1 1
|
16 |
+
2 2 2 2 2 2 2
|
17 |
+
3 3 3 3 3 3 3
|
18 |
+
4 4 4 4 4 4 4
|
19 |
+
5 5 5 5 5 5 5
|
20 |
+
6 6 6 6 6 6 6
|
21 |
+
7 7 7 7 7 7 7
|
22 |
+
1 1 1 1 1 1 1
|
23 |
+
1 1 1 1 1 1 1
|
24 |
+
1 1 1 1 1 1 1
|
25 |
+
1 1 1 1 1 1 1
|
26 |
+
1 1 1 1 1 1 1
|
27 |
+
1 1 1 1 1 1 1
|
28 |
+
1 1 1 1 1 1 1
|
29 |
+
1 2 3 4 5 6 7
|
30 |
+
8 9 10 11 12 13 14
|
31 |
+
15 16 17 18 19 20 21
|
32 |
+
22 23 24 25 26 27 28
|
33 |
+
29 30 31 32 33 34 35
|
34 |
+
36 37 38 39 40 41 42
|
35 |
+
43 44 45 46 47 48 49
|
36 |
+
1 1 1 1 1 1 1
|
37 |
+
1 1 1 1 1 1 1
|
38 |
+
1 1 1 1 1 1 1
|
39 |
+
1 1 1 1 1 1 1
|
40 |
+
1 1 1 1 1 1 1
|
41 |
+
1 1 1 1 1 1 1
|
42 |
+
1 1 1 1 1 1 1
|
43 |
+
1 1 1 1 1 1 1
|
44 |
+
1 1 1 1 1 1 1
|
45 |
+
1 1 1 1 1 1 1
|
46 |
+
1 1 1 1 1 1 1
|
47 |
+
1 1 1 1 1 1 1
|
48 |
+
1 1 1 1 1 1 1
|
49 |
+
1 1 1 1 1 1 1
|
50 |
+
1 2 3 4 5 6 7
|
51 |
+
8 1 2 3 4 5 6
|
52 |
+
9 8 1 2 3 4 5
|
53 |
+
10 9 8 1 2 3 4
|
54 |
+
11 10 9 8 1 2 3
|
55 |
+
12 11 10 9 8 1 2
|
56 |
+
13 12 11 10 9 8 1
|
57 |
+
1 2 3 4 5 6 7
|
58 |
+
1 2 3 4 5 6 7
|
59 |
+
1 2 3 4 5 6 7
|
60 |
+
1 2 3 4 5 6 7
|
61 |
+
1 2 3 4 5 6 7
|
62 |
+
1 2 3 4 5 6 7
|
63 |
+
1 2 3 4 5 6 7
|
64 |
+
1 1 1 1 1 1 1
|
65 |
+
1 1 1 1 1 1 1
|
66 |
+
1 1 1 1 1 1 1
|
67 |
+
1 1 1 1 1 1 1
|
68 |
+
1 1 1 1 1 1 1
|
69 |
+
1 1 1 1 1 1 1
|
70 |
+
1 1 1 1 1 1 1
|
71 |
+
1 1 1 1 1 1 1
|
72 |
+
1 1 1 1 1 1 1
|
73 |
+
1 1 1 1 1 1 1
|
74 |
+
1 1 1 1 1 1 1
|
75 |
+
1 1 1 1 1 1 1
|
76 |
+
1 1 1 1 1 1 1
|
77 |
+
1 1 1 1 1 1 1
|
78 |
+
1 2 1 2 1 2 1
|
79 |
+
2 1 2 1 2 1 2
|
80 |
+
1 2 1 2 1 2 1
|
81 |
+
2 1 2 1 2 1 2
|
82 |
+
1 2 1 2 1 2 1
|
83 |
+
2 1 2 1 2 1 2
|
84 |
+
1 2 1 2 1 2 1
|
85 |
+
1 2 3 4 5 6 7
|
86 |
+
2 3 4 5 6 7 8
|
87 |
+
3 4 5 6 7 8 9
|
88 |
+
4 5 6 7 8 9 10
|
89 |
+
5 6 7 8 9 10 11
|
90 |
+
6 7 8 9 10 11 12
|
91 |
+
7 8 9 10 11 12 13
|
92 |
+
1 1 1 1 1 1 1
|
93 |
+
1 1 1 1 1 1 1
|
94 |
+
1 1 1 1 1 1 1
|
95 |
+
1 1 1 1 1 1 1
|
96 |
+
1 1 1 1 1 1 1
|
97 |
+
1 1 1 1 1 1 1
|
98 |
+
1 1 1 1 1 1 1
|
99 |
+
1 1 1 0 2 2 2
|
100 |
+
1 1 0 0 0 2 2
|
101 |
+
1 0 3 0 2 0 4
|
102 |
+
0 0 0 2 0 0 0
|
103 |
+
5 0 2 0 6 0 7
|
104 |
+
2 2 0 0 0 7 7
|
105 |
+
2 2 2 0 7 7 7
|
106 |
+
1 1 1 0 2 2 2
|
107 |
+
1 1 0 0 0 2 2
|
108 |
+
3 0 1 0 4 0 2
|
109 |
+
0 0 0 1 0 0 0
|
110 |
+
5 0 6 0 1 0 7
|
111 |
+
5 5 0 0 0 1 1
|
112 |
+
5 5 5 0 1 1 1
|
113 |
+
1 1 1 0 2 2 2
|
114 |
+
3 3 0 0 0 4 4
|
115 |
+
5 0 6 0 7 0 8
|
116 |
+
0 0 0 9 0 0 0
|
117 |
+
10 0 11 0 12 0 13
|
118 |
+
14 14 0 0 0 15 15
|
119 |
+
16 16 16 0 17 17 17
|
120 |
+
1 1 1 0 2 3 3
|
121 |
+
1 1 0 0 0 3 3
|
122 |
+
1 0 4 0 3 0 3
|
123 |
+
0 0 0 3 0 0 0
|
124 |
+
3 0 3 0 5 0 6
|
125 |
+
3 3 0 0 0 6 6
|
126 |
+
3 3 7 0 6 6 6
|
127 |
+
1 2 3 0 4 5 6
|
128 |
+
7 8 0 0 0 9 10
|
129 |
+
11 0 12 0 13 0 14
|
130 |
+
0 0 0 15 0 0 0
|
131 |
+
16 0 17 0 18 0 19
|
132 |
+
20 21 0 0 0 22 23
|
133 |
+
24 25 26 0 27 28 29
|
134 |
+
1 1 1 0 2 2 2
|
135 |
+
1 1 0 0 0 2 2
|
136 |
+
1 0 3 0 2 0 2
|
137 |
+
0 0 0 2 0 0 0
|
138 |
+
2 0 2 0 4 0 5
|
139 |
+
2 2 0 0 0 5 5
|
140 |
+
2 2 2 0 5 5 5
|
141 |
+
1 1 1 0 2 2 2
|
142 |
+
1 1 0 0 0 2 2
|
143 |
+
1 0 3 0 4 0 2
|
144 |
+
0 0 0 5 0 0 0
|
145 |
+
6 0 7 0 8 0 9
|
146 |
+
6 6 0 0 0 9 9
|
147 |
+
6 6 6 0 9 9 9
|
148 |
+
1 2 3 0 4 5 6
|
149 |
+
7 1 0 0 0 4 5
|
150 |
+
8 0 1 0 9 0 4
|
151 |
+
0 0 0 1 0 0 0
|
152 |
+
10 0 11 0 1 0 12
|
153 |
+
13 10 0 0 0 1 14
|
154 |
+
15 13 10 0 16 17 1
|
155 |
+
1 2 3 0 4 5 6
|
156 |
+
1 2 0 0 0 5 6
|
157 |
+
1 0 7 0 8 0 6
|
158 |
+
0 0 0 9 0 0 0
|
159 |
+
10 0 11 0 12 0 13
|
160 |
+
10 14 0 0 0 15 13
|
161 |
+
10 14 16 0 17 15 13
|
162 |
+
1 1 1 0 1 1 1
|
163 |
+
1 1 0 0 0 1 1
|
164 |
+
1 0 1 0 1 0 1
|
165 |
+
0 0 0 1 0 0 0
|
166 |
+
1 0 1 0 1 0 1
|
167 |
+
1 1 0 0 0 1 1
|
168 |
+
1 1 1 0 1 1 1
|
169 |
+
1 1 2 0 3 3 3
|
170 |
+
1 1 0 0 0 3 3
|
171 |
+
1 0 1 0 4 0 3
|
172 |
+
0 0 0 1 0 0 0
|
173 |
+
5 0 6 0 1 0 1
|
174 |
+
5 5 0 0 0 1 1
|
175 |
+
5 5 5 0 7 1 1
|
176 |
+
1 2 1 0 1 3 1
|
177 |
+
2 1 0 0 0 1 3
|
178 |
+
1 0 1 0 1 0 1
|
179 |
+
0 0 0 1 0 0 0
|
180 |
+
1 0 1 0 1 0 1
|
181 |
+
4 1 0 0 0 1 5
|
182 |
+
1 4 1 0 1 5 1
|
183 |
+
1 2 3 0 4 5 6
|
184 |
+
2 3 0 0 0 6 7
|
185 |
+
3 0 8 0 6 0 9
|
186 |
+
0 0 0 6 0 0 0
|
187 |
+
10 0 6 0 11 0 12
|
188 |
+
13 6 0 0 0 12 14
|
189 |
+
6 15 16 0 12 14 17
|
190 |
+
1 1 1 0 2 2 2
|
191 |
+
1 1 0 0 0 2 2
|
192 |
+
1 0 1 0 3 0 2
|
193 |
+
0 0 0 1 0 0 0
|
194 |
+
4 0 5 0 1 0 1
|
195 |
+
4 4 0 0 0 1 1
|
196 |
+
4 4 4 0 1 1 1
|
197 |
+
1 0 2 2 2 0 3
|
198 |
+
0 0 0 2 0 0 0
|
199 |
+
4 0 0 5 0 0 5
|
200 |
+
5 5 5 5 5 5 5
|
201 |
+
5 0 0 5 0 0 6
|
202 |
+
0 0 0 7 0 0 0
|
203 |
+
8 0 7 7 7 0 9
|
204 |
+
1 0 2 2 2 0 3
|
205 |
+
0 0 0 2 0 0 0
|
206 |
+
4 0 0 4 0 0 5
|
207 |
+
4 4 4 4 4 4 4
|
208 |
+
6 0 0 4 0 0 4
|
209 |
+
0 0 0 7 0 0 0
|
210 |
+
8 0 7 7 7 0 9
|
211 |
+
1 0 2 2 2 0 3
|
212 |
+
0 0 0 4 0 0 0
|
213 |
+
5 0 0 6 0 0 7
|
214 |
+
8 8 8 8 8 8 8
|
215 |
+
9 0 0 10 0 0 11
|
216 |
+
0 0 0 12 0 0 0
|
217 |
+
13 0 14 14 14 0 15
|
218 |
+
1 0 2 3 3 0 4
|
219 |
+
0 0 0 3 0 0 0
|
220 |
+
5 0 0 3 0 0 6
|
221 |
+
5 5 3 3 3 6 6
|
222 |
+
5 0 0 3 0 0 6
|
223 |
+
0 0 0 3 0 0 0
|
224 |
+
7 0 3 3 8 0 9
|
225 |
+
1 0 2 3 4 0 5
|
226 |
+
0 0 0 6 0 0 0
|
227 |
+
7 0 0 8 0 0 9
|
228 |
+
10 11 12 13 14 15 16
|
229 |
+
17 0 0 18 0 0 19
|
230 |
+
0 0 0 20 0 0 0
|
231 |
+
21 0 22 23 24 0 25
|
232 |
+
1 0 2 2 2 0 3
|
233 |
+
0 0 0 2 0 0 0
|
234 |
+
2 0 0 2 0 0 2
|
235 |
+
2 2 2 2 2 2 2
|
236 |
+
2 0 0 2 0 0 2
|
237 |
+
0 0 0 2 0 0 0
|
238 |
+
4 0 2 2 2 0 5
|
239 |
+
1 0 2 2 2 0 3
|
240 |
+
0 0 0 2 0 0 0
|
241 |
+
2 0 0 2 0 0 2
|
242 |
+
2 2 2 2 2 2 2
|
243 |
+
2 0 0 2 0 0 2
|
244 |
+
0 0 0 2 0 0 0
|
245 |
+
4 0 2 2 2 0 5
|
246 |
+
1 0 2 3 4 0 5
|
247 |
+
0 0 0 2 0 0 0
|
248 |
+
6 0 0 7 0 0 8
|
249 |
+
9 6 10 11 7 12 13
|
250 |
+
14 0 0 10 0 0 12
|
251 |
+
0 0 0 15 0 0 0
|
252 |
+
16 0 17 18 15 0 19
|
253 |
+
1 0 2 3 4 0 5
|
254 |
+
0 0 0 3 0 0 0
|
255 |
+
6 0 0 3 0 0 7
|
256 |
+
6 8 9 3 10 11 7
|
257 |
+
6 0 0 3 0 0 7
|
258 |
+
0 0 0 3 0 0 0
|
259 |
+
12 0 13 3 14 0 15
|
260 |
+
1 0 2 2 2 0 3
|
261 |
+
0 0 0 2 0 0 0
|
262 |
+
2 0 0 2 0 0 2
|
263 |
+
2 2 2 2 2 2 2
|
264 |
+
2 0 0 2 0 0 2
|
265 |
+
0 0 0 2 0 0 0
|
266 |
+
4 0 2 2 2 0 5
|
267 |
+
1 0 2 2 3 0 4
|
268 |
+
0 0 0 2 0 0 0
|
269 |
+
5 0 0 2 0 0 6
|
270 |
+
5 5 2 2 2 6 6
|
271 |
+
5 0 0 2 0 0 6
|
272 |
+
0 0 0 2 0 0 0
|
273 |
+
7 0 8 2 2 0 9
|
274 |
+
1 0 2 3 2 0 4
|
275 |
+
0 0 0 2 0 0 0
|
276 |
+
5 0 0 6 0 0 7
|
277 |
+
8 5 6 9 6 7 10
|
278 |
+
5 0 0 6 0 0 7
|
279 |
+
0 0 0 11 0 0 0
|
280 |
+
12 0 11 13 11 0 14
|
281 |
+
1 0 2 3 4 0 5
|
282 |
+
0 0 0 4 0 0 0
|
283 |
+
6 0 0 7 0 0 8
|
284 |
+
9 10 7 11 12 8 13
|
285 |
+
10 0 0 12 0 0 14
|
286 |
+
0 0 0 15 0 0 0
|
287 |
+
16 0 15 17 18 0 19
|
288 |
+
1 0 2 2 2 0 3
|
289 |
+
0 0 0 2 0 0 0
|
290 |
+
2 0 0 2 0 0 2
|
291 |
+
2 2 2 2 2 2 2
|
292 |
+
2 0 0 2 0 0 2
|
293 |
+
0 0 0 2 0 0 0
|
294 |
+
4 0 2 2 2 0 5
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
0 0 1
|
2 |
+
1 1 1
|
3 |
+
1 0 0
|
4 |
+
1 0 0
|
5 |
+
1 1 1
|
6 |
+
0 0 1
|
7 |
+
0 0 0
|
8 |
+
1 1 1
|
9 |
+
0 0 0
|
10 |
+
0 1 1
|
11 |
+
0 1 0
|
12 |
+
1 1 0
|
13 |
+
0 0 0
|
14 |
+
0 0 0
|
15 |
+
0 0 0
|
16 |
+
0 1 1
|
17 |
+
1 1 1
|
18 |
+
1 1 0
|
19 |
+
0 1 0
|
20 |
+
1 1 1
|
21 |
+
0 1 0
|
22 |
+
1 0 0
|
23 |
+
0 1 0
|
24 |
+
0 0 1
|
25 |
+
0 1 0
|
26 |
+
0 1 0
|
27 |
+
0 1 0
|
28 |
+
1 1 1
|
29 |
+
1 1 1
|
30 |
+
1 1 1
|
31 |
+
1 1 0
|
32 |
+
0 1 0
|
33 |
+
0 1 1
|
34 |
+
1 0 1
|
35 |
+
0 1 0
|
36 |
+
1 0 1
|
37 |
+
0 0 1
|
38 |
+
0 1 0
|
39 |
+
1 0 0
|
40 |
+
1 1 0
|
41 |
+
1 1 1
|
42 |
+
0 1 1
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_allclose
|
3 |
+
|
4 |
+
from scipy import ndimage
|
5 |
+
from scipy.ndimage import _ctest
|
6 |
+
from scipy.ndimage import _cytest
|
7 |
+
from scipy._lib._ccallback import LowLevelCallable
|
8 |
+
|
9 |
+
FILTER1D_FUNCTIONS = [
|
10 |
+
lambda filter_size: _ctest.filter1d(filter_size),
|
11 |
+
lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
|
12 |
+
lambda filter_size: LowLevelCallable(
|
13 |
+
_cytest.filter1d(filter_size, with_signature=True)
|
14 |
+
),
|
15 |
+
lambda filter_size: LowLevelCallable.from_cython(
|
16 |
+
_cytest, "_filter1d",
|
17 |
+
_cytest.filter1d_capsule(filter_size),
|
18 |
+
),
|
19 |
+
]
|
20 |
+
|
21 |
+
FILTER2D_FUNCTIONS = [
|
22 |
+
lambda weights: _ctest.filter2d(weights),
|
23 |
+
lambda weights: _cytest.filter2d(weights, with_signature=False),
|
24 |
+
lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
|
25 |
+
lambda weights: LowLevelCallable.from_cython(_cytest,
|
26 |
+
"_filter2d",
|
27 |
+
_cytest.filter2d_capsule(weights),),
|
28 |
+
]
|
29 |
+
|
30 |
+
TRANSFORM_FUNCTIONS = [
|
31 |
+
lambda shift: _ctest.transform(shift),
|
32 |
+
lambda shift: _cytest.transform(shift, with_signature=False),
|
33 |
+
lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
|
34 |
+
lambda shift: LowLevelCallable.from_cython(_cytest,
|
35 |
+
"_transform",
|
36 |
+
_cytest.transform_capsule(shift),),
|
37 |
+
]
|
38 |
+
|
39 |
+
|
40 |
+
def test_generic_filter():
|
41 |
+
def filter2d(footprint_elements, weights):
|
42 |
+
return (weights*footprint_elements).sum()
|
43 |
+
|
44 |
+
def check(j):
|
45 |
+
func = FILTER2D_FUNCTIONS[j]
|
46 |
+
|
47 |
+
im = np.ones((20, 20))
|
48 |
+
im[:10,:10] = 0
|
49 |
+
footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
|
50 |
+
footprint_size = np.count_nonzero(footprint)
|
51 |
+
weights = np.ones(footprint_size)/footprint_size
|
52 |
+
|
53 |
+
res = ndimage.generic_filter(im, func(weights),
|
54 |
+
footprint=footprint)
|
55 |
+
std = ndimage.generic_filter(im, filter2d, footprint=footprint,
|
56 |
+
extra_arguments=(weights,))
|
57 |
+
assert_allclose(res, std, err_msg=f"#{j} failed")
|
58 |
+
|
59 |
+
for j, func in enumerate(FILTER2D_FUNCTIONS):
|
60 |
+
check(j)
|
61 |
+
|
62 |
+
|
63 |
+
def test_generic_filter1d():
|
64 |
+
def filter1d(input_line, output_line, filter_size):
|
65 |
+
for i in range(output_line.size):
|
66 |
+
output_line[i] = 0
|
67 |
+
for j in range(filter_size):
|
68 |
+
output_line[i] += input_line[i+j]
|
69 |
+
output_line /= filter_size
|
70 |
+
|
71 |
+
def check(j):
|
72 |
+
func = FILTER1D_FUNCTIONS[j]
|
73 |
+
|
74 |
+
im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
|
75 |
+
filter_size = 3
|
76 |
+
|
77 |
+
res = ndimage.generic_filter1d(im, func(filter_size),
|
78 |
+
filter_size)
|
79 |
+
std = ndimage.generic_filter1d(im, filter1d, filter_size,
|
80 |
+
extra_arguments=(filter_size,))
|
81 |
+
assert_allclose(res, std, err_msg=f"#{j} failed")
|
82 |
+
|
83 |
+
for j, func in enumerate(FILTER1D_FUNCTIONS):
|
84 |
+
check(j)
|
85 |
+
|
86 |
+
|
87 |
+
def test_geometric_transform():
|
88 |
+
def transform(output_coordinates, shift):
|
89 |
+
return output_coordinates[0] - shift, output_coordinates[1] - shift
|
90 |
+
|
91 |
+
def check(j):
|
92 |
+
func = TRANSFORM_FUNCTIONS[j]
|
93 |
+
|
94 |
+
im = np.arange(12).reshape(4, 3).astype(np.float64)
|
95 |
+
shift = 0.5
|
96 |
+
|
97 |
+
res = ndimage.geometric_transform(im, func(shift))
|
98 |
+
std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
|
99 |
+
assert_allclose(res, std, err_msg=f"#{j} failed")
|
100 |
+
|
101 |
+
for j, func in enumerate(TRANSFORM_FUNCTIONS):
|
102 |
+
check(j)
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" Testing data types for ndimage calls
|
2 |
+
"""
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_almost_equal, assert_
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from scipy import ndimage
|
8 |
+
|
9 |
+
|
10 |
+
def test_map_coordinates_dts():
|
11 |
+
# check that ndimage accepts different data types for interpolation
|
12 |
+
data = np.array([[4, 1, 3, 2],
|
13 |
+
[7, 6, 8, 5],
|
14 |
+
[3, 5, 3, 6]])
|
15 |
+
shifted_data = np.array([[0, 0, 0, 0],
|
16 |
+
[0, 4, 1, 3],
|
17 |
+
[0, 7, 6, 8]])
|
18 |
+
idx = np.indices(data.shape)
|
19 |
+
dts = (np.uint8, np.uint16, np.uint32, np.uint64,
|
20 |
+
np.int8, np.int16, np.int32, np.int64,
|
21 |
+
np.intp, np.uintp, np.float32, np.float64)
|
22 |
+
for order in range(0, 6):
|
23 |
+
for data_dt in dts:
|
24 |
+
these_data = data.astype(data_dt)
|
25 |
+
for coord_dt in dts:
|
26 |
+
# affine mapping
|
27 |
+
mat = np.eye(2, dtype=coord_dt)
|
28 |
+
off = np.zeros((2,), dtype=coord_dt)
|
29 |
+
out = ndimage.affine_transform(these_data, mat, off)
|
30 |
+
assert_array_almost_equal(these_data, out)
|
31 |
+
# map coordinates
|
32 |
+
coords_m1 = idx.astype(coord_dt) - 1
|
33 |
+
coords_p10 = idx.astype(coord_dt) + 10
|
34 |
+
out = ndimage.map_coordinates(these_data, coords_m1, order=order)
|
35 |
+
assert_array_almost_equal(out, shifted_data)
|
36 |
+
# check constant fill works
|
37 |
+
out = ndimage.map_coordinates(these_data, coords_p10, order=order)
|
38 |
+
assert_array_almost_equal(out, np.zeros((3,4)))
|
39 |
+
# check shift and zoom
|
40 |
+
out = ndimage.shift(these_data, 1)
|
41 |
+
assert_array_almost_equal(out, shifted_data)
|
42 |
+
out = ndimage.zoom(these_data, 1)
|
43 |
+
assert_array_almost_equal(these_data, out)
|
44 |
+
|
45 |
+
|
46 |
+
@pytest.mark.xfail(True, reason="Broken on many platforms")
|
47 |
+
def test_uint64_max():
|
48 |
+
# Test interpolation respects uint64 max. Reported to fail at least on
|
49 |
+
# win32 (due to the 32 bit visual C compiler using signed int64 when
|
50 |
+
# converting between uint64 to double) and Debian on s390x.
|
51 |
+
# Interpolation is always done in double precision floating point, so
|
52 |
+
# we use the largest uint64 value for which int(float(big)) still fits
|
53 |
+
# in a uint64.
|
54 |
+
# This test was last enabled on macOS only, and there it started failing
|
55 |
+
# on arm64 as well (see gh-19117).
|
56 |
+
big = 2**64 - 1025
|
57 |
+
arr = np.array([big, big, big], dtype=np.uint64)
|
58 |
+
# Tests geometric transform (map_coordinates, affine_transform)
|
59 |
+
inds = np.indices(arr.shape) - 0.1
|
60 |
+
x = ndimage.map_coordinates(arr, inds)
|
61 |
+
assert_(x[1] == int(float(big)))
|
62 |
+
assert_(x[2] == int(float(big)))
|
63 |
+
# Tests zoom / shift
|
64 |
+
x = ndimage.shift(arr, 0.1)
|
65 |
+
assert_(x[1] == int(float(big)))
|
66 |
+
assert_(x[2] == int(float(big)))
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py
ADDED
@@ -0,0 +1,2189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
''' Some tests for filters '''
|
2 |
+
import functools
|
3 |
+
import itertools
|
4 |
+
import math
|
5 |
+
import numpy
|
6 |
+
|
7 |
+
from numpy.testing import (assert_equal, assert_allclose,
|
8 |
+
assert_array_almost_equal,
|
9 |
+
assert_array_equal, assert_almost_equal,
|
10 |
+
suppress_warnings, assert_)
|
11 |
+
import pytest
|
12 |
+
from pytest import raises as assert_raises
|
13 |
+
|
14 |
+
from scipy import ndimage
|
15 |
+
from scipy.ndimage._filters import _gaussian_kernel1d
|
16 |
+
|
17 |
+
from . import types, float_types, complex_types
|
18 |
+
|
19 |
+
|
20 |
+
def sumsq(a, b):
    """Return the root of the summed squared difference between *a* and *b*."""
    diff = a - b
    return math.sqrt((diff * diff).sum())
|
22 |
+
|
23 |
+
|
24 |
+
def _complex_correlate(array, kernel, real_dtype, convolve=False,
|
25 |
+
mode="reflect", cval=0, ):
|
26 |
+
"""Utility to perform a reference complex-valued convolutions.
|
27 |
+
|
28 |
+
When convolve==False, correlation is performed instead
|
29 |
+
"""
|
30 |
+
array = numpy.asarray(array)
|
31 |
+
kernel = numpy.asarray(kernel)
|
32 |
+
complex_array = array.dtype.kind == 'c'
|
33 |
+
complex_kernel = kernel.dtype.kind == 'c'
|
34 |
+
if array.ndim == 1:
|
35 |
+
func = ndimage.convolve1d if convolve else ndimage.correlate1d
|
36 |
+
else:
|
37 |
+
func = ndimage.convolve if convolve else ndimage.correlate
|
38 |
+
if not convolve:
|
39 |
+
kernel = kernel.conj()
|
40 |
+
if complex_array and complex_kernel:
|
41 |
+
# use: real(cval) for array.real component
|
42 |
+
# imag(cval) for array.imag component
|
43 |
+
output = (
|
44 |
+
func(array.real, kernel.real, output=real_dtype,
|
45 |
+
mode=mode, cval=numpy.real(cval)) -
|
46 |
+
func(array.imag, kernel.imag, output=real_dtype,
|
47 |
+
mode=mode, cval=numpy.imag(cval)) +
|
48 |
+
1j * func(array.imag, kernel.real, output=real_dtype,
|
49 |
+
mode=mode, cval=numpy.imag(cval)) +
|
50 |
+
1j * func(array.real, kernel.imag, output=real_dtype,
|
51 |
+
mode=mode, cval=numpy.real(cval))
|
52 |
+
)
|
53 |
+
elif complex_array:
|
54 |
+
output = (
|
55 |
+
func(array.real, kernel, output=real_dtype, mode=mode,
|
56 |
+
cval=numpy.real(cval)) +
|
57 |
+
1j * func(array.imag, kernel, output=real_dtype, mode=mode,
|
58 |
+
cval=numpy.imag(cval))
|
59 |
+
)
|
60 |
+
elif complex_kernel:
|
61 |
+
# real array so cval is real too
|
62 |
+
output = (
|
63 |
+
func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
|
64 |
+
1j * func(array, kernel.imag, output=real_dtype, mode=mode,
|
65 |
+
cval=cval)
|
66 |
+
)
|
67 |
+
return output
|
68 |
+
|
69 |
+
|
70 |
+
def _cases_axes_tuple_length_mismatch():
|
71 |
+
# Generate combinations of filter function, valid kwargs, and
|
72 |
+
# keyword-value pairs for which the value will become with mismatched
|
73 |
+
# (invalid) size
|
74 |
+
filter_func = ndimage.gaussian_filter
|
75 |
+
kwargs = dict(radius=3, mode='constant', sigma=1.0, order=0)
|
76 |
+
for key, val in kwargs.items():
|
77 |
+
yield filter_func, kwargs, key, val
|
78 |
+
|
79 |
+
filter_funcs = [ndimage.uniform_filter, ndimage.minimum_filter,
|
80 |
+
ndimage.maximum_filter]
|
81 |
+
kwargs = dict(size=3, mode='constant', origin=0)
|
82 |
+
for filter_func in filter_funcs:
|
83 |
+
for key, val in kwargs.items():
|
84 |
+
yield filter_func, kwargs, key, val
|
85 |
+
|
86 |
+
|
87 |
+
class TestNdimageFilters:
|
88 |
+
|
89 |
+
    def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
        """Validate complex-valued correlate/convolve against the reference.

        Checks output dtype handling (requested dtype, pre-allocated array),
        the promotion warning for real output dtypes, and the error raised
        for pre-allocated real output arrays.
        """
        # utility for validating complex-valued correlations
        real_dtype = numpy.asarray([], dtype=type2).real.dtype
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
        )

        # bind mode/cval (and axis for the 1-D variants) once
        if array.ndim == 1:
            correlate = functools.partial(ndimage.correlate1d, axis=-1,
                                          mode=mode, cval=cval)
            convolve = functools.partial(ndimage.convolve1d, axis=-1,
                                         mode=mode, cval=cval)
        else:
            correlate = functools.partial(ndimage.correlate, mode=mode,
                                          cval=cval)
            convolve = functools.partial(ndimage.convolve, mode=mode,
                                         cval=cval)

        # test correlate output dtype
        output = correlate(array, kernel, output=type2)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # test correlate with pre-allocated output
        output = numpy.zeros_like(array, dtype=type2)
        correlate(array, kernel, output=output)
        assert_array_almost_equal(expected, output)

        # test convolve output dtype
        output = convolve(array, kernel, output=type2)
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
        )
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # convolve with pre-allocated output
        convolve(array, kernel, output=output)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)

        # warns if the output is not a complex dtype
        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            correlate(array, kernel, output=real_dtype)

        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            convolve(array, kernel, output=real_dtype)

        # raises if output array is provided, but is not complex-valued
        output_real = numpy.zeros_like(array, dtype=real_dtype)
        with assert_raises(RuntimeError):
            correlate(array, kernel, output=output_real)

        with assert_raises(RuntimeError):
            convolve(array, kernel, output=output_real)
|
146 |
+
|
147 |
+
    def test_correlate01(self):
        """Single-tap kernel [2]: all four variants scale the input by 2."""
        array = numpy.array([1, 2])
        weights = numpy.array([2])
        expected = [2, 4]

        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, expected)
|
163 |
+
|
164 |
+
    def test_correlate01_overlap(self):
        """In-place filtering: the output array aliases the input."""
        array = numpy.arange(256).reshape(16, 16)
        weights = numpy.array([2])
        # expected must be computed before the in-place call below
        expected = 2 * array

        ndimage.correlate1d(array, weights, output=array)
        assert_array_almost_equal(array, expected)
|
171 |
+
|
172 |
+
    def test_correlate02(self):
        """Identity kernel [1] leaves the input unchanged in all variants."""
        array = numpy.array([1, 2, 3])
        kernel = numpy.array([1])

        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(array, output)

        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(array, output)

        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(array, output)

        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(array, output)
|
187 |
+
|
188 |
+
    def test_correlate03(self):
        """Length-1 input with a 2-tap kernel: boundary extension yields [2]."""
        array = numpy.array([1])
        weights = numpy.array([1, 1])
        expected = [2]

        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, expected)

        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, expected)
|
204 |
+
|
205 |
+
    def test_correlate04(self):
        """2-tap kernel on a 2-element input: correlate and convolve differ."""
        array = numpy.array([1, 2])
        tcor = [2, 3]
        tcov = [3, 4]
        weights = numpy.array([1, 1])
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, tcov)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, tcov)
|
218 |
+
|
219 |
+
    def test_correlate05(self):
        """2-tap kernel [1, 1] on a 3-element input (default reflect mode)."""
        array = numpy.array([1, 2, 3])
        tcor = [2, 3, 5]
        tcov = [3, 5, 6]
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(tcov, output)
        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(tcov, output)
|
232 |
+
|
233 |
+
    def test_correlate06(self):
        """Asymmetric 3-tap kernel [1, 2, 3]: correlate flips vs. convolve."""
        array = numpy.array([1, 2, 3])
        tcor = [9, 14, 17]
        tcov = [7, 10, 15]
        weights = numpy.array([1, 2, 3])
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, tcov)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, tcov)
|
246 |
+
|
247 |
+
    def test_correlate07(self):
        """Symmetric kernel [1, 2, 1]: correlate and convolve agree."""
        array = numpy.array([1, 2, 3])
        expected = [5, 8, 11]
        weights = numpy.array([1, 2, 1])
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, expected)
|
259 |
+
|
260 |
+
    def test_correlate08(self):
        """Kernel with a negative tap [1, 2, -1]."""
        array = numpy.array([1, 2, 3])
        tcor = [1, 2, 5]
        tcov = [3, 6, 7]
        weights = numpy.array([1, 2, -1])
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, tcov)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, tcov)
|
273 |
+
|
274 |
+
    def test_correlate09(self):
        """Empty 1-D input passes through all variants unchanged."""
        array = []
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(array, output)
|
285 |
+
|
286 |
+
    def test_correlate10(self):
        """Empty 2-D input (shape (1, 0)) passes through unchanged."""
        array = [[]]
        kernel = numpy.array([[1, 1]])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(array, output)
|
293 |
+
|
294 |
+
    def test_correlate11(self):
        """2-D correlation/convolution with a 2x2 all-ones kernel."""
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]])
        kernel = numpy.array([[1, 1],
                              [1, 1]])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
|
303 |
+
|
304 |
+
    def test_correlate12(self):
        """2-D filtering with a diagonal identity-like 2x2 kernel."""
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]])
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
|
313 |
+
|
314 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_kernel', types)
    def test_correlate13(self, dtype_array, dtype_kernel):
        """Requested output dtype is honored for every input dtype pairing."""
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_array)
        output = ndimage.correlate(array, kernel, output=dtype_kernel)
        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
        assert_equal(output.dtype.type, dtype_kernel)

        output = ndimage.convolve(array, kernel,
                                  output=dtype_kernel)
        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
        assert_equal(output.dtype.type, dtype_kernel)
|
329 |
+
|
330 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate14(self, dtype_array, dtype_output):
        """Pre-allocated output arrays keep their dtype for all pairings."""
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_array)
        output = numpy.zeros(array.shape, dtype_output)
        ndimage.correlate(array, kernel, output=output)
        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
        assert_equal(output.dtype.type, dtype_output)

        ndimage.convolve(array, kernel, output=output)
        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
        assert_equal(output.dtype.type, dtype_output)
|
345 |
+
|
346 |
+
    @pytest.mark.parametrize('dtype_array', types)
    def test_correlate15(self, dtype_array):
        """float32 output dtype is honored regardless of input dtype."""
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_array)
        output = ndimage.correlate(array, kernel, output=numpy.float32)
        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
        assert_equal(output.dtype.type, numpy.float32)

        output = ndimage.convolve(array, kernel, output=numpy.float32)
        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
        assert_equal(output.dtype.type, numpy.float32)
|
359 |
+
|
360 |
+
    @pytest.mark.parametrize('dtype_array', types)
    def test_correlate16(self, dtype_array):
        """Non-integer kernel weights (0.5) with float32 output."""
        kernel = numpy.array([[0.5, 0],
                              [0, 0.5]])
        array = numpy.array([[1, 2, 3], [4, 5, 6]], dtype_array)
        output = ndimage.correlate(array, kernel, output=numpy.float32)
        assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
        assert_equal(output.dtype.type, numpy.float32)

        output = ndimage.convolve(array, kernel, output=numpy.float32)
        assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
        assert_equal(output.dtype.type, numpy.float32)
|
372 |
+
|
373 |
+
    def test_correlate17(self):
        """origin=-1 shifts the kernel placement in all four variants."""
        array = numpy.array([1, 2, 3])
        tcor = [3, 5, 6]
        tcov = [2, 3, 5]
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel, origin=-1)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve(array, kernel, origin=-1)
        assert_array_almost_equal(tcov, output)
        output = ndimage.correlate1d(array, kernel, origin=-1)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve1d(array, kernel, origin=-1)
        assert_array_almost_equal(tcov, output)
|
386 |
+
|
387 |
+
    @pytest.mark.parametrize('dtype_array', types)
    def test_correlate18(self, dtype_array):
        """Combination of mode='nearest' and scalar origin=-1."""
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_array)
        output = ndimage.correlate(array, kernel,
                                   output=numpy.float32,
                                   mode='nearest', origin=-1)
        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
        assert_equal(output.dtype.type, numpy.float32)

        output = ndimage.convolve(array, kernel,
                                  output=numpy.float32,
                                  mode='nearest', origin=-1)
        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
        assert_equal(output.dtype.type, numpy.float32)
|
404 |
+
|
405 |
+
    def test_correlate_mode_sequence(self):
        """A sequence of modes is rejected: correlate/convolve take one mode."""
        kernel = numpy.ones((2, 2))
        array = numpy.ones((3, 3), float)
        with assert_raises(RuntimeError):
            ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
        with assert_raises(RuntimeError):
            ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
|
412 |
+
|
413 |
+
    @pytest.mark.parametrize('dtype_array', types)
    def test_correlate19(self, dtype_array):
        """Per-axis origin sequence [-1, 0] with mode='nearest'."""
        kernel = numpy.array([[1, 0],
                              [0, 1]])
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_array)
        output = ndimage.correlate(array, kernel,
                                   output=numpy.float32,
                                   mode='nearest', origin=[-1, 0])
        assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
        assert_equal(output.dtype.type, numpy.float32)

        output = ndimage.convolve(array, kernel,
                                  output=numpy.float32,
                                  mode='nearest', origin=[-1, 0])
        assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
        assert_equal(output.dtype.type, numpy.float32)
|
430 |
+
|
431 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate20(self, dtype_array, dtype_output):
        """1-D filtering along axis=0 of a 2-D array, pre-allocated output."""
        weights = numpy.array([1, 2, 1])
        expected = [[5, 10, 15], [7, 14, 21]]
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]], dtype_array)
        output = numpy.zeros((2, 3), dtype_output)
        ndimage.correlate1d(array, weights, axis=0, output=output)
        assert_array_almost_equal(output, expected)
        ndimage.convolve1d(array, weights, axis=0, output=output)
        assert_array_almost_equal(output, expected)
|
443 |
+
|
444 |
+
    def test_correlate21(self):
        """Same axis=0 case as test_correlate20, but with returned output."""
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]])
        expected = [[5, 10, 15], [7, 14, 21]]
        weights = numpy.array([1, 2, 1])
        output = ndimage.correlate1d(array, weights, axis=0)
        assert_array_almost_equal(output, expected)
        output = ndimage.convolve1d(array, weights, axis=0)
        assert_array_almost_equal(output, expected)
|
453 |
+
|
454 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate22(self, dtype_array, dtype_output):
        """axis=0 filtering with mode='wrap' boundary handling."""
        weights = numpy.array([1, 2, 1])
        expected = [[6, 12, 18], [6, 12, 18]]
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]], dtype_array)
        output = numpy.zeros((2, 3), dtype_output)
        ndimage.correlate1d(array, weights, axis=0,
                            mode='wrap', output=output)
        assert_array_almost_equal(output, expected)
        ndimage.convolve1d(array, weights, axis=0,
                           mode='wrap', output=output)
        assert_array_almost_equal(output, expected)
|
468 |
+
|
469 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate23(self, dtype_array, dtype_output):
        """axis=0 filtering with mode='nearest' boundary handling."""
        weights = numpy.array([1, 2, 1])
        expected = [[5, 10, 15], [7, 14, 21]]
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]], dtype_array)
        output = numpy.zeros((2, 3), dtype_output)
        ndimage.correlate1d(array, weights, axis=0,
                            mode='nearest', output=output)
        assert_array_almost_equal(output, expected)
        ndimage.convolve1d(array, weights, axis=0,
                           mode='nearest', output=output)
        assert_array_almost_equal(output, expected)
|
483 |
+
|
484 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate24(self, dtype_array, dtype_output):
        """axis=0 filtering, mode='nearest', origin=-1."""
        weights = numpy.array([1, 2, 1])
        tcor = [[7, 14, 21], [8, 16, 24]]
        tcov = [[4, 8, 12], [5, 10, 15]]
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]], dtype_array)
        output = numpy.zeros((2, 3), dtype_output)
        ndimage.correlate1d(array, weights, axis=0,
                            mode='nearest', output=output, origin=-1)
        assert_array_almost_equal(output, tcor)
        ndimage.convolve1d(array, weights, axis=0,
                           mode='nearest', output=output, origin=-1)
        assert_array_almost_equal(output, tcov)
|
499 |
+
|
500 |
+
    @pytest.mark.parametrize('dtype_array', types)
    @pytest.mark.parametrize('dtype_output', types)
    def test_correlate25(self, dtype_array, dtype_output):
        """axis=0 filtering, mode='nearest', origin=1 (mirror of origin=-1)."""
        weights = numpy.array([1, 2, 1])
        tcor = [[4, 8, 12], [5, 10, 15]]
        tcov = [[7, 14, 21], [8, 16, 24]]
        array = numpy.array([[1, 2, 3],
                             [2, 4, 6]], dtype_array)
        output = numpy.zeros((2, 3), dtype_output)
        ndimage.correlate1d(array, weights, axis=0,
                            mode='nearest', output=output, origin=1)
        assert_array_almost_equal(output, tcor)
        ndimage.convolve1d(array, weights, axis=0,
                           mode='nearest', output=output, origin=1)
        assert_array_almost_equal(output, tcov)
|
515 |
+
|
516 |
+
    def test_correlate26(self):
        # test fix for gh-11661 (mirror extension of a length 1 signal)
        y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
        assert_array_equal(y, numpy.array(5.))

        y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
        assert_array_equal(y, numpy.array(5.))
|
523 |
+
|
524 |
+
    @pytest.mark.parametrize('dtype_kernel', complex_types)
    @pytest.mark.parametrize('dtype_input', types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
                                      dtype_output):
        """Real input with a complex kernel, default boundary mode."""
        kernel = numpy.array([[1, 0],
                              [0, 1 + 1j]], dtype_kernel)
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_input)
        self._validate_complex(array, kernel, dtype_output)
|
534 |
+
|
535 |
+
    @pytest.mark.parametrize('dtype_kernel', complex_types)
    @pytest.mark.parametrize('dtype_input', types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    @pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
    def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
                                           dtype_output, mode):
        # test use of non-zero cval with complex inputs
        # also verifies that mode 'grid-constant' does not segfault
        kernel = numpy.array([[1, 0],
                              [0, 1 + 1j]], dtype_kernel)
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_input)
        self._validate_complex(array, kernel, dtype_output, mode=mode,
                               cval=5.0)
|
549 |
+
|
550 |
+
    @pytest.mark.parametrize('dtype_kernel', complex_types)
    @pytest.mark.parametrize('dtype_input', types)
    def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
                                                   dtype_kernel):
        # cannot give complex cval with a real image
        kernel = numpy.array([[1, 0],
                              [0, 1 + 1j]], dtype_kernel)
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype_input)
        for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
                     ndimage.correlate1d]:
            with pytest.raises(ValueError):
                func(array, kernel, mode='constant', cval=5.0 + 1.0j,
                     output=numpy.complex64)
|
564 |
+
|
565 |
+
    @pytest.mark.parametrize('dtype_kernel', complex_types)
    @pytest.mark.parametrize('dtype_input', types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
                                        dtype_output):
        """1-D real input with a complex kernel."""
        kernel = numpy.array([1, 1 + 1j], dtype_kernel)
        array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
        self._validate_complex(array, kernel, dtype_output)
|
573 |
+
|
574 |
+
    @pytest.mark.parametrize('dtype_kernel', complex_types)
    @pytest.mark.parametrize('dtype_input', types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
                                             dtype_output):
        """1-D real input, complex kernel, real non-zero cval."""
        kernel = numpy.array([1, 1 + 1j], dtype_kernel)
        array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
        self._validate_complex(array, kernel, dtype_output, mode='constant',
                               cval=5.0)
|
583 |
+
|
584 |
+
    @pytest.mark.parametrize('dtype_kernel', types)
    @pytest.mark.parametrize('dtype_input', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate_complex_input(self, dtype_input, dtype_kernel,
                                     dtype_output):
        """Complex input with a real kernel."""
        kernel = numpy.array([[1, 0],
                              [0, 1]], dtype_kernel)
        array = numpy.array([[1, 2j, 3],
                             [1 + 4j, 5, 6j]], dtype_input)
        self._validate_complex(array, kernel, dtype_output)
|
594 |
+
|
595 |
+
    @pytest.mark.parametrize('dtype_kernel', types)
    @pytest.mark.parametrize('dtype_input', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
                                       dtype_output):
        """1-D complex input with a real kernel."""
        kernel = numpy.array([1, 0, 1], dtype_kernel)
        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
        self._validate_complex(array, kernel, dtype_output)
|
603 |
+
|
604 |
+
    @pytest.mark.parametrize('dtype_kernel', types)
    @pytest.mark.parametrize('dtype_input', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
                                            dtype_output):
        """1-D complex input, real kernel, complex cval."""
        kernel = numpy.array([1, 0, 1], dtype_kernel)
        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
        self._validate_complex(array, kernel, dtype_output, mode='constant',
                               cval=5 - 3j)
|
613 |
+
|
614 |
+
    @pytest.mark.parametrize('dtype', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
        """Both input and kernel complex."""
        kernel = numpy.array([[1, 0],
                              [0, 1 + 1j]], dtype)
        array = numpy.array([[1, 2j, 3],
                             [1 + 4j, 5, 6j]], dtype)
        self._validate_complex(array, kernel, dtype_output)
|
622 |
+
|
623 |
+
    @pytest.mark.parametrize('dtype', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate_complex_input_and_kernel_cval(self, dtype,
                                                     dtype_output):
        """Complex input and kernel with a complex boundary cval."""
        kernel = numpy.array([[1, 0],
                              [0, 1 + 1j]], dtype)
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6]], dtype)
        self._validate_complex(array, kernel, dtype_output, mode='constant',
                               cval=5.0 + 2.0j)
|
633 |
+
|
634 |
+
    @pytest.mark.parametrize('dtype', complex_types)
    @pytest.mark.parametrize('dtype_output', complex_types)
    def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
        """1-D case with both input and kernel complex."""
        kernel = numpy.array([1, 1 + 1j], dtype)
        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
        self._validate_complex(array, kernel, dtype_output)
|
640 |
+
|
641 |
+
@pytest.mark.parametrize('dtype', complex_types)
|
642 |
+
@pytest.mark.parametrize('dtype_output', complex_types)
|
643 |
+
def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
|
644 |
+
dtype_output):
|
645 |
+
kernel = numpy.array([1, 1 + 1j], dtype)
|
646 |
+
array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
|
647 |
+
self._validate_complex(array, kernel, dtype_output, mode='constant',
|
648 |
+
cval=5.0 + 2.0j)
|
649 |
+
|
650 |
+
def test_gauss01(self):
|
651 |
+
input = numpy.array([[1, 2, 3],
|
652 |
+
[2, 4, 6]], numpy.float32)
|
653 |
+
output = ndimage.gaussian_filter(input, 0)
|
654 |
+
assert_array_almost_equal(output, input)
|
655 |
+
|
656 |
+
def test_gauss02(self):
|
657 |
+
input = numpy.array([[1, 2, 3],
|
658 |
+
[2, 4, 6]], numpy.float32)
|
659 |
+
output = ndimage.gaussian_filter(input, 1.0)
|
660 |
+
assert_equal(input.dtype, output.dtype)
|
661 |
+
assert_equal(input.shape, output.shape)
|
662 |
+
|
663 |
+
def test_gauss03(self):
|
664 |
+
# single precision data
|
665 |
+
input = numpy.arange(100 * 100).astype(numpy.float32)
|
666 |
+
input.shape = (100, 100)
|
667 |
+
output = ndimage.gaussian_filter(input, [1.0, 1.0])
|
668 |
+
|
669 |
+
assert_equal(input.dtype, output.dtype)
|
670 |
+
assert_equal(input.shape, output.shape)
|
671 |
+
|
672 |
+
# input.sum() is 49995000.0. With single precision floats, we can't
|
673 |
+
# expect more than 8 digits of accuracy, so use decimal=0 in this test.
|
674 |
+
assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
|
675 |
+
decimal=0)
|
676 |
+
assert_(sumsq(input, output) > 1.0)
|
677 |
+
|
678 |
+
def test_gauss04(self):
|
679 |
+
input = numpy.arange(100 * 100).astype(numpy.float32)
|
680 |
+
input.shape = (100, 100)
|
681 |
+
otype = numpy.float64
|
682 |
+
output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
|
683 |
+
assert_equal(output.dtype.type, numpy.float64)
|
684 |
+
assert_equal(input.shape, output.shape)
|
685 |
+
assert_(sumsq(input, output) > 1.0)
|
686 |
+
|
687 |
+
def test_gauss05(self):
|
688 |
+
input = numpy.arange(100 * 100).astype(numpy.float32)
|
689 |
+
input.shape = (100, 100)
|
690 |
+
otype = numpy.float64
|
691 |
+
output = ndimage.gaussian_filter(input, [1.0, 1.0],
|
692 |
+
order=1, output=otype)
|
693 |
+
assert_equal(output.dtype.type, numpy.float64)
|
694 |
+
assert_equal(input.shape, output.shape)
|
695 |
+
assert_(sumsq(input, output) > 1.0)
|
696 |
+
|
697 |
+
def test_gauss06(self):
|
698 |
+
input = numpy.arange(100 * 100).astype(numpy.float32)
|
699 |
+
input.shape = (100, 100)
|
700 |
+
otype = numpy.float64
|
701 |
+
output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
|
702 |
+
output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
|
703 |
+
assert_array_almost_equal(output1, output2)
|
704 |
+
|
705 |
+
def test_gauss_memory_overlap(self):
|
706 |
+
input = numpy.arange(100 * 100).astype(numpy.float32)
|
707 |
+
input.shape = (100, 100)
|
708 |
+
output1 = ndimage.gaussian_filter(input, 1.0)
|
709 |
+
ndimage.gaussian_filter(input, 1.0, output=input)
|
710 |
+
assert_array_almost_equal(output1, input)
|
711 |
+
|
712 |
+
@pytest.mark.parametrize(('filter_func', 'extra_args', 'size0', 'size'),
|
713 |
+
[(ndimage.gaussian_filter, (), 0, 1.0),
|
714 |
+
(ndimage.uniform_filter, (), 1, 3),
|
715 |
+
(ndimage.minimum_filter, (), 1, 3),
|
716 |
+
(ndimage.maximum_filter, (), 1, 3),
|
717 |
+
(ndimage.median_filter, (), 1, 3),
|
718 |
+
(ndimage.rank_filter, (1,), 1, 3),
|
719 |
+
(ndimage.percentile_filter, (40,), 1, 3)])
|
720 |
+
@pytest.mark.parametrize(
|
721 |
+
'axes',
|
722 |
+
tuple(itertools.combinations(range(-3, 3), 1))
|
723 |
+
+ tuple(itertools.combinations(range(-3, 3), 2))
|
724 |
+
+ ((0, 1, 2),))
|
725 |
+
def test_filter_axes(self, filter_func, extra_args, size0, size, axes):
|
726 |
+
# Note: `size` is called `sigma` in `gaussian_filter`
|
727 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
|
728 |
+
axes = numpy.array(axes)
|
729 |
+
|
730 |
+
if len(set(axes % array.ndim)) != len(axes):
|
731 |
+
# parametrized cases with duplicate axes raise an error
|
732 |
+
with pytest.raises(ValueError, match="axes must be unique"):
|
733 |
+
filter_func(array, *extra_args, size, axes=axes)
|
734 |
+
return
|
735 |
+
output = filter_func(array, *extra_args, size, axes=axes)
|
736 |
+
|
737 |
+
# result should be equivalent to sigma=0.0/size=1 on unfiltered axes
|
738 |
+
all_sizes = (size if ax in (axes % array.ndim) else size0
|
739 |
+
for ax in range(array.ndim))
|
740 |
+
expected = filter_func(array, *extra_args, all_sizes)
|
741 |
+
assert_allclose(output, expected)
|
742 |
+
|
743 |
+
kwargs_gauss = dict(radius=[4, 2, 3], order=[0, 1, 2],
|
744 |
+
mode=['reflect', 'nearest', 'constant'])
|
745 |
+
kwargs_other = dict(origin=(-1, 0, 1),
|
746 |
+
mode=['reflect', 'nearest', 'constant'])
|
747 |
+
kwargs_rank = dict(origin=(-1, 0, 1))
|
748 |
+
|
749 |
+
@pytest.mark.parametrize("filter_func, size0, size, kwargs",
|
750 |
+
[(ndimage.gaussian_filter, 0, 1.0, kwargs_gauss),
|
751 |
+
(ndimage.uniform_filter, 1, 3, kwargs_other),
|
752 |
+
(ndimage.maximum_filter, 1, 3, kwargs_other),
|
753 |
+
(ndimage.minimum_filter, 1, 3, kwargs_other),
|
754 |
+
(ndimage.median_filter, 1, 3, kwargs_rank),
|
755 |
+
(ndimage.rank_filter, 1, 3, kwargs_rank),
|
756 |
+
(ndimage.percentile_filter, 1, 3, kwargs_rank)])
|
757 |
+
@pytest.mark.parametrize('axes', itertools.combinations(range(-3, 3), 2))
|
758 |
+
def test_filter_axes_kwargs(self, filter_func, size0, size, kwargs, axes):
|
759 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
|
760 |
+
|
761 |
+
kwargs = {key: numpy.array(val) for key, val in kwargs.items()}
|
762 |
+
axes = numpy.array(axes)
|
763 |
+
n_axes = axes.size
|
764 |
+
|
765 |
+
if filter_func == ndimage.rank_filter:
|
766 |
+
args = (2,) # (rank,)
|
767 |
+
elif filter_func == ndimage.percentile_filter:
|
768 |
+
args = (30,) # (percentile,)
|
769 |
+
else:
|
770 |
+
args = ()
|
771 |
+
|
772 |
+
# form kwargs that specify only the axes in `axes`
|
773 |
+
reduced_kwargs = {key: val[axes] for key, val in kwargs.items()}
|
774 |
+
if len(set(axes % array.ndim)) != len(axes):
|
775 |
+
# parametrized cases with duplicate axes raise an error
|
776 |
+
with pytest.raises(ValueError, match="axes must be unique"):
|
777 |
+
filter_func(array, *args, [size]*n_axes, axes=axes,
|
778 |
+
**reduced_kwargs)
|
779 |
+
return
|
780 |
+
|
781 |
+
output = filter_func(array, *args, [size]*n_axes, axes=axes,
|
782 |
+
**reduced_kwargs)
|
783 |
+
|
784 |
+
# result should be equivalent to sigma=0.0/size=1 on unfiltered axes
|
785 |
+
size_3d = numpy.full(array.ndim, fill_value=size0)
|
786 |
+
size_3d[axes] = size
|
787 |
+
if 'origin' in kwargs:
|
788 |
+
# origin should be zero on the axis that has size 0
|
789 |
+
origin = numpy.array([0, 0, 0])
|
790 |
+
origin[axes] = reduced_kwargs['origin']
|
791 |
+
kwargs['origin'] = origin
|
792 |
+
expected = filter_func(array, *args, size_3d, **kwargs)
|
793 |
+
assert_allclose(output, expected)
|
794 |
+
|
795 |
+
@pytest.mark.parametrize(
|
796 |
+
'filter_func, args',
|
797 |
+
[(ndimage.gaussian_filter, (1.0,)), # args = (sigma,)
|
798 |
+
(ndimage.uniform_filter, (3,)), # args = (size,)
|
799 |
+
(ndimage.minimum_filter, (3,)), # args = (size,)
|
800 |
+
(ndimage.maximum_filter, (3,)), # args = (size,)
|
801 |
+
(ndimage.median_filter, (3,)), # args = (size,)
|
802 |
+
(ndimage.rank_filter, (2, 3)), # args = (rank, size)
|
803 |
+
(ndimage.percentile_filter, (30, 3))]) # args = (percentile, size)
|
804 |
+
@pytest.mark.parametrize(
|
805 |
+
'axes', [(1.5,), (0, 1, 2, 3), (3,), (-4,)]
|
806 |
+
)
|
807 |
+
def test_filter_invalid_axes(self, filter_func, args, axes):
|
808 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
|
809 |
+
if any(isinstance(ax, float) for ax in axes):
|
810 |
+
error_class = TypeError
|
811 |
+
match = "cannot be interpreted as an integer"
|
812 |
+
else:
|
813 |
+
error_class = ValueError
|
814 |
+
match = "out of range"
|
815 |
+
with pytest.raises(error_class, match=match):
|
816 |
+
filter_func(array, *args, axes=axes)
|
817 |
+
|
818 |
+
@pytest.mark.parametrize(
|
819 |
+
'filter_func, kwargs',
|
820 |
+
[(ndimage.minimum_filter, {}),
|
821 |
+
(ndimage.maximum_filter, {}),
|
822 |
+
(ndimage.median_filter, {}),
|
823 |
+
(ndimage.rank_filter, dict(rank=3)),
|
824 |
+
(ndimage.percentile_filter, dict(percentile=30))])
|
825 |
+
@pytest.mark.parametrize(
|
826 |
+
'axes', [(0, ), (1, 2), (0, 1, 2)]
|
827 |
+
)
|
828 |
+
@pytest.mark.parametrize('separable_footprint', [False, True])
|
829 |
+
def test_filter_invalid_footprint_ndim(self, filter_func, kwargs, axes,
|
830 |
+
separable_footprint):
|
831 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
|
832 |
+
# create a footprint with one too many dimensions
|
833 |
+
footprint = numpy.ones((3,) * (len(axes) + 1))
|
834 |
+
if not separable_footprint:
|
835 |
+
footprint[(0,) * footprint.ndim] = 0
|
836 |
+
if (filter_func in [ndimage.minimum_filter, ndimage.maximum_filter]
|
837 |
+
and separable_footprint):
|
838 |
+
match = "sequence argument must have length equal to input rank"
|
839 |
+
else:
|
840 |
+
match = "footprint array has incorrect shape"
|
841 |
+
with pytest.raises(RuntimeError, match=match):
|
842 |
+
filter_func(array, **kwargs, footprint=footprint, axes=axes)
|
843 |
+
|
844 |
+
@pytest.mark.parametrize('n_mismatch', [1, 3])
|
845 |
+
@pytest.mark.parametrize('filter_func, kwargs, key, val',
|
846 |
+
_cases_axes_tuple_length_mismatch())
|
847 |
+
def test_filter_tuple_length_mismatch(self, n_mismatch, filter_func,
|
848 |
+
kwargs, key, val):
|
849 |
+
# Test for the intended RuntimeError when a kwargs has an invalid size
|
850 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
|
851 |
+
kwargs = dict(**kwargs, axes=(0, 1))
|
852 |
+
kwargs[key] = (val,) * n_mismatch
|
853 |
+
err_msg = "sequence argument must have length equal to input rank"
|
854 |
+
with pytest.raises(RuntimeError, match=err_msg):
|
855 |
+
filter_func(array, **kwargs)
|
856 |
+
|
857 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
858 |
+
def test_prewitt01(self, dtype):
|
859 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
860 |
+
[5, 8, 3, 7, 1],
|
861 |
+
[5, 6, 9, 3, 5]], dtype)
|
862 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
|
863 |
+
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
|
864 |
+
output = ndimage.prewitt(array, 0)
|
865 |
+
assert_array_almost_equal(t, output)
|
866 |
+
|
867 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
868 |
+
def test_prewitt02(self, dtype):
|
869 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
870 |
+
[5, 8, 3, 7, 1],
|
871 |
+
[5, 6, 9, 3, 5]], dtype)
|
872 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
|
873 |
+
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
|
874 |
+
output = numpy.zeros(array.shape, dtype)
|
875 |
+
ndimage.prewitt(array, 0, output)
|
876 |
+
assert_array_almost_equal(t, output)
|
877 |
+
|
878 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
879 |
+
def test_prewitt03(self, dtype):
|
880 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
881 |
+
[5, 8, 3, 7, 1],
|
882 |
+
[5, 6, 9, 3, 5]], dtype)
|
883 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
|
884 |
+
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
|
885 |
+
output = ndimage.prewitt(array, 1)
|
886 |
+
assert_array_almost_equal(t, output)
|
887 |
+
|
888 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
889 |
+
def test_prewitt04(self, dtype):
|
890 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
891 |
+
[5, 8, 3, 7, 1],
|
892 |
+
[5, 6, 9, 3, 5]], dtype)
|
893 |
+
t = ndimage.prewitt(array, -1)
|
894 |
+
output = ndimage.prewitt(array, 1)
|
895 |
+
assert_array_almost_equal(t, output)
|
896 |
+
|
897 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
898 |
+
def test_sobel01(self, dtype):
|
899 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
900 |
+
[5, 8, 3, 7, 1],
|
901 |
+
[5, 6, 9, 3, 5]], dtype)
|
902 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
|
903 |
+
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
|
904 |
+
output = ndimage.sobel(array, 0)
|
905 |
+
assert_array_almost_equal(t, output)
|
906 |
+
|
907 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
908 |
+
def test_sobel02(self, dtype):
|
909 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
910 |
+
[5, 8, 3, 7, 1],
|
911 |
+
[5, 6, 9, 3, 5]], dtype)
|
912 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
|
913 |
+
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
|
914 |
+
output = numpy.zeros(array.shape, dtype)
|
915 |
+
ndimage.sobel(array, 0, output)
|
916 |
+
assert_array_almost_equal(t, output)
|
917 |
+
|
918 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
919 |
+
def test_sobel03(self, dtype):
|
920 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
921 |
+
[5, 8, 3, 7, 1],
|
922 |
+
[5, 6, 9, 3, 5]], dtype)
|
923 |
+
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
|
924 |
+
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
|
925 |
+
output = numpy.zeros(array.shape, dtype)
|
926 |
+
output = ndimage.sobel(array, 1)
|
927 |
+
assert_array_almost_equal(t, output)
|
928 |
+
|
929 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
930 |
+
def test_sobel04(self, dtype):
|
931 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
932 |
+
[5, 8, 3, 7, 1],
|
933 |
+
[5, 6, 9, 3, 5]], dtype)
|
934 |
+
t = ndimage.sobel(array, -1)
|
935 |
+
output = ndimage.sobel(array, 1)
|
936 |
+
assert_array_almost_equal(t, output)
|
937 |
+
|
938 |
+
@pytest.mark.parametrize('dtype',
|
939 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
940 |
+
numpy.complex64, numpy.complex128])
|
941 |
+
def test_laplace01(self, dtype):
|
942 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
943 |
+
[5, 8, 3, 7, 1],
|
944 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
945 |
+
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
|
946 |
+
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
|
947 |
+
output = ndimage.laplace(array)
|
948 |
+
assert_array_almost_equal(tmp1 + tmp2, output)
|
949 |
+
|
950 |
+
@pytest.mark.parametrize('dtype',
|
951 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
952 |
+
numpy.complex64, numpy.complex128])
|
953 |
+
def test_laplace02(self, dtype):
|
954 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
955 |
+
[5, 8, 3, 7, 1],
|
956 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
957 |
+
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
|
958 |
+
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
|
959 |
+
output = numpy.zeros(array.shape, dtype)
|
960 |
+
ndimage.laplace(array, output=output)
|
961 |
+
assert_array_almost_equal(tmp1 + tmp2, output)
|
962 |
+
|
963 |
+
@pytest.mark.parametrize('dtype',
|
964 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
965 |
+
numpy.complex64, numpy.complex128])
|
966 |
+
def test_gaussian_laplace01(self, dtype):
|
967 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
968 |
+
[5, 8, 3, 7, 1],
|
969 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
970 |
+
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
|
971 |
+
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
|
972 |
+
output = ndimage.gaussian_laplace(array, 1.0)
|
973 |
+
assert_array_almost_equal(tmp1 + tmp2, output)
|
974 |
+
|
975 |
+
@pytest.mark.parametrize('dtype',
|
976 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
977 |
+
numpy.complex64, numpy.complex128])
|
978 |
+
def test_gaussian_laplace02(self, dtype):
|
979 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
980 |
+
[5, 8, 3, 7, 1],
|
981 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
982 |
+
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
|
983 |
+
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
|
984 |
+
output = numpy.zeros(array.shape, dtype)
|
985 |
+
ndimage.gaussian_laplace(array, 1.0, output)
|
986 |
+
assert_array_almost_equal(tmp1 + tmp2, output)
|
987 |
+
|
988 |
+
@pytest.mark.parametrize('dtype', types + complex_types)
|
989 |
+
def test_generic_laplace01(self, dtype):
|
990 |
+
def derivative2(input, axis, output, mode, cval, a, b):
|
991 |
+
sigma = [a, b / 2.0]
|
992 |
+
input = numpy.asarray(input)
|
993 |
+
order = [0] * input.ndim
|
994 |
+
order[axis] = 2
|
995 |
+
return ndimage.gaussian_filter(input, sigma, order,
|
996 |
+
output, mode, cval)
|
997 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
998 |
+
[5, 8, 3, 7, 1],
|
999 |
+
[5, 6, 9, 3, 5]], dtype)
|
1000 |
+
output = numpy.zeros(array.shape, dtype)
|
1001 |
+
tmp = ndimage.generic_laplace(array, derivative2,
|
1002 |
+
extra_arguments=(1.0,),
|
1003 |
+
extra_keywords={'b': 2.0})
|
1004 |
+
ndimage.gaussian_laplace(array, 1.0, output)
|
1005 |
+
assert_array_almost_equal(tmp, output)
|
1006 |
+
|
1007 |
+
@pytest.mark.parametrize('dtype',
|
1008 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
1009 |
+
numpy.complex64, numpy.complex128])
|
1010 |
+
def test_gaussian_gradient_magnitude01(self, dtype):
|
1011 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1012 |
+
[5, 8, 3, 7, 1],
|
1013 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
1014 |
+
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
|
1015 |
+
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
|
1016 |
+
output = ndimage.gaussian_gradient_magnitude(array, 1.0)
|
1017 |
+
expected = tmp1 * tmp1 + tmp2 * tmp2
|
1018 |
+
expected = numpy.sqrt(expected).astype(dtype)
|
1019 |
+
assert_array_almost_equal(expected, output)
|
1020 |
+
|
1021 |
+
@pytest.mark.parametrize('dtype',
|
1022 |
+
[numpy.int32, numpy.float32, numpy.float64,
|
1023 |
+
numpy.complex64, numpy.complex128])
|
1024 |
+
def test_gaussian_gradient_magnitude02(self, dtype):
|
1025 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1026 |
+
[5, 8, 3, 7, 1],
|
1027 |
+
[5, 6, 9, 3, 5]], dtype) * 100
|
1028 |
+
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
|
1029 |
+
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
|
1030 |
+
output = numpy.zeros(array.shape, dtype)
|
1031 |
+
ndimage.gaussian_gradient_magnitude(array, 1.0, output)
|
1032 |
+
expected = tmp1 * tmp1 + tmp2 * tmp2
|
1033 |
+
expected = numpy.sqrt(expected).astype(dtype)
|
1034 |
+
assert_array_almost_equal(expected, output)
|
1035 |
+
|
1036 |
+
def test_generic_gradient_magnitude01(self):
|
1037 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1038 |
+
[5, 8, 3, 7, 1],
|
1039 |
+
[5, 6, 9, 3, 5]], numpy.float64)
|
1040 |
+
|
1041 |
+
def derivative(input, axis, output, mode, cval, a, b):
|
1042 |
+
sigma = [a, b / 2.0]
|
1043 |
+
input = numpy.asarray(input)
|
1044 |
+
order = [0] * input.ndim
|
1045 |
+
order[axis] = 1
|
1046 |
+
return ndimage.gaussian_filter(input, sigma, order,
|
1047 |
+
output, mode, cval)
|
1048 |
+
tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
|
1049 |
+
tmp2 = ndimage.generic_gradient_magnitude(
|
1050 |
+
array, derivative, extra_arguments=(1.0,),
|
1051 |
+
extra_keywords={'b': 2.0})
|
1052 |
+
assert_array_almost_equal(tmp1, tmp2)
|
1053 |
+
|
1054 |
+
def test_uniform01(self):
|
1055 |
+
array = numpy.array([2, 4, 6])
|
1056 |
+
size = 2
|
1057 |
+
output = ndimage.uniform_filter1d(array, size, origin=-1)
|
1058 |
+
assert_array_almost_equal([3, 5, 6], output)
|
1059 |
+
|
1060 |
+
def test_uniform01_complex(self):
|
1061 |
+
array = numpy.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=numpy.complex128)
|
1062 |
+
size = 2
|
1063 |
+
output = ndimage.uniform_filter1d(array, size, origin=-1)
|
1064 |
+
assert_array_almost_equal([3, 5, 6], output.real)
|
1065 |
+
assert_array_almost_equal([1.5, 2.5, 3], output.imag)
|
1066 |
+
|
1067 |
+
def test_uniform02(self):
|
1068 |
+
array = numpy.array([1, 2, 3])
|
1069 |
+
filter_shape = [0]
|
1070 |
+
output = ndimage.uniform_filter(array, filter_shape)
|
1071 |
+
assert_array_almost_equal(array, output)
|
1072 |
+
|
1073 |
+
def test_uniform03(self):
|
1074 |
+
array = numpy.array([1, 2, 3])
|
1075 |
+
filter_shape = [1]
|
1076 |
+
output = ndimage.uniform_filter(array, filter_shape)
|
1077 |
+
assert_array_almost_equal(array, output)
|
1078 |
+
|
1079 |
+
def test_uniform04(self):
|
1080 |
+
array = numpy.array([2, 4, 6])
|
1081 |
+
filter_shape = [2]
|
1082 |
+
output = ndimage.uniform_filter(array, filter_shape)
|
1083 |
+
assert_array_almost_equal([2, 3, 5], output)
|
1084 |
+
|
1085 |
+
def test_uniform05(self):
|
1086 |
+
array = []
|
1087 |
+
filter_shape = [1]
|
1088 |
+
output = ndimage.uniform_filter(array, filter_shape)
|
1089 |
+
assert_array_almost_equal([], output)
|
1090 |
+
|
1091 |
+
@pytest.mark.parametrize('dtype_array', types)
|
1092 |
+
@pytest.mark.parametrize('dtype_output', types)
|
1093 |
+
def test_uniform06(self, dtype_array, dtype_output):
|
1094 |
+
filter_shape = [2, 2]
|
1095 |
+
array = numpy.array([[4, 8, 12],
|
1096 |
+
[16, 20, 24]], dtype_array)
|
1097 |
+
output = ndimage.uniform_filter(
|
1098 |
+
array, filter_shape, output=dtype_output)
|
1099 |
+
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
|
1100 |
+
assert_equal(output.dtype.type, dtype_output)
|
1101 |
+
|
1102 |
+
@pytest.mark.parametrize('dtype_array', complex_types)
|
1103 |
+
@pytest.mark.parametrize('dtype_output', complex_types)
|
1104 |
+
def test_uniform06_complex(self, dtype_array, dtype_output):
|
1105 |
+
filter_shape = [2, 2]
|
1106 |
+
array = numpy.array([[4, 8 + 5j, 12],
|
1107 |
+
[16, 20, 24]], dtype_array)
|
1108 |
+
output = ndimage.uniform_filter(
|
1109 |
+
array, filter_shape, output=dtype_output)
|
1110 |
+
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
|
1111 |
+
assert_equal(output.dtype.type, dtype_output)
|
1112 |
+
|
1113 |
+
def test_minimum_filter01(self):
|
1114 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1115 |
+
filter_shape = numpy.array([2])
|
1116 |
+
output = ndimage.minimum_filter(array, filter_shape)
|
1117 |
+
assert_array_almost_equal([1, 1, 2, 3, 4], output)
|
1118 |
+
|
1119 |
+
def test_minimum_filter02(self):
|
1120 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1121 |
+
filter_shape = numpy.array([3])
|
1122 |
+
output = ndimage.minimum_filter(array, filter_shape)
|
1123 |
+
assert_array_almost_equal([1, 1, 2, 3, 4], output)
|
1124 |
+
|
1125 |
+
def test_minimum_filter03(self):
|
1126 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1127 |
+
filter_shape = numpy.array([2])
|
1128 |
+
output = ndimage.minimum_filter(array, filter_shape)
|
1129 |
+
assert_array_almost_equal([3, 2, 2, 1, 1], output)
|
1130 |
+
|
1131 |
+
def test_minimum_filter04(self):
|
1132 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1133 |
+
filter_shape = numpy.array([3])
|
1134 |
+
output = ndimage.minimum_filter(array, filter_shape)
|
1135 |
+
assert_array_almost_equal([2, 2, 1, 1, 1], output)
|
1136 |
+
|
1137 |
+
def test_minimum_filter05(self):
|
1138 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1139 |
+
[7, 6, 9, 3, 5],
|
1140 |
+
[5, 8, 3, 7, 1]])
|
1141 |
+
filter_shape = numpy.array([2, 3])
|
1142 |
+
output = ndimage.minimum_filter(array, filter_shape)
|
1143 |
+
assert_array_almost_equal([[2, 2, 1, 1, 1],
|
1144 |
+
[2, 2, 1, 1, 1],
|
1145 |
+
[5, 3, 3, 1, 1]], output)
|
1146 |
+
|
1147 |
+
def test_minimum_filter05_overlap(self):
|
1148 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1149 |
+
[7, 6, 9, 3, 5],
|
1150 |
+
[5, 8, 3, 7, 1]])
|
1151 |
+
filter_shape = numpy.array([2, 3])
|
1152 |
+
ndimage.minimum_filter(array, filter_shape, output=array)
|
1153 |
+
assert_array_almost_equal([[2, 2, 1, 1, 1],
|
1154 |
+
[2, 2, 1, 1, 1],
|
1155 |
+
[5, 3, 3, 1, 1]], array)
|
1156 |
+
|
1157 |
+
def test_minimum_filter06(self):
|
1158 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1159 |
+
[7, 6, 9, 3, 5],
|
1160 |
+
[5, 8, 3, 7, 1]])
|
1161 |
+
footprint = [[1, 1, 1], [1, 1, 1]]
|
1162 |
+
output = ndimage.minimum_filter(array, footprint=footprint)
|
1163 |
+
assert_array_almost_equal([[2, 2, 1, 1, 1],
|
1164 |
+
[2, 2, 1, 1, 1],
|
1165 |
+
[5, 3, 3, 1, 1]], output)
|
1166 |
+
# separable footprint should allow mode sequence
|
1167 |
+
output2 = ndimage.minimum_filter(array, footprint=footprint,
|
1168 |
+
mode=['reflect', 'reflect'])
|
1169 |
+
assert_array_almost_equal(output2, output)
|
1170 |
+
|
1171 |
+
def test_minimum_filter07(self):
|
1172 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1173 |
+
[7, 6, 9, 3, 5],
|
1174 |
+
[5, 8, 3, 7, 1]])
|
1175 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1176 |
+
output = ndimage.minimum_filter(array, footprint=footprint)
|
1177 |
+
assert_array_almost_equal([[2, 2, 1, 1, 1],
|
1178 |
+
[2, 3, 1, 3, 1],
|
1179 |
+
[5, 5, 3, 3, 1]], output)
|
1180 |
+
with assert_raises(RuntimeError):
|
1181 |
+
ndimage.minimum_filter(array, footprint=footprint,
|
1182 |
+
mode=['reflect', 'constant'])
|
1183 |
+
|
1184 |
+
def test_minimum_filter08(self):
|
1185 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1186 |
+
[7, 6, 9, 3, 5],
|
1187 |
+
[5, 8, 3, 7, 1]])
|
1188 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1189 |
+
output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
|
1190 |
+
assert_array_almost_equal([[3, 1, 3, 1, 1],
|
1191 |
+
[5, 3, 3, 1, 1],
|
1192 |
+
[3, 3, 1, 1, 1]], output)
|
1193 |
+
|
1194 |
+
def test_minimum_filter09(self):
|
1195 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1196 |
+
[7, 6, 9, 3, 5],
|
1197 |
+
[5, 8, 3, 7, 1]])
|
1198 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1199 |
+
output = ndimage.minimum_filter(array, footprint=footprint,
|
1200 |
+
origin=[-1, 0])
|
1201 |
+
assert_array_almost_equal([[2, 3, 1, 3, 1],
|
1202 |
+
[5, 5, 3, 3, 1],
|
1203 |
+
[5, 3, 3, 1, 1]], output)
|
1204 |
+
|
1205 |
+
def test_maximum_filter01(self):
|
1206 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1207 |
+
filter_shape = numpy.array([2])
|
1208 |
+
output = ndimage.maximum_filter(array, filter_shape)
|
1209 |
+
assert_array_almost_equal([1, 2, 3, 4, 5], output)
|
1210 |
+
|
1211 |
+
def test_maximum_filter02(self):
|
1212 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1213 |
+
filter_shape = numpy.array([3])
|
1214 |
+
output = ndimage.maximum_filter(array, filter_shape)
|
1215 |
+
assert_array_almost_equal([2, 3, 4, 5, 5], output)
|
1216 |
+
|
1217 |
+
def test_maximum_filter03(self):
|
1218 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1219 |
+
filter_shape = numpy.array([2])
|
1220 |
+
output = ndimage.maximum_filter(array, filter_shape)
|
1221 |
+
assert_array_almost_equal([3, 3, 5, 5, 4], output)
|
1222 |
+
|
1223 |
+
def test_maximum_filter04(self):
|
1224 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1225 |
+
filter_shape = numpy.array([3])
|
1226 |
+
output = ndimage.maximum_filter(array, filter_shape)
|
1227 |
+
assert_array_almost_equal([3, 5, 5, 5, 4], output)
|
1228 |
+
|
1229 |
+
def test_maximum_filter05(self):
|
1230 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1231 |
+
[7, 6, 9, 3, 5],
|
1232 |
+
[5, 8, 3, 7, 1]])
|
1233 |
+
filter_shape = numpy.array([2, 3])
|
1234 |
+
output = ndimage.maximum_filter(array, filter_shape)
|
1235 |
+
assert_array_almost_equal([[3, 5, 5, 5, 4],
|
1236 |
+
[7, 9, 9, 9, 5],
|
1237 |
+
[8, 9, 9, 9, 7]], output)
|
1238 |
+
|
1239 |
+
def test_maximum_filter06(self):
|
1240 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1241 |
+
[7, 6, 9, 3, 5],
|
1242 |
+
[5, 8, 3, 7, 1]])
|
1243 |
+
footprint = [[1, 1, 1], [1, 1, 1]]
|
1244 |
+
output = ndimage.maximum_filter(array, footprint=footprint)
|
1245 |
+
assert_array_almost_equal([[3, 5, 5, 5, 4],
|
1246 |
+
[7, 9, 9, 9, 5],
|
1247 |
+
[8, 9, 9, 9, 7]], output)
|
1248 |
+
# separable footprint should allow mode sequence
|
1249 |
+
output2 = ndimage.maximum_filter(array, footprint=footprint,
|
1250 |
+
mode=['reflect', 'reflect'])
|
1251 |
+
assert_array_almost_equal(output2, output)
|
1252 |
+
|
1253 |
+
def test_maximum_filter07(self):
|
1254 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1255 |
+
[7, 6, 9, 3, 5],
|
1256 |
+
[5, 8, 3, 7, 1]])
|
1257 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1258 |
+
output = ndimage.maximum_filter(array, footprint=footprint)
|
1259 |
+
assert_array_almost_equal([[3, 5, 5, 5, 4],
|
1260 |
+
[7, 7, 9, 9, 5],
|
1261 |
+
[7, 9, 8, 9, 7]], output)
|
1262 |
+
# non-separable footprint should not allow mode sequence
|
1263 |
+
with assert_raises(RuntimeError):
|
1264 |
+
ndimage.maximum_filter(array, footprint=footprint,
|
1265 |
+
mode=['reflect', 'reflect'])
|
1266 |
+
|
1267 |
+
def test_maximum_filter08(self):
|
1268 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1269 |
+
[7, 6, 9, 3, 5],
|
1270 |
+
[5, 8, 3, 7, 1]])
|
1271 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1272 |
+
output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
|
1273 |
+
assert_array_almost_equal([[7, 9, 9, 5, 5],
|
1274 |
+
[9, 8, 9, 7, 5],
|
1275 |
+
[8, 8, 7, 7, 7]], output)
|
1276 |
+
|
1277 |
+
def test_maximum_filter09(self):
|
1278 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1279 |
+
[7, 6, 9, 3, 5],
|
1280 |
+
[5, 8, 3, 7, 1]])
|
1281 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1282 |
+
output = ndimage.maximum_filter(array, footprint=footprint,
|
1283 |
+
origin=[-1, 0])
|
1284 |
+
assert_array_almost_equal([[7, 7, 9, 9, 5],
|
1285 |
+
[7, 9, 8, 9, 7],
|
1286 |
+
[8, 8, 8, 7, 7]], output)
|
1287 |
+
|
1288 |
+
@pytest.mark.parametrize(
|
1289 |
+
'axes', tuple(itertools.combinations(range(-3, 3), 2))
|
1290 |
+
)
|
1291 |
+
@pytest.mark.parametrize(
|
1292 |
+
'filter_func, kwargs',
|
1293 |
+
[(ndimage.minimum_filter, {}),
|
1294 |
+
(ndimage.maximum_filter, {}),
|
1295 |
+
(ndimage.median_filter, {}),
|
1296 |
+
(ndimage.rank_filter, dict(rank=3)),
|
1297 |
+
(ndimage.percentile_filter, dict(percentile=60))]
|
1298 |
+
)
|
1299 |
+
def test_minmax_nonseparable_axes(self, filter_func, axes, kwargs):
|
1300 |
+
array = numpy.arange(6 * 8 * 12, dtype=numpy.float32).reshape(6, 8, 12)
|
1301 |
+
# use 2D triangular footprint because it is non-separable
|
1302 |
+
footprint = numpy.tri(5)
|
1303 |
+
axes = numpy.array(axes)
|
1304 |
+
|
1305 |
+
if len(set(axes % array.ndim)) != len(axes):
|
1306 |
+
# parametrized cases with duplicate axes raise an error
|
1307 |
+
with pytest.raises(ValueError):
|
1308 |
+
filter_func(array, footprint=footprint, axes=axes, **kwargs)
|
1309 |
+
return
|
1310 |
+
output = filter_func(array, footprint=footprint, axes=axes, **kwargs)
|
1311 |
+
|
1312 |
+
missing_axis = tuple(set(range(3)) - set(axes % array.ndim))[0]
|
1313 |
+
footprint_3d = numpy.expand_dims(footprint, missing_axis)
|
1314 |
+
expected = filter_func(array, footprint=footprint_3d, **kwargs)
|
1315 |
+
assert_allclose(output, expected)
|
1316 |
+
|
1317 |
+
def test_rank01(self):
|
1318 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1319 |
+
output = ndimage.rank_filter(array, 1, size=2)
|
1320 |
+
assert_array_almost_equal(array, output)
|
1321 |
+
output = ndimage.percentile_filter(array, 100, size=2)
|
1322 |
+
assert_array_almost_equal(array, output)
|
1323 |
+
output = ndimage.median_filter(array, 2)
|
1324 |
+
assert_array_almost_equal(array, output)
|
1325 |
+
|
1326 |
+
def test_rank02(self):
|
1327 |
+
array = numpy.array([1, 2, 3, 4, 5])
|
1328 |
+
output = ndimage.rank_filter(array, 1, size=[3])
|
1329 |
+
assert_array_almost_equal(array, output)
|
1330 |
+
output = ndimage.percentile_filter(array, 50, size=3)
|
1331 |
+
assert_array_almost_equal(array, output)
|
1332 |
+
output = ndimage.median_filter(array, (3,))
|
1333 |
+
assert_array_almost_equal(array, output)
|
1334 |
+
|
1335 |
+
def test_rank03(self):
|
1336 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1337 |
+
output = ndimage.rank_filter(array, 1, size=[2])
|
1338 |
+
assert_array_almost_equal([3, 3, 5, 5, 4], output)
|
1339 |
+
output = ndimage.percentile_filter(array, 100, size=2)
|
1340 |
+
assert_array_almost_equal([3, 3, 5, 5, 4], output)
|
1341 |
+
|
1342 |
+
def test_rank04(self):
|
1343 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1344 |
+
expected = [3, 3, 2, 4, 4]
|
1345 |
+
output = ndimage.rank_filter(array, 1, size=3)
|
1346 |
+
assert_array_almost_equal(expected, output)
|
1347 |
+
output = ndimage.percentile_filter(array, 50, size=3)
|
1348 |
+
assert_array_almost_equal(expected, output)
|
1349 |
+
output = ndimage.median_filter(array, size=3)
|
1350 |
+
assert_array_almost_equal(expected, output)
|
1351 |
+
|
1352 |
+
def test_rank05(self):
|
1353 |
+
array = numpy.array([3, 2, 5, 1, 4])
|
1354 |
+
expected = [3, 3, 2, 4, 4]
|
1355 |
+
output = ndimage.rank_filter(array, -2, size=3)
|
1356 |
+
assert_array_almost_equal(expected, output)
|
1357 |
+
|
1358 |
+
def test_rank06(self):
|
1359 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1360 |
+
[5, 8, 3, 7, 1],
|
1361 |
+
[5, 6, 9, 3, 5]])
|
1362 |
+
expected = [[2, 2, 1, 1, 1],
|
1363 |
+
[3, 3, 2, 1, 1],
|
1364 |
+
[5, 5, 3, 3, 1]]
|
1365 |
+
output = ndimage.rank_filter(array, 1, size=[2, 3])
|
1366 |
+
assert_array_almost_equal(expected, output)
|
1367 |
+
output = ndimage.percentile_filter(array, 17, size=(2, 3))
|
1368 |
+
assert_array_almost_equal(expected, output)
|
1369 |
+
|
1370 |
+
def test_rank06_overlap(self):
|
1371 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1372 |
+
[5, 8, 3, 7, 1],
|
1373 |
+
[5, 6, 9, 3, 5]])
|
1374 |
+
array_copy = array.copy()
|
1375 |
+
expected = [[2, 2, 1, 1, 1],
|
1376 |
+
[3, 3, 2, 1, 1],
|
1377 |
+
[5, 5, 3, 3, 1]]
|
1378 |
+
ndimage.rank_filter(array, 1, size=[2, 3], output=array)
|
1379 |
+
assert_array_almost_equal(expected, array)
|
1380 |
+
|
1381 |
+
ndimage.percentile_filter(array_copy, 17, size=(2, 3),
|
1382 |
+
output=array_copy)
|
1383 |
+
assert_array_almost_equal(expected, array_copy)
|
1384 |
+
|
1385 |
+
def test_rank07(self):
|
1386 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1387 |
+
[5, 8, 3, 7, 1],
|
1388 |
+
[5, 6, 9, 3, 5]])
|
1389 |
+
expected = [[3, 5, 5, 5, 4],
|
1390 |
+
[5, 5, 7, 5, 4],
|
1391 |
+
[6, 8, 8, 7, 5]]
|
1392 |
+
output = ndimage.rank_filter(array, -2, size=[2, 3])
|
1393 |
+
assert_array_almost_equal(expected, output)
|
1394 |
+
|
1395 |
+
def test_rank08(self):
|
1396 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1397 |
+
[5, 8, 3, 7, 1],
|
1398 |
+
[5, 6, 9, 3, 5]])
|
1399 |
+
expected = [[3, 3, 2, 4, 4],
|
1400 |
+
[5, 5, 5, 4, 4],
|
1401 |
+
[5, 6, 7, 5, 5]]
|
1402 |
+
output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
|
1403 |
+
assert_array_almost_equal(expected, output)
|
1404 |
+
output = ndimage.rank_filter(array, 3, size=(2, 3))
|
1405 |
+
assert_array_almost_equal(expected, output)
|
1406 |
+
output = ndimage.median_filter(array, size=(2, 3))
|
1407 |
+
assert_array_almost_equal(expected, output)
|
1408 |
+
|
1409 |
+
# non-separable: does not allow mode sequence
|
1410 |
+
with assert_raises(RuntimeError):
|
1411 |
+
ndimage.percentile_filter(array, 50.0, size=(2, 3),
|
1412 |
+
mode=['reflect', 'constant'])
|
1413 |
+
with assert_raises(RuntimeError):
|
1414 |
+
ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
|
1415 |
+
with assert_raises(RuntimeError):
|
1416 |
+
ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
|
1417 |
+
|
1418 |
+
@pytest.mark.parametrize('dtype', types)
def test_rank09(self, dtype):
    # Rank filtering with a non-rectangular footprint; percentile 35
    # selects the same element as rank 1 out of 3 footprint members.
    expected = [[3, 3, 2, 4, 4],
                [3, 5, 2, 5, 1],
                [5, 5, 8, 3, 5]]
    footprint = [[1, 0, 1], [0, 1, 0]]
    data = numpy.array([[3, 2, 5, 1, 4],
                        [5, 8, 3, 7, 1],
                        [5, 6, 9, 3, 5]], dtype)
    assert_array_almost_equal(
        expected, ndimage.rank_filter(data, 1, footprint=footprint))
    assert_array_almost_equal(
        expected, ndimage.percentile_filter(data, 35, footprint=footprint))
|
1431 |
+
|
1432 |
+
def test_rank10(self):
|
1433 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1434 |
+
[7, 6, 9, 3, 5],
|
1435 |
+
[5, 8, 3, 7, 1]])
|
1436 |
+
expected = [[2, 2, 1, 1, 1],
|
1437 |
+
[2, 3, 1, 3, 1],
|
1438 |
+
[5, 5, 3, 3, 1]]
|
1439 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1440 |
+
output = ndimage.rank_filter(array, 0, footprint=footprint)
|
1441 |
+
assert_array_almost_equal(expected, output)
|
1442 |
+
output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
|
1443 |
+
assert_array_almost_equal(expected, output)
|
1444 |
+
|
1445 |
+
def test_rank11(self):
|
1446 |
+
array = numpy.array([[3, 2, 5, 1, 4],
|
1447 |
+
[7, 6, 9, 3, 5],
|
1448 |
+
[5, 8, 3, 7, 1]])
|
1449 |
+
expected = [[3, 5, 5, 5, 4],
|
1450 |
+
[7, 7, 9, 9, 5],
|
1451 |
+
[7, 9, 8, 9, 7]]
|
1452 |
+
footprint = [[1, 0, 1], [1, 1, 0]]
|
1453 |
+
output = ndimage.rank_filter(array, -1, footprint=footprint)
|
1454 |
+
assert_array_almost_equal(expected, output)
|
1455 |
+
output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
|
1456 |
+
assert_array_almost_equal(expected, output)
|
1457 |
+
|
1458 |
+
@pytest.mark.parametrize('dtype', types)
def test_rank12(self, dtype):
    # With a 3-element footprint, rank 1 == 50th percentile == median.
    expected = [[3, 3, 2, 4, 4],
                [3, 5, 2, 5, 1],
                [5, 5, 8, 3, 5]]
    footprint = [[1, 0, 1], [0, 1, 0]]
    data = numpy.array([[3, 2, 5, 1, 4],
                        [5, 8, 3, 7, 1],
                        [5, 6, 9, 3, 5]], dtype)
    for result in (ndimage.rank_filter(data, 1, footprint=footprint),
                   ndimage.percentile_filter(data, 50.0,
                                             footprint=footprint),
                   ndimage.median_filter(data, footprint=footprint)):
        assert_array_almost_equal(expected, result)
|
1474 |
+
|
1475 |
+
@pytest.mark.parametrize('dtype', types)
def test_rank13(self, dtype):
    # A scalar origin shifts the footprint along every axis.
    expected = [[5, 2, 5, 1, 1],
                [5, 8, 3, 5, 5],
                [6, 6, 5, 5, 5]]
    footprint = [[1, 0, 1], [0, 1, 0]]
    data = numpy.array([[3, 2, 5, 1, 4],
                        [5, 8, 3, 7, 1],
                        [5, 6, 9, 3, 5]], dtype)
    result = ndimage.rank_filter(data, 1, footprint=footprint,
                                 origin=-1)
    assert_array_almost_equal(expected, result)
|
1487 |
+
|
1488 |
+
@pytest.mark.parametrize('dtype', types)
def test_rank14(self, dtype):
    # A per-axis origin shifts the footprint only along axis 0 here.
    expected = [[3, 5, 2, 5, 1],
                [5, 5, 8, 3, 5],
                [5, 6, 6, 5, 5]]
    footprint = [[1, 0, 1], [0, 1, 0]]
    data = numpy.array([[3, 2, 5, 1, 4],
                        [5, 8, 3, 7, 1],
                        [5, 6, 9, 3, 5]], dtype)
    result = ndimage.rank_filter(data, 1, footprint=footprint,
                                 origin=[-1, 0])
    assert_array_almost_equal(expected, result)
|
1500 |
+
|
1501 |
+
@pytest.mark.parametrize('dtype', types)
def test_rank15(self, dtype):
    # Rank 0 (minimum) combined with a per-axis origin shift.
    expected = [[2, 3, 1, 4, 1],
                [5, 3, 7, 1, 1],
                [5, 5, 3, 3, 3]]
    footprint = [[1, 0, 1], [0, 1, 0]]
    data = numpy.array([[3, 2, 5, 1, 4],
                        [5, 8, 3, 7, 1],
                        [5, 6, 9, 3, 5]], dtype)
    result = ndimage.rank_filter(data, 0, footprint=footprint,
                                 origin=[-1, 0])
    assert_array_almost_equal(expected, result)
|
1513 |
+
|
1514 |
+
@pytest.mark.parametrize('dtype', types)
def test_generic_filter1d01(self, dtype):
    # A hand-written 1-D correlation passed through generic_filter1d
    # must match correlate1d with the (normalized) same weights.
    weights = numpy.array([1.1, 2.2, 3.3])

    def _filter_func(input, output, fltr, total):
        fltr = fltr / total
        for idx in range(input.shape[0] - 2):
            output[idx] = input[idx] * fltr[0]
            output[idx] += input[idx + 1] * fltr[1]
            output[idx] += input[idx + 2] * fltr[2]

    a = numpy.arange(12, dtype=dtype)
    a.shape = (3, 4)
    r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
    r2 = ndimage.generic_filter1d(
        a, _filter_func, 3, axis=0, origin=-1,
        extra_arguments=(weights,),
        extra_keywords={'total': weights.sum()})
    assert_array_almost_equal(r1, r2)
|
1532 |
+
|
1533 |
+
@pytest.mark.parametrize('dtype', types)
def test_generic_filter01(self, dtype):
    # generic_filter with a footprint must match an equivalent correlate
    # (with the integer/float division matching the input dtype).
    filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
    footprint = numpy.array([[1, 0], [0, 1]])
    cf = numpy.array([1., 4.])

    def _filter_func(buffer, weights, total=1.0):
        # Bug fix: use the `weights` argument instead of closing over
        # `cf`, which silently ignored the parameter. Callers pass `cf`
        # via extra_arguments, so the result is unchanged.
        weights = weights / total
        return (buffer * weights).sum()

    a = numpy.arange(12, dtype=dtype)
    a.shape = (3, 4)
    r1 = ndimage.correlate(a, filter_ * footprint)
    if dtype in float_types:
        r1 /= 5
    else:
        r1 //= 5
    r2 = ndimage.generic_filter(
        a, _filter_func, footprint=footprint, extra_arguments=(cf,),
        extra_keywords={'total': cf.sum()})
    assert_array_almost_equal(r1, r2)

    # generic_filter is non-separable and doesn't allow a mode sequence.
    with assert_raises(RuntimeError):
        ndimage.generic_filter(
            a, _filter_func, mode=['reflect', 'reflect'],
            footprint=footprint, extra_arguments=(cf,),
            extra_keywords={'total': cf.sum()})
|
1561 |
+
|
1562 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [1, 1, 2]),
     ('wrap', [3, 1, 2]),
     ('reflect', [1, 1, 2]),
     ('mirror', [2, 1, 2]),
     ('constant', [0, 1, 2])]
)
def test_extend01(self, mode, expected_value):
    # Short left-anchored kernel probes one element beyond the left edge.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([1, 0])
    result = ndimage.correlate1d(data, weights, 0, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1575 |
+
|
1576 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [1, 1, 1]),
     ('wrap', [3, 1, 2]),
     ('reflect', [3, 3, 2]),
     ('mirror', [1, 2, 3]),
     ('constant', [0, 0, 0])]
)
def test_extend02(self, mode, expected_value):
    # A kernel much longer than the input exercises deep extension.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
    result = ndimage.correlate1d(data, weights, 0, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1589 |
+
|
1590 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [2, 3, 3]),
     ('wrap', [2, 3, 1]),
     ('reflect', [2, 3, 3]),
     ('mirror', [2, 3, 2]),
     ('constant', [2, 3, 0])]
)
def test_extend03(self, mode, expected_value):
    # Right-anchored kernel probes one element past the right edge.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([0, 0, 1])
    result = ndimage.correlate1d(data, weights, 0, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1603 |
+
|
1604 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [3, 3, 3]),
     ('wrap', [2, 3, 1]),
     ('reflect', [2, 1, 1]),
     ('mirror', [1, 2, 3]),
     ('constant', [0, 0, 0])]
)
def test_extend04(self, mode, expected_value):
    # Long right-anchored kernel: every output comes from extension.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
    result = ndimage.correlate1d(data, weights, 0, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1617 |
+
|
1618 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
     ('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
     ('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
     ('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
     ('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
)
def test_extend05(self, mode, expected_value):
    # 2-D correlate with a kernel reaching past the top-left corner.
    data = numpy.array([[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]])
    weights = numpy.array([[1, 0], [0, 0]])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1633 |
+
|
1634 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
     ('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
     ('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
     ('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
     ('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
)
def test_extend06(self, mode, expected_value):
    # 2-D correlate with a kernel reaching past the bottom-right corner.
    data = numpy.array([[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]])
    weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1649 |
+
|
1650 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [3, 3, 3]),
     ('wrap', [2, 3, 1]),
     ('reflect', [2, 1, 1]),
     ('mirror', [1, 2, 3]),
     ('constant', [0, 0, 0])]
)
def test_extend07(self, mode, expected_value):
    # Same scenario as test_extend04 but through n-d correlate.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1663 |
+
|
1664 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [[3], [3], [3]]),
     ('wrap', [[2], [3], [1]]),
     ('reflect', [[2], [1], [1]]),
     ('mirror', [[1], [2], [3]]),
     ('constant', [[0], [0], [0]])]
)
def test_extend08(self, mode, expected_value):
    # Column-vector variant of the long-kernel extension test.
    data = numpy.array([[1], [2], [3]])
    weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1677 |
+
|
1678 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [3, 3, 3]),
     ('wrap', [2, 3, 1]),
     ('reflect', [2, 1, 1]),
     ('mirror', [1, 2, 3]),
     ('constant', [0, 0, 0])]
)
def test_extend09(self, mode, expected_value):
    # NOTE(review): duplicates test_extend07 exactly; kept for parity.
    data = numpy.array([1, 2, 3])
    weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1691 |
+
|
1692 |
+
@pytest.mark.parametrize(
    'mode, expected_value',
    [('nearest', [[3], [3], [3]]),
     ('wrap', [[2], [3], [1]]),
     ('reflect', [[2], [1], [1]]),
     ('mirror', [[1], [2], [3]]),
     ('constant', [[0], [0], [0]])]
)
def test_extend10(self, mode, expected_value):
    # NOTE(review): duplicates test_extend08 exactly; kept for parity.
    data = numpy.array([[1], [2], [3]])
    weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
    result = ndimage.correlate(data, weights, mode=mode, cval=0)
    assert_array_equal(result, expected_value)
|
1705 |
+
|
1706 |
+
|
1707 |
+
def test_ticket_701():
    """generic_filter accepts both tuple and scalar size (ticket 701)."""
    arr = numpy.arange(4).reshape((2, 2))

    def func(x):
        return numpy.min(x)

    res_tuple = ndimage.generic_filter(arr, func, size=(1, 1))
    # Scalar size used to raise before ticket 701 was fixed.
    res_scalar = ndimage.generic_filter(arr, func, size=1)
    assert_equal(res_tuple, res_scalar)
|
1716 |
+
|
1717 |
+
|
1718 |
+
def test_gh_5430():
    """_normalize_sequence must accept numpy integer scalars (gh-5430)."""
    # Depending on the platform's C long width, numpy.int32 and/or
    # numpy.int64 used to be rejected; both must work, as must plain int.
    for sigma in (numpy.int32(1), numpy.int64(1), 1):
        out = ndimage._ni_support._normalize_sequence(sigma, 1)
        assert_equal(out, [sigma])
    # A real sequence keeps working as before.
    sigma = [1, 1]
    out = ndimage._ni_support._normalize_sequence(sigma, 2)
    assert_equal(out, sigma)
    # The original reporter's example must run to completion.
    x = numpy.random.normal(size=(256, 256))
    perlin = numpy.zeros_like(x)
    for i in 2 ** numpy.arange(6):
        perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i ** 2
    # Also covers gh-4106: numpy integer with a zero-rank request.
    x = numpy.int64(21)
    ndimage._ni_support._normalize_sequence(x, 0)
|
1745 |
+
|
1746 |
+
|
1747 |
+
def test_gaussian_kernel1d():
    """_gaussian_kernel1d matches closed-form Gaussian derivatives."""
    radius = 10
    sigma = 2
    sigma2 = sigma * sigma
    x = numpy.arange(-radius, radius + 1, dtype=numpy.double)
    phi_x = numpy.exp(-0.5 * x * x / sigma2)
    phi_x /= phi_x.sum()
    # Orders 0-3 are the normalized kernel and its first three derivatives.
    references = [
        phi_x,
        -phi_x * x / sigma2,
        phi_x * (x * x / sigma2 - 1) / sigma2,
        phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
    ]
    for order, reference in enumerate(references):
        assert_allclose(reference, _gaussian_kernel1d(sigma, order, radius))
|
1760 |
+
|
1761 |
+
|
1762 |
+
def test_orders_gauss():
    """Gaussian filters accept nonnegative orders and reject negatives."""
    arr = numpy.zeros((1,))
    for order in (0, 3):
        assert_equal(0, ndimage.gaussian_filter(arr, 1, order=order))
        assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1,
                                                  order=order))
    assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
    assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
|
1771 |
+
|
1772 |
+
|
1773 |
+
def test_valid_origins():
    """Regression test for #1311: out-of-range origins must raise."""
    def func(x):
        return numpy.mean(x)

    data = numpy.array([1, 2, 3, 4, 5], dtype=numpy.float64)
    assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
                  origin=2)
    assert_raises(ValueError, ndimage.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
                  origin=2)

    # Renamed the loop variable from `filter` so it no longer shadows
    # the builtin.
    for filter_func in [ndimage.uniform_filter, ndimage.minimum_filter,
                        ndimage.maximum_filter, ndimage.maximum_filter1d,
                        ndimage.median_filter, ndimage.minimum_filter1d]:
        # For size == 3 the valid origin range is -1..1, so these work.
        list(filter_func(data, 3, origin=-1))
        list(filter_func(data, 3, origin=1))
        # origin=2 must raise instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filter_func, data, 3, origin=2)
|
1795 |
+
|
1796 |
+
|
1797 |
+
def test_bad_convolve_and_correlate_origins():
    """Regression test for gh-822: bad origins raise, never crash."""
    # correlate / correlate1d with origins outside the kernel
    assert_raises(ValueError, ndimage.correlate1d,
                  [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
    assert_raises(ValueError, ndimage.correlate,
                  [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
    assert_raises(ValueError, ndimage.correlate,
                  numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, 1])
    # convolve / convolve1d with origins outside the kernel
    assert_raises(ValueError, ndimage.convolve1d,
                  numpy.arange(10), numpy.ones(3), origin=-2)
    assert_raises(ValueError, ndimage.convolve,
                  numpy.arange(10), numpy.ones(3), origin=[-2])
    assert_raises(ValueError, ndimage.convolve,
                  numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, -2])
|
1814 |
+
|
1815 |
+
|
1816 |
+
def test_multiple_modes():
    """A repeated per-axis mode sequence must equal the single mode."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    single = 'reflect'
    paired = ['reflect', 'reflect']

    # Each entry applies one filter with the given mode argument.
    calls = [
        lambda m: ndimage.gaussian_filter(arr, 1, mode=m),
        lambda m: ndimage.prewitt(arr, mode=m),
        lambda m: ndimage.sobel(arr, mode=m),
        lambda m: ndimage.laplace(arr, mode=m),
        lambda m: ndimage.gaussian_laplace(arr, 1, mode=m),
        lambda m: ndimage.maximum_filter(arr, size=5, mode=m),
        lambda m: ndimage.minimum_filter(arr, size=5, mode=m),
        lambda m: ndimage.gaussian_gradient_magnitude(arr, 1, mode=m),
        lambda m: ndimage.uniform_filter(arr, 5, mode=m),
    ]
    for apply_filter in calls:
        assert_equal(apply_filter(single), apply_filter(paired))
|
1844 |
+
|
1845 |
+
|
1846 |
+
def test_multiple_modes_sequentially():
    """A mode sequence equals applying the 1-D filters axis by axis."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    modes = ['reflect', 'wrap']

    expected = ndimage.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = ndimage.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected, ndimage.gaussian_filter(arr, 1, mode=modes))

    expected = ndimage.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = ndimage.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected, ndimage.uniform_filter(arr, 5, mode=modes))

    for filter1d, filternd in (
            (ndimage.maximum_filter1d, ndimage.maximum_filter),
            (ndimage.minimum_filter1d, ndimage.minimum_filter)):
        expected = filter1d(arr, size=5, axis=0, mode=modes[0])
        expected = filter1d(expected, size=5, axis=1, mode=modes[1])
        assert_equal(expected, filternd(arr, size=5, mode=modes))
|
1877 |
+
|
1878 |
+
|
1879 |
+
def test_multiple_modes_prewitt():
    """prewitt accepts a per-axis extrapolation mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[1., -3., 2.],
                            [1., -2., 1.],
                            [1., -1., 0.]])
    result = ndimage.prewitt(arr, mode=['reflect', 'wrap'])
    assert_equal(expected, result)
|
1893 |
+
|
1894 |
+
|
1895 |
+
def test_multiple_modes_sobel():
    """sobel accepts a per-axis extrapolation mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[1., -4., 3.],
                            [2., -3., 1.],
                            [1., -1., 0.]])
    result = ndimage.sobel(arr, mode=['reflect', 'wrap'])
    assert_equal(expected, result)
|
1909 |
+
|
1910 |
+
|
1911 |
+
def test_multiple_modes_laplace():
    """laplace accepts a per-axis extrapolation mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[-2., 2., 1.],
                            [-2., -3., 2.],
                            [1., 1., 0.]])
    result = ndimage.laplace(arr, mode=['reflect', 'wrap'])
    assert_equal(expected, result)
|
1925 |
+
|
1926 |
+
|
1927 |
+
def test_multiple_modes_gaussian_laplace():
    """gaussian_laplace accepts a per-axis mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[-0.28438687, 0.01559809, 0.19773499],
                            [-0.36630503, -0.20069774, 0.07483620],
                            [0.15849176, 0.18495566, 0.21934094]])
    result = ndimage.gaussian_laplace(arr, 1, mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
|
1941 |
+
|
1942 |
+
|
1943 |
+
def test_multiple_modes_gaussian_gradient_magnitude():
    """gaussian_gradient_magnitude accepts a per-axis mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[0.04928965, 0.09745625, 0.06405368],
                            [0.23056905, 0.14025305, 0.04550846],
                            [0.19894369, 0.14950060, 0.06796850]])
    result = ndimage.gaussian_gradient_magnitude(arr, 1,
                                                 mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
|
1959 |
+
|
1960 |
+
|
1961 |
+
def test_multiple_modes_uniform():
    """uniform_filter accepts a per-axis extrapolation mode sequence."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[0.32, 0.40, 0.48],
                            [0.20, 0.28, 0.32],
                            [0.28, 0.32, 0.40]])
    result = ndimage.uniform_filter(arr, 5, mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
|
1975 |
+
|
1976 |
+
|
1977 |
+
def test_gaussian_truncate():
    """Truncation controls the support of the Gaussian kernels.

    Only the number/extent of nonzero output elements is checked.
    """
    img = numpy.zeros((100, 100), float)
    img[50, 50] = 1
    # kernel width = 2 * int(truncate * sigma + 0.5) + 1
    for truncate, width in [(2, 21), (5, 51)]:
        nonzeros = (ndimage.gaussian_filter(img, 5,
                                            truncate=truncate) > 0).sum()
        assert_equal(nonzeros, width ** 2)

    # Sequence-valued sigma: each axis gets its own kernel width.
    smoothed = ndimage.gaussian_filter(img, [0.5, 2.5], truncate=3.5)
    positive = smoothed > 0
    # 2*int(2.5*3.5 + 0.5) + 1 == 19 columns reached
    assert_equal(positive.any(axis=0).sum(), 19)
    # 2*int(0.5*3.5 + 0.5) + 1 == 5 rows reached
    assert_equal(positive.any(axis=1).sum(), 5)

    # gaussian_filter1d
    line = numpy.zeros(51)
    line[25] = 1
    response = ndimage.gaussian_filter1d(line, sigma=2, truncate=3.5)
    assert_equal((response > 0).sum(), 15)

    # gaussian_laplace / gaussian_gradient_magnitude honour truncate too.
    for func in (ndimage.gaussian_laplace,
                 ndimage.gaussian_gradient_magnitude):
        response = func(line, sigma=2, truncate=3.5)
        support = numpy.nonzero(response != 0)[0]
        assert_equal(numpy.ptp(support) + 1, 15)
|
2016 |
+
|
2017 |
+
|
2018 |
+
def test_gaussian_radius():
    """radius == int(truncate * sigma + 0.5) reproduces truncate results."""
    # gaussian_filter1d
    line = numpy.zeros(7)
    line[3] = 1
    assert_equal(ndimage.gaussian_filter1d(line, sigma=2, truncate=1.5),
                 ndimage.gaussian_filter1d(line, sigma=2, radius=3))

    # gaussian_filter with a scalar sigma
    img = numpy.zeros((9, 9))
    img[4, 4] = 1
    assert_equal(ndimage.gaussian_filter(img, sigma=0.5, truncate=3.5),
                 ndimage.gaussian_filter(img, sigma=0.5, radius=2))

    # gaussian_filter with sequence sigma and per-axis radius
    img = numpy.zeros((50, 50))
    img[25, 25] = 1
    assert_equal(
        ndimage.gaussian_filter(img, sigma=[0.5, 2.5], truncate=3.5),
        ndimage.gaussian_filter(img, sigma=[0.5, 2.5], radius=[2, 9]))
|
2042 |
+
|
2043 |
+
|
2044 |
+
def test_gaussian_radius_invalid():
    """radius must be a nonnegative integer."""
    for bad_radius in (-1, 1.1):
        with assert_raises(ValueError):
            ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1,
                                      radius=bad_radius)
|
2050 |
+
|
2051 |
+
|
2052 |
+
class TestThreading:
    """Filters must give identical results when run from worker threads."""

    def check_func_serial(self, n, fun, args, out):
        # Baseline: run the filter n times sequentially.
        for i in range(n):
            fun(*args, output=out[i])

    def check_func_thread(self, n, fun, args, out):
        from threading import Thread
        workers = [Thread(target=fun, args=args,
                          kwargs={'output': out[idx]})
                   for idx in range(n)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

    def test_correlate1d(self):
        d = numpy.random.randn(5000)
        serial = numpy.empty((4, d.size))
        threaded = numpy.empty_like(serial)
        k = numpy.arange(5)
        self.check_func_serial(4, ndimage.correlate1d, (d, k), serial)
        self.check_func_thread(4, ndimage.correlate1d, (d, k), threaded)
        assert_array_equal(serial, threaded)

    def test_correlate(self):
        d = numpy.random.randn(500, 500)
        k = numpy.random.randn(10, 10)
        serial = numpy.empty([4] + list(d.shape))
        threaded = numpy.empty_like(serial)
        self.check_func_serial(4, ndimage.correlate, (d, k), serial)
        self.check_func_thread(4, ndimage.correlate, (d, k), threaded)
        assert_array_equal(serial, threaded)

    def test_median_filter(self):
        d = numpy.random.randn(500, 500)
        serial = numpy.empty([4] + list(d.shape))
        threaded = numpy.empty_like(serial)
        self.check_func_serial(4, ndimage.median_filter, (d, 3), serial)
        self.check_func_thread(4, ndimage.median_filter, (d, 3), threaded)
        assert_array_equal(serial, threaded)

    def test_uniform_filter1d(self):
        d = numpy.random.randn(5000)
        serial = numpy.empty((4, d.size))
        threaded = numpy.empty_like(serial)
        self.check_func_serial(4, ndimage.uniform_filter1d, (d, 5), serial)
        self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), threaded)
        assert_array_equal(serial, threaded)

    def test_minmax_filter(self):
        d = numpy.random.randn(500, 500)
        serial = numpy.empty([4] + list(d.shape))
        threaded = numpy.empty_like(serial)
        for fun in (ndimage.maximum_filter, ndimage.minimum_filter):
            self.check_func_serial(4, fun, (d, 3), serial)
            self.check_func_thread(4, fun, (d, 3), threaded)
            assert_array_equal(serial, threaded)
|
2108 |
+
|
2109 |
+
|
2110 |
+
def test_minmaximum_filter1d():
|
2111 |
+
# Regression gh-3898
|
2112 |
+
in_ = numpy.arange(10)
|
2113 |
+
out = ndimage.minimum_filter1d(in_, 1)
|
2114 |
+
assert_equal(in_, out)
|
2115 |
+
out = ndimage.maximum_filter1d(in_, 1)
|
2116 |
+
assert_equal(in_, out)
|
2117 |
+
# Test reflect
|
2118 |
+
out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
|
2119 |
+
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
|
2120 |
+
out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
|
2121 |
+
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
|
2122 |
+
# Test constant
|
2123 |
+
out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
|
2124 |
+
assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
|
2125 |
+
out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
|
2126 |
+
assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
|
2127 |
+
# Test nearest
|
2128 |
+
out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
|
2129 |
+
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
|
2130 |
+
out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
|
2131 |
+
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
|
2132 |
+
# Test wrap
|
2133 |
+
out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
|
2134 |
+
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
|
2135 |
+
out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
|
2136 |
+
assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
|
2137 |
+
|
2138 |
+
|
2139 |
+
def test_uniform_filter1d_roundoff_errors():
|
2140 |
+
# gh-6930
|
2141 |
+
in_ = numpy.repeat([0, 1, 0], [9, 9, 9])
|
2142 |
+
for filter_size in range(3, 10):
|
2143 |
+
out = ndimage.uniform_filter1d(in_, filter_size)
|
2144 |
+
assert_equal(out.sum(), 10 - filter_size)
|
2145 |
+
|
2146 |
+
|
2147 |
+
def test_footprint_all_zeros():
|
2148 |
+
# regression test for gh-6876: footprint of all zeros segfaults
|
2149 |
+
arr = numpy.random.randint(0, 100, (100, 100))
|
2150 |
+
kernel = numpy.zeros((3, 3), bool)
|
2151 |
+
with assert_raises(ValueError):
|
2152 |
+
ndimage.maximum_filter(arr, footprint=kernel)
|
2153 |
+
|
2154 |
+
|
2155 |
+
def test_gaussian_filter():
|
2156 |
+
# Test gaussian filter with numpy.float16
|
2157 |
+
# gh-8207
|
2158 |
+
data = numpy.array([1], dtype=numpy.float16)
|
2159 |
+
sigma = 1.0
|
2160 |
+
with assert_raises(RuntimeError):
|
2161 |
+
ndimage.gaussian_filter(data, sigma)
|
2162 |
+
|
2163 |
+
|
2164 |
+
def test_rank_filter_noninteger_rank():
|
2165 |
+
# regression test for issue 9388: ValueError for
|
2166 |
+
# non integer rank when performing rank_filter
|
2167 |
+
arr = numpy.random.random((10, 20, 30))
|
2168 |
+
assert_raises(TypeError, ndimage.rank_filter, arr, 0.5,
|
2169 |
+
footprint=numpy.ones((1, 1, 10), dtype=bool))
|
2170 |
+
|
2171 |
+
|
2172 |
+
def test_size_footprint_both_set():
|
2173 |
+
# test for input validation, expect user warning when
|
2174 |
+
# size and footprint is set
|
2175 |
+
with suppress_warnings() as sup:
|
2176 |
+
sup.filter(UserWarning,
|
2177 |
+
"ignoring size because footprint is set")
|
2178 |
+
arr = numpy.random.random((10, 20, 30))
|
2179 |
+
ndimage.rank_filter(arr, 5, size=2, footprint=numpy.ones((1, 1, 10),
|
2180 |
+
dtype=bool))
|
2181 |
+
|
2182 |
+
|
2183 |
+
def test_byte_order_median():
|
2184 |
+
"""Regression test for #413: median_filter does not handle bytes orders."""
|
2185 |
+
a = numpy.arange(9, dtype='<f4').reshape(3, 3)
|
2186 |
+
ref = ndimage.median_filter(a, (3, 3))
|
2187 |
+
b = numpy.arange(9, dtype='>f4').reshape(3, 3)
|
2188 |
+
t = ndimage.median_filter(b, (3, 3))
|
2189 |
+
assert_array_almost_equal(ref, t)
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_fourier.py
ADDED
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
from numpy import fft
|
3 |
+
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
|
4 |
+
assert_equal)
|
5 |
+
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from scipy import ndimage
|
9 |
+
|
10 |
+
|
11 |
+
class TestNdimageFourier:
|
12 |
+
|
13 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
|
14 |
+
@pytest.mark.parametrize('dtype, dec',
|
15 |
+
[(numpy.float32, 6), (numpy.float64, 14)])
|
16 |
+
def test_fourier_gaussian_real01(self, shape, dtype, dec):
|
17 |
+
a = numpy.zeros(shape, dtype)
|
18 |
+
a[0, 0] = 1.0
|
19 |
+
a = fft.rfft(a, shape[0], 0)
|
20 |
+
a = fft.fft(a, shape[1], 1)
|
21 |
+
a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
|
22 |
+
a = fft.ifft(a, shape[1], 1)
|
23 |
+
a = fft.irfft(a, shape[0], 0)
|
24 |
+
assert_almost_equal(ndimage.sum(a), 1, decimal=dec)
|
25 |
+
|
26 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
|
27 |
+
@pytest.mark.parametrize('dtype, dec',
|
28 |
+
[(numpy.complex64, 6), (numpy.complex128, 14)])
|
29 |
+
def test_fourier_gaussian_complex01(self, shape, dtype, dec):
|
30 |
+
a = numpy.zeros(shape, dtype)
|
31 |
+
a[0, 0] = 1.0
|
32 |
+
a = fft.fft(a, shape[0], 0)
|
33 |
+
a = fft.fft(a, shape[1], 1)
|
34 |
+
a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)
|
35 |
+
a = fft.ifft(a, shape[1], 1)
|
36 |
+
a = fft.ifft(a, shape[0], 0)
|
37 |
+
assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
|
38 |
+
|
39 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
|
40 |
+
@pytest.mark.parametrize('dtype, dec',
|
41 |
+
[(numpy.float32, 6), (numpy.float64, 14)])
|
42 |
+
def test_fourier_uniform_real01(self, shape, dtype, dec):
|
43 |
+
a = numpy.zeros(shape, dtype)
|
44 |
+
a[0, 0] = 1.0
|
45 |
+
a = fft.rfft(a, shape[0], 0)
|
46 |
+
a = fft.fft(a, shape[1], 1)
|
47 |
+
a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
|
48 |
+
a = fft.ifft(a, shape[1], 1)
|
49 |
+
a = fft.irfft(a, shape[0], 0)
|
50 |
+
assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
|
51 |
+
|
52 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
|
53 |
+
@pytest.mark.parametrize('dtype, dec',
|
54 |
+
[(numpy.complex64, 6), (numpy.complex128, 14)])
|
55 |
+
def test_fourier_uniform_complex01(self, shape, dtype, dec):
|
56 |
+
a = numpy.zeros(shape, dtype)
|
57 |
+
a[0, 0] = 1.0
|
58 |
+
a = fft.fft(a, shape[0], 0)
|
59 |
+
a = fft.fft(a, shape[1], 1)
|
60 |
+
a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
|
61 |
+
a = fft.ifft(a, shape[1], 1)
|
62 |
+
a = fft.ifft(a, shape[0], 0)
|
63 |
+
assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
|
64 |
+
|
65 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
|
66 |
+
@pytest.mark.parametrize('dtype, dec',
|
67 |
+
[(numpy.float32, 4), (numpy.float64, 11)])
|
68 |
+
def test_fourier_shift_real01(self, shape, dtype, dec):
|
69 |
+
expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
|
70 |
+
expected.shape = shape
|
71 |
+
a = fft.rfft(expected, shape[0], 0)
|
72 |
+
a = fft.fft(a, shape[1], 1)
|
73 |
+
a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
|
74 |
+
a = fft.ifft(a, shape[1], 1)
|
75 |
+
a = fft.irfft(a, shape[0], 0)
|
76 |
+
assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],
|
77 |
+
decimal=dec)
|
78 |
+
assert_array_almost_equal(a.imag, numpy.zeros(shape),
|
79 |
+
decimal=dec)
|
80 |
+
|
81 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
|
82 |
+
@pytest.mark.parametrize('dtype, dec',
|
83 |
+
[(numpy.complex64, 4), (numpy.complex128, 11)])
|
84 |
+
def test_fourier_shift_complex01(self, shape, dtype, dec):
|
85 |
+
expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
|
86 |
+
expected.shape = shape
|
87 |
+
a = fft.fft(expected, shape[0], 0)
|
88 |
+
a = fft.fft(a, shape[1], 1)
|
89 |
+
a = ndimage.fourier_shift(a, [1, 1], -1, 0)
|
90 |
+
a = fft.ifft(a, shape[1], 1)
|
91 |
+
a = fft.ifft(a, shape[0], 0)
|
92 |
+
assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],
|
93 |
+
decimal=dec)
|
94 |
+
assert_array_almost_equal(a.imag, numpy.zeros(shape),
|
95 |
+
decimal=dec)
|
96 |
+
|
97 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
|
98 |
+
@pytest.mark.parametrize('dtype, dec',
|
99 |
+
[(numpy.float32, 5), (numpy.float64, 14)])
|
100 |
+
def test_fourier_ellipsoid_real01(self, shape, dtype, dec):
|
101 |
+
a = numpy.zeros(shape, dtype)
|
102 |
+
a[0, 0] = 1.0
|
103 |
+
a = fft.rfft(a, shape[0], 0)
|
104 |
+
a = fft.fft(a, shape[1], 1)
|
105 |
+
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
|
106 |
+
shape[0], 0)
|
107 |
+
a = fft.ifft(a, shape[1], 1)
|
108 |
+
a = fft.irfft(a, shape[0], 0)
|
109 |
+
assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
|
110 |
+
|
111 |
+
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
|
112 |
+
@pytest.mark.parametrize('dtype, dec',
|
113 |
+
[(numpy.complex64, 5), (numpy.complex128, 14)])
|
114 |
+
def test_fourier_ellipsoid_complex01(self, shape, dtype, dec):
|
115 |
+
a = numpy.zeros(shape, dtype)
|
116 |
+
a[0, 0] = 1.0
|
117 |
+
a = fft.fft(a, shape[0], 0)
|
118 |
+
a = fft.fft(a, shape[1], 1)
|
119 |
+
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
|
120 |
+
a = fft.ifft(a, shape[1], 1)
|
121 |
+
a = fft.ifft(a, shape[0], 0)
|
122 |
+
assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
|
123 |
+
|
124 |
+
def test_fourier_ellipsoid_unimplemented_ndim(self):
|
125 |
+
# arrays with ndim > 3 raise NotImplementedError
|
126 |
+
x = numpy.ones((4, 6, 8, 10), dtype=numpy.complex128)
|
127 |
+
with pytest.raises(NotImplementedError):
|
128 |
+
ndimage.fourier_ellipsoid(x, 3)
|
129 |
+
|
130 |
+
def test_fourier_ellipsoid_1d_complex(self):
|
131 |
+
# expected result of 1d ellipsoid is the same as for fourier_uniform
|
132 |
+
for shape in [(32, ), (31, )]:
|
133 |
+
for type_, dec in zip([numpy.complex64, numpy.complex128],
|
134 |
+
[5, 14]):
|
135 |
+
x = numpy.ones(shape, dtype=type_)
|
136 |
+
a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
|
137 |
+
b = ndimage.fourier_uniform(x, 5, -1, 0)
|
138 |
+
assert_array_almost_equal(a, b, decimal=dec)
|
139 |
+
|
140 |
+
@pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
|
141 |
+
@pytest.mark.parametrize('dtype',
|
142 |
+
[numpy.float32, numpy.float64,
|
143 |
+
numpy.complex64, numpy.complex128])
|
144 |
+
@pytest.mark.parametrize('test_func',
|
145 |
+
[ndimage.fourier_ellipsoid,
|
146 |
+
ndimage.fourier_gaussian,
|
147 |
+
ndimage.fourier_uniform])
|
148 |
+
def test_fourier_zero_length_dims(self, shape, dtype, test_func):
|
149 |
+
a = numpy.ones(shape, dtype)
|
150 |
+
b = test_func(a, 3)
|
151 |
+
assert_equal(a, b)
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
ADDED
@@ -0,0 +1,1327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
|
3 |
+
import numpy
|
4 |
+
from numpy.testing import (assert_, assert_equal, assert_array_equal,
|
5 |
+
assert_array_almost_equal, assert_allclose,
|
6 |
+
suppress_warnings)
|
7 |
+
import pytest
|
8 |
+
from pytest import raises as assert_raises
|
9 |
+
import scipy.ndimage as ndimage
|
10 |
+
|
11 |
+
from . import types
|
12 |
+
|
13 |
+
eps = 1e-12
|
14 |
+
|
15 |
+
ndimage_to_numpy_mode = {
|
16 |
+
'mirror': 'reflect',
|
17 |
+
'reflect': 'symmetric',
|
18 |
+
'grid-mirror': 'symmetric',
|
19 |
+
'grid-wrap': 'wrap',
|
20 |
+
'nearest': 'edge',
|
21 |
+
'grid-constant': 'constant',
|
22 |
+
}
|
23 |
+
|
24 |
+
|
25 |
+
class TestNdimageInterpolation:
|
26 |
+
|
27 |
+
@pytest.mark.parametrize(
|
28 |
+
'mode, expected_value',
|
29 |
+
[('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
|
30 |
+
('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
|
31 |
+
('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
|
32 |
+
('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
|
33 |
+
('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
|
34 |
+
('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
|
35 |
+
('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
|
36 |
+
)
|
37 |
+
def test_boundaries(self, mode, expected_value):
|
38 |
+
def shift(x):
|
39 |
+
return (x[0] + 0.5,)
|
40 |
+
|
41 |
+
data = numpy.array([1, 2, 3, 4.])
|
42 |
+
assert_array_equal(
|
43 |
+
expected_value,
|
44 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
45 |
+
output_shape=(7,), order=1))
|
46 |
+
|
47 |
+
@pytest.mark.parametrize(
|
48 |
+
'mode, expected_value',
|
49 |
+
[('nearest', [1, 1, 2, 3]),
|
50 |
+
('wrap', [3, 1, 2, 3]),
|
51 |
+
('grid-wrap', [4, 1, 2, 3]),
|
52 |
+
('mirror', [2, 1, 2, 3]),
|
53 |
+
('reflect', [1, 1, 2, 3]),
|
54 |
+
('constant', [-1, 1, 2, 3]),
|
55 |
+
('grid-constant', [-1, 1, 2, 3])]
|
56 |
+
)
|
57 |
+
def test_boundaries2(self, mode, expected_value):
|
58 |
+
def shift(x):
|
59 |
+
return (x[0] - 0.9,)
|
60 |
+
|
61 |
+
data = numpy.array([1, 2, 3, 4])
|
62 |
+
assert_array_equal(
|
63 |
+
expected_value,
|
64 |
+
ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
|
65 |
+
output_shape=(4,)))
|
66 |
+
|
67 |
+
@pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
|
68 |
+
'grid-wrap', 'grid-constant',
|
69 |
+
'nearest'])
|
70 |
+
@pytest.mark.parametrize('order', range(6))
|
71 |
+
def test_boundary_spline_accuracy(self, mode, order):
|
72 |
+
"""Tests based on examples from gh-2640"""
|
73 |
+
data = numpy.arange(-6, 7, dtype=float)
|
74 |
+
x = numpy.linspace(-8, 15, num=1000)
|
75 |
+
y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
|
76 |
+
|
77 |
+
# compute expected value using explicit padding via numpy.pad
|
78 |
+
npad = 32
|
79 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
80 |
+
padded = numpy.pad(data, npad, mode=pad_mode)
|
81 |
+
expected = ndimage.map_coordinates(padded, [npad + x], order=order,
|
82 |
+
mode=mode)
|
83 |
+
|
84 |
+
atol = 1e-5 if mode == 'grid-constant' else 1e-12
|
85 |
+
assert_allclose(y, expected, rtol=1e-7, atol=atol)
|
86 |
+
|
87 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
88 |
+
@pytest.mark.parametrize('dtype', types)
|
89 |
+
def test_spline01(self, dtype, order):
|
90 |
+
data = numpy.ones([], dtype)
|
91 |
+
out = ndimage.spline_filter(data, order=order)
|
92 |
+
assert_array_almost_equal(out, 1)
|
93 |
+
|
94 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
95 |
+
@pytest.mark.parametrize('dtype', types)
|
96 |
+
def test_spline02(self, dtype, order):
|
97 |
+
data = numpy.array([1], dtype)
|
98 |
+
out = ndimage.spline_filter(data, order=order)
|
99 |
+
assert_array_almost_equal(out, [1])
|
100 |
+
|
101 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
102 |
+
@pytest.mark.parametrize('dtype', types)
|
103 |
+
def test_spline03(self, dtype, order):
|
104 |
+
data = numpy.ones([], dtype)
|
105 |
+
out = ndimage.spline_filter(data, order, output=dtype)
|
106 |
+
assert_array_almost_equal(out, 1)
|
107 |
+
|
108 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
109 |
+
@pytest.mark.parametrize('dtype', types)
|
110 |
+
def test_spline04(self, dtype, order):
|
111 |
+
data = numpy.ones([4], dtype)
|
112 |
+
out = ndimage.spline_filter(data, order)
|
113 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
114 |
+
|
115 |
+
@pytest.mark.parametrize('order', range(2, 6))
|
116 |
+
@pytest.mark.parametrize('dtype', types)
|
117 |
+
def test_spline05(self, dtype, order):
|
118 |
+
data = numpy.ones([4, 4], dtype)
|
119 |
+
out = ndimage.spline_filter(data, order=order)
|
120 |
+
assert_array_almost_equal(out, [[1, 1, 1, 1],
|
121 |
+
[1, 1, 1, 1],
|
122 |
+
[1, 1, 1, 1],
|
123 |
+
[1, 1, 1, 1]])
|
124 |
+
|
125 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
126 |
+
def test_geometric_transform01(self, order):
|
127 |
+
data = numpy.array([1])
|
128 |
+
|
129 |
+
def mapping(x):
|
130 |
+
return x
|
131 |
+
|
132 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
133 |
+
order=order)
|
134 |
+
assert_array_almost_equal(out, [1])
|
135 |
+
|
136 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
137 |
+
def test_geometric_transform02(self, order):
|
138 |
+
data = numpy.ones([4])
|
139 |
+
|
140 |
+
def mapping(x):
|
141 |
+
return x
|
142 |
+
|
143 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
144 |
+
order=order)
|
145 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
146 |
+
|
147 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
148 |
+
def test_geometric_transform03(self, order):
|
149 |
+
data = numpy.ones([4])
|
150 |
+
|
151 |
+
def mapping(x):
|
152 |
+
return (x[0] - 1,)
|
153 |
+
|
154 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
155 |
+
order=order)
|
156 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
157 |
+
|
158 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
159 |
+
def test_geometric_transform04(self, order):
|
160 |
+
data = numpy.array([4, 1, 3, 2])
|
161 |
+
|
162 |
+
def mapping(x):
|
163 |
+
return (x[0] - 1,)
|
164 |
+
|
165 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
166 |
+
order=order)
|
167 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
168 |
+
|
169 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
170 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
171 |
+
def test_geometric_transform05(self, order, dtype):
|
172 |
+
data = numpy.array([[1, 1, 1, 1],
|
173 |
+
[1, 1, 1, 1],
|
174 |
+
[1, 1, 1, 1]], dtype=dtype)
|
175 |
+
expected = numpy.array([[0, 1, 1, 1],
|
176 |
+
[0, 1, 1, 1],
|
177 |
+
[0, 1, 1, 1]], dtype=dtype)
|
178 |
+
if data.dtype.kind == 'c':
|
179 |
+
data -= 1j * data
|
180 |
+
expected -= 1j * expected
|
181 |
+
|
182 |
+
def mapping(x):
|
183 |
+
return (x[0], x[1] - 1)
|
184 |
+
|
185 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
186 |
+
order=order)
|
187 |
+
assert_array_almost_equal(out, expected)
|
188 |
+
|
189 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
190 |
+
def test_geometric_transform06(self, order):
|
191 |
+
data = numpy.array([[4, 1, 3, 2],
|
192 |
+
[7, 6, 8, 5],
|
193 |
+
[3, 5, 3, 6]])
|
194 |
+
|
195 |
+
def mapping(x):
|
196 |
+
return (x[0], x[1] - 1)
|
197 |
+
|
198 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
199 |
+
order=order)
|
200 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
201 |
+
[0, 7, 6, 8],
|
202 |
+
[0, 3, 5, 3]])
|
203 |
+
|
204 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
205 |
+
def test_geometric_transform07(self, order):
|
206 |
+
data = numpy.array([[4, 1, 3, 2],
|
207 |
+
[7, 6, 8, 5],
|
208 |
+
[3, 5, 3, 6]])
|
209 |
+
|
210 |
+
def mapping(x):
|
211 |
+
return (x[0] - 1, x[1])
|
212 |
+
|
213 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
214 |
+
order=order)
|
215 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
216 |
+
[4, 1, 3, 2],
|
217 |
+
[7, 6, 8, 5]])
|
218 |
+
|
219 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
220 |
+
def test_geometric_transform08(self, order):
|
221 |
+
data = numpy.array([[4, 1, 3, 2],
|
222 |
+
[7, 6, 8, 5],
|
223 |
+
[3, 5, 3, 6]])
|
224 |
+
|
225 |
+
def mapping(x):
|
226 |
+
return (x[0] - 1, x[1] - 1)
|
227 |
+
|
228 |
+
out = ndimage.geometric_transform(data, mapping, data.shape,
|
229 |
+
order=order)
|
230 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
231 |
+
[0, 4, 1, 3],
|
232 |
+
[0, 7, 6, 8]])
|
233 |
+
|
234 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
235 |
+
def test_geometric_transform10(self, order):
|
236 |
+
data = numpy.array([[4, 1, 3, 2],
|
237 |
+
[7, 6, 8, 5],
|
238 |
+
[3, 5, 3, 6]])
|
239 |
+
|
240 |
+
def mapping(x):
|
241 |
+
return (x[0] - 1, x[1] - 1)
|
242 |
+
|
243 |
+
if (order > 1):
|
244 |
+
filtered = ndimage.spline_filter(data, order=order)
|
245 |
+
else:
|
246 |
+
filtered = data
|
247 |
+
out = ndimage.geometric_transform(filtered, mapping, data.shape,
|
248 |
+
order=order, prefilter=False)
|
249 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
250 |
+
[0, 4, 1, 3],
|
251 |
+
[0, 7, 6, 8]])
|
252 |
+
|
253 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
254 |
+
def test_geometric_transform13(self, order):
|
255 |
+
data = numpy.ones([2], numpy.float64)
|
256 |
+
|
257 |
+
def mapping(x):
|
258 |
+
return (x[0] // 2,)
|
259 |
+
|
260 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
261 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
262 |
+
|
263 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
264 |
+
def test_geometric_transform14(self, order):
|
265 |
+
data = [1, 5, 2, 6, 3, 7, 4, 4]
|
266 |
+
|
267 |
+
def mapping(x):
|
268 |
+
return (2 * x[0],)
|
269 |
+
|
270 |
+
out = ndimage.geometric_transform(data, mapping, [4], order=order)
|
271 |
+
assert_array_almost_equal(out, [1, 2, 3, 4])
|
272 |
+
|
273 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
274 |
+
def test_geometric_transform15(self, order):
|
275 |
+
data = [1, 2, 3, 4]
|
276 |
+
|
277 |
+
def mapping(x):
|
278 |
+
return (x[0] / 2,)
|
279 |
+
|
280 |
+
out = ndimage.geometric_transform(data, mapping, [8], order=order)
|
281 |
+
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
|
282 |
+
|
283 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
284 |
+
def test_geometric_transform16(self, order):
|
285 |
+
data = [[1, 2, 3, 4],
|
286 |
+
[5, 6, 7, 8],
|
287 |
+
[9.0, 10, 11, 12]]
|
288 |
+
|
289 |
+
def mapping(x):
|
290 |
+
return (x[0], x[1] * 2)
|
291 |
+
|
292 |
+
out = ndimage.geometric_transform(data, mapping, (3, 2),
|
293 |
+
order=order)
|
294 |
+
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
|
295 |
+
|
296 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
297 |
+
def test_geometric_transform17(self, order):
|
298 |
+
data = [[1, 2, 3, 4],
|
299 |
+
[5, 6, 7, 8],
|
300 |
+
[9, 10, 11, 12]]
|
301 |
+
|
302 |
+
def mapping(x):
|
303 |
+
return (x[0] * 2, x[1])
|
304 |
+
|
305 |
+
out = ndimage.geometric_transform(data, mapping, (1, 4),
|
306 |
+
order=order)
|
307 |
+
assert_array_almost_equal(out, [[1, 2, 3, 4]])
|
308 |
+
|
309 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
310 |
+
def test_geometric_transform18(self, order):
|
311 |
+
data = [[1, 2, 3, 4],
|
312 |
+
[5, 6, 7, 8],
|
313 |
+
[9, 10, 11, 12]]
|
314 |
+
|
315 |
+
def mapping(x):
|
316 |
+
return (x[0] * 2, x[1] * 2)
|
317 |
+
|
318 |
+
out = ndimage.geometric_transform(data, mapping, (1, 2),
|
319 |
+
order=order)
|
320 |
+
assert_array_almost_equal(out, [[1, 3]])
|
321 |
+
|
322 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
323 |
+
def test_geometric_transform19(self, order):
|
324 |
+
data = [[1, 2, 3, 4],
|
325 |
+
[5, 6, 7, 8],
|
326 |
+
[9, 10, 11, 12]]
|
327 |
+
|
328 |
+
def mapping(x):
|
329 |
+
return (x[0], x[1] / 2)
|
330 |
+
|
331 |
+
out = ndimage.geometric_transform(data, mapping, (3, 8),
|
332 |
+
order=order)
|
333 |
+
assert_array_almost_equal(out[..., ::2], data)
|
334 |
+
|
335 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
336 |
+
def test_geometric_transform20(self, order):
|
337 |
+
data = [[1, 2, 3, 4],
|
338 |
+
[5, 6, 7, 8],
|
339 |
+
[9, 10, 11, 12]]
|
340 |
+
|
341 |
+
def mapping(x):
|
342 |
+
return (x[0] / 2, x[1])
|
343 |
+
|
344 |
+
out = ndimage.geometric_transform(data, mapping, (6, 4),
|
345 |
+
order=order)
|
346 |
+
assert_array_almost_equal(out[::2, ...], data)
|
347 |
+
|
348 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
349 |
+
def test_geometric_transform21(self, order):
|
350 |
+
data = [[1, 2, 3, 4],
|
351 |
+
[5, 6, 7, 8],
|
352 |
+
[9, 10, 11, 12]]
|
353 |
+
|
354 |
+
def mapping(x):
|
355 |
+
return (x[0] / 2, x[1] / 2)
|
356 |
+
|
357 |
+
out = ndimage.geometric_transform(data, mapping, (6, 8),
|
358 |
+
order=order)
|
359 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
360 |
+
|
361 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
362 |
+
def test_geometric_transform22(self, order):
|
363 |
+
data = numpy.array([[1, 2, 3, 4],
|
364 |
+
[5, 6, 7, 8],
|
365 |
+
[9, 10, 11, 12]], numpy.float64)
|
366 |
+
|
367 |
+
def mapping1(x):
|
368 |
+
return (x[0] / 2, x[1] / 2)
|
369 |
+
|
370 |
+
def mapping2(x):
|
371 |
+
return (x[0] * 2, x[1] * 2)
|
372 |
+
|
373 |
+
out = ndimage.geometric_transform(data, mapping1,
|
374 |
+
(6, 8), order=order)
|
375 |
+
out = ndimage.geometric_transform(out, mapping2,
|
376 |
+
(3, 4), order=order)
|
377 |
+
assert_array_almost_equal(out, data)
|
378 |
+
|
379 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
380 |
+
def test_geometric_transform23(self, order):
|
381 |
+
data = [[1, 2, 3, 4],
|
382 |
+
[5, 6, 7, 8],
|
383 |
+
[9, 10, 11, 12]]
|
384 |
+
|
385 |
+
def mapping(x):
|
386 |
+
return (1, x[0] * 2)
|
387 |
+
|
388 |
+
out = ndimage.geometric_transform(data, mapping, (2,), order=order)
|
389 |
+
out = out.astype(numpy.int32)
|
390 |
+
assert_array_almost_equal(out, [5, 7])
|
391 |
+
|
392 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
393 |
+
def test_geometric_transform24(self, order):
|
394 |
+
data = [[1, 2, 3, 4],
|
395 |
+
[5, 6, 7, 8],
|
396 |
+
[9, 10, 11, 12]]
|
397 |
+
|
398 |
+
def mapping(x, a, b):
|
399 |
+
return (a, x[0] * b)
|
400 |
+
|
401 |
+
out = ndimage.geometric_transform(
|
402 |
+
data, mapping, (2,), order=order, extra_arguments=(1,),
|
403 |
+
extra_keywords={'b': 2})
|
404 |
+
assert_array_almost_equal(out, [5, 7])
|
405 |
+
|
406 |
+
def test_geometric_transform_grid_constant_order1(self):
|
407 |
+
# verify interpolation outside the original bounds
|
408 |
+
x = numpy.array([[1, 2, 3],
|
409 |
+
[4, 5, 6]], dtype=float)
|
410 |
+
|
411 |
+
def mapping(x):
|
412 |
+
return (x[0] - 0.5), (x[1] - 0.5)
|
413 |
+
|
414 |
+
expected_result = numpy.array([[0.25, 0.75, 1.25],
|
415 |
+
[1.25, 3.00, 4.00]])
|
416 |
+
assert_array_almost_equal(
|
417 |
+
ndimage.geometric_transform(x, mapping, mode='grid-constant',
|
418 |
+
order=1),
|
419 |
+
expected_result,
|
420 |
+
)
|
421 |
+
|
422 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
423 |
+
'mirror', 'reflect'])
|
424 |
+
@pytest.mark.parametrize('order', range(6))
|
425 |
+
def test_geometric_transform_vs_padded(self, order, mode):
|
426 |
+
x = numpy.arange(144, dtype=float).reshape(12, 12)
|
427 |
+
|
428 |
+
def mapping(x):
|
429 |
+
return (x[0] - 0.4), (x[1] + 2.3)
|
430 |
+
|
431 |
+
# Manually pad and then extract center after the transform to get the
|
432 |
+
# expected result.
|
433 |
+
npad = 24
|
434 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
435 |
+
xp = numpy.pad(x, npad, mode=pad_mode)
|
436 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
437 |
+
expected_result = ndimage.geometric_transform(
|
438 |
+
xp, mapping, mode=mode, order=order)[center_slice]
|
439 |
+
|
440 |
+
assert_allclose(
|
441 |
+
ndimage.geometric_transform(x, mapping, mode=mode,
|
442 |
+
order=order),
|
443 |
+
expected_result,
|
444 |
+
rtol=1e-7,
|
445 |
+
)
|
446 |
+
|
447 |
+
def test_geometric_transform_endianness_with_output_parameter(self):
|
448 |
+
# geometric transform given output ndarray or dtype with
|
449 |
+
# non-native endianness. see issue #4127
|
450 |
+
data = numpy.array([1])
|
451 |
+
|
452 |
+
def mapping(x):
|
453 |
+
return x
|
454 |
+
|
455 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
456 |
+
numpy.empty_like(data),
|
457 |
+
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
|
458 |
+
returned = ndimage.geometric_transform(data, mapping, data.shape,
|
459 |
+
output=out)
|
460 |
+
result = out if returned is None else returned
|
461 |
+
assert_array_almost_equal(result, [1])
|
462 |
+
|
463 |
+
def test_geometric_transform_with_string_output(self):
|
464 |
+
data = numpy.array([1])
|
465 |
+
|
466 |
+
def mapping(x):
|
467 |
+
return x
|
468 |
+
|
469 |
+
out = ndimage.geometric_transform(data, mapping, output='f')
|
470 |
+
assert_(out.dtype is numpy.dtype('f'))
|
471 |
+
assert_array_almost_equal(out, [1])
|
472 |
+
|
473 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
474 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
475 |
+
def test_map_coordinates01(self, order, dtype):
|
476 |
+
data = numpy.array([[4, 1, 3, 2],
|
477 |
+
[7, 6, 8, 5],
|
478 |
+
[3, 5, 3, 6]])
|
479 |
+
expected = numpy.array([[0, 0, 0, 0],
|
480 |
+
[0, 4, 1, 3],
|
481 |
+
[0, 7, 6, 8]])
|
482 |
+
if data.dtype.kind == 'c':
|
483 |
+
data = data - 1j * data
|
484 |
+
expected = expected - 1j * expected
|
485 |
+
|
486 |
+
idx = numpy.indices(data.shape)
|
487 |
+
idx -= 1
|
488 |
+
|
489 |
+
out = ndimage.map_coordinates(data, idx, order=order)
|
490 |
+
assert_array_almost_equal(out, expected)
|
491 |
+
|
492 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
493 |
+
def test_map_coordinates02(self, order):
|
494 |
+
data = numpy.array([[4, 1, 3, 2],
|
495 |
+
[7, 6, 8, 5],
|
496 |
+
[3, 5, 3, 6]])
|
497 |
+
idx = numpy.indices(data.shape, numpy.float64)
|
498 |
+
idx -= 0.5
|
499 |
+
|
500 |
+
out1 = ndimage.shift(data, 0.5, order=order)
|
501 |
+
out2 = ndimage.map_coordinates(data, idx, order=order)
|
502 |
+
assert_array_almost_equal(out1, out2)
|
503 |
+
|
504 |
+
def test_map_coordinates03(self):
|
505 |
+
data = numpy.array([[4, 1, 3, 2],
|
506 |
+
[7, 6, 8, 5],
|
507 |
+
[3, 5, 3, 6]], order='F')
|
508 |
+
idx = numpy.indices(data.shape) - 1
|
509 |
+
out = ndimage.map_coordinates(data, idx)
|
510 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
511 |
+
[0, 4, 1, 3],
|
512 |
+
[0, 7, 6, 8]])
|
513 |
+
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
|
514 |
+
idx = numpy.indices(data[::2].shape) - 1
|
515 |
+
out = ndimage.map_coordinates(data[::2], idx)
|
516 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
517 |
+
[0, 4, 1, 3]])
|
518 |
+
assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
|
519 |
+
idx = numpy.indices(data[:, ::2].shape) - 1
|
520 |
+
out = ndimage.map_coordinates(data[:, ::2], idx)
|
521 |
+
assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
|
522 |
+
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
|
523 |
+
|
524 |
+
def test_map_coordinates_endianness_with_output_parameter(self):
|
525 |
+
# output parameter given as array or dtype with either endianness
|
526 |
+
# see issue #4127
|
527 |
+
data = numpy.array([[1, 2], [7, 6]])
|
528 |
+
expected = numpy.array([[0, 0], [0, 1]])
|
529 |
+
idx = numpy.indices(data.shape)
|
530 |
+
idx -= 1
|
531 |
+
for out in [
|
532 |
+
data.dtype,
|
533 |
+
data.dtype.newbyteorder(),
|
534 |
+
numpy.empty_like(expected),
|
535 |
+
numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
|
536 |
+
]:
|
537 |
+
returned = ndimage.map_coordinates(data, idx, output=out)
|
538 |
+
result = out if returned is None else returned
|
539 |
+
assert_array_almost_equal(result, expected)
|
540 |
+
|
541 |
+
def test_map_coordinates_with_string_output(self):
|
542 |
+
data = numpy.array([[1]])
|
543 |
+
idx = numpy.indices(data.shape)
|
544 |
+
out = ndimage.map_coordinates(data, idx, output='f')
|
545 |
+
assert_(out.dtype is numpy.dtype('f'))
|
546 |
+
assert_array_almost_equal(out, [[1]])
|
547 |
+
|
548 |
+
@pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
|
549 |
+
reason='do not run on 32 bit or windows '
|
550 |
+
'(no sparse memory)')
|
551 |
+
def test_map_coordinates_large_data(self):
|
552 |
+
# check crash on large data
|
553 |
+
try:
|
554 |
+
n = 30000
|
555 |
+
a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
|
556 |
+
# fill the part we might read
|
557 |
+
a[n - 3:, n - 3:] = 0
|
558 |
+
ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
|
559 |
+
except MemoryError as e:
|
560 |
+
raise pytest.skip('Not enough memory available') from e
|
561 |
+
|
562 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
563 |
+
def test_affine_transform01(self, order):
|
564 |
+
data = numpy.array([1])
|
565 |
+
out = ndimage.affine_transform(data, [[1]], order=order)
|
566 |
+
assert_array_almost_equal(out, [1])
|
567 |
+
|
568 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
569 |
+
def test_affine_transform02(self, order):
|
570 |
+
data = numpy.ones([4])
|
571 |
+
out = ndimage.affine_transform(data, [[1]], order=order)
|
572 |
+
assert_array_almost_equal(out, [1, 1, 1, 1])
|
573 |
+
|
574 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
575 |
+
def test_affine_transform03(self, order):
|
576 |
+
data = numpy.ones([4])
|
577 |
+
out = ndimage.affine_transform(data, [[1]], -1, order=order)
|
578 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
579 |
+
|
580 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
581 |
+
def test_affine_transform04(self, order):
|
582 |
+
data = numpy.array([4, 1, 3, 2])
|
583 |
+
out = ndimage.affine_transform(data, [[1]], -1, order=order)
|
584 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
585 |
+
|
586 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
587 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
588 |
+
def test_affine_transform05(self, order, dtype):
|
589 |
+
data = numpy.array([[1, 1, 1, 1],
|
590 |
+
[1, 1, 1, 1],
|
591 |
+
[1, 1, 1, 1]], dtype=dtype)
|
592 |
+
expected = numpy.array([[0, 1, 1, 1],
|
593 |
+
[0, 1, 1, 1],
|
594 |
+
[0, 1, 1, 1]], dtype=dtype)
|
595 |
+
if data.dtype.kind == 'c':
|
596 |
+
data -= 1j * data
|
597 |
+
expected -= 1j * expected
|
598 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
599 |
+
[0, -1], order=order)
|
600 |
+
assert_array_almost_equal(out, expected)
|
601 |
+
|
602 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
603 |
+
def test_affine_transform06(self, order):
|
604 |
+
data = numpy.array([[4, 1, 3, 2],
|
605 |
+
[7, 6, 8, 5],
|
606 |
+
[3, 5, 3, 6]])
|
607 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
608 |
+
[0, -1], order=order)
|
609 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
610 |
+
[0, 7, 6, 8],
|
611 |
+
[0, 3, 5, 3]])
|
612 |
+
|
613 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
614 |
+
def test_affine_transform07(self, order):
|
615 |
+
data = numpy.array([[4, 1, 3, 2],
|
616 |
+
[7, 6, 8, 5],
|
617 |
+
[3, 5, 3, 6]])
|
618 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
619 |
+
[-1, 0], order=order)
|
620 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
621 |
+
[4, 1, 3, 2],
|
622 |
+
[7, 6, 8, 5]])
|
623 |
+
|
624 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
625 |
+
def test_affine_transform08(self, order):
|
626 |
+
data = numpy.array([[4, 1, 3, 2],
|
627 |
+
[7, 6, 8, 5],
|
628 |
+
[3, 5, 3, 6]])
|
629 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
|
630 |
+
[-1, -1], order=order)
|
631 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
632 |
+
[0, 4, 1, 3],
|
633 |
+
[0, 7, 6, 8]])
|
634 |
+
|
635 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
636 |
+
def test_affine_transform09(self, order):
|
637 |
+
data = numpy.array([[4, 1, 3, 2],
|
638 |
+
[7, 6, 8, 5],
|
639 |
+
[3, 5, 3, 6]])
|
640 |
+
if (order > 1):
|
641 |
+
filtered = ndimage.spline_filter(data, order=order)
|
642 |
+
else:
|
643 |
+
filtered = data
|
644 |
+
out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
|
645 |
+
[-1, -1], order=order,
|
646 |
+
prefilter=False)
|
647 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
648 |
+
[0, 4, 1, 3],
|
649 |
+
[0, 7, 6, 8]])
|
650 |
+
|
651 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
652 |
+
def test_affine_transform10(self, order):
|
653 |
+
data = numpy.ones([2], numpy.float64)
|
654 |
+
out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
|
655 |
+
order=order)
|
656 |
+
assert_array_almost_equal(out, [1, 1, 1, 0])
|
657 |
+
|
658 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
659 |
+
def test_affine_transform11(self, order):
|
660 |
+
data = [1, 5, 2, 6, 3, 7, 4, 4]
|
661 |
+
out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
|
662 |
+
assert_array_almost_equal(out, [1, 2, 3, 4])
|
663 |
+
|
664 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
665 |
+
def test_affine_transform12(self, order):
|
666 |
+
data = [1, 2, 3, 4]
|
667 |
+
out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
|
668 |
+
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
|
669 |
+
|
670 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
671 |
+
def test_affine_transform13(self, order):
|
672 |
+
data = [[1, 2, 3, 4],
|
673 |
+
[5, 6, 7, 8],
|
674 |
+
[9.0, 10, 11, 12]]
|
675 |
+
out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
|
676 |
+
order=order)
|
677 |
+
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
|
678 |
+
|
679 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
680 |
+
def test_affine_transform14(self, order):
|
681 |
+
data = [[1, 2, 3, 4],
|
682 |
+
[5, 6, 7, 8],
|
683 |
+
[9, 10, 11, 12]]
|
684 |
+
out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
|
685 |
+
order=order)
|
686 |
+
assert_array_almost_equal(out, [[1, 2, 3, 4]])
|
687 |
+
|
688 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
689 |
+
def test_affine_transform15(self, order):
|
690 |
+
data = [[1, 2, 3, 4],
|
691 |
+
[5, 6, 7, 8],
|
692 |
+
[9, 10, 11, 12]]
|
693 |
+
out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
|
694 |
+
order=order)
|
695 |
+
assert_array_almost_equal(out, [[1, 3]])
|
696 |
+
|
697 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
698 |
+
def test_affine_transform16(self, order):
|
699 |
+
data = [[1, 2, 3, 4],
|
700 |
+
[5, 6, 7, 8],
|
701 |
+
[9, 10, 11, 12]]
|
702 |
+
out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
|
703 |
+
(3, 8), order=order)
|
704 |
+
assert_array_almost_equal(out[..., ::2], data)
|
705 |
+
|
706 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
707 |
+
def test_affine_transform17(self, order):
|
708 |
+
data = [[1, 2, 3, 4],
|
709 |
+
[5, 6, 7, 8],
|
710 |
+
[9, 10, 11, 12]]
|
711 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
|
712 |
+
(6, 4), order=order)
|
713 |
+
assert_array_almost_equal(out[::2, ...], data)
|
714 |
+
|
715 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
716 |
+
def test_affine_transform18(self, order):
|
717 |
+
data = [[1, 2, 3, 4],
|
718 |
+
[5, 6, 7, 8],
|
719 |
+
[9, 10, 11, 12]]
|
720 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
|
721 |
+
(6, 8), order=order)
|
722 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
723 |
+
|
724 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
725 |
+
def test_affine_transform19(self, order):
|
726 |
+
data = numpy.array([[1, 2, 3, 4],
|
727 |
+
[5, 6, 7, 8],
|
728 |
+
[9, 10, 11, 12]], numpy.float64)
|
729 |
+
out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
|
730 |
+
(6, 8), order=order)
|
731 |
+
out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
|
732 |
+
(3, 4), order=order)
|
733 |
+
assert_array_almost_equal(out, data)
|
734 |
+
|
735 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
736 |
+
def test_affine_transform20(self, order):
|
737 |
+
data = [[1, 2, 3, 4],
|
738 |
+
[5, 6, 7, 8],
|
739 |
+
[9, 10, 11, 12]]
|
740 |
+
out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
|
741 |
+
order=order)
|
742 |
+
assert_array_almost_equal(out, [1, 3])
|
743 |
+
|
744 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
745 |
+
def test_affine_transform21(self, order):
|
746 |
+
data = [[1, 2, 3, 4],
|
747 |
+
[5, 6, 7, 8],
|
748 |
+
[9, 10, 11, 12]]
|
749 |
+
out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
|
750 |
+
order=order)
|
751 |
+
assert_array_almost_equal(out, [1, 9])
|
752 |
+
|
753 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
754 |
+
def test_affine_transform22(self, order):
|
755 |
+
# shift and offset interaction; see issue #1547
|
756 |
+
data = numpy.array([4, 1, 3, 2])
|
757 |
+
out = ndimage.affine_transform(data, [[2]], [-1], (3,),
|
758 |
+
order=order)
|
759 |
+
assert_array_almost_equal(out, [0, 1, 2])
|
760 |
+
|
761 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
762 |
+
def test_affine_transform23(self, order):
|
763 |
+
# shift and offset interaction; see issue #1547
|
764 |
+
data = numpy.array([4, 1, 3, 2])
|
765 |
+
out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
|
766 |
+
order=order)
|
767 |
+
assert_array_almost_equal(out[::2], [0, 4, 1, 3])
|
768 |
+
|
769 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
770 |
+
def test_affine_transform24(self, order):
|
771 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
772 |
+
data = numpy.array([4, 1, 3, 2])
|
773 |
+
with suppress_warnings() as sup:
|
774 |
+
sup.filter(UserWarning,
|
775 |
+
'The behavior of affine_transform with a 1-D array .* '
|
776 |
+
'has changed')
|
777 |
+
out1 = ndimage.affine_transform(data, [2], -1, order=order)
|
778 |
+
out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
|
779 |
+
assert_array_almost_equal(out1, out2)
|
780 |
+
|
781 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
782 |
+
def test_affine_transform25(self, order):
|
783 |
+
# consistency between diagonal and non-diagonal case; see issue #1547
|
784 |
+
data = numpy.array([4, 1, 3, 2])
|
785 |
+
with suppress_warnings() as sup:
|
786 |
+
sup.filter(UserWarning,
|
787 |
+
'The behavior of affine_transform with a 1-D array .* '
|
788 |
+
'has changed')
|
789 |
+
out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
|
790 |
+
out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
|
791 |
+
assert_array_almost_equal(out1, out2)
|
792 |
+
|
793 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
794 |
+
def test_affine_transform26(self, order):
|
795 |
+
# test homogeneous coordinates
|
796 |
+
data = numpy.array([[4, 1, 3, 2],
|
797 |
+
[7, 6, 8, 5],
|
798 |
+
[3, 5, 3, 6]])
|
799 |
+
if (order > 1):
|
800 |
+
filtered = ndimage.spline_filter(data, order=order)
|
801 |
+
else:
|
802 |
+
filtered = data
|
803 |
+
tform_original = numpy.eye(2)
|
804 |
+
offset_original = -numpy.ones((2, 1))
|
805 |
+
tform_h1 = numpy.hstack((tform_original, offset_original))
|
806 |
+
tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
|
807 |
+
out1 = ndimage.affine_transform(filtered, tform_original,
|
808 |
+
offset_original.ravel(),
|
809 |
+
order=order, prefilter=False)
|
810 |
+
out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
|
811 |
+
prefilter=False)
|
812 |
+
out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
|
813 |
+
prefilter=False)
|
814 |
+
for out in [out1, out2, out3]:
|
815 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
816 |
+
[0, 4, 1, 3],
|
817 |
+
[0, 7, 6, 8]])
|
818 |
+
|
819 |
+
def test_affine_transform27(self):
|
820 |
+
# test valid homogeneous transformation matrix
|
821 |
+
data = numpy.array([[4, 1, 3, 2],
|
822 |
+
[7, 6, 8, 5],
|
823 |
+
[3, 5, 3, 6]])
|
824 |
+
tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
|
825 |
+
tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
|
826 |
+
assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
|
827 |
+
|
828 |
+
def test_affine_transform_1d_endianness_with_output_parameter(self):
|
829 |
+
# 1d affine transform given output ndarray or dtype with
|
830 |
+
# either endianness. see issue #7388
|
831 |
+
data = numpy.ones((2, 2))
|
832 |
+
for out in [numpy.empty_like(data),
|
833 |
+
numpy.empty_like(data).astype(data.dtype.newbyteorder()),
|
834 |
+
data.dtype, data.dtype.newbyteorder()]:
|
835 |
+
with suppress_warnings() as sup:
|
836 |
+
sup.filter(UserWarning,
|
837 |
+
'The behavior of affine_transform with a 1-D array '
|
838 |
+
'.* has changed')
|
839 |
+
returned = ndimage.affine_transform(data, [1, 1], output=out)
|
840 |
+
result = out if returned is None else returned
|
841 |
+
assert_array_almost_equal(result, [[1, 1], [1, 1]])
|
842 |
+
|
843 |
+
def test_affine_transform_multi_d_endianness_with_output_parameter(self):
|
844 |
+
# affine transform given output ndarray or dtype with either endianness
|
845 |
+
# see issue #4127
|
846 |
+
data = numpy.array([1])
|
847 |
+
for out in [data.dtype, data.dtype.newbyteorder(),
|
848 |
+
numpy.empty_like(data),
|
849 |
+
numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
|
850 |
+
returned = ndimage.affine_transform(data, [[1]], output=out)
|
851 |
+
result = out if returned is None else returned
|
852 |
+
assert_array_almost_equal(result, [1])
|
853 |
+
|
854 |
+
def test_affine_transform_output_shape(self):
|
855 |
+
# don't require output_shape when out of a different size is given
|
856 |
+
data = numpy.arange(8, dtype=numpy.float64)
|
857 |
+
out = numpy.ones((16,))
|
858 |
+
|
859 |
+
ndimage.affine_transform(data, [[1]], output=out)
|
860 |
+
assert_array_almost_equal(out[:8], data)
|
861 |
+
|
862 |
+
# mismatched output shape raises an error
|
863 |
+
with pytest.raises(RuntimeError):
|
864 |
+
ndimage.affine_transform(
|
865 |
+
data, [[1]], output=out, output_shape=(12,))
|
866 |
+
|
867 |
+
def test_affine_transform_with_string_output(self):
|
868 |
+
data = numpy.array([1])
|
869 |
+
out = ndimage.affine_transform(data, [[1]], output='f')
|
870 |
+
assert_(out.dtype is numpy.dtype('f'))
|
871 |
+
assert_array_almost_equal(out, [1])
|
872 |
+
|
873 |
+
@pytest.mark.parametrize('shift',
|
874 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
875 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
876 |
+
def test_affine_transform_shift_via_grid_wrap(self, shift, order):
|
877 |
+
# For mode 'grid-wrap', integer shifts should match numpy.roll
|
878 |
+
x = numpy.array([[0, 1],
|
879 |
+
[2, 3]])
|
880 |
+
affine = numpy.zeros((2, 3))
|
881 |
+
affine[:2, :2] = numpy.eye(2)
|
882 |
+
affine[:, 2] = shift
|
883 |
+
assert_array_almost_equal(
|
884 |
+
ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
|
885 |
+
numpy.roll(x, shift, axis=(0, 1)),
|
886 |
+
)
|
887 |
+
|
888 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
889 |
+
def test_affine_transform_shift_reflect(self, order):
|
890 |
+
# shift by x.shape results in reflection
|
891 |
+
x = numpy.array([[0, 1, 2],
|
892 |
+
[3, 4, 5]])
|
893 |
+
affine = numpy.zeros((2, 3))
|
894 |
+
affine[:2, :2] = numpy.eye(2)
|
895 |
+
affine[:, 2] = x.shape
|
896 |
+
assert_array_almost_equal(
|
897 |
+
ndimage.affine_transform(x, affine, mode='reflect', order=order),
|
898 |
+
x[::-1, ::-1],
|
899 |
+
)
|
900 |
+
|
901 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
902 |
+
def test_shift01(self, order):
|
903 |
+
data = numpy.array([1])
|
904 |
+
out = ndimage.shift(data, [1], order=order)
|
905 |
+
assert_array_almost_equal(out, [0])
|
906 |
+
|
907 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
908 |
+
def test_shift02(self, order):
|
909 |
+
data = numpy.ones([4])
|
910 |
+
out = ndimage.shift(data, [1], order=order)
|
911 |
+
assert_array_almost_equal(out, [0, 1, 1, 1])
|
912 |
+
|
913 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
914 |
+
def test_shift03(self, order):
|
915 |
+
data = numpy.ones([4])
|
916 |
+
out = ndimage.shift(data, -1, order=order)
|
917 |
+
assert_array_almost_equal(out, [1, 1, 1, 0])
|
918 |
+
|
919 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
920 |
+
def test_shift04(self, order):
|
921 |
+
data = numpy.array([4, 1, 3, 2])
|
922 |
+
out = ndimage.shift(data, 1, order=order)
|
923 |
+
assert_array_almost_equal(out, [0, 4, 1, 3])
|
924 |
+
|
925 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
926 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
927 |
+
def test_shift05(self, order, dtype):
|
928 |
+
data = numpy.array([[1, 1, 1, 1],
|
929 |
+
[1, 1, 1, 1],
|
930 |
+
[1, 1, 1, 1]], dtype=dtype)
|
931 |
+
expected = numpy.array([[0, 1, 1, 1],
|
932 |
+
[0, 1, 1, 1],
|
933 |
+
[0, 1, 1, 1]], dtype=dtype)
|
934 |
+
if data.dtype.kind == 'c':
|
935 |
+
data -= 1j * data
|
936 |
+
expected -= 1j * expected
|
937 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
938 |
+
assert_array_almost_equal(out, expected)
|
939 |
+
|
940 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
941 |
+
@pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
|
942 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
943 |
+
def test_shift_with_nonzero_cval(self, order, mode, dtype):
|
944 |
+
data = numpy.array([[1, 1, 1, 1],
|
945 |
+
[1, 1, 1, 1],
|
946 |
+
[1, 1, 1, 1]], dtype=dtype)
|
947 |
+
|
948 |
+
expected = numpy.array([[0, 1, 1, 1],
|
949 |
+
[0, 1, 1, 1],
|
950 |
+
[0, 1, 1, 1]], dtype=dtype)
|
951 |
+
|
952 |
+
if data.dtype.kind == 'c':
|
953 |
+
data -= 1j * data
|
954 |
+
expected -= 1j * expected
|
955 |
+
cval = 5.0
|
956 |
+
expected[:, 0] = cval # specific to shift of [0, 1] used below
|
957 |
+
out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
|
958 |
+
assert_array_almost_equal(out, expected)
|
959 |
+
|
960 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
961 |
+
def test_shift06(self, order):
|
962 |
+
data = numpy.array([[4, 1, 3, 2],
|
963 |
+
[7, 6, 8, 5],
|
964 |
+
[3, 5, 3, 6]])
|
965 |
+
out = ndimage.shift(data, [0, 1], order=order)
|
966 |
+
assert_array_almost_equal(out, [[0, 4, 1, 3],
|
967 |
+
[0, 7, 6, 8],
|
968 |
+
[0, 3, 5, 3]])
|
969 |
+
|
970 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
971 |
+
def test_shift07(self, order):
|
972 |
+
data = numpy.array([[4, 1, 3, 2],
|
973 |
+
[7, 6, 8, 5],
|
974 |
+
[3, 5, 3, 6]])
|
975 |
+
out = ndimage.shift(data, [1, 0], order=order)
|
976 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
977 |
+
[4, 1, 3, 2],
|
978 |
+
[7, 6, 8, 5]])
|
979 |
+
|
980 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
981 |
+
def test_shift08(self, order):
|
982 |
+
data = numpy.array([[4, 1, 3, 2],
|
983 |
+
[7, 6, 8, 5],
|
984 |
+
[3, 5, 3, 6]])
|
985 |
+
out = ndimage.shift(data, [1, 1], order=order)
|
986 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
987 |
+
[0, 4, 1, 3],
|
988 |
+
[0, 7, 6, 8]])
|
989 |
+
|
990 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
991 |
+
def test_shift09(self, order):
|
992 |
+
data = numpy.array([[4, 1, 3, 2],
|
993 |
+
[7, 6, 8, 5],
|
994 |
+
[3, 5, 3, 6]])
|
995 |
+
if (order > 1):
|
996 |
+
filtered = ndimage.spline_filter(data, order=order)
|
997 |
+
else:
|
998 |
+
filtered = data
|
999 |
+
out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
|
1000 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0],
|
1001 |
+
[0, 4, 1, 3],
|
1002 |
+
[0, 7, 6, 8]])
|
1003 |
+
|
1004 |
+
@pytest.mark.parametrize('shift',
|
1005 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
1006 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1007 |
+
def test_shift_grid_wrap(self, shift, order):
|
1008 |
+
# For mode 'grid-wrap', integer shifts should match numpy.roll
|
1009 |
+
x = numpy.array([[0, 1],
|
1010 |
+
[2, 3]])
|
1011 |
+
assert_array_almost_equal(
|
1012 |
+
ndimage.shift(x, shift, mode='grid-wrap', order=order),
|
1013 |
+
numpy.roll(x, shift, axis=(0, 1)),
|
1014 |
+
)
|
1015 |
+
|
1016 |
+
@pytest.mark.parametrize('shift',
|
1017 |
+
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
|
1018 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1019 |
+
def test_shift_grid_constant1(self, shift, order):
|
1020 |
+
# For integer shifts, 'constant' and 'grid-constant' should be equal
|
1021 |
+
x = numpy.arange(20).reshape((5, 4))
|
1022 |
+
assert_array_almost_equal(
|
1023 |
+
ndimage.shift(x, shift, mode='grid-constant', order=order),
|
1024 |
+
ndimage.shift(x, shift, mode='constant', order=order),
|
1025 |
+
)
|
1026 |
+
|
1027 |
+
def test_shift_grid_constant_order1(self):
|
1028 |
+
x = numpy.array([[1, 2, 3],
|
1029 |
+
[4, 5, 6]], dtype=float)
|
1030 |
+
expected_result = numpy.array([[0.25, 0.75, 1.25],
|
1031 |
+
[1.25, 3.00, 4.00]])
|
1032 |
+
assert_array_almost_equal(
|
1033 |
+
ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
|
1034 |
+
expected_result,
|
1035 |
+
)
|
1036 |
+
|
1037 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1038 |
+
def test_shift_reflect(self, order):
|
1039 |
+
# shift by x.shape results in reflection
|
1040 |
+
x = numpy.array([[0, 1, 2],
|
1041 |
+
[3, 4, 5]])
|
1042 |
+
assert_array_almost_equal(
|
1043 |
+
ndimage.shift(x, x.shape, mode='reflect', order=order),
|
1044 |
+
x[::-1, ::-1],
|
1045 |
+
)
|
1046 |
+
|
1047 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1048 |
+
@pytest.mark.parametrize('prefilter', [False, True])
|
1049 |
+
def test_shift_nearest_boundary(self, order, prefilter):
|
1050 |
+
# verify that shifting at least order // 2 beyond the end of the array
|
1051 |
+
# gives a value equal to the edge value.
|
1052 |
+
x = numpy.arange(16)
|
1053 |
+
kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
|
1054 |
+
assert_array_almost_equal(
|
1055 |
+
ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
|
1056 |
+
)
|
1057 |
+
assert_array_almost_equal(
|
1058 |
+
ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
|
1059 |
+
)
|
1060 |
+
|
1061 |
+
@pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
|
1062 |
+
'mirror', 'reflect'])
|
1063 |
+
@pytest.mark.parametrize('order', range(6))
|
1064 |
+
def test_shift_vs_padded(self, order, mode):
|
1065 |
+
x = numpy.arange(144, dtype=float).reshape(12, 12)
|
1066 |
+
shift = (0.4, -2.3)
|
1067 |
+
|
1068 |
+
# manually pad and then extract center to get expected result
|
1069 |
+
npad = 32
|
1070 |
+
pad_mode = ndimage_to_numpy_mode.get(mode)
|
1071 |
+
xp = numpy.pad(x, npad, mode=pad_mode)
|
1072 |
+
center_slice = tuple([slice(npad, -npad)] * x.ndim)
|
1073 |
+
expected_result = ndimage.shift(
|
1074 |
+
xp, shift, mode=mode, order=order)[center_slice]
|
1075 |
+
|
1076 |
+
assert_allclose(
|
1077 |
+
ndimage.shift(x, shift, mode=mode, order=order),
|
1078 |
+
expected_result,
|
1079 |
+
rtol=1e-7,
|
1080 |
+
)
|
1081 |
+
|
1082 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1083 |
+
def test_zoom1(self, order):
|
1084 |
+
for z in [2, [2, 2]]:
|
1085 |
+
arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
|
1086 |
+
arr = ndimage.zoom(arr, z, order=order)
|
1087 |
+
assert_equal(arr.shape, (10, 10))
|
1088 |
+
assert_(numpy.all(arr[-1, :] != 0))
|
1089 |
+
assert_(numpy.all(arr[-1, :] >= (20 - eps)))
|
1090 |
+
assert_(numpy.all(arr[0, :] <= (5 + eps)))
|
1091 |
+
assert_(numpy.all(arr >= (0 - eps)))
|
1092 |
+
assert_(numpy.all(arr <= (24 + eps)))
|
1093 |
+
|
1094 |
+
def test_zoom2(self):
|
1095 |
+
arr = numpy.arange(12).reshape((3, 4))
|
1096 |
+
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
|
1097 |
+
assert_array_equal(out, arr)
|
1098 |
+
|
1099 |
+
def test_zoom3(self):
|
1100 |
+
arr = numpy.array([[1, 2]])
|
1101 |
+
out1 = ndimage.zoom(arr, (2, 1))
|
1102 |
+
out2 = ndimage.zoom(arr, (1, 2))
|
1103 |
+
|
1104 |
+
assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
|
1105 |
+
assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
|
1106 |
+
|
1107 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1108 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
1109 |
+
def test_zoom_affine01(self, order, dtype):
|
1110 |
+
data = numpy.asarray([[1, 2, 3, 4],
|
1111 |
+
[5, 6, 7, 8],
|
1112 |
+
[9, 10, 11, 12]], dtype=dtype)
|
1113 |
+
if data.dtype.kind == 'c':
|
1114 |
+
data -= 1j * data
|
1115 |
+
with suppress_warnings() as sup:
|
1116 |
+
sup.filter(UserWarning,
|
1117 |
+
'The behavior of affine_transform with a 1-D array .* '
|
1118 |
+
'has changed')
|
1119 |
+
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
|
1120 |
+
(6, 8), order=order)
|
1121 |
+
assert_array_almost_equal(out[::2, ::2], data)
|
1122 |
+
|
1123 |
+
def test_zoom_infinity(self):
|
1124 |
+
# Ticket #1419 regression test
|
1125 |
+
dim = 8
|
1126 |
+
ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
|
1127 |
+
|
1128 |
+
def test_zoom_zoomfactor_one(self):
|
1129 |
+
# Ticket #1122 regression test
|
1130 |
+
arr = numpy.zeros((1, 5, 5))
|
1131 |
+
zoom = (1.0, 2.0, 2.0)
|
1132 |
+
|
1133 |
+
out = ndimage.zoom(arr, zoom, cval=7)
|
1134 |
+
ref = numpy.zeros((1, 10, 10))
|
1135 |
+
assert_array_almost_equal(out, ref)
|
1136 |
+
|
1137 |
+
def test_zoom_output_shape_roundoff(self):
|
1138 |
+
arr = numpy.zeros((3, 11, 25))
|
1139 |
+
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
|
1140 |
+
out = ndimage.zoom(arr, zoom)
|
1141 |
+
assert_array_equal(out.shape, (4, 15, 29))
|
1142 |
+
|
1143 |
+
@pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
|
1144 |
+
@pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
|
1145 |
+
'mirror', 'grid-wrap', 'grid-mirror',
|
1146 |
+
'grid-constant'])
|
1147 |
+
def test_zoom_by_int_order0(self, zoom, mode):
|
1148 |
+
# order 0 zoom should be the same as replication via numpy.kron
|
1149 |
+
# Note: This is not True for general x shapes when grid_mode is False,
|
1150 |
+
# but works here for all modes because the size ratio happens to
|
1151 |
+
# always be an integer when x.shape = (2, 2).
|
1152 |
+
x = numpy.array([[0, 1],
|
1153 |
+
[2, 3]], dtype=float)
|
1154 |
+
# x = numpy.arange(16, dtype=float).reshape(4, 4)
|
1155 |
+
assert_array_almost_equal(
|
1156 |
+
ndimage.zoom(x, zoom, order=0, mode=mode),
|
1157 |
+
numpy.kron(x, numpy.ones(zoom))
|
1158 |
+
)
|
1159 |
+
|
1160 |
+
@pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
|
1161 |
+
@pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
|
1162 |
+
@pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
|
1163 |
+
'grid-wrap', 'grid-constant'])
|
1164 |
+
def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
|
1165 |
+
# When grid_mode is True, order 0 zoom should be the same as
|
1166 |
+
# replication via numpy.kron. The only exceptions to this are the
|
1167 |
+
# non-grid modes 'constant' and 'wrap'.
|
1168 |
+
x = numpy.arange(numpy.prod(shape), dtype=float).reshape(shape)
|
1169 |
+
assert_array_almost_equal(
|
1170 |
+
ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
|
1171 |
+
numpy.kron(x, numpy.ones(zoom))
|
1172 |
+
)
|
1173 |
+
|
1174 |
+
@pytest.mark.parametrize('mode', ['constant', 'wrap'])
|
1175 |
+
def test_zoom_grid_mode_warnings(self, mode):
|
1176 |
+
# Warn on use of non-grid modes when grid_mode is True
|
1177 |
+
x = numpy.arange(9, dtype=float).reshape((3, 3))
|
1178 |
+
with pytest.warns(UserWarning,
|
1179 |
+
match="It is recommended to use mode"):
|
1180 |
+
ndimage.zoom(x, 2, mode=mode, grid_mode=True),
|
1181 |
+
|
1182 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1183 |
+
def test_rotate01(self, order):
|
1184 |
+
data = numpy.array([[0, 0, 0, 0],
|
1185 |
+
[0, 1, 1, 0],
|
1186 |
+
[0, 0, 0, 0]], dtype=numpy.float64)
|
1187 |
+
out = ndimage.rotate(data, 0, order=order)
|
1188 |
+
assert_array_almost_equal(out, data)
|
1189 |
+
|
1190 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1191 |
+
def test_rotate02(self, order):
|
1192 |
+
data = numpy.array([[0, 0, 0, 0],
|
1193 |
+
[0, 1, 0, 0],
|
1194 |
+
[0, 0, 0, 0]], dtype=numpy.float64)
|
1195 |
+
expected = numpy.array([[0, 0, 0],
|
1196 |
+
[0, 0, 0],
|
1197 |
+
[0, 1, 0],
|
1198 |
+
[0, 0, 0]], dtype=numpy.float64)
|
1199 |
+
out = ndimage.rotate(data, 90, order=order)
|
1200 |
+
assert_array_almost_equal(out, expected)
|
1201 |
+
|
1202 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1203 |
+
@pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
|
1204 |
+
def test_rotate03(self, order, dtype):
|
1205 |
+
data = numpy.array([[0, 0, 0, 0, 0],
|
1206 |
+
[0, 1, 1, 0, 0],
|
1207 |
+
[0, 0, 0, 0, 0]], dtype=dtype)
|
1208 |
+
expected = numpy.array([[0, 0, 0],
|
1209 |
+
[0, 0, 0],
|
1210 |
+
[0, 1, 0],
|
1211 |
+
[0, 1, 0],
|
1212 |
+
[0, 0, 0]], dtype=dtype)
|
1213 |
+
if data.dtype.kind == 'c':
|
1214 |
+
data -= 1j * data
|
1215 |
+
expected -= 1j * expected
|
1216 |
+
out = ndimage.rotate(data, 90, order=order)
|
1217 |
+
assert_array_almost_equal(out, expected)
|
1218 |
+
|
1219 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1220 |
+
def test_rotate04(self, order):
|
1221 |
+
data = numpy.array([[0, 0, 0, 0, 0],
|
1222 |
+
[0, 1, 1, 0, 0],
|
1223 |
+
[0, 0, 0, 0, 0]], dtype=numpy.float64)
|
1224 |
+
expected = numpy.array([[0, 0, 0, 0, 0],
|
1225 |
+
[0, 0, 1, 0, 0],
|
1226 |
+
[0, 0, 1, 0, 0]], dtype=numpy.float64)
|
1227 |
+
out = ndimage.rotate(data, 90, reshape=False, order=order)
|
1228 |
+
assert_array_almost_equal(out, expected)
|
1229 |
+
|
1230 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1231 |
+
def test_rotate05(self, order):
|
1232 |
+
data = numpy.empty((4, 3, 3))
|
1233 |
+
for i in range(3):
|
1234 |
+
data[:, :, i] = numpy.array([[0, 0, 0],
|
1235 |
+
[0, 1, 0],
|
1236 |
+
[0, 1, 0],
|
1237 |
+
[0, 0, 0]], dtype=numpy.float64)
|
1238 |
+
expected = numpy.array([[0, 0, 0, 0],
|
1239 |
+
[0, 1, 1, 0],
|
1240 |
+
[0, 0, 0, 0]], dtype=numpy.float64)
|
1241 |
+
out = ndimage.rotate(data, 90, order=order)
|
1242 |
+
for i in range(3):
|
1243 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
1244 |
+
|
1245 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1246 |
+
def test_rotate06(self, order):
|
1247 |
+
data = numpy.empty((3, 4, 3))
|
1248 |
+
for i in range(3):
|
1249 |
+
data[:, :, i] = numpy.array([[0, 0, 0, 0],
|
1250 |
+
[0, 1, 1, 0],
|
1251 |
+
[0, 0, 0, 0]], dtype=numpy.float64)
|
1252 |
+
expected = numpy.array([[0, 0, 0],
|
1253 |
+
[0, 1, 0],
|
1254 |
+
[0, 1, 0],
|
1255 |
+
[0, 0, 0]], dtype=numpy.float64)
|
1256 |
+
out = ndimage.rotate(data, 90, order=order)
|
1257 |
+
for i in range(3):
|
1258 |
+
assert_array_almost_equal(out[:, :, i], expected)
|
1259 |
+
|
1260 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1261 |
+
def test_rotate07(self, order):
|
1262 |
+
data = numpy.array([[[0, 0, 0, 0, 0],
|
1263 |
+
[0, 1, 1, 0, 0],
|
1264 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
|
1265 |
+
data = data.transpose()
|
1266 |
+
expected = numpy.array([[[0, 0, 0],
|
1267 |
+
[0, 1, 0],
|
1268 |
+
[0, 1, 0],
|
1269 |
+
[0, 0, 0],
|
1270 |
+
[0, 0, 0]]] * 2, dtype=numpy.float64)
|
1271 |
+
expected = expected.transpose([2, 1, 0])
|
1272 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
|
1273 |
+
assert_array_almost_equal(out, expected)
|
1274 |
+
|
1275 |
+
@pytest.mark.parametrize('order', range(0, 6))
|
1276 |
+
def test_rotate08(self, order):
|
1277 |
+
data = numpy.array([[[0, 0, 0, 0, 0],
|
1278 |
+
[0, 1, 1, 0, 0],
|
1279 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
|
1280 |
+
data = data.transpose()
|
1281 |
+
expected = numpy.array([[[0, 0, 1, 0, 0],
|
1282 |
+
[0, 0, 1, 0, 0],
|
1283 |
+
[0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
|
1284 |
+
expected = expected.transpose()
|
1285 |
+
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
|
1286 |
+
assert_array_almost_equal(out, expected)
|
1287 |
+
|
1288 |
+
def test_rotate09(self):
|
1289 |
+
data = numpy.array([[0, 0, 0, 0, 0],
|
1290 |
+
[0, 1, 1, 0, 0],
|
1291 |
+
[0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
|
1292 |
+
with assert_raises(ValueError):
|
1293 |
+
ndimage.rotate(data, 90, axes=(0, data.ndim))
|
1294 |
+
|
1295 |
+
    def test_rotate10(self):
        """Regression test: 12-degree rotation of a 3-D ramp, reshape=False.

        The reference values are the output of ndimage.rotate prior to its
        refactoring, pinned here so the refactored code stays numerically
        identical.
        """
        data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))

        # The output of ndimage.rotate before refactoring
        expected = numpy.array([[[0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0],
                                 [6.54914793, 7.54914793, 8.54914793],
                                 [10.84520162, 11.84520162, 12.84520162],
                                 [0.0, 0.0, 0.0]],
                                [[6.19286575, 7.19286575, 8.19286575],
                                 [13.4730712, 14.4730712, 15.4730712],
                                 [21.0, 22.0, 23.0],
                                 [28.5269288, 29.5269288, 30.5269288],
                                 [35.80713425, 36.80713425, 37.80713425]],
                                [[0.0, 0.0, 0.0],
                                 [31.15479838, 32.15479838, 33.15479838],
                                 [35.45085207, 36.45085207, 37.45085207],
                                 [0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0]]])

        out = ndimage.rotate(data, angle=12, reshape=False)
        assert_array_almost_equal(out, expected)
|
1317 |
+
|
1318 |
+
def test_rotate_exact_180(self):
|
1319 |
+
a = numpy.tile(numpy.arange(5), (5, 1))
|
1320 |
+
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
|
1321 |
+
assert_equal(a, b)
|
1322 |
+
|
1323 |
+
|
1324 |
+
def test_zoom_output_shape():
    """Regression test for ticket #643."""
    # A preallocated output array of the correct zoomed shape must be
    # accepted without raising.
    grid = numpy.arange(12).reshape((3, 4))
    ndimage.zoom(grid, 2, output=numpy.zeros((6, 8)))
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
ADDED
@@ -0,0 +1,1409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os.path
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import (
|
5 |
+
assert_,
|
6 |
+
assert_allclose,
|
7 |
+
assert_almost_equal,
|
8 |
+
assert_array_almost_equal,
|
9 |
+
assert_array_equal,
|
10 |
+
assert_equal,
|
11 |
+
suppress_warnings,
|
12 |
+
)
|
13 |
+
from pytest import raises as assert_raises
|
14 |
+
|
15 |
+
import scipy.ndimage as ndimage
|
16 |
+
|
17 |
+
|
18 |
+
from . import types
|
19 |
+
|
20 |
+
|
21 |
+
class Test_measurements_stats:
    """ndimage._measurements._stats() is a utility used by other functions."""

    def _run_stats(self, x, labels, index, centered=False):
        """Yield _stats() results for both a (4,) and a (2, 2) layout.

        Every test feeds the same values through a 1-D and a 2-D shape to
        check that _stats() is layout-agnostic.
        """
        for shp in [(4,), (2, 2)]:
            data = np.array(x).reshape(shp)
            lbl = np.array(labels).reshape(shp)
            # NOTE(review): assumes _stats() defaults to centered=False —
            # the original tests omitted the keyword in that case.
            yield ndimage._measurements._stats(
                data, labels=lbl, index=index, centered=centered)

    def test_a(self):
        # "Small" integer labels that index directly into the label array.
        for counts, sums in self._run_stats([0, 1, 2, 6], [0, 0, 1, 1],
                                            [0, 1]):
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])

    def test_b(self):
        # Same data as test_a, but the label 9 exceeds the length of
        # 'labels', so this exercises a different code path.
        for counts, sums in self._run_stats([0, 1, 2, 6], [0, 0, 9, 9],
                                            [0, 9]):
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])

    def test_a_centered(self):
        # centered=True adds per-label centered sums as a third result.
        for counts, sums, centers in self._run_stats(
                [0, 1, 2, 6], [0, 0, 1, 1], [0, 1], centered=True):
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])

    def test_b_centered(self):
        for counts, sums, centers in self._run_stats(
                [0, 1, 2, 6], [0, 0, 9, 9], [0, 9], centered=True):
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])

    def test_nonint_labels(self):
        # Float-valued labels and indices must behave like integer ones.
        for counts, sums, centers in self._run_stats(
                [0, 1, 2, 6], [0.0, 0.0, 9.0, 9.0], [0.0, 9.0],
                centered=True):
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])
|
88 |
+
|
89 |
+
|
90 |
+
class Test_measurements_select:
    """ndimage._measurements._select() is a utility used by other functions."""

    def test_basic(self):
        # Data with a distinct min (0) and max (6) inside the labeled region.
        x = [0, 1, 6, 2]
        cases = [
            ([0, 0, 1, 1], [0, 1]),  # "Small" integer labels
            ([0, 0, 9, 9], [0, 9]),  # A label larger than len(labels)
            ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]),  # Non-integer labels
        ]
        for labels, index in cases:
            # With no find_* flags set, _select returns an empty result list.
            result = ndimage._measurements._select(
                x, labels=labels, index=index)
            assert_(len(result) == 0)
            # find_max alone: one array of per-label maxima.
            result = ndimage._measurements._select(
                x, labels=labels, index=index, find_max=True)
            assert_(len(result) == 1)
            assert_array_equal(result[0], [1, 6])
            # find_min alone: one array of per-label minima.
            result = ndimage._measurements._select(
                x, labels=labels, index=index, find_min=True)
            assert_(len(result) == 1)
            assert_array_equal(result[0], [0, 2])
            # find_min + positions: minima plus their flat indices,
            # and the positions must come back as an integer dtype.
            result = ndimage._measurements._select(
                x, labels=labels, index=index, find_min=True,
                find_min_positions=True)
            assert_(len(result) == 2)
            assert_array_equal(result[0], [0, 2])
            assert_array_equal(result[1], [0, 3])
            assert_equal(result[1].dtype.kind, 'i')
            # find_max + positions: maxima plus their flat indices.
            result = ndimage._measurements._select(
                x, labels=labels, index=index, find_max=True,
                find_max_positions=True)
            assert_(len(result) == 2)
            assert_array_equal(result[0], [1, 6])
            assert_array_equal(result[1], [1, 2])
            assert_equal(result[1].dtype.kind, 'i')
|
126 |
+
|
127 |
+
|
128 |
+
def test_label01():
    """Labeling an all-ones 0-d array yields a single feature."""
    scalar = np.ones([])
    labeled, count = ndimage.label(scalar)
    assert_array_almost_equal(labeled, 1)
    assert_equal(count, 1)
|
133 |
+
|
134 |
+
|
135 |
+
def test_label02():
|
136 |
+
data = np.zeros([])
|
137 |
+
out, n = ndimage.label(data)
|
138 |
+
assert_array_almost_equal(out, 0)
|
139 |
+
assert_equal(n, 0)
|
140 |
+
|
141 |
+
|
142 |
+
def test_label03():
|
143 |
+
data = np.ones([1])
|
144 |
+
out, n = ndimage.label(data)
|
145 |
+
assert_array_almost_equal(out, [1])
|
146 |
+
assert_equal(n, 1)
|
147 |
+
|
148 |
+
|
149 |
+
def test_label04():
|
150 |
+
data = np.zeros([1])
|
151 |
+
out, n = ndimage.label(data)
|
152 |
+
assert_array_almost_equal(out, [0])
|
153 |
+
assert_equal(n, 0)
|
154 |
+
|
155 |
+
|
156 |
+
def test_label05():
|
157 |
+
data = np.ones([5])
|
158 |
+
out, n = ndimage.label(data)
|
159 |
+
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
|
160 |
+
assert_equal(n, 1)
|
161 |
+
|
162 |
+
|
163 |
+
def test_label06():
    """Three separate runs of ones in 1-D get three distinct labels."""
    signal = np.array([1, 0, 1, 1, 0, 1])
    labeled, count = ndimage.label(signal)
    assert_array_almost_equal(labeled, [1, 0, 2, 2, 0, 3])
    assert_equal(count, 3)
|
168 |
+
|
169 |
+
|
170 |
+
def test_label07():
|
171 |
+
data = np.array([[0, 0, 0, 0, 0, 0],
|
172 |
+
[0, 0, 0, 0, 0, 0],
|
173 |
+
[0, 0, 0, 0, 0, 0],
|
174 |
+
[0, 0, 0, 0, 0, 0],
|
175 |
+
[0, 0, 0, 0, 0, 0],
|
176 |
+
[0, 0, 0, 0, 0, 0]])
|
177 |
+
out, n = ndimage.label(data)
|
178 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
|
179 |
+
[0, 0, 0, 0, 0, 0],
|
180 |
+
[0, 0, 0, 0, 0, 0],
|
181 |
+
[0, 0, 0, 0, 0, 0],
|
182 |
+
[0, 0, 0, 0, 0, 0],
|
183 |
+
[0, 0, 0, 0, 0, 0]])
|
184 |
+
assert_equal(n, 0)
|
185 |
+
|
186 |
+
|
187 |
+
def test_label08():
|
188 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
189 |
+
[0, 0, 1, 1, 0, 0],
|
190 |
+
[0, 0, 1, 1, 1, 0],
|
191 |
+
[1, 1, 0, 0, 0, 0],
|
192 |
+
[1, 1, 0, 0, 0, 0],
|
193 |
+
[0, 0, 0, 1, 1, 0]])
|
194 |
+
out, n = ndimage.label(data)
|
195 |
+
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
|
196 |
+
[0, 0, 2, 2, 0, 0],
|
197 |
+
[0, 0, 2, 2, 2, 0],
|
198 |
+
[3, 3, 0, 0, 0, 0],
|
199 |
+
[3, 3, 0, 0, 0, 0],
|
200 |
+
[0, 0, 0, 4, 4, 0]])
|
201 |
+
assert_equal(n, 4)
|
202 |
+
|
203 |
+
|
204 |
+
def test_label09():
|
205 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
206 |
+
[0, 0, 1, 1, 0, 0],
|
207 |
+
[0, 0, 1, 1, 1, 0],
|
208 |
+
[1, 1, 0, 0, 0, 0],
|
209 |
+
[1, 1, 0, 0, 0, 0],
|
210 |
+
[0, 0, 0, 1, 1, 0]])
|
211 |
+
struct = ndimage.generate_binary_structure(2, 2)
|
212 |
+
out, n = ndimage.label(data, struct)
|
213 |
+
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
|
214 |
+
[0, 0, 2, 2, 0, 0],
|
215 |
+
[0, 0, 2, 2, 2, 0],
|
216 |
+
[2, 2, 0, 0, 0, 0],
|
217 |
+
[2, 2, 0, 0, 0, 0],
|
218 |
+
[0, 0, 0, 3, 3, 0]])
|
219 |
+
assert_equal(n, 3)
|
220 |
+
|
221 |
+
|
222 |
+
def test_label10():
|
223 |
+
data = np.array([[0, 0, 0, 0, 0, 0],
|
224 |
+
[0, 1, 1, 0, 1, 0],
|
225 |
+
[0, 1, 1, 1, 1, 0],
|
226 |
+
[0, 0, 0, 0, 0, 0]])
|
227 |
+
struct = ndimage.generate_binary_structure(2, 2)
|
228 |
+
out, n = ndimage.label(data, struct)
|
229 |
+
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
|
230 |
+
[0, 1, 1, 0, 1, 0],
|
231 |
+
[0, 1, 1, 1, 1, 0],
|
232 |
+
[0, 0, 0, 0, 0, 0]])
|
233 |
+
assert_equal(n, 1)
|
234 |
+
|
235 |
+
|
236 |
+
def test_label11():
|
237 |
+
for type in types:
|
238 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
239 |
+
[0, 0, 1, 1, 0, 0],
|
240 |
+
[0, 0, 1, 1, 1, 0],
|
241 |
+
[1, 1, 0, 0, 0, 0],
|
242 |
+
[1, 1, 0, 0, 0, 0],
|
243 |
+
[0, 0, 0, 1, 1, 0]], type)
|
244 |
+
out, n = ndimage.label(data)
|
245 |
+
expected = [[1, 0, 0, 0, 0, 0],
|
246 |
+
[0, 0, 2, 2, 0, 0],
|
247 |
+
[0, 0, 2, 2, 2, 0],
|
248 |
+
[3, 3, 0, 0, 0, 0],
|
249 |
+
[3, 3, 0, 0, 0, 0],
|
250 |
+
[0, 0, 0, 4, 4, 0]]
|
251 |
+
assert_array_almost_equal(out, expected)
|
252 |
+
assert_equal(n, 4)
|
253 |
+
|
254 |
+
|
255 |
+
def test_label11_inplace():
|
256 |
+
for type in types:
|
257 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
258 |
+
[0, 0, 1, 1, 0, 0],
|
259 |
+
[0, 0, 1, 1, 1, 0],
|
260 |
+
[1, 1, 0, 0, 0, 0],
|
261 |
+
[1, 1, 0, 0, 0, 0],
|
262 |
+
[0, 0, 0, 1, 1, 0]], type)
|
263 |
+
n = ndimage.label(data, output=data)
|
264 |
+
expected = [[1, 0, 0, 0, 0, 0],
|
265 |
+
[0, 0, 2, 2, 0, 0],
|
266 |
+
[0, 0, 2, 2, 2, 0],
|
267 |
+
[3, 3, 0, 0, 0, 0],
|
268 |
+
[3, 3, 0, 0, 0, 0],
|
269 |
+
[0, 0, 0, 4, 4, 0]]
|
270 |
+
assert_array_almost_equal(data, expected)
|
271 |
+
assert_equal(n, 4)
|
272 |
+
|
273 |
+
|
274 |
+
def test_label12():
|
275 |
+
for type in types:
|
276 |
+
data = np.array([[0, 0, 0, 0, 1, 1],
|
277 |
+
[0, 0, 0, 0, 0, 1],
|
278 |
+
[0, 0, 1, 0, 1, 1],
|
279 |
+
[0, 0, 1, 1, 1, 1],
|
280 |
+
[0, 0, 0, 1, 1, 0]], type)
|
281 |
+
out, n = ndimage.label(data)
|
282 |
+
expected = [[0, 0, 0, 0, 1, 1],
|
283 |
+
[0, 0, 0, 0, 0, 1],
|
284 |
+
[0, 0, 1, 0, 1, 1],
|
285 |
+
[0, 0, 1, 1, 1, 1],
|
286 |
+
[0, 0, 0, 1, 1, 0]]
|
287 |
+
assert_array_almost_equal(out, expected)
|
288 |
+
assert_equal(n, 1)
|
289 |
+
|
290 |
+
|
291 |
+
def test_label13():
|
292 |
+
for type in types:
|
293 |
+
data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
|
294 |
+
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
|
295 |
+
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
|
296 |
+
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
|
297 |
+
type)
|
298 |
+
out, n = ndimage.label(data)
|
299 |
+
expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
|
300 |
+
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
|
301 |
+
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
|
302 |
+
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
|
303 |
+
assert_array_almost_equal(out, expected)
|
304 |
+
assert_equal(n, 1)
|
305 |
+
|
306 |
+
|
307 |
+
def test_label_output_typed():
|
308 |
+
data = np.ones([5])
|
309 |
+
for t in types:
|
310 |
+
output = np.zeros([5], dtype=t)
|
311 |
+
n = ndimage.label(data, output=output)
|
312 |
+
assert_array_almost_equal(output, 1)
|
313 |
+
assert_equal(n, 1)
|
314 |
+
|
315 |
+
|
316 |
+
def test_label_output_dtype():
|
317 |
+
data = np.ones([5])
|
318 |
+
for t in types:
|
319 |
+
output, n = ndimage.label(data, output=t)
|
320 |
+
assert_array_almost_equal(output, 1)
|
321 |
+
assert output.dtype == t
|
322 |
+
|
323 |
+
|
324 |
+
def test_label_output_wrong_size():
|
325 |
+
data = np.ones([5])
|
326 |
+
for t in types:
|
327 |
+
output = np.zeros([10], t)
|
328 |
+
assert_raises((RuntimeError, ValueError),
|
329 |
+
ndimage.label, data, output=output)
|
330 |
+
|
331 |
+
|
332 |
+
def test_label_structuring_elements():
|
333 |
+
data = np.loadtxt(os.path.join(os.path.dirname(
|
334 |
+
__file__), "data", "label_inputs.txt"))
|
335 |
+
strels = np.loadtxt(os.path.join(
|
336 |
+
os.path.dirname(__file__), "data", "label_strels.txt"))
|
337 |
+
results = np.loadtxt(os.path.join(
|
338 |
+
os.path.dirname(__file__), "data", "label_results.txt"))
|
339 |
+
data = data.reshape((-1, 7, 7))
|
340 |
+
strels = strels.reshape((-1, 3, 3))
|
341 |
+
results = results.reshape((-1, 7, 7))
|
342 |
+
r = 0
|
343 |
+
for i in range(data.shape[0]):
|
344 |
+
d = data[i, :, :]
|
345 |
+
for j in range(strels.shape[0]):
|
346 |
+
s = strels[j, :, :]
|
347 |
+
assert_equal(ndimage.label(d, s)[0], results[r, :, :])
|
348 |
+
r += 1
|
349 |
+
|
350 |
+
|
351 |
+
def test_ticket_742():
|
352 |
+
def SE(img, thresh=.7, size=4):
|
353 |
+
mask = img > thresh
|
354 |
+
rank = len(mask.shape)
|
355 |
+
la, co = ndimage.label(mask,
|
356 |
+
ndimage.generate_binary_structure(rank, rank))
|
357 |
+
_ = ndimage.find_objects(la)
|
358 |
+
|
359 |
+
if np.dtype(np.intp) != np.dtype('i'):
|
360 |
+
shape = (3, 1240, 1240)
|
361 |
+
a = np.random.rand(np.prod(shape)).reshape(shape)
|
362 |
+
# shouldn't crash
|
363 |
+
SE(a)
|
364 |
+
|
365 |
+
|
366 |
+
def test_gh_issue_3025():
    """Github issue #3025 - improper merging of labels"""
    # Two large slabs joined only by a thin diagonal bridge must still be
    # a single component under full 3x3 connectivity.
    mask = np.zeros((60, 320))
    mask[:, :257] = 1
    mask[:, 260:] = 1
    mask[36, 257] = 1
    mask[35, 258] = 1
    mask[35, 259] = 1
    assert ndimage.label(mask, np.ones((3, 3)))[1] == 1
|
375 |
+
|
376 |
+
|
377 |
+
def test_label_default_dtype():
|
378 |
+
test_array = np.random.rand(10, 10)
|
379 |
+
label, no_features = ndimage.label(test_array > 0.5)
|
380 |
+
assert_(label.dtype in (np.int32, np.int64))
|
381 |
+
# Shouldn't raise an exception
|
382 |
+
ndimage.find_objects(label)
|
383 |
+
|
384 |
+
|
385 |
+
def test_find_objects01():
|
386 |
+
data = np.ones([], dtype=int)
|
387 |
+
out = ndimage.find_objects(data)
|
388 |
+
assert_(out == [()])
|
389 |
+
|
390 |
+
|
391 |
+
def test_find_objects02():
|
392 |
+
data = np.zeros([], dtype=int)
|
393 |
+
out = ndimage.find_objects(data)
|
394 |
+
assert_(out == [])
|
395 |
+
|
396 |
+
|
397 |
+
def test_find_objects03():
|
398 |
+
data = np.ones([1], dtype=int)
|
399 |
+
out = ndimage.find_objects(data)
|
400 |
+
assert_equal(out, [(slice(0, 1, None),)])
|
401 |
+
|
402 |
+
|
403 |
+
def test_find_objects04():
|
404 |
+
data = np.zeros([1], dtype=int)
|
405 |
+
out = ndimage.find_objects(data)
|
406 |
+
assert_equal(out, [])
|
407 |
+
|
408 |
+
|
409 |
+
def test_find_objects05():
|
410 |
+
data = np.ones([5], dtype=int)
|
411 |
+
out = ndimage.find_objects(data)
|
412 |
+
assert_equal(out, [(slice(0, 5, None),)])
|
413 |
+
|
414 |
+
|
415 |
+
def test_find_objects06():
    """Each labeled 1-D segment maps to its own slice tuple."""
    labeled = np.array([1, 0, 2, 2, 0, 3])
    found = ndimage.find_objects(labeled)
    expected = [(slice(0, 1, None),),
                (slice(2, 4, None),),
                (slice(5, 6, None),)]
    assert_equal(found, expected)
|
421 |
+
|
422 |
+
|
423 |
+
def test_find_objects07():
|
424 |
+
data = np.array([[0, 0, 0, 0, 0, 0],
|
425 |
+
[0, 0, 0, 0, 0, 0],
|
426 |
+
[0, 0, 0, 0, 0, 0],
|
427 |
+
[0, 0, 0, 0, 0, 0],
|
428 |
+
[0, 0, 0, 0, 0, 0],
|
429 |
+
[0, 0, 0, 0, 0, 0]])
|
430 |
+
out = ndimage.find_objects(data)
|
431 |
+
assert_equal(out, [])
|
432 |
+
|
433 |
+
|
434 |
+
def test_find_objects08():
|
435 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
436 |
+
[0, 0, 2, 2, 0, 0],
|
437 |
+
[0, 0, 2, 2, 2, 0],
|
438 |
+
[3, 3, 0, 0, 0, 0],
|
439 |
+
[3, 3, 0, 0, 0, 0],
|
440 |
+
[0, 0, 0, 4, 4, 0]])
|
441 |
+
out = ndimage.find_objects(data)
|
442 |
+
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
|
443 |
+
(slice(1, 3, None), slice(2, 5, None)),
|
444 |
+
(slice(3, 5, None), slice(0, 2, None)),
|
445 |
+
(slice(5, 6, None), slice(3, 5, None))])
|
446 |
+
|
447 |
+
|
448 |
+
def test_find_objects09():
|
449 |
+
data = np.array([[1, 0, 0, 0, 0, 0],
|
450 |
+
[0, 0, 2, 2, 0, 0],
|
451 |
+
[0, 0, 2, 2, 2, 0],
|
452 |
+
[0, 0, 0, 0, 0, 0],
|
453 |
+
[0, 0, 0, 0, 0, 0],
|
454 |
+
[0, 0, 0, 4, 4, 0]])
|
455 |
+
out = ndimage.find_objects(data)
|
456 |
+
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
|
457 |
+
(slice(1, 3, None), slice(2, 5, None)),
|
458 |
+
None,
|
459 |
+
(slice(5, 6, None), slice(3, 5, None))])
|
460 |
+
|
461 |
+
|
462 |
+
def test_value_indices01():
    """Keys and entries of value_indices() match a direct np.where lookup."""
    image = np.array([[1, 0, 0, 0, 0, 0],
                      [0, 0, 2, 2, 0, 0],
                      [0, 0, 2, 2, 2, 0],
                      [0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 4, 4, 0]])
    found = ndimage.value_indices(image, ignore_value=0)
    expected_keys = [1, 2, 4]
    # The ignored value 0 must not appear among the keys.
    assert_equal(list(found.keys()), expected_keys)

    # Each value's index tuple must equal what np.where reports.
    expected = {k: np.where(image == k) for k in expected_keys}
    found = ndimage.value_indices(image, ignore_value=0)
    assert_equal(found, expected)
|
480 |
+
|
481 |
+
|
482 |
+
def test_value_indices02():
|
483 |
+
"Test input checking"
|
484 |
+
data = np.zeros((5, 4), dtype=np.float32)
|
485 |
+
msg = "Parameter 'arr' must be an integer array"
|
486 |
+
with assert_raises(ValueError, match=msg):
|
487 |
+
ndimage.value_indices(data)
|
488 |
+
|
489 |
+
|
490 |
+
def test_value_indices03():
|
491 |
+
"Test different input array shapes, from 1-D to 4-D"
|
492 |
+
for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
|
493 |
+
a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
|
494 |
+
trueKeys = np.unique(a)
|
495 |
+
vi = ndimage.value_indices(a)
|
496 |
+
assert_equal(list(vi.keys()), list(trueKeys))
|
497 |
+
for k in trueKeys:
|
498 |
+
trueNdx = np.where(a == k)
|
499 |
+
assert_equal(vi[k], trueNdx)
|
500 |
+
|
501 |
+
|
502 |
+
def test_sum01():
|
503 |
+
for type in types:
|
504 |
+
input = np.array([], type)
|
505 |
+
output = ndimage.sum(input)
|
506 |
+
assert_equal(output, 0.0)
|
507 |
+
|
508 |
+
|
509 |
+
def test_sum02():
|
510 |
+
for type in types:
|
511 |
+
input = np.zeros([0, 4], type)
|
512 |
+
output = ndimage.sum(input)
|
513 |
+
assert_equal(output, 0.0)
|
514 |
+
|
515 |
+
|
516 |
+
def test_sum03():
|
517 |
+
for type in types:
|
518 |
+
input = np.ones([], type)
|
519 |
+
output = ndimage.sum(input)
|
520 |
+
assert_almost_equal(output, 1.0)
|
521 |
+
|
522 |
+
|
523 |
+
def test_sum04():
|
524 |
+
for type in types:
|
525 |
+
input = np.array([1, 2], type)
|
526 |
+
output = ndimage.sum(input)
|
527 |
+
assert_almost_equal(output, 3.0)
|
528 |
+
|
529 |
+
|
530 |
+
def test_sum05():
|
531 |
+
for type in types:
|
532 |
+
input = np.array([[1, 2], [3, 4]], type)
|
533 |
+
output = ndimage.sum(input)
|
534 |
+
assert_almost_equal(output, 10.0)
|
535 |
+
|
536 |
+
|
537 |
+
def test_sum06():
|
538 |
+
labels = np.array([], bool)
|
539 |
+
for type in types:
|
540 |
+
input = np.array([], type)
|
541 |
+
output = ndimage.sum(input, labels=labels)
|
542 |
+
assert_equal(output, 0.0)
|
543 |
+
|
544 |
+
|
545 |
+
def test_sum07():
|
546 |
+
labels = np.ones([0, 4], bool)
|
547 |
+
for type in types:
|
548 |
+
input = np.zeros([0, 4], type)
|
549 |
+
output = ndimage.sum(input, labels=labels)
|
550 |
+
assert_equal(output, 0.0)
|
551 |
+
|
552 |
+
|
553 |
+
def test_sum08():
|
554 |
+
labels = np.array([1, 0], bool)
|
555 |
+
for type in types:
|
556 |
+
input = np.array([1, 2], type)
|
557 |
+
output = ndimage.sum(input, labels=labels)
|
558 |
+
assert_equal(output, 1.0)
|
559 |
+
|
560 |
+
|
561 |
+
def test_sum09():
|
562 |
+
labels = np.array([1, 0], bool)
|
563 |
+
for type in types:
|
564 |
+
input = np.array([[1, 2], [3, 4]], type)
|
565 |
+
output = ndimage.sum(input, labels=labels)
|
566 |
+
assert_almost_equal(output, 4.0)
|
567 |
+
|
568 |
+
|
569 |
+
def test_sum10():
|
570 |
+
labels = np.array([1, 0], bool)
|
571 |
+
input = np.array([[1, 2], [3, 4]], bool)
|
572 |
+
output = ndimage.sum(input, labels=labels)
|
573 |
+
assert_almost_equal(output, 2.0)
|
574 |
+
|
575 |
+
|
576 |
+
def test_sum11():
|
577 |
+
labels = np.array([1, 2], np.int8)
|
578 |
+
for type in types:
|
579 |
+
input = np.array([[1, 2], [3, 4]], type)
|
580 |
+
output = ndimage.sum(input, labels=labels,
|
581 |
+
index=2)
|
582 |
+
assert_almost_equal(output, 6.0)
|
583 |
+
|
584 |
+
|
585 |
+
def test_sum12():
|
586 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
587 |
+
for type in types:
|
588 |
+
input = np.array([[1, 2], [3, 4]], type)
|
589 |
+
output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
|
590 |
+
assert_array_almost_equal(output, [4.0, 0.0, 5.0])
|
591 |
+
|
592 |
+
|
593 |
+
def test_sum_labels():
|
594 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
595 |
+
for type in types:
|
596 |
+
input = np.array([[1, 2], [3, 4]], type)
|
597 |
+
output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
|
598 |
+
output_labels = ndimage.sum_labels(
|
599 |
+
input, labels=labels, index=[4, 8, 2])
|
600 |
+
|
601 |
+
assert (output_sum == output_labels).all()
|
602 |
+
assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
|
603 |
+
|
604 |
+
|
605 |
+
def test_mean01():
|
606 |
+
labels = np.array([1, 0], bool)
|
607 |
+
for type in types:
|
608 |
+
input = np.array([[1, 2], [3, 4]], type)
|
609 |
+
output = ndimage.mean(input, labels=labels)
|
610 |
+
assert_almost_equal(output, 2.0)
|
611 |
+
|
612 |
+
|
613 |
+
def test_mean02():
|
614 |
+
labels = np.array([1, 0], bool)
|
615 |
+
input = np.array([[1, 2], [3, 4]], bool)
|
616 |
+
output = ndimage.mean(input, labels=labels)
|
617 |
+
assert_almost_equal(output, 1.0)
|
618 |
+
|
619 |
+
|
620 |
+
def test_mean03():
|
621 |
+
labels = np.array([1, 2])
|
622 |
+
for type in types:
|
623 |
+
input = np.array([[1, 2], [3, 4]], type)
|
624 |
+
output = ndimage.mean(input, labels=labels,
|
625 |
+
index=2)
|
626 |
+
assert_almost_equal(output, 3.0)
|
627 |
+
|
628 |
+
|
629 |
+
def test_mean04():
|
630 |
+
labels = np.array([[1, 2], [2, 4]], np.int8)
|
631 |
+
with np.errstate(all='ignore'):
|
632 |
+
for type in types:
|
633 |
+
input = np.array([[1, 2], [3, 4]], type)
|
634 |
+
output = ndimage.mean(input, labels=labels,
|
635 |
+
index=[4, 8, 2])
|
636 |
+
assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
|
637 |
+
assert_(np.isnan(output[1]))
|
638 |
+
|
639 |
+
|
640 |
+
def test_minimum01():
|
641 |
+
labels = np.array([1, 0], bool)
|
642 |
+
for type in types:
|
643 |
+
input = np.array([[1, 2], [3, 4]], type)
|
644 |
+
output = ndimage.minimum(input, labels=labels)
|
645 |
+
assert_almost_equal(output, 1.0)
|
646 |
+
|
647 |
+
|
648 |
+
def test_minimum02():
|
649 |
+
labels = np.array([1, 0], bool)
|
650 |
+
input = np.array([[2, 2], [2, 4]], bool)
|
651 |
+
output = ndimage.minimum(input, labels=labels)
|
652 |
+
assert_almost_equal(output, 1.0)
|
653 |
+
|
654 |
+
|
655 |
+
def test_minimum03():
|
656 |
+
labels = np.array([1, 2])
|
657 |
+
for type in types:
|
658 |
+
input = np.array([[1, 2], [3, 4]], type)
|
659 |
+
output = ndimage.minimum(input, labels=labels,
|
660 |
+
index=2)
|
661 |
+
assert_almost_equal(output, 2.0)
|
662 |
+
|
663 |
+
|
664 |
+
def test_minimum04():
|
665 |
+
labels = np.array([[1, 2], [2, 3]])
|
666 |
+
for type in types:
|
667 |
+
input = np.array([[1, 2], [3, 4]], type)
|
668 |
+
output = ndimage.minimum(input, labels=labels,
|
669 |
+
index=[2, 3, 8])
|
670 |
+
assert_array_almost_equal(output, [2.0, 4.0, 0.0])
|
671 |
+
|
672 |
+
|
673 |
+
def test_maximum01():
|
674 |
+
labels = np.array([1, 0], bool)
|
675 |
+
for type in types:
|
676 |
+
input = np.array([[1, 2], [3, 4]], type)
|
677 |
+
output = ndimage.maximum(input, labels=labels)
|
678 |
+
assert_almost_equal(output, 3.0)
|
679 |
+
|
680 |
+
|
681 |
+
def test_maximum02():
|
682 |
+
labels = np.array([1, 0], bool)
|
683 |
+
input = np.array([[2, 2], [2, 4]], bool)
|
684 |
+
output = ndimage.maximum(input, labels=labels)
|
685 |
+
assert_almost_equal(output, 1.0)
|
686 |
+
|
687 |
+
|
688 |
+
def test_maximum03():
|
689 |
+
labels = np.array([1, 2])
|
690 |
+
for type in types:
|
691 |
+
input = np.array([[1, 2], [3, 4]], type)
|
692 |
+
output = ndimage.maximum(input, labels=labels,
|
693 |
+
index=2)
|
694 |
+
assert_almost_equal(output, 4.0)
|
695 |
+
|
696 |
+
|
697 |
+
def test_maximum04():
|
698 |
+
labels = np.array([[1, 2], [2, 3]])
|
699 |
+
for type in types:
|
700 |
+
input = np.array([[1, 2], [3, 4]], type)
|
701 |
+
output = ndimage.maximum(input, labels=labels,
|
702 |
+
index=[2, 3, 8])
|
703 |
+
assert_array_almost_equal(output, [3.0, 4.0, 0.0])
|
704 |
+
|
705 |
+
|
706 |
+
def test_maximum05():
|
707 |
+
# Regression test for ticket #501 (Trac)
|
708 |
+
x = np.array([-3, -2, -1])
|
709 |
+
assert_equal(ndimage.maximum(x), -1)
|
710 |
+
|
711 |
+
|
712 |
+
def test_median01():
|
713 |
+
a = np.array([[1, 2, 0, 1],
|
714 |
+
[5, 3, 0, 4],
|
715 |
+
[0, 0, 0, 7],
|
716 |
+
[9, 3, 0, 0]])
|
717 |
+
labels = np.array([[1, 1, 0, 2],
|
718 |
+
[1, 1, 0, 2],
|
719 |
+
[0, 0, 0, 2],
|
720 |
+
[3, 3, 0, 0]])
|
721 |
+
output = ndimage.median(a, labels=labels, index=[1, 2, 3])
|
722 |
+
assert_array_almost_equal(output, [2.5, 4.0, 6.0])
|
723 |
+
|
724 |
+
|
725 |
+
def test_median02():
|
726 |
+
a = np.array([[1, 2, 0, 1],
|
727 |
+
[5, 3, 0, 4],
|
728 |
+
[0, 0, 0, 7],
|
729 |
+
[9, 3, 0, 0]])
|
730 |
+
output = ndimage.median(a)
|
731 |
+
assert_almost_equal(output, 1.0)
|
732 |
+
|
733 |
+
|
734 |
+
def test_median03():
|
735 |
+
a = np.array([[1, 2, 0, 1],
|
736 |
+
[5, 3, 0, 4],
|
737 |
+
[0, 0, 0, 7],
|
738 |
+
[9, 3, 0, 0]])
|
739 |
+
labels = np.array([[1, 1, 0, 2],
|
740 |
+
[1, 1, 0, 2],
|
741 |
+
[0, 0, 0, 2],
|
742 |
+
[3, 3, 0, 0]])
|
743 |
+
output = ndimage.median(a, labels=labels)
|
744 |
+
assert_almost_equal(output, 3.0)
|
745 |
+
|
746 |
+
|
747 |
+
def test_median_gh12836_bool():
|
748 |
+
# test boolean addition fix on example from gh-12836
|
749 |
+
a = np.asarray([1, 1], dtype=bool)
|
750 |
+
output = ndimage.median(a, labels=np.ones((2,)), index=[1])
|
751 |
+
assert_array_almost_equal(output, [1.0])
|
752 |
+
|
753 |
+
|
754 |
+
def test_median_no_int_overflow():
|
755 |
+
# test integer overflow fix on example from gh-12836
|
756 |
+
a = np.asarray([65, 70], dtype=np.int8)
|
757 |
+
output = ndimage.median(a, labels=np.ones((2,)), index=[1])
|
758 |
+
assert_array_almost_equal(output, [67.5])
|
759 |
+
|
760 |
+
|
761 |
+
def test_variance01():
|
762 |
+
with np.errstate(all='ignore'):
|
763 |
+
for type in types:
|
764 |
+
input = np.array([], type)
|
765 |
+
with suppress_warnings() as sup:
|
766 |
+
sup.filter(RuntimeWarning, "Mean of empty slice")
|
767 |
+
output = ndimage.variance(input)
|
768 |
+
assert_(np.isnan(output))
|
769 |
+
|
770 |
+
|
771 |
+
def test_variance02():
|
772 |
+
for type in types:
|
773 |
+
input = np.array([1], type)
|
774 |
+
output = ndimage.variance(input)
|
775 |
+
assert_almost_equal(output, 0.0)
|
776 |
+
|
777 |
+
|
778 |
+
def test_variance03():
|
779 |
+
for type in types:
|
780 |
+
input = np.array([1, 3], type)
|
781 |
+
output = ndimage.variance(input)
|
782 |
+
assert_almost_equal(output, 1.0)
|
783 |
+
|
784 |
+
|
785 |
+
def test_variance04():
|
786 |
+
input = np.array([1, 0], bool)
|
787 |
+
output = ndimage.variance(input)
|
788 |
+
assert_almost_equal(output, 0.25)
|
789 |
+
|
790 |
+
|
791 |
+
def test_variance05():
|
792 |
+
labels = [2, 2, 3]
|
793 |
+
for type in types:
|
794 |
+
input = np.array([1, 3, 8], type)
|
795 |
+
output = ndimage.variance(input, labels, 2)
|
796 |
+
assert_almost_equal(output, 1.0)
|
797 |
+
|
798 |
+
|
799 |
+
def test_variance06():
|
800 |
+
labels = [2, 2, 3, 3, 4]
|
801 |
+
with np.errstate(all='ignore'):
|
802 |
+
for type in types:
|
803 |
+
input = np.array([1, 3, 8, 10, 8], type)
|
804 |
+
output = ndimage.variance(input, labels, [2, 3, 4])
|
805 |
+
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
|
806 |
+
|
807 |
+
|
808 |
+
def test_standard_deviation01():
|
809 |
+
with np.errstate(all='ignore'):
|
810 |
+
for type in types:
|
811 |
+
input = np.array([], type)
|
812 |
+
with suppress_warnings() as sup:
|
813 |
+
sup.filter(RuntimeWarning, "Mean of empty slice")
|
814 |
+
output = ndimage.standard_deviation(input)
|
815 |
+
assert_(np.isnan(output))
|
816 |
+
|
817 |
+
|
818 |
+
def test_standard_deviation02():
|
819 |
+
for type in types:
|
820 |
+
input = np.array([1], type)
|
821 |
+
output = ndimage.standard_deviation(input)
|
822 |
+
assert_almost_equal(output, 0.0)
|
823 |
+
|
824 |
+
|
825 |
+
def test_standard_deviation03():
|
826 |
+
for type in types:
|
827 |
+
input = np.array([1, 3], type)
|
828 |
+
output = ndimage.standard_deviation(input)
|
829 |
+
assert_almost_equal(output, np.sqrt(1.0))
|
830 |
+
|
831 |
+
|
832 |
+
def test_standard_deviation04():
|
833 |
+
input = np.array([1, 0], bool)
|
834 |
+
output = ndimage.standard_deviation(input)
|
835 |
+
assert_almost_equal(output, 0.5)
|
836 |
+
|
837 |
+
|
838 |
+
def test_standard_deviation05():
|
839 |
+
labels = [2, 2, 3]
|
840 |
+
for type in types:
|
841 |
+
input = np.array([1, 3, 8], type)
|
842 |
+
output = ndimage.standard_deviation(input, labels, 2)
|
843 |
+
assert_almost_equal(output, 1.0)
|
844 |
+
|
845 |
+
|
846 |
+
def test_standard_deviation06():
|
847 |
+
labels = [2, 2, 3, 3, 4]
|
848 |
+
with np.errstate(all='ignore'):
|
849 |
+
for type in types:
|
850 |
+
input = np.array([1, 3, 8, 10, 8], type)
|
851 |
+
output = ndimage.standard_deviation(input, labels, [2, 3, 4])
|
852 |
+
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
|
853 |
+
|
854 |
+
|
855 |
+
def test_standard_deviation07():
|
856 |
+
labels = [1]
|
857 |
+
with np.errstate(all='ignore'):
|
858 |
+
for type in types:
|
859 |
+
input = np.array([-0.00619519], type)
|
860 |
+
output = ndimage.standard_deviation(input, labels, [1])
|
861 |
+
assert_array_almost_equal(output, [0])
|
862 |
+
|
863 |
+
|
864 |
+
def test_minimum_position01():
|
865 |
+
labels = np.array([1, 0], bool)
|
866 |
+
for type in types:
|
867 |
+
input = np.array([[1, 2], [3, 4]], type)
|
868 |
+
output = ndimage.minimum_position(input, labels=labels)
|
869 |
+
assert_equal(output, (0, 0))
|
870 |
+
|
871 |
+
|
872 |
+
def test_minimum_position02():
|
873 |
+
for type in types:
|
874 |
+
input = np.array([[5, 4, 2, 5],
|
875 |
+
[3, 7, 0, 2],
|
876 |
+
[1, 5, 1, 1]], type)
|
877 |
+
output = ndimage.minimum_position(input)
|
878 |
+
assert_equal(output, (1, 2))
|
879 |
+
|
880 |
+
|
881 |
+
def test_minimum_position03():
|
882 |
+
input = np.array([[5, 4, 2, 5],
|
883 |
+
[3, 7, 0, 2],
|
884 |
+
[1, 5, 1, 1]], bool)
|
885 |
+
output = ndimage.minimum_position(input)
|
886 |
+
assert_equal(output, (1, 2))
|
887 |
+
|
888 |
+
|
889 |
+
def test_minimum_position04():
|
890 |
+
input = np.array([[5, 4, 2, 5],
|
891 |
+
[3, 7, 1, 2],
|
892 |
+
[1, 5, 1, 1]], bool)
|
893 |
+
output = ndimage.minimum_position(input)
|
894 |
+
assert_equal(output, (0, 0))
|
895 |
+
|
896 |
+
|
897 |
+
def test_minimum_position05():
|
898 |
+
labels = [1, 2, 0, 4]
|
899 |
+
for type in types:
|
900 |
+
input = np.array([[5, 4, 2, 5],
|
901 |
+
[3, 7, 0, 2],
|
902 |
+
[1, 5, 2, 3]], type)
|
903 |
+
output = ndimage.minimum_position(input, labels)
|
904 |
+
assert_equal(output, (2, 0))
|
905 |
+
|
906 |
+
|
907 |
+
def test_minimum_position06():
|
908 |
+
labels = [1, 2, 3, 4]
|
909 |
+
for type in types:
|
910 |
+
input = np.array([[5, 4, 2, 5],
|
911 |
+
[3, 7, 0, 2],
|
912 |
+
[1, 5, 1, 1]], type)
|
913 |
+
output = ndimage.minimum_position(input, labels, 2)
|
914 |
+
assert_equal(output, (0, 1))
|
915 |
+
|
916 |
+
|
917 |
+
def test_minimum_position07():
|
918 |
+
labels = [1, 2, 3, 4]
|
919 |
+
for type in types:
|
920 |
+
input = np.array([[5, 4, 2, 5],
|
921 |
+
[3, 7, 0, 2],
|
922 |
+
[1, 5, 1, 1]], type)
|
923 |
+
output = ndimage.minimum_position(input, labels,
|
924 |
+
[2, 3])
|
925 |
+
assert_equal(output[0], (0, 1))
|
926 |
+
assert_equal(output[1], (1, 2))
|
927 |
+
|
928 |
+
|
929 |
+
def test_maximum_position01():
|
930 |
+
labels = np.array([1, 0], bool)
|
931 |
+
for type in types:
|
932 |
+
input = np.array([[1, 2], [3, 4]], type)
|
933 |
+
output = ndimage.maximum_position(input,
|
934 |
+
labels=labels)
|
935 |
+
assert_equal(output, (1, 0))
|
936 |
+
|
937 |
+
|
938 |
+
def test_maximum_position02():
|
939 |
+
for type in types:
|
940 |
+
input = np.array([[5, 4, 2, 5],
|
941 |
+
[3, 7, 8, 2],
|
942 |
+
[1, 5, 1, 1]], type)
|
943 |
+
output = ndimage.maximum_position(input)
|
944 |
+
assert_equal(output, (1, 2))
|
945 |
+
|
946 |
+
|
947 |
+
def test_maximum_position03():
|
948 |
+
input = np.array([[5, 4, 2, 5],
|
949 |
+
[3, 7, 8, 2],
|
950 |
+
[1, 5, 1, 1]], bool)
|
951 |
+
output = ndimage.maximum_position(input)
|
952 |
+
assert_equal(output, (0, 0))
|
953 |
+
|
954 |
+
|
955 |
+
def test_maximum_position04():
|
956 |
+
labels = [1, 2, 0, 4]
|
957 |
+
for type in types:
|
958 |
+
input = np.array([[5, 4, 2, 5],
|
959 |
+
[3, 7, 8, 2],
|
960 |
+
[1, 5, 1, 1]], type)
|
961 |
+
output = ndimage.maximum_position(input, labels)
|
962 |
+
assert_equal(output, (1, 1))
|
963 |
+
|
964 |
+
|
965 |
+
def test_maximum_position05():
|
966 |
+
labels = [1, 2, 0, 4]
|
967 |
+
for type in types:
|
968 |
+
input = np.array([[5, 4, 2, 5],
|
969 |
+
[3, 7, 8, 2],
|
970 |
+
[1, 5, 1, 1]], type)
|
971 |
+
output = ndimage.maximum_position(input, labels, 1)
|
972 |
+
assert_equal(output, (0, 0))
|
973 |
+
|
974 |
+
|
975 |
+
def test_maximum_position06():
|
976 |
+
labels = [1, 2, 0, 4]
|
977 |
+
for type in types:
|
978 |
+
input = np.array([[5, 4, 2, 5],
|
979 |
+
[3, 7, 8, 2],
|
980 |
+
[1, 5, 1, 1]], type)
|
981 |
+
output = ndimage.maximum_position(input, labels,
|
982 |
+
[1, 2])
|
983 |
+
assert_equal(output[0], (0, 0))
|
984 |
+
assert_equal(output[1], (1, 1))
|
985 |
+
|
986 |
+
|
987 |
+
def test_maximum_position07():
|
988 |
+
# Test float labels
|
989 |
+
labels = np.array([1.0, 2.5, 0.0, 4.5])
|
990 |
+
for type in types:
|
991 |
+
input = np.array([[5, 4, 2, 5],
|
992 |
+
[3, 7, 8, 2],
|
993 |
+
[1, 5, 1, 1]], type)
|
994 |
+
output = ndimage.maximum_position(input, labels,
|
995 |
+
[1.0, 4.5])
|
996 |
+
assert_equal(output[0], (0, 0))
|
997 |
+
assert_equal(output[1], (0, 3))
|
998 |
+
|
999 |
+
|
1000 |
+
def test_extrema01():
|
1001 |
+
labels = np.array([1, 0], bool)
|
1002 |
+
for type in types:
|
1003 |
+
input = np.array([[1, 2], [3, 4]], type)
|
1004 |
+
output1 = ndimage.extrema(input, labels=labels)
|
1005 |
+
output2 = ndimage.minimum(input, labels=labels)
|
1006 |
+
output3 = ndimage.maximum(input, labels=labels)
|
1007 |
+
output4 = ndimage.minimum_position(input,
|
1008 |
+
labels=labels)
|
1009 |
+
output5 = ndimage.maximum_position(input,
|
1010 |
+
labels=labels)
|
1011 |
+
assert_equal(output1, (output2, output3, output4, output5))
|
1012 |
+
|
1013 |
+
|
1014 |
+
def test_extrema02():
|
1015 |
+
labels = np.array([1, 2])
|
1016 |
+
for type in types:
|
1017 |
+
input = np.array([[1, 2], [3, 4]], type)
|
1018 |
+
output1 = ndimage.extrema(input, labels=labels,
|
1019 |
+
index=2)
|
1020 |
+
output2 = ndimage.minimum(input, labels=labels,
|
1021 |
+
index=2)
|
1022 |
+
output3 = ndimage.maximum(input, labels=labels,
|
1023 |
+
index=2)
|
1024 |
+
output4 = ndimage.minimum_position(input,
|
1025 |
+
labels=labels, index=2)
|
1026 |
+
output5 = ndimage.maximum_position(input,
|
1027 |
+
labels=labels, index=2)
|
1028 |
+
assert_equal(output1, (output2, output3, output4, output5))
|
1029 |
+
|
1030 |
+
|
1031 |
+
def test_extrema03():
|
1032 |
+
labels = np.array([[1, 2], [2, 3]])
|
1033 |
+
for type in types:
|
1034 |
+
input = np.array([[1, 2], [3, 4]], type)
|
1035 |
+
output1 = ndimage.extrema(input, labels=labels,
|
1036 |
+
index=[2, 3, 8])
|
1037 |
+
output2 = ndimage.minimum(input, labels=labels,
|
1038 |
+
index=[2, 3, 8])
|
1039 |
+
output3 = ndimage.maximum(input, labels=labels,
|
1040 |
+
index=[2, 3, 8])
|
1041 |
+
output4 = ndimage.minimum_position(input,
|
1042 |
+
labels=labels, index=[2, 3, 8])
|
1043 |
+
output5 = ndimage.maximum_position(input,
|
1044 |
+
labels=labels, index=[2, 3, 8])
|
1045 |
+
assert_array_almost_equal(output1[0], output2)
|
1046 |
+
assert_array_almost_equal(output1[1], output3)
|
1047 |
+
assert_array_almost_equal(output1[2], output4)
|
1048 |
+
assert_array_almost_equal(output1[3], output5)
|
1049 |
+
|
1050 |
+
|
1051 |
+
def test_extrema04():
|
1052 |
+
labels = [1, 2, 0, 4]
|
1053 |
+
for type in types:
|
1054 |
+
input = np.array([[5, 4, 2, 5],
|
1055 |
+
[3, 7, 8, 2],
|
1056 |
+
[1, 5, 1, 1]], type)
|
1057 |
+
output1 = ndimage.extrema(input, labels, [1, 2])
|
1058 |
+
output2 = ndimage.minimum(input, labels, [1, 2])
|
1059 |
+
output3 = ndimage.maximum(input, labels, [1, 2])
|
1060 |
+
output4 = ndimage.minimum_position(input, labels,
|
1061 |
+
[1, 2])
|
1062 |
+
output5 = ndimage.maximum_position(input, labels,
|
1063 |
+
[1, 2])
|
1064 |
+
assert_array_almost_equal(output1[0], output2)
|
1065 |
+
assert_array_almost_equal(output1[1], output3)
|
1066 |
+
assert_array_almost_equal(output1[2], output4)
|
1067 |
+
assert_array_almost_equal(output1[3], output5)
|
1068 |
+
|
1069 |
+
|
1070 |
+
def test_center_of_mass01():
|
1071 |
+
expected = [0.0, 0.0]
|
1072 |
+
for type in types:
|
1073 |
+
input = np.array([[1, 0], [0, 0]], type)
|
1074 |
+
output = ndimage.center_of_mass(input)
|
1075 |
+
assert_array_almost_equal(output, expected)
|
1076 |
+
|
1077 |
+
|
1078 |
+
def test_center_of_mass02():
|
1079 |
+
expected = [1, 0]
|
1080 |
+
for type in types:
|
1081 |
+
input = np.array([[0, 0], [1, 0]], type)
|
1082 |
+
output = ndimage.center_of_mass(input)
|
1083 |
+
assert_array_almost_equal(output, expected)
|
1084 |
+
|
1085 |
+
|
1086 |
+
def test_center_of_mass03():
|
1087 |
+
expected = [0, 1]
|
1088 |
+
for type in types:
|
1089 |
+
input = np.array([[0, 1], [0, 0]], type)
|
1090 |
+
output = ndimage.center_of_mass(input)
|
1091 |
+
assert_array_almost_equal(output, expected)
|
1092 |
+
|
1093 |
+
|
1094 |
+
def test_center_of_mass04():
|
1095 |
+
expected = [1, 1]
|
1096 |
+
for type in types:
|
1097 |
+
input = np.array([[0, 0], [0, 1]], type)
|
1098 |
+
output = ndimage.center_of_mass(input)
|
1099 |
+
assert_array_almost_equal(output, expected)
|
1100 |
+
|
1101 |
+
|
1102 |
+
def test_center_of_mass05():
|
1103 |
+
expected = [0.5, 0.5]
|
1104 |
+
for type in types:
|
1105 |
+
input = np.array([[1, 1], [1, 1]], type)
|
1106 |
+
output = ndimage.center_of_mass(input)
|
1107 |
+
assert_array_almost_equal(output, expected)
|
1108 |
+
|
1109 |
+
|
1110 |
+
def test_center_of_mass06():
|
1111 |
+
expected = [0.5, 0.5]
|
1112 |
+
input = np.array([[1, 2], [3, 1]], bool)
|
1113 |
+
output = ndimage.center_of_mass(input)
|
1114 |
+
assert_array_almost_equal(output, expected)
|
1115 |
+
|
1116 |
+
|
1117 |
+
def test_center_of_mass07():
|
1118 |
+
labels = [1, 0]
|
1119 |
+
expected = [0.5, 0.0]
|
1120 |
+
input = np.array([[1, 2], [3, 1]], bool)
|
1121 |
+
output = ndimage.center_of_mass(input, labels)
|
1122 |
+
assert_array_almost_equal(output, expected)
|
1123 |
+
|
1124 |
+
|
1125 |
+
def test_center_of_mass08():
|
1126 |
+
labels = [1, 2]
|
1127 |
+
expected = [0.5, 1.0]
|
1128 |
+
input = np.array([[5, 2], [3, 1]], bool)
|
1129 |
+
output = ndimage.center_of_mass(input, labels, 2)
|
1130 |
+
assert_array_almost_equal(output, expected)
|
1131 |
+
|
1132 |
+
|
1133 |
+
def test_center_of_mass09():
|
1134 |
+
labels = [1, 2]
|
1135 |
+
expected = [(0.5, 0.0), (0.5, 1.0)]
|
1136 |
+
input = np.array([[1, 2], [1, 1]], bool)
|
1137 |
+
output = ndimage.center_of_mass(input, labels, [1, 2])
|
1138 |
+
assert_array_almost_equal(output, expected)
|
1139 |
+
|
1140 |
+
|
1141 |
+
def test_histogram01():
|
1142 |
+
expected = np.ones(10)
|
1143 |
+
input = np.arange(10)
|
1144 |
+
output = ndimage.histogram(input, 0, 10, 10)
|
1145 |
+
assert_array_almost_equal(output, expected)
|
1146 |
+
|
1147 |
+
|
1148 |
+
def test_histogram02():
|
1149 |
+
labels = [1, 1, 1, 1, 2, 2, 2, 2]
|
1150 |
+
expected = [0, 2, 0, 1, 1]
|
1151 |
+
input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
|
1152 |
+
output = ndimage.histogram(input, 0, 4, 5, labels, 1)
|
1153 |
+
assert_array_almost_equal(output, expected)
|
1154 |
+
|
1155 |
+
|
1156 |
+
def test_histogram03():
|
1157 |
+
labels = [1, 0, 1, 1, 2, 2, 2, 2]
|
1158 |
+
expected1 = [0, 1, 0, 1, 1]
|
1159 |
+
expected2 = [0, 0, 0, 3, 0]
|
1160 |
+
input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
|
1161 |
+
output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
|
1162 |
+
|
1163 |
+
assert_array_almost_equal(output[0], expected1)
|
1164 |
+
assert_array_almost_equal(output[1], expected2)
|
1165 |
+
|
1166 |
+
|
1167 |
+
def test_stat_funcs_2d():
|
1168 |
+
a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
|
1169 |
+
lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
|
1170 |
+
|
1171 |
+
mean = ndimage.mean(a, labels=lbl, index=[1, 2])
|
1172 |
+
assert_array_equal(mean, [7.0, 4.0])
|
1173 |
+
|
1174 |
+
var = ndimage.variance(a, labels=lbl, index=[1, 2])
|
1175 |
+
assert_array_equal(var, [2.5, 1.0])
|
1176 |
+
|
1177 |
+
std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
|
1178 |
+
assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
|
1179 |
+
|
1180 |
+
med = ndimage.median(a, labels=lbl, index=[1, 2])
|
1181 |
+
assert_array_equal(med, [7.0, 4.0])
|
1182 |
+
|
1183 |
+
min = ndimage.minimum(a, labels=lbl, index=[1, 2])
|
1184 |
+
assert_array_equal(min, [5, 3])
|
1185 |
+
|
1186 |
+
max = ndimage.maximum(a, labels=lbl, index=[1, 2])
|
1187 |
+
assert_array_equal(max, [9, 5])
|
1188 |
+
|
1189 |
+
|
1190 |
+
class TestWatershedIft:
|
1191 |
+
|
1192 |
+
def test_watershed_ift01(self):
|
1193 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1194 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1195 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1196 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1197 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1198 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1199 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1200 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1201 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
1202 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1203 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1204 |
+
[0, 0, 0, 1, 0, 0, 0],
|
1205 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1206 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1207 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1208 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
1209 |
+
out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
|
1210 |
+
[1, 1, 1],
|
1211 |
+
[1, 1, 1]])
|
1212 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
1213 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1214 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1215 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1216 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1217 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1218 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
1219 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1220 |
+
assert_array_almost_equal(out, expected)
|
1221 |
+
|
1222 |
+
def test_watershed_ift02(self):
|
1223 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1224 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1225 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1226 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1227 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1228 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1229 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1230 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1231 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
1232 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1233 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1234 |
+
[0, 0, 0, 1, 0, 0, 0],
|
1235 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1236 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1237 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1238 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
1239 |
+
out = ndimage.watershed_ift(data, markers)
|
1240 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
1241 |
+
[-1, -1, 1, 1, 1, -1, -1],
|
1242 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1243 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1244 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1245 |
+
[-1, -1, 1, 1, 1, -1, -1],
|
1246 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
1247 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1248 |
+
assert_array_almost_equal(out, expected)
|
1249 |
+
|
1250 |
+
def test_watershed_ift03(self):
|
1251 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1252 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1253 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1254 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1255 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1256 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1257 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1258 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1259 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1260 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1261 |
+
[0, 0, 2, 0, 3, 0, 0],
|
1262 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1263 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1264 |
+
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
1265 |
+
out = ndimage.watershed_ift(data, markers)
|
1266 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
1267 |
+
[-1, -1, 2, -1, 3, -1, -1],
|
1268 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1269 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1270 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1271 |
+
[-1, -1, 2, -1, 3, -1, -1],
|
1272 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1273 |
+
assert_array_almost_equal(out, expected)
|
1274 |
+
|
1275 |
+
def test_watershed_ift04(self):
|
1276 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1277 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1278 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1279 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1280 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1281 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1282 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1283 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1284 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1285 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1286 |
+
[0, 0, 2, 0, 3, 0, 0],
|
1287 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1288 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1289 |
+
[0, 0, 0, 0, 0, 0, -1]],
|
1290 |
+
np.int8)
|
1291 |
+
out = ndimage.watershed_ift(data, markers,
|
1292 |
+
structure=[[1, 1, 1],
|
1293 |
+
[1, 1, 1],
|
1294 |
+
[1, 1, 1]])
|
1295 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
1296 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1297 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1298 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1299 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1300 |
+
[-1, 2, 2, 3, 3, 3, -1],
|
1301 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1302 |
+
assert_array_almost_equal(out, expected)
|
1303 |
+
|
1304 |
+
def test_watershed_ift05(self):
|
1305 |
+
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1306 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1307 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1308 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1309 |
+
[0, 1, 0, 1, 0, 1, 0],
|
1310 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1311 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1312 |
+
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
1313 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1314 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1315 |
+
[0, 0, 3, 0, 2, 0, 0],
|
1316 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1317 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1318 |
+
[0, 0, 0, 0, 0, 0, -1]],
|
1319 |
+
np.int8)
|
1320 |
+
out = ndimage.watershed_ift(data, markers,
|
1321 |
+
structure=[[1, 1, 1],
|
1322 |
+
[1, 1, 1],
|
1323 |
+
[1, 1, 1]])
|
1324 |
+
expected = [[-1, -1, -1, -1, -1, -1, -1],
|
1325 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
1326 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
1327 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
1328 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
1329 |
+
[-1, 3, 3, 2, 2, 2, -1],
|
1330 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1331 |
+
assert_array_almost_equal(out, expected)
|
1332 |
+
|
1333 |
+
def test_watershed_ift06(self):
|
1334 |
+
data = np.array([[0, 1, 0, 0, 0, 1, 0],
|
1335 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1336 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1337 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1338 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1339 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1340 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
1341 |
+
[0, 0, 0, 1, 0, 0, 0],
|
1342 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1343 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1344 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1345 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
1346 |
+
out = ndimage.watershed_ift(data, markers,
|
1347 |
+
structure=[[1, 1, 1],
|
1348 |
+
[1, 1, 1],
|
1349 |
+
[1, 1, 1]])
|
1350 |
+
expected = [[-1, 1, 1, 1, 1, 1, -1],
|
1351 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1352 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1353 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1354 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
1355 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1356 |
+
assert_array_almost_equal(out, expected)
|
1357 |
+
|
1358 |
+
def test_watershed_ift07(self):
|
1359 |
+
shape = (7, 6)
|
1360 |
+
data = np.zeros(shape, dtype=np.uint8)
|
1361 |
+
data = data.transpose()
|
1362 |
+
data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
|
1363 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1364 |
+
[0, 1, 0, 0, 0, 1, 0],
|
1365 |
+
[0, 1, 1, 1, 1, 1, 0],
|
1366 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1367 |
+
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
1368 |
+
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
1369 |
+
[0, 0, 0, 1, 0, 0, 0],
|
1370 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1371 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1372 |
+
[0, 0, 0, 0, 0, 0, 0],
|
1373 |
+
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
1374 |
+
out = np.zeros(shape, dtype=np.int16)
|
1375 |
+
out = out.transpose()
|
1376 |
+
ndimage.watershed_ift(data, markers,
|
1377 |
+
structure=[[1, 1, 1],
|
1378 |
+
[1, 1, 1],
|
1379 |
+
[1, 1, 1]],
|
1380 |
+
output=out)
|
1381 |
+
expected = [[-1, 1, 1, 1, 1, 1, -1],
|
1382 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1383 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1384 |
+
[-1, 1, 1, 1, 1, 1, -1],
|
1385 |
+
[-1, -1, -1, -1, -1, -1, -1],
|
1386 |
+
[-1, -1, -1, -1, -1, -1, -1]]
|
1387 |
+
assert_array_almost_equal(out, expected)
|
1388 |
+
|
1389 |
+
def test_watershed_ift08(self):
|
1390 |
+
# Test cost larger than uint8. See gh-10069.
|
1391 |
+
data = np.array([[256, 0],
|
1392 |
+
[0, 0]], np.uint16)
|
1393 |
+
markers = np.array([[1, 0],
|
1394 |
+
[0, 0]], np.int8)
|
1395 |
+
out = ndimage.watershed_ift(data, markers)
|
1396 |
+
expected = [[1, 1],
|
1397 |
+
[1, 1]]
|
1398 |
+
assert_array_almost_equal(out, expected)
|
1399 |
+
|
1400 |
+
def test_watershed_ift09(self):
|
1401 |
+
# Test large cost. See gh-19575
|
1402 |
+
data = np.array([[np.iinfo(np.uint16).max, 0],
|
1403 |
+
[0, 0]], np.uint16)
|
1404 |
+
markers = np.array([[1, 0],
|
1405 |
+
[0, 0]], np.int8)
|
1406 |
+
out = ndimage.watershed_ift(data, markers)
|
1407 |
+
expected = [[1, 1],
|
1408 |
+
[1, 1]]
|
1409 |
+
assert_allclose(out, expected)
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from .._ni_support import _get_output
|
5 |
+
|
6 |
+
|
7 |
+
@pytest.mark.parametrize(
|
8 |
+
'dtype',
|
9 |
+
[
|
10 |
+
# String specifiers
|
11 |
+
'f4', 'float32', 'complex64', 'complex128',
|
12 |
+
# Type and dtype specifiers
|
13 |
+
np.float32, float, np.dtype('f4'),
|
14 |
+
# Derive from input
|
15 |
+
None,
|
16 |
+
],
|
17 |
+
)
|
18 |
+
def test_get_output_basic(dtype):
|
19 |
+
shape = (2, 3)
|
20 |
+
|
21 |
+
input_ = np.zeros(shape, 'float32')
|
22 |
+
|
23 |
+
# For None, derive dtype from input
|
24 |
+
expected_dtype = 'float32' if dtype is None else dtype
|
25 |
+
|
26 |
+
# Output is dtype-specifier, retrieve shape from input
|
27 |
+
result = _get_output(dtype, input_)
|
28 |
+
assert result.shape == shape
|
29 |
+
assert result.dtype == np.dtype(expected_dtype)
|
30 |
+
|
31 |
+
# Output is dtype specifier, with explicit shape, overriding input
|
32 |
+
result = _get_output(dtype, input_, shape=(3, 2))
|
33 |
+
assert result.shape == (3, 2)
|
34 |
+
assert result.dtype == np.dtype(expected_dtype)
|
35 |
+
|
36 |
+
# Output is pre-allocated array, return directly
|
37 |
+
output = np.zeros(shape, dtype)
|
38 |
+
result = _get_output(output, input_)
|
39 |
+
assert result is output
|
40 |
+
|
41 |
+
|
42 |
+
def test_get_output_complex():
|
43 |
+
shape = (2, 3)
|
44 |
+
|
45 |
+
input_ = np.zeros(shape)
|
46 |
+
|
47 |
+
# None, promote input type to complex
|
48 |
+
result = _get_output(None, input_, complex_output=True)
|
49 |
+
assert result.shape == shape
|
50 |
+
assert result.dtype == np.dtype('complex128')
|
51 |
+
|
52 |
+
# Explicit type, promote type to complex
|
53 |
+
with pytest.warns(UserWarning, match='promoting specified output dtype to complex'):
|
54 |
+
result = _get_output(float, input_, complex_output=True)
|
55 |
+
assert result.shape == shape
|
56 |
+
assert result.dtype == np.dtype('complex128')
|
57 |
+
|
58 |
+
# String specifier, simply verify complex output
|
59 |
+
result = _get_output('complex64', input_, complex_output=True)
|
60 |
+
assert result.shape == shape
|
61 |
+
assert result.dtype == np.dtype('complex64')
|
62 |
+
|
63 |
+
|
64 |
+
def test_get_output_error_cases():
|
65 |
+
input_ = np.zeros((2, 3), 'float32')
|
66 |
+
|
67 |
+
# Two separate paths can raise the same error
|
68 |
+
with pytest.raises(RuntimeError, match='output must have complex dtype'):
|
69 |
+
_get_output('float32', input_, complex_output=True)
|
70 |
+
with pytest.raises(RuntimeError, match='output must have complex dtype'):
|
71 |
+
_get_output(np.zeros((2, 3)), input_, complex_output=True)
|
72 |
+
|
73 |
+
with pytest.raises(RuntimeError, match='output must have numeric dtype'):
|
74 |
+
_get_output('void', input_)
|
75 |
+
|
76 |
+
with pytest.raises(RuntimeError, match='shape not correct'):
|
77 |
+
_get_output(np.zeros((3, 2)), input_)
|
venv/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tests for spline filtering."""
|
2 |
+
import numpy as np
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from numpy.testing import assert_almost_equal
|
6 |
+
|
7 |
+
from scipy import ndimage
|
8 |
+
|
9 |
+
|
10 |
+
def get_spline_knot_values(order):
|
11 |
+
"""Knot values to the right of a B-spline's center."""
|
12 |
+
knot_values = {0: [1],
|
13 |
+
1: [1],
|
14 |
+
2: [6, 1],
|
15 |
+
3: [4, 1],
|
16 |
+
4: [230, 76, 1],
|
17 |
+
5: [66, 26, 1]}
|
18 |
+
|
19 |
+
return knot_values[order]
|
20 |
+
|
21 |
+
|
22 |
+
def make_spline_knot_matrix(n, order, mode='mirror'):
|
23 |
+
"""Matrix to invert to find the spline coefficients."""
|
24 |
+
knot_values = get_spline_knot_values(order)
|
25 |
+
|
26 |
+
matrix = np.zeros((n, n))
|
27 |
+
for diag, knot_value in enumerate(knot_values):
|
28 |
+
indices = np.arange(diag, n)
|
29 |
+
if diag == 0:
|
30 |
+
matrix[indices, indices] = knot_value
|
31 |
+
else:
|
32 |
+
matrix[indices, indices - diag] = knot_value
|
33 |
+
matrix[indices - diag, indices] = knot_value
|
34 |
+
|
35 |
+
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
|
36 |
+
|
37 |
+
if mode == 'mirror':
|
38 |
+
start, step = 1, 1
|
39 |
+
elif mode == 'reflect':
|
40 |
+
start, step = 0, 1
|
41 |
+
elif mode == 'grid-wrap':
|
42 |
+
start, step = -1, -1
|
43 |
+
else:
|
44 |
+
raise ValueError(f'unsupported mode {mode}')
|
45 |
+
|
46 |
+
for row in range(len(knot_values) - 1):
|
47 |
+
for idx, knot_value in enumerate(knot_values[row + 1:]):
|
48 |
+
matrix[row, start + step*idx] += knot_value
|
49 |
+
matrix[-row - 1, -start - 1 - step*idx] += knot_value
|
50 |
+
|
51 |
+
return matrix / knot_values_sum
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
|
55 |
+
@pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect'])
|
56 |
+
def test_spline_filter_vs_matrix_solution(order, mode):
|
57 |
+
n = 100
|
58 |
+
eye = np.eye(n, dtype=float)
|
59 |
+
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order,
|
60 |
+
mode=mode)
|
61 |
+
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order,
|
62 |
+
mode=mode)
|
63 |
+
matrix = make_spline_knot_matrix(n, order, mode=mode)
|
64 |
+
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
|
65 |
+
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
venv/lib/python3.10/site-packages/scipy/stats/__init__.py
ADDED
@@ -0,0 +1,643 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
.. _statsrefmanual:
|
3 |
+
|
4 |
+
==========================================
|
5 |
+
Statistical functions (:mod:`scipy.stats`)
|
6 |
+
==========================================
|
7 |
+
|
8 |
+
.. currentmodule:: scipy.stats
|
9 |
+
|
10 |
+
This module contains a large number of probability distributions,
|
11 |
+
summary and frequency statistics, correlation functions and statistical
|
12 |
+
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
|
13 |
+
functionality, and more.
|
14 |
+
|
15 |
+
Statistics is a very large area, and there are topics that are out of scope
|
16 |
+
for SciPy and are covered by other packages. Some of the most important ones
|
17 |
+
are:
|
18 |
+
|
19 |
+
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
|
20 |
+
regression, linear models, time series analysis, extensions to topics
|
21 |
+
also covered by ``scipy.stats``.
|
22 |
+
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
|
23 |
+
functionality, interfaces to other statistical languages.
|
24 |
+
- `PyMC <https://docs.pymc.io/>`__: Bayesian statistical
|
25 |
+
modeling, probabilistic machine learning.
|
26 |
+
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
|
27 |
+
model selection.
|
28 |
+
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
|
29 |
+
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
|
30 |
+
|
31 |
+
|
32 |
+
Probability distributions
|
33 |
+
=========================
|
34 |
+
|
35 |
+
Each univariate distribution is an instance of a subclass of `rv_continuous`
|
36 |
+
(`rv_discrete` for discrete distributions):
|
37 |
+
|
38 |
+
.. autosummary::
|
39 |
+
:toctree: generated/
|
40 |
+
|
41 |
+
rv_continuous
|
42 |
+
rv_discrete
|
43 |
+
rv_histogram
|
44 |
+
|
45 |
+
Continuous distributions
|
46 |
+
------------------------
|
47 |
+
|
48 |
+
.. autosummary::
|
49 |
+
:toctree: generated/
|
50 |
+
|
51 |
+
alpha -- Alpha
|
52 |
+
anglit -- Anglit
|
53 |
+
arcsine -- Arcsine
|
54 |
+
argus -- Argus
|
55 |
+
beta -- Beta
|
56 |
+
betaprime -- Beta Prime
|
57 |
+
bradford -- Bradford
|
58 |
+
burr -- Burr (Type III)
|
59 |
+
burr12 -- Burr (Type XII)
|
60 |
+
cauchy -- Cauchy
|
61 |
+
chi -- Chi
|
62 |
+
chi2 -- Chi-squared
|
63 |
+
cosine -- Cosine
|
64 |
+
crystalball -- Crystalball
|
65 |
+
dgamma -- Double Gamma
|
66 |
+
dweibull -- Double Weibull
|
67 |
+
erlang -- Erlang
|
68 |
+
expon -- Exponential
|
69 |
+
exponnorm -- Exponentially Modified Normal
|
70 |
+
exponweib -- Exponentiated Weibull
|
71 |
+
exponpow -- Exponential Power
|
72 |
+
f -- F (Snecdor F)
|
73 |
+
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
|
74 |
+
fisk -- Fisk
|
75 |
+
foldcauchy -- Folded Cauchy
|
76 |
+
foldnorm -- Folded Normal
|
77 |
+
genlogistic -- Generalized Logistic
|
78 |
+
gennorm -- Generalized normal
|
79 |
+
genpareto -- Generalized Pareto
|
80 |
+
genexpon -- Generalized Exponential
|
81 |
+
genextreme -- Generalized Extreme Value
|
82 |
+
gausshyper -- Gauss Hypergeometric
|
83 |
+
gamma -- Gamma
|
84 |
+
gengamma -- Generalized gamma
|
85 |
+
genhalflogistic -- Generalized Half Logistic
|
86 |
+
genhyperbolic -- Generalized Hyperbolic
|
87 |
+
geninvgauss -- Generalized Inverse Gaussian
|
88 |
+
gibrat -- Gibrat
|
89 |
+
gompertz -- Gompertz (Truncated Gumbel)
|
90 |
+
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
|
91 |
+
gumbel_l -- Left Sided Gumbel, etc.
|
92 |
+
halfcauchy -- Half Cauchy
|
93 |
+
halflogistic -- Half Logistic
|
94 |
+
halfnorm -- Half Normal
|
95 |
+
halfgennorm -- Generalized Half Normal
|
96 |
+
hypsecant -- Hyperbolic Secant
|
97 |
+
invgamma -- Inverse Gamma
|
98 |
+
invgauss -- Inverse Gaussian
|
99 |
+
invweibull -- Inverse Weibull
|
100 |
+
jf_skew_t -- Jones and Faddy Skew-T
|
101 |
+
johnsonsb -- Johnson SB
|
102 |
+
johnsonsu -- Johnson SU
|
103 |
+
kappa4 -- Kappa 4 parameter
|
104 |
+
kappa3 -- Kappa 3 parameter
|
105 |
+
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
|
106 |
+
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
|
107 |
+
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
|
108 |
+
laplace -- Laplace
|
109 |
+
laplace_asymmetric -- Asymmetric Laplace
|
110 |
+
levy -- Levy
|
111 |
+
levy_l
|
112 |
+
levy_stable
|
113 |
+
logistic -- Logistic
|
114 |
+
loggamma -- Log-Gamma
|
115 |
+
loglaplace -- Log-Laplace (Log Double Exponential)
|
116 |
+
lognorm -- Log-Normal
|
117 |
+
loguniform -- Log-Uniform
|
118 |
+
lomax -- Lomax (Pareto of the second kind)
|
119 |
+
maxwell -- Maxwell
|
120 |
+
mielke -- Mielke's Beta-Kappa
|
121 |
+
moyal -- Moyal
|
122 |
+
nakagami -- Nakagami
|
123 |
+
ncx2 -- Non-central chi-squared
|
124 |
+
ncf -- Non-central F
|
125 |
+
nct -- Non-central Student's T
|
126 |
+
norm -- Normal (Gaussian)
|
127 |
+
norminvgauss -- Normal Inverse Gaussian
|
128 |
+
pareto -- Pareto
|
129 |
+
pearson3 -- Pearson type III
|
130 |
+
powerlaw -- Power-function
|
131 |
+
powerlognorm -- Power log normal
|
132 |
+
powernorm -- Power normal
|
133 |
+
rdist -- R-distribution
|
134 |
+
rayleigh -- Rayleigh
|
135 |
+
rel_breitwigner -- Relativistic Breit-Wigner
|
136 |
+
rice -- Rice
|
137 |
+
recipinvgauss -- Reciprocal Inverse Gaussian
|
138 |
+
semicircular -- Semicircular
|
139 |
+
skewcauchy -- Skew Cauchy
|
140 |
+
skewnorm -- Skew normal
|
141 |
+
studentized_range -- Studentized Range
|
142 |
+
t -- Student's T
|
143 |
+
trapezoid -- Trapezoidal
|
144 |
+
triang -- Triangular
|
145 |
+
truncexpon -- Truncated Exponential
|
146 |
+
truncnorm -- Truncated Normal
|
147 |
+
truncpareto -- Truncated Pareto
|
148 |
+
truncweibull_min -- Truncated minimum Weibull distribution
|
149 |
+
tukeylambda -- Tukey-Lambda
|
150 |
+
uniform -- Uniform
|
151 |
+
vonmises -- Von-Mises (Circular)
|
152 |
+
vonmises_line -- Von-Mises (Line)
|
153 |
+
wald -- Wald
|
154 |
+
weibull_min -- Minimum Weibull (see Frechet)
|
155 |
+
weibull_max -- Maximum Weibull (see Frechet)
|
156 |
+
wrapcauchy -- Wrapped Cauchy
|
157 |
+
|
158 |
+
The ``fit`` method of the univariate continuous distributions uses
|
159 |
+
maximum likelihood estimation to fit the distribution to a data set.
|
160 |
+
The ``fit`` method can accept regular data or *censored data*.
|
161 |
+
Censored data is represented with instances of the `CensoredData`
|
162 |
+
class.
|
163 |
+
|
164 |
+
.. autosummary::
|
165 |
+
:toctree: generated/
|
166 |
+
|
167 |
+
CensoredData
|
168 |
+
|
169 |
+
|
170 |
+
Multivariate distributions
|
171 |
+
--------------------------
|
172 |
+
|
173 |
+
.. autosummary::
|
174 |
+
:toctree: generated/
|
175 |
+
|
176 |
+
multivariate_normal -- Multivariate normal distribution
|
177 |
+
matrix_normal -- Matrix normal distribution
|
178 |
+
dirichlet -- Dirichlet
|
179 |
+
dirichlet_multinomial -- Dirichlet multinomial distribution
|
180 |
+
wishart -- Wishart
|
181 |
+
invwishart -- Inverse Wishart
|
182 |
+
multinomial -- Multinomial distribution
|
183 |
+
special_ortho_group -- SO(N) group
|
184 |
+
ortho_group -- O(N) group
|
185 |
+
unitary_group -- U(N) group
|
186 |
+
random_correlation -- random correlation matrices
|
187 |
+
multivariate_t -- Multivariate t-distribution
|
188 |
+
multivariate_hypergeom -- Multivariate hypergeometric distribution
|
189 |
+
random_table -- Distribution of random tables with given marginals
|
190 |
+
uniform_direction -- Uniform distribution on S(N-1)
|
191 |
+
vonmises_fisher -- Von Mises-Fisher distribution
|
192 |
+
|
193 |
+
`scipy.stats.multivariate_normal` methods accept instances
|
194 |
+
of the following class to represent the covariance.
|
195 |
+
|
196 |
+
.. autosummary::
|
197 |
+
:toctree: generated/
|
198 |
+
|
199 |
+
Covariance -- Representation of a covariance matrix
|
200 |
+
|
201 |
+
|
202 |
+
Discrete distributions
|
203 |
+
----------------------
|
204 |
+
|
205 |
+
.. autosummary::
|
206 |
+
:toctree: generated/
|
207 |
+
|
208 |
+
bernoulli -- Bernoulli
|
209 |
+
betabinom -- Beta-Binomial
|
210 |
+
betanbinom -- Beta-Negative Binomial
|
211 |
+
binom -- Binomial
|
212 |
+
boltzmann -- Boltzmann (Truncated Discrete Exponential)
|
213 |
+
dlaplace -- Discrete Laplacian
|
214 |
+
geom -- Geometric
|
215 |
+
hypergeom -- Hypergeometric
|
216 |
+
logser -- Logarithmic (Log-Series, Series)
|
217 |
+
nbinom -- Negative Binomial
|
218 |
+
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
|
219 |
+
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
|
220 |
+
nhypergeom -- Negative Hypergeometric
|
221 |
+
planck -- Planck (Discrete Exponential)
|
222 |
+
poisson -- Poisson
|
223 |
+
randint -- Discrete Uniform
|
224 |
+
skellam -- Skellam
|
225 |
+
yulesimon -- Yule-Simon
|
226 |
+
zipf -- Zipf (Zeta)
|
227 |
+
zipfian -- Zipfian
|
228 |
+
|
229 |
+
|
230 |
+
An overview of statistical functions is given below. Many of these functions
|
231 |
+
have a similar version in `scipy.stats.mstats` which work for masked arrays.
|
232 |
+
|
233 |
+
Summary statistics
|
234 |
+
==================
|
235 |
+
|
236 |
+
.. autosummary::
|
237 |
+
:toctree: generated/
|
238 |
+
|
239 |
+
describe -- Descriptive statistics
|
240 |
+
gmean -- Geometric mean
|
241 |
+
hmean -- Harmonic mean
|
242 |
+
pmean -- Power mean
|
243 |
+
kurtosis -- Fisher or Pearson kurtosis
|
244 |
+
mode -- Modal value
|
245 |
+
moment -- Central moment
|
246 |
+
expectile -- Expectile
|
247 |
+
skew -- Skewness
|
248 |
+
kstat --
|
249 |
+
kstatvar --
|
250 |
+
tmean -- Truncated arithmetic mean
|
251 |
+
tvar -- Truncated variance
|
252 |
+
tmin --
|
253 |
+
tmax --
|
254 |
+
tstd --
|
255 |
+
tsem --
|
256 |
+
variation -- Coefficient of variation
|
257 |
+
find_repeats
|
258 |
+
rankdata
|
259 |
+
tiecorrect
|
260 |
+
trim_mean
|
261 |
+
gstd -- Geometric Standard Deviation
|
262 |
+
iqr
|
263 |
+
sem
|
264 |
+
bayes_mvs
|
265 |
+
mvsdist
|
266 |
+
entropy
|
267 |
+
differential_entropy
|
268 |
+
median_abs_deviation
|
269 |
+
|
270 |
+
Frequency statistics
|
271 |
+
====================
|
272 |
+
|
273 |
+
.. autosummary::
|
274 |
+
:toctree: generated/
|
275 |
+
|
276 |
+
cumfreq
|
277 |
+
percentileofscore
|
278 |
+
scoreatpercentile
|
279 |
+
relfreq
|
280 |
+
|
281 |
+
.. autosummary::
|
282 |
+
:toctree: generated/
|
283 |
+
|
284 |
+
binned_statistic -- Compute a binned statistic for a set of data.
|
285 |
+
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
|
286 |
+
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
|
287 |
+
|
288 |
+
Hypothesis Tests and related functions
|
289 |
+
======================================
|
290 |
+
SciPy has many functions for performing hypothesis tests that return a
|
291 |
+
test statistic and a p-value, and several of them return confidence intervals
|
292 |
+
and/or other related information.
|
293 |
+
|
294 |
+
The headings below are based on common uses of the functions within, but due to
|
295 |
+
the wide variety of statistical procedures, any attempt at coarse-grained
|
296 |
+
categorization will be imperfect. Also, note that tests within the same heading
|
297 |
+
are not interchangeable in general (e.g. many have different distributional
|
298 |
+
assumptions).
|
299 |
+
|
300 |
+
One Sample Tests / Paired Sample Tests
|
301 |
+
--------------------------------------
|
302 |
+
One sample tests are typically used to assess whether a single sample was
|
303 |
+
drawn from a specified distribution or a distribution with specified properties
|
304 |
+
(e.g. zero mean).
|
305 |
+
|
306 |
+
.. autosummary::
|
307 |
+
:toctree: generated/
|
308 |
+
|
309 |
+
ttest_1samp
|
310 |
+
binomtest
|
311 |
+
quantile_test
|
312 |
+
skewtest
|
313 |
+
kurtosistest
|
314 |
+
normaltest
|
315 |
+
jarque_bera
|
316 |
+
shapiro
|
317 |
+
anderson
|
318 |
+
cramervonmises
|
319 |
+
ks_1samp
|
320 |
+
goodness_of_fit
|
321 |
+
chisquare
|
322 |
+
power_divergence
|
323 |
+
|
324 |
+
Paired sample tests are often used to assess whether two samples were drawn
|
325 |
+
from the same distribution; they differ from the independent sample tests below
|
326 |
+
in that each observation in one sample is treated as paired with a
|
327 |
+
closely-related observation in the other sample (e.g. when environmental
|
328 |
+
factors are controlled between observations within a pair but not among pairs).
|
329 |
+
They can also be interpreted or used as one-sample tests (e.g. tests on the
|
330 |
+
mean or median of *differences* between paired observations).
|
331 |
+
|
332 |
+
.. autosummary::
|
333 |
+
:toctree: generated/
|
334 |
+
|
335 |
+
ttest_rel
|
336 |
+
wilcoxon
|
337 |
+
|
338 |
+
Association/Correlation Tests
|
339 |
+
-----------------------------
|
340 |
+
|
341 |
+
These tests are often used to assess whether there is a relationship (e.g.
|
342 |
+
linear) between paired observations in multiple samples or among the
|
343 |
+
coordinates of multivariate observations.
|
344 |
+
|
345 |
+
.. autosummary::
|
346 |
+
:toctree: generated/
|
347 |
+
|
348 |
+
linregress
|
349 |
+
pearsonr
|
350 |
+
spearmanr
|
351 |
+
pointbiserialr
|
352 |
+
kendalltau
|
353 |
+
weightedtau
|
354 |
+
somersd
|
355 |
+
siegelslopes
|
356 |
+
theilslopes
|
357 |
+
page_trend_test
|
358 |
+
multiscale_graphcorr
|
359 |
+
|
360 |
+
These association tests and are to work with samples in the form of contingency
|
361 |
+
tables. Supporting functions are available in `scipy.stats.contingency`.
|
362 |
+
|
363 |
+
.. autosummary::
|
364 |
+
:toctree: generated/
|
365 |
+
|
366 |
+
chi2_contingency
|
367 |
+
fisher_exact
|
368 |
+
barnard_exact
|
369 |
+
boschloo_exact
|
370 |
+
|
371 |
+
Independent Sample Tests
|
372 |
+
------------------------
|
373 |
+
Independent sample tests are typically used to assess whether multiple samples
|
374 |
+
were independently drawn from the same distribution or different distributions
|
375 |
+
with a shared property (e.g. equal means).
|
376 |
+
|
377 |
+
Some tests are specifically for comparing two samples.
|
378 |
+
|
379 |
+
.. autosummary::
|
380 |
+
:toctree: generated/
|
381 |
+
|
382 |
+
ttest_ind_from_stats
|
383 |
+
poisson_means_test
|
384 |
+
ttest_ind
|
385 |
+
mannwhitneyu
|
386 |
+
bws_test
|
387 |
+
ranksums
|
388 |
+
brunnermunzel
|
389 |
+
mood
|
390 |
+
ansari
|
391 |
+
cramervonmises_2samp
|
392 |
+
epps_singleton_2samp
|
393 |
+
ks_2samp
|
394 |
+
kstest
|
395 |
+
|
396 |
+
Others are generalized to multiple samples.
|
397 |
+
|
398 |
+
.. autosummary::
|
399 |
+
:toctree: generated/
|
400 |
+
|
401 |
+
f_oneway
|
402 |
+
tukey_hsd
|
403 |
+
dunnett
|
404 |
+
kruskal
|
405 |
+
alexandergovern
|
406 |
+
fligner
|
407 |
+
levene
|
408 |
+
bartlett
|
409 |
+
median_test
|
410 |
+
friedmanchisquare
|
411 |
+
anderson_ksamp
|
412 |
+
|
413 |
+
Resampling and Monte Carlo Methods
|
414 |
+
----------------------------------
|
415 |
+
The following functions can reproduce the p-value and confidence interval
|
416 |
+
results of most of the functions above, and often produce accurate results in a
|
417 |
+
wider variety of conditions. They can also be used to perform hypothesis tests
|
418 |
+
and generate confidence intervals for custom statistics. This flexibility comes
|
419 |
+
at the cost of greater computational requirements and stochastic results.
|
420 |
+
|
421 |
+
.. autosummary::
|
422 |
+
:toctree: generated/
|
423 |
+
|
424 |
+
monte_carlo_test
|
425 |
+
permutation_test
|
426 |
+
bootstrap
|
427 |
+
|
428 |
+
Instances of the following object can be passed into some hypothesis test
|
429 |
+
functions to perform a resampling or Monte Carlo version of the hypothesis
|
430 |
+
test.
|
431 |
+
|
432 |
+
.. autosummary::
|
433 |
+
:toctree: generated/
|
434 |
+
|
435 |
+
MonteCarloMethod
|
436 |
+
PermutationMethod
|
437 |
+
BootstrapMethod
|
438 |
+
|
439 |
+
Multiple Hypothesis Testing and Meta-Analysis
|
440 |
+
---------------------------------------------
|
441 |
+
These functions are for assessing the results of individual tests as a whole.
|
442 |
+
Functions for performing specific multiple hypothesis tests (e.g. post hoc
|
443 |
+
tests) are listed above.
|
444 |
+
|
445 |
+
.. autosummary::
|
446 |
+
:toctree: generated/
|
447 |
+
|
448 |
+
combine_pvalues
|
449 |
+
false_discovery_control
|
450 |
+
|
451 |
+
|
452 |
+
The following functions are related to the tests above but do not belong in the
|
453 |
+
above categories.
|
454 |
+
|
455 |
+
Quasi-Monte Carlo
|
456 |
+
=================
|
457 |
+
|
458 |
+
.. toctree::
|
459 |
+
:maxdepth: 4
|
460 |
+
|
461 |
+
stats.qmc
|
462 |
+
|
463 |
+
Contingency Tables
|
464 |
+
==================
|
465 |
+
|
466 |
+
.. toctree::
|
467 |
+
:maxdepth: 4
|
468 |
+
|
469 |
+
stats.contingency
|
470 |
+
|
471 |
+
Masked statistics functions
|
472 |
+
===========================
|
473 |
+
|
474 |
+
.. toctree::
|
475 |
+
|
476 |
+
stats.mstats
|
477 |
+
|
478 |
+
|
479 |
+
Other statistical functionality
|
480 |
+
===============================
|
481 |
+
|
482 |
+
Transformations
|
483 |
+
---------------
|
484 |
+
|
485 |
+
.. autosummary::
|
486 |
+
:toctree: generated/
|
487 |
+
|
488 |
+
boxcox
|
489 |
+
boxcox_normmax
|
490 |
+
boxcox_llf
|
491 |
+
yeojohnson
|
492 |
+
yeojohnson_normmax
|
493 |
+
yeojohnson_llf
|
494 |
+
obrientransform
|
495 |
+
sigmaclip
|
496 |
+
trimboth
|
497 |
+
trim1
|
498 |
+
zmap
|
499 |
+
zscore
|
500 |
+
gzscore
|
501 |
+
|
502 |
+
Statistical distances
|
503 |
+
---------------------
|
504 |
+
|
505 |
+
.. autosummary::
|
506 |
+
:toctree: generated/
|
507 |
+
|
508 |
+
wasserstein_distance
|
509 |
+
wasserstein_distance_nd
|
510 |
+
energy_distance
|
511 |
+
|
512 |
+
Sampling
|
513 |
+
--------
|
514 |
+
|
515 |
+
.. toctree::
|
516 |
+
:maxdepth: 4
|
517 |
+
|
518 |
+
stats.sampling
|
519 |
+
|
520 |
+
Random variate generation / CDF Inversion
|
521 |
+
-----------------------------------------
|
522 |
+
|
523 |
+
.. autosummary::
|
524 |
+
:toctree: generated/
|
525 |
+
|
526 |
+
rvs_ratio_uniforms
|
527 |
+
|
528 |
+
Fitting / Survival Analysis
|
529 |
+
---------------------------
|
530 |
+
|
531 |
+
.. autosummary::
|
532 |
+
:toctree: generated/
|
533 |
+
|
534 |
+
fit
|
535 |
+
ecdf
|
536 |
+
logrank
|
537 |
+
|
538 |
+
Directional statistical functions
|
539 |
+
---------------------------------
|
540 |
+
|
541 |
+
.. autosummary::
|
542 |
+
:toctree: generated/
|
543 |
+
|
544 |
+
directional_stats
|
545 |
+
circmean
|
546 |
+
circvar
|
547 |
+
circstd
|
548 |
+
|
549 |
+
Sensitivity Analysis
|
550 |
+
--------------------
|
551 |
+
|
552 |
+
.. autosummary::
|
553 |
+
:toctree: generated/
|
554 |
+
|
555 |
+
sobol_indices
|
556 |
+
|
557 |
+
Plot-tests
|
558 |
+
----------
|
559 |
+
|
560 |
+
.. autosummary::
|
561 |
+
:toctree: generated/
|
562 |
+
|
563 |
+
ppcc_max
|
564 |
+
ppcc_plot
|
565 |
+
probplot
|
566 |
+
boxcox_normplot
|
567 |
+
yeojohnson_normplot
|
568 |
+
|
569 |
+
Univariate and multivariate kernel density estimation
|
570 |
+
-----------------------------------------------------
|
571 |
+
|
572 |
+
.. autosummary::
|
573 |
+
:toctree: generated/
|
574 |
+
|
575 |
+
gaussian_kde
|
576 |
+
|
577 |
+
Warnings / Errors used in :mod:`scipy.stats`
|
578 |
+
--------------------------------------------
|
579 |
+
|
580 |
+
.. autosummary::
|
581 |
+
:toctree: generated/
|
582 |
+
|
583 |
+
DegenerateDataWarning
|
584 |
+
ConstantInputWarning
|
585 |
+
NearConstantInputWarning
|
586 |
+
FitError
|
587 |
+
|
588 |
+
Result classes used in :mod:`scipy.stats`
|
589 |
+
-----------------------------------------
|
590 |
+
|
591 |
+
.. warning::
|
592 |
+
|
593 |
+
These classes are private, but they are included here because instances
|
594 |
+
of them are returned by other statistical functions. User import and
|
595 |
+
instantiation is not supported.
|
596 |
+
|
597 |
+
.. toctree::
|
598 |
+
:maxdepth: 2
|
599 |
+
|
600 |
+
stats._result_classes
|
601 |
+
|
602 |
+
""" # noqa: E501
|
603 |
+
|
604 |
+
from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning,
|
605 |
+
DegenerateDataWarning, FitError)
|
606 |
+
from ._stats_py import *
|
607 |
+
from ._variation import variation
|
608 |
+
from .distributions import *
|
609 |
+
from ._morestats import *
|
610 |
+
from ._multicomp import *
|
611 |
+
from ._binomtest import binomtest
|
612 |
+
from ._binned_statistic import *
|
613 |
+
from ._kde import gaussian_kde
|
614 |
+
from . import mstats
|
615 |
+
from . import qmc
|
616 |
+
from ._multivariate import *
|
617 |
+
from . import contingency
|
618 |
+
from .contingency import chi2_contingency
|
619 |
+
from ._censored_data import CensoredData
|
620 |
+
from ._resampling import (bootstrap, monte_carlo_test, permutation_test,
|
621 |
+
MonteCarloMethod, PermutationMethod, BootstrapMethod)
|
622 |
+
from ._entropy import *
|
623 |
+
from ._hypotests import *
|
624 |
+
from ._rvs_sampling import rvs_ratio_uniforms
|
625 |
+
from ._page_trend_test import page_trend_test
|
626 |
+
from ._mannwhitneyu import mannwhitneyu
|
627 |
+
from ._bws_test import bws_test
|
628 |
+
from ._fit import fit, goodness_of_fit
|
629 |
+
from ._covariance import Covariance
|
630 |
+
from ._sensitivity_analysis import *
|
631 |
+
from ._survival import *
|
632 |
+
|
633 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
634 |
+
from . import (
|
635 |
+
biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, stats
|
636 |
+
)
|
637 |
+
|
638 |
+
|
639 |
+
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
|
640 |
+
|
641 |
+
from scipy._lib._testutils import PytestTester
|
642 |
+
test = PytestTester(__name__)
|
643 |
+
del PytestTester
|
venv/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (278 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py
ADDED
@@ -0,0 +1,642 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
|
2 |
+
# When the two are combined, it can be tricky to get all the behavior just
|
3 |
+
# right. This file contains utility functions useful for scipy.stats functions
|
4 |
+
# that support `axis` and `nan_policy`, including a decorator that
|
5 |
+
# automatically adds `axis` and `nan_policy` arguments to a function.
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
from functools import wraps
|
9 |
+
from scipy._lib._docscrape import FunctionDoc, Parameter
|
10 |
+
from scipy._lib._util import _contains_nan, AxisError, _get_nan
|
11 |
+
import inspect
|
12 |
+
|
13 |
+
|
14 |
+
def _broadcast_arrays(arrays, axis=None):
|
15 |
+
"""
|
16 |
+
Broadcast shapes of arrays, ignoring incompatibility of specified axes
|
17 |
+
"""
|
18 |
+
new_shapes = _broadcast_array_shapes(arrays, axis=axis)
|
19 |
+
if axis is None:
|
20 |
+
new_shapes = [new_shapes]*len(arrays)
|
21 |
+
return [np.broadcast_to(array, new_shape)
|
22 |
+
for array, new_shape in zip(arrays, new_shapes)]
|
23 |
+
|
24 |
+
|
25 |
+
def _broadcast_array_shapes(arrays, axis=None):
|
26 |
+
"""
|
27 |
+
Broadcast shapes of arrays, ignoring incompatibility of specified axes
|
28 |
+
"""
|
29 |
+
shapes = [np.asarray(arr).shape for arr in arrays]
|
30 |
+
return _broadcast_shapes(shapes, axis)
|
31 |
+
|
32 |
+
|
33 |
+
def _broadcast_shapes(shapes, axis=None):
|
34 |
+
"""
|
35 |
+
Broadcast shapes, ignoring incompatibility of specified axes
|
36 |
+
"""
|
37 |
+
if not shapes:
|
38 |
+
return shapes
|
39 |
+
|
40 |
+
# input validation
|
41 |
+
if axis is not None:
|
42 |
+
axis = np.atleast_1d(axis)
|
43 |
+
axis_int = axis.astype(int)
|
44 |
+
if not np.array_equal(axis_int, axis):
|
45 |
+
raise AxisError('`axis` must be an integer, a '
|
46 |
+
'tuple of integers, or `None`.')
|
47 |
+
axis = axis_int
|
48 |
+
|
49 |
+
# First, ensure all shapes have same number of dimensions by prepending 1s.
|
50 |
+
n_dims = max([len(shape) for shape in shapes])
|
51 |
+
new_shapes = np.ones((len(shapes), n_dims), dtype=int)
|
52 |
+
for row, shape in zip(new_shapes, shapes):
|
53 |
+
row[len(row)-len(shape):] = shape # can't use negative indices (-0:)
|
54 |
+
|
55 |
+
# Remove the shape elements of the axes to be ignored, but remember them.
|
56 |
+
if axis is not None:
|
57 |
+
axis[axis < 0] = n_dims + axis[axis < 0]
|
58 |
+
axis = np.sort(axis)
|
59 |
+
if axis[-1] >= n_dims or axis[0] < 0:
|
60 |
+
message = (f"`axis` is out of bounds "
|
61 |
+
f"for array of dimension {n_dims}")
|
62 |
+
raise AxisError(message)
|
63 |
+
|
64 |
+
if len(np.unique(axis)) != len(axis):
|
65 |
+
raise AxisError("`axis` must contain only distinct elements")
|
66 |
+
|
67 |
+
removed_shapes = new_shapes[:, axis]
|
68 |
+
new_shapes = np.delete(new_shapes, axis, axis=1)
|
69 |
+
|
70 |
+
# If arrays are broadcastable, shape elements that are 1 may be replaced
|
71 |
+
# with a corresponding non-1 shape element. Assuming arrays are
|
72 |
+
# broadcastable, that final shape element can be found with:
|
73 |
+
new_shape = np.max(new_shapes, axis=0)
|
74 |
+
# except in case of an empty array:
|
75 |
+
new_shape *= new_shapes.all(axis=0)
|
76 |
+
|
77 |
+
# Among all arrays, there can only be one unique non-1 shape element.
|
78 |
+
# Therefore, if any non-1 shape element does not match what we found
|
79 |
+
# above, the arrays must not be broadcastable after all.
|
80 |
+
if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
|
81 |
+
raise ValueError("Array shapes are incompatible for broadcasting.")
|
82 |
+
|
83 |
+
if axis is not None:
|
84 |
+
# Add back the shape elements that were ignored
|
85 |
+
new_axis = axis - np.arange(len(axis))
|
86 |
+
new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
|
87 |
+
for removed_shape in removed_shapes]
|
88 |
+
return new_shapes
|
89 |
+
else:
|
90 |
+
return tuple(new_shape)
|
91 |
+
|
92 |
+
|
93 |
+
def _broadcast_array_shapes_remove_axis(arrays, axis=None):
    """
    Broadcast shapes of arrays, dropping specified axes

    Given a sequence of arrays `arrays` and an integer or tuple `axis`, find
    the shape of the broadcast result after consuming/dropping `axis`.
    In other words, return output shape of a typical hypothesis test on
    `arrays` vectorized along `axis`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats._axis_nan_policy import (
    ...     _broadcast_array_shapes_remove_axis
    ... )
    >>> a = np.zeros((5, 2, 1))
    >>> b = np.zeros((9, 3))
    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
    (5, 3)
    """
    # Note that here, `axis=None` means do not consume/drop any axes - _not_
    # ravel arrays before broadcasting.
    shapes = [arr.shape for arr in arrays]
    return _broadcast_shapes_remove_axis(shapes, axis)
|
115 |
+
|
116 |
+
|
117 |
+
def _broadcast_shapes_remove_axis(shapes, axis=None):
    """
    Broadcast shapes, dropping specified axes

    Same as `_broadcast_array_shapes_remove_axis`, but given a sequence
    of array shapes `shapes` instead of the arrays themselves.
    """
    # NOTE(review): with `axis=None`, `_broadcast_shapes` returns a single
    # tuple, so `[0]` would yield an int; callers always pass a concrete axis.
    broadcast = _broadcast_shapes(shapes, axis)
    result = broadcast[0]
    if axis is not None:
        result = np.delete(result, axis)
    return tuple(result)
|
129 |
+
|
130 |
+
|
131 |
+
def _broadcast_concatenate(arrays, axis, paired=False):
    """Concatenate arrays along an axis with broadcasting."""
    # For paired samples every axis (including `axis`) must broadcast;
    # otherwise the concatenation axis is exempt from compatibility checks.
    broadcast_axis = None if paired else axis
    arrays = _broadcast_arrays(arrays, broadcast_axis)
    return np.concatenate(arrays, axis=axis)
|
136 |
+
|
137 |
+
|
138 |
+
# TODO: add support for `axis` tuples
|
139 |
+
def _remove_nans(samples, paired):
|
140 |
+
"Remove nans from paired or unpaired 1D samples"
|
141 |
+
# potential optimization: don't copy arrays that don't contain nans
|
142 |
+
if not paired:
|
143 |
+
return [sample[~np.isnan(sample)] for sample in samples]
|
144 |
+
|
145 |
+
# for paired samples, we need to remove the whole pair when any part
|
146 |
+
# has a nan
|
147 |
+
nans = np.isnan(samples[0])
|
148 |
+
for sample in samples[1:]:
|
149 |
+
nans = nans | np.isnan(sample)
|
150 |
+
not_nans = ~nans
|
151 |
+
return [sample[not_nans] for sample in samples]
|
152 |
+
|
153 |
+
|
154 |
+
def _remove_sentinel(samples, paired, sentinel):
|
155 |
+
"Remove sentinel values from paired or unpaired 1D samples"
|
156 |
+
# could consolidate with `_remove_nans`, but it's not quite as simple as
|
157 |
+
# passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
|
158 |
+
|
159 |
+
# potential optimization: don't copy arrays that don't contain sentinel
|
160 |
+
if not paired:
|
161 |
+
return [sample[sample != sentinel] for sample in samples]
|
162 |
+
|
163 |
+
# for paired samples, we need to remove the whole pair when any part
|
164 |
+
# has a nan
|
165 |
+
sentinels = (samples[0] == sentinel)
|
166 |
+
for sample in samples[1:]:
|
167 |
+
sentinels = sentinels | (sample == sentinel)
|
168 |
+
not_sentinels = ~sentinels
|
169 |
+
return [sample[not_sentinels] for sample in samples]
|
170 |
+
|
171 |
+
|
172 |
+
def _masked_arrays_2_sentinel_arrays(samples):
    # masked arrays in `samples` are converted to regular arrays, and values
    # corresponding with masked elements are replaced with a sentinel value
    # Returns ``(samples, sentinel)``; `sentinel` is None when no input had
    # a mask. NOTE: the input list `samples` is mutated in place (dtype
    # promotion below) even on the early-return path's preceding loop.

    # return without modifying arrays if none have a mask
    has_mask = False
    for sample in samples:
        mask = getattr(sample, 'mask', False)
        has_mask = has_mask or np.any(mask)
    if not has_mask:
        return samples, None  # None means there is no sentinel value

    # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
    # values are always omitted, but there are different nan policies.
    dtype = np.result_type(*samples)
    # non-numeric inputs are promoted to float64 so a numeric sentinel exists
    dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
    for i in range(len(samples)):
        # Things get more complicated if the arrays are of different types.
        # We could have different sentinel values for each array, but
        # the purpose of this code is convenience, not efficiency.
        samples[i] = samples[i].astype(dtype, copy=False)

    inexact = np.issubdtype(dtype, np.inexact)
    info = np.finfo if inexact else np.iinfo
    max_possible, min_possible = info(dtype).max, info(dtype).min
    # for integer dtypes, "next value toward -inf" is simply x - 1
    nextafter = np.nextafter if inexact else (lambda x, _: x - 1)

    sentinel = max_possible
    # For simplicity, min_possible/np.infs are not candidate sentinel values
    # Search downward from the dtype's max until a value absent from every
    # sample is found. The inner for/else breaks the while loop on success;
    # the while/else raises when every candidate value was exhausted.
    while sentinel > min_possible:
        for sample in samples:
            if np.any(sample == sentinel):  # choose a new sentinel value
                sentinel = nextafter(sentinel, -np.inf)
                break
        else:  # when sentinel value is OK, break the while loop
            break
    else:
        message = ("This function replaces masked elements with sentinel "
                   "values, but the data contains all distinct values of this "
                   "data type. Consider promoting the dtype to `np.float64`.")
        raise ValueError(message)

    # replace masked elements with sentinel value
    out_samples = []
    for sample in samples:
        mask = getattr(sample, 'mask', None)
        if mask is not None:  # turn all masked arrays into sentinel arrays
            mask = np.broadcast_to(mask, sample.shape)
            # copy only when there is something to overwrite
            sample = sample.data.copy() if np.any(mask) else sample.data
            sample = np.asarray(sample)  # `sample.data` could be a memoryview?
            sample[mask] = sentinel
        out_samples.append(sample)

    return out_samples, sentinel
|
226 |
+
|
227 |
+
|
228 |
+
def _check_empty_inputs(samples, axis):
    """
    Check for empty sample; return appropriate output for a vectorized hypotest
    """
    # When every sample is non-empty, the caller must actually run the test.
    if all(sample.size != 0 for sample in samples):
        return None
    # Otherwise the statistic and p-value are either empty arrays or arrays
    # of NaNs, shaped like the broadcast result with `axis` consumed.
    out_shape = _broadcast_array_shapes_remove_axis(samples, axis)
    return np.ones(out_shape) * _get_nan(*samples)
|
240 |
+
|
241 |
+
|
242 |
+
def _add_reduced_axes(res, reduced_axes, keepdims):
|
243 |
+
"""
|
244 |
+
Add reduced axes back to all the arrays in the result object
|
245 |
+
if keepdims = True.
|
246 |
+
"""
|
247 |
+
return ([np.expand_dims(output, reduced_axes) for output in res]
|
248 |
+
if keepdims else res)
|
249 |
+
|
250 |
+
|
251 |
+
# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
_name = 'axis'
# Description is stored as a list of lines, the format expected by
# scipy._lib._docscrape.Parameter. NOTE: `_name`/`_desc` are reused (and
# reassigned) for the other parameters below.
_desc = (
    """If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in a
corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic."""
    .split('\n'))
|
259 |
+
|
260 |
+
|
261 |
+
def _get_axis_params(default_axis=0, _name=_name, _desc=_desc):  # bind NOW
    """Build the docstring entry and signature parameter for ``axis``.

    `_name`/`_desc` are captured as defaults so later module-level
    reassignment of those globals does not affect this function.
    """
    type_str = f"int or None, default: {default_axis}"
    doc_entry = Parameter(_name, type_str, _desc)
    sig_entry = inspect.Parameter(_name,
                                  inspect.Parameter.KEYWORD_ONLY,
                                  default=default_axis)
    return doc_entry, sig_entry
|
268 |
+
|
269 |
+
|
270 |
+
# Docstring entry and keyword-only signature parameter for `nan_policy`.
_name = 'nan_policy'
_type = "{'propagate', 'omit', 'raise'}"
_desc = (
    """Defines how to handle input NaNs.

- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
  which the  statistic is computed, the corresponding entry of the output
  will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
  If insufficient data remains in the axis slice along which the
  statistic is computed, the corresponding entry of the output will be
  NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
    .split('\n'))
_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
_nan_policy_parameter = inspect.Parameter(_name,
                                          inspect.Parameter.KEYWORD_ONLY,
                                          default='propagate')

# Docstring entry and keyword-only signature parameter for `keepdims`.
_name = 'keepdims'
_type = "bool, default: False"
_desc = (
    """If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array."""
    .split('\n'))
_keepdims_parameter_doc = Parameter(_name, _type, _desc)
_keepdims_parameter = inspect.Parameter(_name,
                                        inspect.Parameter.KEYWORD_ONLY,
                                        default=False)

# Lines appended to the Notes section of every decorated function's docstring.
_standard_note_addition = (
    """\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
code) are converted to ``np.ndarray`` before the calculation is performed. In
this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
masked array with ``mask=False``.""").split('\n')
|
308 |
+
|
309 |
+
|
310 |
+
def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
                             n_samples=1, paired=False,
                             result_to_tuple=None, too_small=0,
                             n_outputs=2, kwd_samples=[], override=None):
    """Factory for a wrapper that adds axis/nan_policy params to a function.

    Parameters
    ----------
    tuple_to_result : callable
        Callable that returns an object of the type returned by the function
        being wrapped (e.g. the namedtuple or dataclass returned by a
        statistical test) provided the separate components (e.g. statistic,
        pvalue).
    default_axis : int, default: 0
        The default value of the axis argument. Standard is 0 except when
        backwards compatibility demands otherwise (e.g. `None`).
    n_samples : int or callable, default: 1
        The number of data samples accepted by the function
        (e.g. `mannwhitneyu`), a callable that accepts a dictionary of
        parameters passed into the function and returns the number of data
        samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
        of samples (e.g. `kruskal`).
    paired : {False, True}
        Whether the function being wrapped treats the samples as paired (i.e.
        corresponding elements of each sample should be considered as different
        components of the same sample.)
    result_to_tuple : callable, optional
        Function that unpacks the results of the function being wrapped into
        a tuple. This is essentially the inverse of `tuple_to_result`. Default
        is `None`, which is appropriate for statistical tests that return a
        statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
    too_small : int or callable, default: 0
        The largest unacceptably small sample for the function being wrapped.
        For example, some functions require samples of size two or more or they
        raise an error. This argument prevents the error from being raised when
        input is not 1D and instead places a NaN in the corresponding element
        of the result. If callable, it must accept a list of samples, axis,
        and a dictionary of keyword arguments passed to the wrapper function as
        arguments and return a bool indicating whether the samples passed are
        too small.
    n_outputs : int or callable, default: 2
        The number of outputs produced by the function given 1d sample(s). For
        example, hypothesis tests that return a namedtuple or result object
        with attributes ``statistic`` and ``pvalue`` use the default
        ``n_outputs=2``; summary statistics with scalar output use
        ``n_outputs=1``. Alternatively, may be a callable that accepts a
        dictionary of arguments passed into the wrapped function and returns
        the number of outputs corresponding with those arguments.
    kwd_samples : sequence, default: []
        The names of keyword parameters that should be treated as samples. For
        example, `gmean` accepts as its first argument a sample `a` but
        also `weights` as a fourth, optional keyword argument. In this case, we
        use `n_samples=1` and kwd_samples=['weights'].
    override : dict, default: {'vectorization': False, 'nan_propagation': True}
        Pass a dictionary with ``'vectorization': True`` to ensure that the
        decorator overrides the function's behavior for multidimensional input.
        Use ``'nan_propagation': False`` to ensure that the decorator does not
        override the function's behavior for ``nan_policy='propagate'``.
        (See `scipy.stats.mode`, for example.)
    """
    # NOTE: the mutable default `kwd_samples=[]` is only read, never mutated,
    # so it is safe here.
    # Specify which existing behaviors the decorator must override
    temp = override or {}
    override = {'vectorization': False,
                'nan_propagation': True}
    override.update(temp)

    if result_to_tuple is None:
        # identity: the wrapped function already returns an iterable pair
        def result_to_tuple(res):
            return res

    if not callable(too_small):
        # integer `too_small`: a sample is too small when its length along
        # `axis` does not exceed `too_small`
        def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
            for sample in samples:
                if sample.shape[axis] <= too_small:
                    return True
            return False
    else:
        is_too_small = too_small

    def axis_nan_policy_decorator(hypotest_fun_in):
        @wraps(hypotest_fun_in)
        def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):

            if _no_deco:  # for testing, decorator does nothing
                return hypotest_fun_in(*args, **kwds)

            # We need to be flexible about whether position or keyword
            # arguments are used, but we need to make sure users don't pass
            # both for the same parameter. To complicate matters, some
            # functions accept samples with *args, and some functions already
            # accept `axis` and `nan_policy` as positional arguments.
            # The strategy is to make sure that there is no duplication
            # between `args` and `kwds`, combine the two into `kwds`, then
            # the samples, `nan_policy`, and `axis` from `kwds`, as they are
            # dealt with separately.

            # Check for intersection between positional and keyword args
            params = list(inspect.signature(hypotest_fun_in).parameters)
            if n_samples is None:
                # Give unique names to each positional sample argument
                # Note that *args can't be provided as a keyword argument
                params = [f"arg{i}" for i in range(len(args))] + params[1:]

            # raise if there are too many positional args
            maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs
                      else len(inspect.getfullargspec(hypotest_fun_in).args))
            if len(args) > maxarg:  # let the function raise the right error
                hypotest_fun_in(*args, **kwds)

            # raise if multiple values passed for same parameter
            d_args = dict(zip(params, args))
            intersection = set(d_args) & set(kwds)
            if intersection:  # let the function raise the right error
                hypotest_fun_in(*args, **kwds)

            # Consolidate other positional and keyword args into `kwds`
            kwds.update(d_args)

            # rename avoids UnboundLocalError
            if callable(n_samples):
                # Future refactoring idea: no need for callable n_samples.
                # Just replace `n_samples` and `kwd_samples` with a single
                # list of the names of all samples, and treat all of them
                # as `kwd_samples` are treated below.
                n_samp = n_samples(kwds)
            else:
                n_samp = n_samples or len(args)

            # get the number of outputs
            n_out = n_outputs  # rename to avoid UnboundLocalError
            if callable(n_out):
                n_out = n_out(kwds)

            # If necessary, rearrange function signature: accept other samples
            # as positional args right after the first n_samp args
            kwd_samp = [name for name in kwd_samples
                        if kwds.get(name, None) is not None]
            n_kwd_samp = len(kwd_samp)
            if not kwd_samp:
                hypotest_fun_out = hypotest_fun_in
            else:
                def hypotest_fun_out(*samples, **kwds):
                    # route the trailing positional samples back to their
                    # original keyword names before calling the function
                    new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
                    kwds.update(new_kwds)
                    return hypotest_fun_in(*samples[:n_samp], **kwds)

            # Extract the things we need here
            try:  # if something is missing
                samples = [np.atleast_1d(kwds.pop(param))
                           for param in (params[:n_samp] + kwd_samp)]
            except KeyError:  # let the function raise the right error
                # might need to revisit this if required arg is not a "sample"
                # NOTE(review): if this call does not raise, `samples` is
                # unbound below — presumably it always raises; confirm.
                hypotest_fun_in(*args, **kwds)
            vectorized = True if 'axis' in params else False
            vectorized = vectorized and not override['vectorization']
            axis = kwds.pop('axis', default_axis)
            nan_policy = kwds.pop('nan_policy', 'propagate')
            keepdims = kwds.pop("keepdims", False)
            del args  # avoid the possibility of passing both `args` and `kwds`

            # convert masked arrays to regular arrays with sentinel values
            samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)

            # standardize to always work along last axis
            reduced_axes = axis
            if axis is None:
                if samples:
                    # when axis=None, take the maximum of all dimensions since
                    # all the dimensions are reduced.
                    n_dims = np.max([sample.ndim for sample in samples])
                    reduced_axes = tuple(range(n_dims))
                samples = [np.asarray(sample.ravel()) for sample in samples]
            else:
                samples = _broadcast_arrays(samples, axis=axis)
                axis = np.atleast_1d(axis)
                n_axes = len(axis)
                # move all axes in `axis` to the end to be raveled
                samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
                           for sample in samples]
                shapes = [sample.shape for sample in samples]
                # New shape is unchanged for all axes _not_ in `axis`
                # At the end, we append the product of the shapes of the axes
                # in `axis`. Appending -1 doesn't work for zero-size arrays!
                new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
                              for shape in shapes]
                samples = [sample.reshape(new_shape)
                           for sample, new_shape in zip(samples, new_shapes)]
            axis = -1  # work over the last axis
            NaN = _get_nan(*samples)

            # if axis is not needed, just handle nan_policy and return
            ndims = np.array([sample.ndim for sample in samples])
            if np.all(ndims <= 1):
                # Addresses nan_policy == "raise"
                if nan_policy != 'propagate' or override['nan_propagation']:
                    contains_nan = [_contains_nan(sample, nan_policy)[0]
                                    for sample in samples]
                else:
                    # Behave as though there are no NaNs (even if there are)
                    contains_nan = [False]*len(samples)

                # Addresses nan_policy == "propagate"
                if any(contains_nan) and (nan_policy == 'propagate'
                                          and override['nan_propagation']):
                    res = np.full(n_out, NaN)
                    res = _add_reduced_axes(res, reduced_axes, keepdims)
                    return tuple_to_result(*res)

                # Addresses nan_policy == "omit"
                if any(contains_nan) and nan_policy == 'omit':
                    # consider passing in contains_nan
                    samples = _remove_nans(samples, paired)

                # ideally, this is what the behavior would be:
                # if is_too_small(samples):
                #     return tuple_to_result(NaN, NaN)
                # but some existing functions raise exceptions, and changing
                # behavior of those would break backward compatibility.

                if sentinel:
                    samples = _remove_sentinel(samples, paired, sentinel)
                res = hypotest_fun_out(*samples, **kwds)
                res = result_to_tuple(res)
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)

            # check for empty input
            # ideally, move this to the top, but some existing functions raise
            # exceptions for empty input, so overriding it would break
            # backward compatibility.
            empty_output = _check_empty_inputs(samples, axis)
            # only return empty output if zero sized input is too small.
            if (
                empty_output is not None
                and (is_too_small(samples, kwds) or empty_output.size == 0)
            ):
                res = [empty_output.copy() for i in range(n_out)]
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)

            # otherwise, concatenate all samples along axis, remembering where
            # each separate sample begins
            lengths = np.array([sample.shape[axis] for sample in samples])
            split_indices = np.cumsum(lengths)
            x = _broadcast_concatenate(samples, axis)

            # Addresses nan_policy == "raise"
            if nan_policy != 'propagate' or override['nan_propagation']:
                contains_nan, _ = _contains_nan(x, nan_policy)
            else:
                contains_nan = False  # behave like there are no NaNs

            # fast path: function is natively vectorized and there is nothing
            # to omit or propagate slice-by-slice
            if vectorized and not contains_nan and not sentinel:
                res = hypotest_fun_out(*samples, axis=axis, **kwds)
                res = result_to_tuple(res)
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)

            # Addresses nan_policy == "omit"
            if contains_nan and nan_policy == 'omit':
                def hypotest_fun(x):
                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    samples = _remove_nans(samples, paired)
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))

            # Addresses nan_policy == "propagate"
            elif (contains_nan and nan_policy == 'propagate'
                  and override['nan_propagation']):
                def hypotest_fun(x):
                    if np.isnan(x).any():
                        return np.full(n_out, NaN)

                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))

            else:
                def hypotest_fun(x):
                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))

            # apply the 1-D kernel along the working (last) axis
            x = np.moveaxis(x, axis, 0)
            res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
            res = _add_reduced_axes(res, reduced_axes, keepdims)
            return tuple_to_result(*res)

        # Rewrite the wrapper's docstring: replace or append the standard
        # `axis`, `nan_policy`, and `keepdims` parameter descriptions.
        _axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
        doc = FunctionDoc(axis_nan_policy_wrapper)
        parameter_names = [param.name for param in doc['Parameters']]
        if 'axis' in parameter_names:
            doc['Parameters'][parameter_names.index('axis')] = (
                _axis_parameter_doc)
        else:
            doc['Parameters'].append(_axis_parameter_doc)
        if 'nan_policy' in parameter_names:
            doc['Parameters'][parameter_names.index('nan_policy')] = (
                _nan_policy_parameter_doc)
        else:
            doc['Parameters'].append(_nan_policy_parameter_doc)
        if 'keepdims' in parameter_names:
            doc['Parameters'][parameter_names.index('keepdims')] = (
                _keepdims_parameter_doc)
        else:
            doc['Parameters'].append(_keepdims_parameter_doc)
        doc['Notes'] += _standard_note_addition
        doc = str(doc).split("\n", 1)[1]  # remove signature
        axis_nan_policy_wrapper.__doc__ = str(doc)

        # Rewrite the wrapper's signature to advertise the keyword-only
        # `axis`, `nan_policy`, and `keepdims` parameters it now accepts.
        sig = inspect.signature(axis_nan_policy_wrapper)
        parameters = sig.parameters
        parameter_list = list(parameters.values())
        if 'axis' not in parameters:
            parameter_list.append(_axis_parameter)
        if 'nan_policy' not in parameters:
            parameter_list.append(_nan_policy_parameter)
        if 'keepdims' not in parameters:
            parameter_list.append(_keepdims_parameter)
        sig = sig.replace(parameters=parameter_list)
        axis_nan_policy_wrapper.__signature__ = sig

        return axis_nan_policy_wrapper
    return axis_nan_policy_decorator
|
venv/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (360 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/stats/_bws_test.py
ADDED
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from functools import partial
|
3 |
+
from scipy import stats
|
4 |
+
|
5 |
+
|
6 |
+
def _bws_input_validation(x, y, alternative, method):
|
7 |
+
''' Input validation and standardization for bws test'''
|
8 |
+
x, y = np.atleast_1d(x, y)
|
9 |
+
if x.ndim > 1 or y.ndim > 1:
|
10 |
+
raise ValueError('`x` and `y` must be exactly one-dimensional.')
|
11 |
+
if np.isnan(x).any() or np.isnan(y).any():
|
12 |
+
raise ValueError('`x` and `y` must not contain NaNs.')
|
13 |
+
if np.size(x) == 0 or np.size(y) == 0:
|
14 |
+
raise ValueError('`x` and `y` must be of nonzero size.')
|
15 |
+
|
16 |
+
z = stats.rankdata(np.concatenate((x, y)))
|
17 |
+
x, y = z[:len(x)], z[len(x):]
|
18 |
+
|
19 |
+
alternatives = {'two-sided', 'less', 'greater'}
|
20 |
+
alternative = alternative.lower()
|
21 |
+
if alternative not in alternatives:
|
22 |
+
raise ValueError(f'`alternative` must be one of {alternatives}.')
|
23 |
+
|
24 |
+
method = stats.PermutationMethod() if method is None else method
|
25 |
+
if not isinstance(method, stats.PermutationMethod):
|
26 |
+
raise ValueError('`method` must be an instance of '
|
27 |
+
'`scipy.stats.PermutationMethod`')
|
28 |
+
|
29 |
+
return x, y, alternative, method
|
30 |
+
|
31 |
+
|
32 |
+
def _bws_statistic(x, y, alternative, axis):
|
33 |
+
'''Compute the BWS test statistic for two independent samples'''
|
34 |
+
# Public function currently does not accept `axis`, but `permutation_test`
|
35 |
+
# uses `axis` to make vectorized call.
|
36 |
+
|
37 |
+
Ri, Hj = np.sort(x, axis=axis), np.sort(y, axis=axis)
|
38 |
+
n, m = Ri.shape[axis], Hj.shape[axis]
|
39 |
+
i, j = np.arange(1, n+1), np.arange(1, m+1)
|
40 |
+
|
41 |
+
Bx_num = Ri - (m + n)/n * i
|
42 |
+
By_num = Hj - (m + n)/m * j
|
43 |
+
|
44 |
+
if alternative == 'two-sided':
|
45 |
+
Bx_num *= Bx_num
|
46 |
+
By_num *= By_num
|
47 |
+
else:
|
48 |
+
Bx_num *= np.abs(Bx_num)
|
49 |
+
By_num *= np.abs(By_num)
|
50 |
+
|
51 |
+
Bx_den = i/(n+1) * (1 - i/(n+1)) * m*(m+n)/n
|
52 |
+
By_den = j/(m+1) * (1 - j/(m+1)) * n*(m+n)/m
|
53 |
+
|
54 |
+
Bx = 1/n * np.sum(Bx_num/Bx_den, axis=axis)
|
55 |
+
By = 1/m * np.sum(By_num/By_den, axis=axis)
|
56 |
+
|
57 |
+
B = (Bx + By) / 2 if alternative == 'two-sided' else (Bx - By) / 2
|
58 |
+
|
59 |
+
return B
|
60 |
+
|
61 |
+
|
62 |
+
def bws_test(x, y, *, alternative="two-sided", method=None):
    r'''Perform the Baumgartner-Weiss-Schindler test on two independent samples.

    The Baumgartner-Weiss-Schindler (BWS) test is a nonparametric test of
    the null hypothesis that the distribution underlying sample `x` is the
    same as the distribution underlying sample `y`. Unlike the
    Kolmogorov-Smirnov, Wilcoxon, and Cramer-Von Mises tests, the BWS test
    weights the integral by the variance of the difference in cumulative
    distribution functions (CDFs), emphasizing the tails of the
    distributions, which increases the power of the test in many
    applications.

    Parameters
    ----------
    x, y : array-like
        1-d arrays of samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        Let *F(u)* and *G(u)* be the cumulative distribution functions of
        the distributions underlying `x` and `y`, respectively. Then the
        following alternative hypotheses are available:

        * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)*
          for at least one *u*.
        * 'less': the distribution underlying `x` is stochastically less
          than the distribution underlying `y`, i.e. *F(u) >= G(u)* for all
          *u*.
        * 'greater': the distribution underlying `x` is stochastically
          greater than the distribution underlying `y`, i.e.
          *F(u) <= G(u)* for all *u*.

        Under a more restrictive set of assumptions, the alternative
        hypotheses can be expressed in terms of the locations of the
        distributions; see [2] section 5.1.
    method : PermutationMethod, optional
        Configures the method used to compute the p-value. The default is
        the default `PermutationMethod` object.

    Returns
    -------
    res : PermutationTestResult
        An object with attributes:

        statistic : float
            The observed test statistic of the data.
        pvalue : float
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    See also
    --------
    scipy.stats.wilcoxon, scipy.stats.mannwhitneyu, scipy.stats.ttest_ind

    Notes
    -----
    When ``alternative=='two-sided'``, the statistic is defined by the
    equations given in [1]_ Section 2. This statistic is not appropriate
    for one-sided alternatives; in that case, the statistic is the
    *negative* of that given by the equations in [1]_ Section 2.
    Consequently, when the distribution of the first sample is
    stochastically greater than that of the second sample, the statistic
    will tend to be positive.

    References
    ----------
    .. [1] Neuhäuser, M. (2005). Exact Tests Based on the
           Baumgartner-Weiss-Schindler Statistic: A Survey. Statistical
           Papers, 46(1), 1-29.
    .. [2] Fay, M. P., & Proschan, M. A. (2010). Wilcoxon-Mann-Whitney or
           t-test? On assumptions for hypothesis tests and multiple
           interpretations of decision rules. Statistics surveys, 4, 1.

    Examples
    --------
    We follow the example of table 3 in [1]_: Fourteen children were
    divided randomly into two groups. Their ranks at performing a specific
    test are as follows.

    >>> import numpy as np
    >>> x = [1, 2, 3, 4, 6, 7, 8]
    >>> y = [5, 9, 10, 11, 12, 13, 14]

    We use the BWS test to assess whether there is a statistically
    significant difference between the two groups, at a significance level
    of 1%. Since the number of samples is very small, the observed test
    statistic is compared against the *exact* null distribution.

    >>> from scipy.stats import bws_test
    >>> res = bws_test(x, y)
    >>> print(res.statistic)
    5.132167152575315
    >>> print(res.pvalue)
    0.002913752913752914

    These agree with :math:`B = 5.132` and :math:`p = 0.0029` reported in
    [1]_. Because the p-value is below our threshold of 1%, we take this as
    evidence against the null hypothesis in favor of the alternative that
    there is a difference in performance between the two groups.
    '''
    x, y, alternative, method = _bws_input_validation(x, y, alternative,
                                                      method)

    # `permutation_test` calls the statistic with an `axis` keyword; bind
    # the validated `alternative` in a closure.
    def statistic(x, y, axis):
        return _bws_statistic(x, y, alternative, axis)

    # The two-sided statistic is a sum of squared deviations, so only large
    # values are extreme; map 'two-sided' to 'greater' accordingly.
    permutation_alternative = 'less' if alternative == 'less' else 'greater'
    res = stats.permutation_test((x, y), statistic,
                                 alternative=permutation_alternative,
                                 **method._asdict())

    return res
|
venv/lib/python3.10/site-packages/scipy/stats/_censored_data.py
ADDED
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
|
4 |
+
def _validate_1d(a, name, allow_inf=False):
|
5 |
+
if np.ndim(a) != 1:
|
6 |
+
raise ValueError(f'`{name}` must be a one-dimensional sequence.')
|
7 |
+
if np.isnan(a).any():
|
8 |
+
raise ValueError(f'`{name}` must not contain nan.')
|
9 |
+
if not allow_inf and np.isinf(a).any():
|
10 |
+
raise ValueError(f'`{name}` must contain only finite values.')
|
11 |
+
|
12 |
+
|
13 |
+
def _validate_interval(interval):
|
14 |
+
interval = np.asarray(interval)
|
15 |
+
if interval.shape == (0,):
|
16 |
+
# The input was a sequence with length 0.
|
17 |
+
interval = interval.reshape((0, 2))
|
18 |
+
if interval.ndim != 2 or interval.shape[-1] != 2:
|
19 |
+
raise ValueError('`interval` must be a two-dimensional array with '
|
20 |
+
'shape (m, 2), where m is the number of '
|
21 |
+
'interval-censored values, but got shape '
|
22 |
+
f'{interval.shape}')
|
23 |
+
|
24 |
+
if np.isnan(interval).any():
|
25 |
+
raise ValueError('`interval` must not contain nan.')
|
26 |
+
if np.isinf(interval).all(axis=1).any():
|
27 |
+
raise ValueError('In each row in `interval`, both values must not'
|
28 |
+
' be infinite.')
|
29 |
+
if (interval[:, 0] > interval[:, 1]).any():
|
30 |
+
raise ValueError('In each row of `interval`, the left value must not'
|
31 |
+
' exceed the right value.')
|
32 |
+
|
33 |
+
uncensored_mask = interval[:, 0] == interval[:, 1]
|
34 |
+
left_mask = np.isinf(interval[:, 0])
|
35 |
+
right_mask = np.isinf(interval[:, 1])
|
36 |
+
interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask
|
37 |
+
|
38 |
+
uncensored2 = interval[uncensored_mask, 0]
|
39 |
+
left2 = interval[left_mask, 1]
|
40 |
+
right2 = interval[right_mask, 0]
|
41 |
+
interval2 = interval[interval_mask]
|
42 |
+
|
43 |
+
return uncensored2, left2, right2, interval2
|
44 |
+
|
45 |
+
|
46 |
+
def _validate_x_censored(x, censored):
|
47 |
+
x = np.asarray(x)
|
48 |
+
if x.ndim != 1:
|
49 |
+
raise ValueError('`x` must be one-dimensional.')
|
50 |
+
censored = np.asarray(censored)
|
51 |
+
if censored.ndim != 1:
|
52 |
+
raise ValueError('`censored` must be one-dimensional.')
|
53 |
+
if (~np.isfinite(x)).any():
|
54 |
+
raise ValueError('`x` must not contain nan or inf.')
|
55 |
+
if censored.size != x.size:
|
56 |
+
raise ValueError('`x` and `censored` must have the same length.')
|
57 |
+
return x, censored.astype(bool)
|
58 |
+
|
59 |
+
|
60 |
+
class CensoredData:
    """
    Instances of this class represent censored data.

    Instances may be passed to the ``fit`` method of continuous
    univariate SciPy distributions for maximum likelihood estimation.
    The *only* method of the univariate continuous distributions that
    understands `CensoredData` is the ``fit`` method. An instance of
    `CensoredData` can not be passed to methods such as ``pdf`` and
    ``cdf``.

    An observation is said to be *censored* when the precise value is unknown,
    but it has a known upper and/or lower bound. The conventional terminology
    is:

    * left-censored: an observation is below a certain value but it is
      unknown by how much.
    * right-censored: an observation is above a certain value but it is
      unknown by how much.
    * interval-censored: an observation lies somewhere on an interval between
      two values.

    Left-, right-, and interval-censored data can be represented by
    `CensoredData`.

    For convenience, the class methods ``left_censored`` and
    ``right_censored`` are provided to create a `CensoredData`
    instance from a single one-dimensional array of measurements
    and a corresponding boolean array to indicate which measurements
    are censored. The class method ``interval_censored`` accepts two
    one-dimensional arrays that hold the lower and upper bounds of the
    intervals.

    Parameters
    ----------
    uncensored : array_like, 1D
        Uncensored observations.
    left : array_like, 1D
        Left-censored observations.
    right : array_like, 1D
        Right-censored observations.
    interval : array_like, 2D, with shape (m, 2)
        Interval-censored observations. Each row ``interval[k, :]``
        represents the interval for the kth interval-censored observation.

    Notes
    -----
    In the input array `interval`, the lower bound of the interval may
    be ``-inf``, and the upper bound may be ``inf``, but at least one must be
    finite. When the lower bound is ``-inf``, the row represents a left-
    censored observation, and when the upper bound is ``inf``, the row
    represents a right-censored observation. If the length of an interval
    is 0 (i.e. ``interval[k, 0] == interval[k, 1]``), the observation is
    treated as uncensored. So one can represent all the types of censored
    and uncensored data in ``interval``, but it is generally more convenient
    to use `uncensored`, `left` and `right` for uncensored, left-censored and
    right-censored observations, respectively.

    Examples
    --------
    In the most general case, a censored data set may contain values that
    are left-censored, right-censored, interval-censored, and uncensored.
    For example, here we create a data set with five observations. Two
    are uncensored (values 1 and 1.5), one is a left-censored observation
    of 0, one is a right-censored observation of 10 and one is
    interval-censored in the interval [2, 3].

    >>> import numpy as np
    >>> from scipy.stats import CensoredData
    >>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
    ...                     interval=[[2, 3]])
    >>> print(data)
    CensoredData(5 values: 2 not censored, 1 left-censored, 1 right-censored, 1 interval-censored)

    Equivalently,

    >>> data = CensoredData(interval=[[1, 1],
    ...                               [1.5, 1.5],
    ...                               [-np.inf, 0],
    ...                               [10, np.inf],
    ...                               [2, 3]])
    >>> print(data)
    CensoredData(5 values: 2 not censored, 1 left-censored, 1 right-censored, 1 interval-censored)

    A common case is to have a mix of uncensored observations and censored
    observations that are all right-censored (or all left-censored). For
    example, consider an experiment in which six devices are started at
    various times and left running until they fail. Assume that time is
    measured in hours, and the experiment is stopped after 30 hours, even
    if all the devices have not failed by that time. We might end up with
    data such as this::

        Device  Start-time  Fail-time  Time-to-failure
           1         0         13           13
           2         2         24           22
           3         5         22           17
           4         8         23           15
           5        10        ***          >20
           6        12        ***          >18

    Two of the devices had not failed when the experiment was stopped;
    the observations of the time-to-failure for these two devices are
    right-censored. We can represent this data with

    >>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
    >>> print(data)
    CensoredData(6 values: 4 not censored, 2 right-censored)

    Alternatively, we can use the method `CensoredData.right_censored` to
    create a representation of this data. The time-to-failure observations
    are put in the list ``ttf``. The ``censored`` list indicates which values
    in ``ttf`` are censored.

    >>> ttf = [13, 22, 17, 15, 20, 18]
    >>> censored = [False, False, False, False, True, True]

    Pass these lists to `CensoredData.right_censored` to create an
    instance of `CensoredData`.

    >>> data = CensoredData.right_censored(ttf, censored)
    >>> print(data)
    CensoredData(6 values: 4 not censored, 2 right-censored)

    If the input data is interval censored and already stored in two
    arrays, one holding the low end of the intervals and another
    holding the high ends, the class method ``interval_censored`` can
    be used to create the `CensoredData` instance.

    This example creates an instance with four interval-censored values.
    The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5].

    >>> a = [10, 0.5, 2, 12.5]  # Low ends of the intervals
    >>> b = [11, 1.0, 3, 13.5]  # High ends of the intervals
    >>> data = CensoredData.interval_censored(low=a, high=b)
    >>> print(data)
    CensoredData(4 values: 0 not censored, 4 interval-censored)

    Finally, we create and censor some data from the `weibull_min`
    distribution, and then fit `weibull_min` to that data. We'll assume
    that the location parameter is known to be 0.

    >>> from scipy.stats import weibull_min
    >>> rng = np.random.default_rng()

    Create the random data set.

    >>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng)
    >>> x[x > 40] = 40  # Right-censor values greater or equal to 40.

    Create the `CensoredData` instance with the `right_censored` method.
    The censored values are those where the value is 40.

    >>> data = CensoredData.right_censored(x, x == 40)
    >>> print(data)
    CensoredData(250 values: 215 not censored, 35 right-censored)

    35 values have been right-censored.

    Fit `weibull_min` to the censored data. We expect the shape and scale
    to be approximately 2.5 and 30, respectively.

    >>> weibull_min.fit(data, floc=0)
    (2.3575922823897315, 0, 30.40650074451254)

    """

    def __init__(self, uncensored=None, *, left=None, right=None,
                 interval=None):
        # Any argument not given is treated as an empty collection of
        # observations of the corresponding kind.
        if uncensored is None:
            uncensored = []
        if left is None:
            left = []
        if right is None:
            right = []
        if interval is None:
            interval = np.empty((0, 2))

        _validate_1d(uncensored, 'uncensored')
        _validate_1d(left, 'left')
        _validate_1d(right, 'right')
        # Rows of `interval` that are degenerate (zero length) or have an
        # infinite endpoint are folded into the other three categories.
        uncensored2, left2, right2, interval2 = _validate_interval(interval)

        self._uncensored = np.concatenate((uncensored, uncensored2))
        self._left = np.concatenate((left, left2))
        self._right = np.concatenate((right, right2))
        # Note that by construction, the private attribute _interval
        # will be a 2D array that contains only finite values representing
        # intervals with nonzero but finite length.
        self._interval = interval2

    def __repr__(self):
        # Collapse the whitespace in each array's repr so the result is a
        # single line regardless of array size.
        uncensored_str = " ".join(np.array_repr(self._uncensored).split())
        left_str = " ".join(np.array_repr(self._left).split())
        right_str = " ".join(np.array_repr(self._right).split())
        interval_str = " ".join(np.array_repr(self._interval).split())
        return (f"CensoredData(uncensored={uncensored_str}, left={left_str}, "
                f"right={right_str}, interval={interval_str})")

    def __str__(self):
        # Summarize the counts of each kind of observation, omitting the
        # censored categories that are empty.
        num_nc = len(self._uncensored)
        num_lc = len(self._left)
        num_rc = len(self._right)
        num_ic = len(self._interval)
        n = num_nc + num_lc + num_rc + num_ic
        parts = [f'{num_nc} not censored']
        if num_lc > 0:
            parts.append(f'{num_lc} left-censored')
        if num_rc > 0:
            parts.append(f'{num_rc} right-censored')
        if num_ic > 0:
            parts.append(f'{num_ic} interval-censored')
        return f'CensoredData({n} values: ' + ', '.join(parts) + ')'

    # This is not a complete implementation of the arithmetic operators.
    # All we need is subtracting a scalar and dividing by a scalar.

    def __sub__(self, other):
        # Shift every observation (and interval endpoint) by `other`.
        return CensoredData(uncensored=self._uncensored - other,
                            left=self._left - other,
                            right=self._right - other,
                            interval=self._interval - other)

    def __truediv__(self, other):
        # Scale every observation (and interval endpoint) by 1/`other`.
        return CensoredData(uncensored=self._uncensored / other,
                            left=self._left / other,
                            right=self._right / other,
                            interval=self._interval / other)

    def __len__(self):
        """
        The number of values (censored and not censored).
        """
        return (len(self._uncensored) + len(self._left) + len(self._right)
                + len(self._interval))

    def num_censored(self):
        """
        Number of censored values.
        """
        return len(self._left) + len(self._right) + len(self._interval)

    @classmethod
    def right_censored(cls, x, censored):
        """
        Create a `CensoredData` instance of right-censored data.

        Parameters
        ----------
        x : array_like
            `x` is the array of observed data or measurements.
            `x` must be a one-dimensional sequence of finite numbers.
        censored : array_like of bool
            `censored` must be a one-dimensional sequence of boolean
            values.  If ``censored[k]`` is True, the corresponding value
            in `x` is right-censored.  That is, the value ``x[k]``
            is the lower bound of the true (but unknown) value.

        Returns
        -------
        data : `CensoredData`
            An instance of `CensoredData` that represents the
            collection of uncensored and right-censored values.

        Examples
        --------
        >>> from scipy.stats import CensoredData

        Two uncensored values (4 and 10) and two right-censored values
        (24 and 25).

        >>> data = CensoredData.right_censored([4, 10, 24, 25],
        ...                                    [False, False, True, True])
        >>> data
        CensoredData(uncensored=array([ 4., 10.]),
        left=array([], dtype=float64), right=array([24., 25.]),
        interval=array([], shape=(0, 2), dtype=float64))
        >>> print(data)
        CensoredData(4 values: 2 not censored, 2 right-censored)
        """
        x, censored = _validate_x_censored(x, censored)
        return cls(uncensored=x[~censored], right=x[censored])

    @classmethod
    def left_censored(cls, x, censored):
        """
        Create a `CensoredData` instance of left-censored data.

        Parameters
        ----------
        x : array_like
            `x` is the array of observed data or measurements.
            `x` must be a one-dimensional sequence of finite numbers.
        censored : array_like of bool
            `censored` must be a one-dimensional sequence of boolean
            values.  If ``censored[k]`` is True, the corresponding value
            in `x` is left-censored.  That is, the value ``x[k]``
            is the upper bound of the true (but unknown) value.

        Returns
        -------
        data : `CensoredData`
            An instance of `CensoredData` that represents the
            collection of uncensored and left-censored values.

        Examples
        --------
        >>> from scipy.stats import CensoredData

        Two uncensored values (0.12 and 0.033) and two left-censored values
        (both 1e-3).

        >>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
        ...                                   [False, False, True, True])
        >>> data
        CensoredData(uncensored=array([0.12 , 0.033]),
        left=array([0.001, 0.001]), right=array([], dtype=float64),
        interval=array([], shape=(0, 2), dtype=float64))
        >>> print(data)
        CensoredData(4 values: 2 not censored, 2 left-censored)
        """
        x, censored = _validate_x_censored(x, censored)
        return cls(uncensored=x[~censored], left=x[censored])

    @classmethod
    def interval_censored(cls, low, high):
        """
        Create a `CensoredData` instance of interval-censored data.

        This method is useful when all the data is interval-censored, and
        the low and high ends of the intervals are already stored in
        separate one-dimensional arrays.

        Parameters
        ----------
        low : array_like
            The one-dimensional array containing the low ends of the
            intervals.
        high : array_like
            The one-dimensional array containing the high ends of the
            intervals.

        Returns
        -------
        data : `CensoredData`
            An instance of `CensoredData` that represents the
            collection of censored values.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.stats import CensoredData

        ``a`` and ``b`` are the low and high ends of a collection of
        interval-censored values.

        >>> a = [0.5, 2.0, 3.0, 5.5]
        >>> b = [1.0, 2.5, 3.5, 7.0]
        >>> data = CensoredData.interval_censored(low=a, high=b)
        >>> print(data)
        CensoredData(4 values: 0 not censored, 4 interval-censored)
        """
        # Infinite endpoints are allowed here; _validate_interval sorts the
        # rows into the appropriate censoring categories below.
        _validate_1d(low, 'low', allow_inf=True)
        _validate_1d(high, 'high', allow_inf=True)
        if len(low) != len(high):
            raise ValueError('`low` and `high` must have the same length.')
        interval = np.column_stack((low, high))
        uncensored, left, right, interval = _validate_interval(interval)
        return cls(uncensored=uncensored, left=left, right=right,
                   interval=interval)

    def _uncensor(self):
        """
        This function is used when a non-censored version of the data
        is needed to create a rough estimate of the parameters of a
        distribution via the method of moments or some similar method.
        The data is "uncensored" by taking the given endpoints as the
        data for the left- or right-censored data, and the mean for the
        interval-censored data.
        """
        data = np.concatenate((self._uncensored, self._left, self._right,
                               self._interval.mean(axis=1)))
        return data

    def _supported(self, a, b):
        """
        Return a subset of self containing the values that are in
        (or overlap with) the interval (a, b).
        """
        # Keep uncensored values strictly inside (a, b), left-censored
        # bounds above a, right-censored bounds below b, and intervals
        # that overlap (a, b).
        uncensored = self._uncensored
        uncensored = uncensored[(a < uncensored) & (uncensored < b)]
        left = self._left
        left = left[a < left]
        right = self._right
        right = right[right < b]
        interval = self._interval
        interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)]
        return CensoredData(uncensored, left=left, right=right,
                            interval=interval)
|
venv/lib/python3.10/site-packages/scipy/stats/_common.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import namedtuple
|
2 |
+
|
3 |
+
|
4 |
+
# A simple (low, high) pair used to report confidence intervals.
ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"])
ConfidenceInterval.__doc__ = "Class for confidence intervals."
|
venv/lib/python3.10/site-packages/scipy/stats/_constants.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Statistics-related constants.
|
3 |
+
|
4 |
+
"""
|
5 |
+
import numpy as np
|
6 |
+
|
7 |
+
|
8 |
+
# Machine epsilon: the difference between 1.0 and the next representable
# double, i.e. the smallest positive x such that 1.0 + x != 1.0.
_EPS = np.finfo(float).eps

# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).max

# The log of the largest usable floating value; useful for knowing
# when exp(something) will overflow.
_LOGXMAX = np.log(_XMAX)

# The smallest [in magnitude] usable (i.e. not subnormal) double precision
# floating value.
_XMIN = np.finfo(float).tiny

# The log of the smallest [in magnitude] usable (i.e. not subnormal)
# double precision floating value.
_LOGXMIN = np.log(_XMIN)

# The Euler-Mascheroni constant; equals -special.psi(1).
_EULER = 0.577215664901532860606512090082402431042

# Apery's constant; equals special.zeta(3, 1).
_ZETA3 = 1.202056903159594285399738161511449990765

# sqrt(pi)
_SQRT_PI = 1.772453850905516027298167483341145182798

# sqrt(2/pi)
_SQRT_2_OVER_PI = 0.7978845608028654

# log(sqrt(2/pi))
_LOG_SQRT_2_OVER_PI = -0.22579135264472744
|
venv/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|