applied-ai-018 committed on
Commit 33a0b79 · verified · 1 Parent(s): 555b2eb

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__init__.py +169 -0
  5. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so +0 -0
  6. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so +0 -0
  7. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_filters.py +1852 -0
  8. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_fourier.py +307 -0
  9. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py +1010 -0
  10. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_measurements.py +1681 -0
  11. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_morphology.py +2520 -0
  12. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so +0 -0
  13. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py +208 -0
  14. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so +0 -0
  15. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py +119 -0
  16. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/filters.py +27 -0
  17. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/fourier.py +21 -0
  18. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/interpolation.py +23 -0
  19. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/measurements.py +24 -0
  20. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/morphology.py +27 -0
  21. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py +13 -0
  22. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py +102 -0
  32. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py +66 -0
  33. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py +2189 -0
  34. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_fourier.py +151 -0
  35. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py +1327 -0
  36. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py +1409 -0
  37. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py +0 -0
  38. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py +77 -0
  39. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py +65 -0
  40. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc ADDED
Binary file (27.7 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc ADDED
Binary file (71.2 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc ADDED
Binary file (725 Bytes).
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__init__.py ADDED
@@ -0,0 +1,169 @@
+ """
+ =========================================================
+ Multidimensional image processing (:mod:`scipy.ndimage`)
+ =========================================================
+
+ .. currentmodule:: scipy.ndimage
+
+ This package contains various functions for multidimensional image
+ processing.
+
+
+ Filters
+ =======
+
+ .. autosummary::
+ :toctree: generated/
+
+ convolve - Multidimensional convolution
+ convolve1d - 1-D convolution along the given axis
+ correlate - Multidimensional correlation
+ correlate1d - 1-D correlation along the given axis
+ gaussian_filter
+ gaussian_filter1d
+ gaussian_gradient_magnitude
+ gaussian_laplace
+ generic_filter - Multidimensional filter using a given function
+ generic_filter1d - 1-D generic filter along the given axis
+ generic_gradient_magnitude
+ generic_laplace
+ laplace - N-D Laplace filter based on approximate second derivatives
+ maximum_filter
+ maximum_filter1d
+ median_filter - Calculates a multidimensional median filter
+ minimum_filter
+ minimum_filter1d
+ percentile_filter - Calculates a multidimensional percentile filter
+ prewitt
+ rank_filter - Calculates a multidimensional rank filter
+ sobel
+ uniform_filter - Multidimensional uniform filter
+ uniform_filter1d - 1-D uniform filter along the given axis
+
+ Fourier filters
+ ===============
+
+ .. autosummary::
+ :toctree: generated/
+
+ fourier_ellipsoid
+ fourier_gaussian
+ fourier_shift
+ fourier_uniform
+
+ Interpolation
+ =============
+
+ .. autosummary::
+ :toctree: generated/
+
+ affine_transform - Apply an affine transformation
+ geometric_transform - Apply an arbitrary geometric transform
+ map_coordinates - Map input array to new coordinates by interpolation
+ rotate - Rotate an array
+ shift - Shift an array
+ spline_filter
+ spline_filter1d
+ zoom - Zoom an array
+
+ Measurements
+ ============
+
+ .. autosummary::
+ :toctree: generated/
+
+ center_of_mass - The center of mass of the values of an array at labels
+ extrema - Min's and max's of an array at labels, with their positions
+ find_objects - Find objects in a labeled array
+ histogram - Histogram of the values of an array, optionally at labels
+ label - Label features in an array
+ labeled_comprehension
+ maximum
+ maximum_position
+ mean - Mean of the values of an array at labels
+ median
+ minimum
+ minimum_position
+ standard_deviation - Standard deviation of an N-D image array
+ sum_labels - Sum of the values of the array
+ value_indices - Find indices of each distinct value in given array
+ variance - Variance of the values of an N-D image array
+ watershed_ift
+
+ Morphology
+ ==========
+
+ .. autosummary::
+ :toctree: generated/
+
+ binary_closing
+ binary_dilation
+ binary_erosion
+ binary_fill_holes
+ binary_hit_or_miss
+ binary_opening
+ binary_propagation
+ black_tophat
+ distance_transform_bf
+ distance_transform_cdt
+ distance_transform_edt
+ generate_binary_structure
+ grey_closing
+ grey_dilation
+ grey_erosion
+ grey_opening
+ iterate_structure
+ morphological_gradient
+ morphological_laplace
+ white_tophat
+
+ """
+
+ # Copyright (C) 2003-2005 Peter J. Verveer
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions
+ # are met:
+ #
+ # 1. Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+ #
+ # 2. Redistributions in binary form must reproduce the above
+ # copyright notice, this list of conditions and the following
+ # disclaimer in the documentation and/or other materials provided
+ # with the distribution.
+ #
+ # 3. The name of the author may not be used to endorse or promote
+ # products derived from this software without specific prior
+ # written permission.
+ #
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ from ._filters import *
+ from ._fourier import *
+ from ._interpolation import *
+ from ._measurements import *
+ from ._morphology import *
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import filters
+ from . import fourier
+ from . import interpolation
+ from . import measurements
+ from . import morphology
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
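
The module above is mostly a documentation shell plus wildcard re-exports. A minimal usage sketch of the API it catalogues follows (editor's sketch, not part of the commit; the array values are illustrative):

import numpy as np
from scipy import ndimage

# Smooth a toy image with a Gaussian filter (see the "Filters" section above).
image = np.zeros((8, 8))
image[2:6, 2:6] = 1.0
smoothed = ndimage.gaussian_filter(image, sigma=1.0)

# Label connected components and measure them (see the "Measurements" section above).
labels, n = ndimage.label(image > 0.5)
centers = ndimage.center_of_mass(image, labels, index=range(1, n + 1))
print(n, centers)
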
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (17 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (91 kB).
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_filters.py ADDED
@@ -0,0 +1,1852 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ from collections.abc import Iterable
32
+ import numbers
33
+ import warnings
34
+ import numpy
35
+ import operator
36
+
37
+ from scipy._lib._util import normalize_axis_index
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from . import _ni_docstrings
41
+
42
+ __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
43
+ 'prewitt', 'sobel', 'generic_laplace', 'laplace',
44
+ 'gaussian_laplace', 'generic_gradient_magnitude',
45
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
46
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
47
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
48
+ 'rank_filter', 'median_filter', 'percentile_filter',
49
+ 'generic_filter1d', 'generic_filter']
50
+
51
+
52
+ def _invalid_origin(origin, lenw):
53
+ return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
54
+
55
+
56
+ def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
57
+ """Complex convolution via a linear combination of real convolutions."""
58
+ complex_input = input.dtype.kind == 'c'
59
+ complex_weights = weights.dtype.kind == 'c'
60
+ if complex_input and complex_weights:
61
+ # real component of the output
62
+ func(input.real, weights.real, output=output.real,
63
+ cval=numpy.real(cval), **kwargs)
64
+ output.real -= func(input.imag, weights.imag, output=None,
65
+ cval=numpy.imag(cval), **kwargs)
66
+ # imaginary component of the output
67
+ func(input.real, weights.imag, output=output.imag,
68
+ cval=numpy.real(cval), **kwargs)
69
+ output.imag += func(input.imag, weights.real, output=None,
70
+ cval=numpy.imag(cval), **kwargs)
71
+ elif complex_input:
72
+ func(input.real, weights, output=output.real, cval=numpy.real(cval),
73
+ **kwargs)
74
+ func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
75
+ **kwargs)
76
+ else:
77
+ if numpy.iscomplexobj(cval):
78
+ raise ValueError("Cannot provide a complex-valued cval when the "
79
+ "input is real.")
80
+ func(input, weights.real, output=output.real, cval=cval, **kwargs)
81
+ func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
82
+ return output
83
+
84
+
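
# Editor's note (not part of the diff): a minimal sketch of the decomposition
# performed by _complex_via_real_components above, shown for its simplest
# branch (complex input, real weights), where the complex result is just the
# filter applied to the real and imaginary parts separately. Values are
# illustrative, not taken from the commit.
import numpy as np
from scipy.ndimage import correlate1d

x = np.array([1 + 2j, 3 - 1j, 0 + 4j, 2 + 0j])
w = np.array([1.0, 3.0])

combined = correlate1d(x, w)
by_parts = correlate1d(x.real, w) + 1j * correlate1d(x.imag, w)
assert np.allclose(combined, by_parts)
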
85
+ @_ni_docstrings.docfiller
86
+ def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
87
+ cval=0.0, origin=0):
88
+ """Calculate a 1-D correlation along the given axis.
89
+
90
+ The lines of the array along the given axis are correlated with the
91
+ given weights.
92
+
93
+ Parameters
94
+ ----------
95
+ %(input)s
96
+ weights : array
97
+ 1-D sequence of numbers.
98
+ %(axis)s
99
+ %(output)s
100
+ %(mode_reflect)s
101
+ %(cval)s
102
+ %(origin)s
103
+
104
+ Returns
105
+ -------
106
+ result : ndarray
107
+ Correlation result. Has the same shape as `input`.
108
+
109
+ Examples
110
+ --------
111
+ >>> from scipy.ndimage import correlate1d
112
+ >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
113
+ array([ 8, 26, 8, 12, 7, 28, 36, 9])
114
+ """
115
+ input = numpy.asarray(input)
116
+ weights = numpy.asarray(weights)
117
+ complex_input = input.dtype.kind == 'c'
118
+ complex_weights = weights.dtype.kind == 'c'
119
+ if complex_input or complex_weights:
120
+ if complex_weights:
121
+ weights = weights.conj()
122
+ weights = weights.astype(numpy.complex128, copy=False)
123
+ kwargs = dict(axis=axis, mode=mode, origin=origin)
124
+ output = _ni_support._get_output(output, input, complex_output=True)
125
+ return _complex_via_real_components(correlate1d, input, weights,
126
+ output, cval, **kwargs)
127
+
128
+ output = _ni_support._get_output(output, input)
129
+ weights = numpy.asarray(weights, dtype=numpy.float64)
130
+ if weights.ndim != 1 or weights.shape[0] < 1:
131
+ raise RuntimeError('no filter weights given')
132
+ if not weights.flags.contiguous:
133
+ weights = weights.copy()
134
+ axis = normalize_axis_index(axis, input.ndim)
135
+ if _invalid_origin(origin, len(weights)):
136
+ raise ValueError('Invalid origin; origin must satisfy '
137
+ '-(len(weights) // 2) <= origin <= '
138
+ '(len(weights)-1) // 2')
139
+ mode = _ni_support._extend_mode_to_code(mode)
140
+ _nd_image.correlate1d(input, weights, axis, output, mode, cval,
141
+ origin)
142
+ return output
143
+
144
+
145
+ @_ni_docstrings.docfiller
146
+ def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
147
+ cval=0.0, origin=0):
148
+ """Calculate a 1-D convolution along the given axis.
149
+
150
+ The lines of the array along the given axis are convolved with the
151
+ given weights.
152
+
153
+ Parameters
154
+ ----------
155
+ %(input)s
156
+ weights : ndarray
157
+ 1-D sequence of numbers.
158
+ %(axis)s
159
+ %(output)s
160
+ %(mode_reflect)s
161
+ %(cval)s
162
+ %(origin)s
163
+
164
+ Returns
165
+ -------
166
+ convolve1d : ndarray
167
+ Convolved array with same shape as input
168
+
169
+ Examples
170
+ --------
171
+ >>> from scipy.ndimage import convolve1d
172
+ >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
173
+ array([14, 24, 4, 13, 12, 36, 27, 0])
174
+ """
175
+ weights = weights[::-1]
176
+ origin = -origin
177
+ if not len(weights) & 1:
178
+ origin -= 1
179
+ weights = numpy.asarray(weights)
180
+ if weights.dtype.kind == 'c':
181
+ # pre-conjugate here to counteract the conjugation in correlate1d
182
+ weights = weights.conj()
183
+ return correlate1d(input, weights, axis, output, mode, cval, origin)
184
+
185
+
186
+ def _gaussian_kernel1d(sigma, order, radius):
187
+ """
188
+ Computes a 1-D Gaussian convolution kernel.
189
+ """
190
+ if order < 0:
191
+ raise ValueError('order must be non-negative')
192
+ exponent_range = numpy.arange(order + 1)
193
+ sigma2 = sigma * sigma
194
+ x = numpy.arange(-radius, radius+1)
195
+ phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
196
+ phi_x = phi_x / phi_x.sum()
197
+
198
+ if order == 0:
199
+ return phi_x
200
+ else:
201
+ # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
202
+ # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
203
+ # p'(x) = -1 / sigma ** 2
204
+ # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
205
+ # coefficients of q(x)
206
+ q = numpy.zeros(order + 1)
207
+ q[0] = 1
208
+ D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
209
+ P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
210
+ Q_deriv = D + P
211
+ for _ in range(order):
212
+ q = Q_deriv.dot(q)
213
+ q = (x[:, None] ** exponent_range).dot(q)
214
+ return q * phi_x
215
+
216
+
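
# Editor's note (not part of the diff): a small check of the derivation in the
# comments above -- for order=1 the matrix construction reduces to
# q(x) = -x / sigma**2, so the kernel is -x/sigma**2 * phi(x). This calls the
# private helper defined above, so treat it as illustrative only.
import numpy as np
from scipy.ndimage._filters import _gaussian_kernel1d

sigma, radius = 2.0, 8
x = np.arange(-radius, radius + 1)
phi = np.exp(-0.5 * x**2 / sigma**2)
phi /= phi.sum()

kernel = _gaussian_kernel1d(sigma, order=1, radius=radius)
assert np.allclose(kernel, -x / sigma**2 * phi)
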
217
+ @_ni_docstrings.docfiller
218
+ def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
219
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
220
+ """1-D Gaussian filter.
221
+
222
+ Parameters
223
+ ----------
224
+ %(input)s
225
+ sigma : scalar
226
+ standard deviation for Gaussian kernel
227
+ %(axis)s
228
+ order : int, optional
229
+ An order of 0 corresponds to convolution with a Gaussian
230
+ kernel. A positive order corresponds to convolution with
231
+ that derivative of a Gaussian.
232
+ %(output)s
233
+ %(mode_reflect)s
234
+ %(cval)s
235
+ truncate : float, optional
236
+ Truncate the filter at this many standard deviations.
237
+ Default is 4.0.
238
+ radius : None or int, optional
239
+ Radius of the Gaussian kernel. If specified, the size of
240
+ the kernel will be ``2*radius + 1``, and `truncate` is ignored.
241
+ Default is None.
242
+
243
+ Returns
244
+ -------
245
+ gaussian_filter1d : ndarray
246
+
247
+ Notes
248
+ -----
249
+ The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
250
+ `radius` is None, a default ``radius = round(truncate * sigma)`` will be
251
+ used.
252
+
253
+ Examples
254
+ --------
255
+ >>> from scipy.ndimage import gaussian_filter1d
256
+ >>> import numpy as np
257
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
258
+ array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
259
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
260
+ array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
261
+ >>> import matplotlib.pyplot as plt
262
+ >>> rng = np.random.default_rng()
263
+ >>> x = rng.standard_normal(101).cumsum()
264
+ >>> y3 = gaussian_filter1d(x, 3)
265
+ >>> y6 = gaussian_filter1d(x, 6)
266
+ >>> plt.plot(x, 'k', label='original data')
267
+ >>> plt.plot(y3, '--', label='filtered, sigma=3')
268
+ >>> plt.plot(y6, ':', label='filtered, sigma=6')
269
+ >>> plt.legend()
270
+ >>> plt.grid()
271
+ >>> plt.show()
272
+
273
+ """
274
+ sd = float(sigma)
275
+ # make the radius of the filter equal to truncate standard deviations
276
+ lw = int(truncate * sd + 0.5)
277
+ if radius is not None:
278
+ lw = radius
279
+ if not isinstance(lw, numbers.Integral) or lw < 0:
280
+ raise ValueError('Radius must be a nonnegative integer.')
281
+ # Since we are calling correlate, not convolve, reverse the kernel
282
+ weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
283
+ return correlate1d(input, weights, axis, output, mode, cval, 0)
284
+
285
+
286
+ @_ni_docstrings.docfiller
287
+ def gaussian_filter(input, sigma, order=0, output=None,
288
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
289
+ axes=None):
290
+ """Multidimensional Gaussian filter.
291
+
292
+ Parameters
293
+ ----------
294
+ %(input)s
295
+ sigma : scalar or sequence of scalars
296
+ Standard deviation for Gaussian kernel. The standard
297
+ deviations of the Gaussian filter are given for each axis as a
298
+ sequence, or as a single number, in which case it is equal for
299
+ all axes.
300
+ order : int or sequence of ints, optional
301
+ The order of the filter along each axis is given as a sequence
302
+ of integers, or as a single number. An order of 0 corresponds
303
+ to convolution with a Gaussian kernel. A positive order
304
+ corresponds to convolution with that derivative of a Gaussian.
305
+ %(output)s
306
+ %(mode_multiple)s
307
+ %(cval)s
308
+ truncate : float, optional
309
+ Truncate the filter at this many standard deviations.
310
+ Default is 4.0.
311
+ radius : None or int or sequence of ints, optional
312
+ Radius of the Gaussian kernel. The radius is given for each axis
313
+ as a sequence, or as a single number, in which case it is equal
314
+ for all axes. If specified, the size of the kernel along each axis
315
+ will be ``2*radius + 1``, and `truncate` is ignored.
316
+ Default is None.
317
+ axes : tuple of int or None, optional
318
+ If None, `input` is filtered along all axes. Otherwise,
319
+ `input` is filtered along the specified axes. When `axes` is
320
+ specified, any tuples used for `sigma`, `order`, `mode` and/or `radius`
321
+ must match the length of `axes`. The ith entry in any of these tuples
322
+ corresponds to the ith entry in `axes`.
323
+
324
+ Returns
325
+ -------
326
+ gaussian_filter : ndarray
327
+ Returned array of same shape as `input`.
328
+
329
+ Notes
330
+ -----
331
+ The multidimensional filter is implemented as a sequence of
332
+ 1-D convolution filters. The intermediate arrays are
333
+ stored in the same data type as the output. Therefore, for output
334
+ types with a limited precision, the results may be imprecise
335
+ because intermediate results may be stored with insufficient
336
+ precision.
337
+
338
+ The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
339
+ `radius` is None, the default ``radius = round(truncate * sigma)`` will be
340
+ used.
341
+
342
+ Examples
343
+ --------
344
+ >>> from scipy.ndimage import gaussian_filter
345
+ >>> import numpy as np
346
+ >>> a = np.arange(50, step=2).reshape((5,5))
347
+ >>> a
348
+ array([[ 0, 2, 4, 6, 8],
349
+ [10, 12, 14, 16, 18],
350
+ [20, 22, 24, 26, 28],
351
+ [30, 32, 34, 36, 38],
352
+ [40, 42, 44, 46, 48]])
353
+ >>> gaussian_filter(a, sigma=1)
354
+ array([[ 4, 6, 8, 9, 11],
355
+ [10, 12, 14, 15, 17],
356
+ [20, 22, 24, 25, 27],
357
+ [29, 31, 33, 34, 36],
358
+ [35, 37, 39, 40, 42]])
359
+
360
+ >>> from scipy import datasets
361
+ >>> import matplotlib.pyplot as plt
362
+ >>> fig = plt.figure()
363
+ >>> plt.gray() # show the filtered result in grayscale
364
+ >>> ax1 = fig.add_subplot(121) # left side
365
+ >>> ax2 = fig.add_subplot(122) # right side
366
+ >>> ascent = datasets.ascent()
367
+ >>> result = gaussian_filter(ascent, sigma=5)
368
+ >>> ax1.imshow(ascent)
369
+ >>> ax2.imshow(result)
370
+ >>> plt.show()
371
+ """
372
+ input = numpy.asarray(input)
373
+ output = _ni_support._get_output(output, input)
374
+
375
+ axes = _ni_support._check_axes(axes, input.ndim)
376
+ num_axes = len(axes)
377
+ orders = _ni_support._normalize_sequence(order, num_axes)
378
+ sigmas = _ni_support._normalize_sequence(sigma, num_axes)
379
+ modes = _ni_support._normalize_sequence(mode, num_axes)
380
+ radiuses = _ni_support._normalize_sequence(radius, num_axes)
381
+ axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii])
382
+ for ii in range(num_axes) if sigmas[ii] > 1e-15]
383
+ if len(axes) > 0:
384
+ for axis, sigma, order, mode, radius in axes:
385
+ gaussian_filter1d(input, sigma, axis, order, output,
386
+ mode, cval, truncate, radius=radius)
387
+ input = output
388
+ else:
389
+ output[...] = input[...]
390
+ return output
391
+
392
+
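
# Editor's note (not part of the diff): gaussian_filter above is implemented as
# a sequence of 1-D passes, one per axis, so for float input it should agree
# with chaining gaussian_filter1d manually. Illustrative check only.
import numpy as np
from scipy.ndimage import gaussian_filter, gaussian_filter1d

rng = np.random.default_rng(0)
a = rng.standard_normal((16, 16))

direct = gaussian_filter(a, sigma=1.5)
chained = gaussian_filter1d(gaussian_filter1d(a, 1.5, axis=0), 1.5, axis=1)
assert np.allclose(direct, chained)
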
393
+ @_ni_docstrings.docfiller
394
+ def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
395
+ """Calculate a Prewitt filter.
396
+
397
+ Parameters
398
+ ----------
399
+ %(input)s
400
+ %(axis)s
401
+ %(output)s
402
+ %(mode_multiple)s
403
+ %(cval)s
404
+
405
+ Returns
406
+ -------
407
+ prewitt : ndarray
408
+ Filtered array. Has the same shape as `input`.
409
+
410
+ See Also
411
+ --------
412
+ sobel: Sobel filter
413
+
414
+ Notes
415
+ -----
416
+ This function computes the one-dimensional Prewitt filter.
417
+ Horizontal edges are emphasised with the horizontal transform (axis=0),
418
+ vertical edges with the vertical transform (axis=1), and so on for higher
419
+ dimensions. These can be combined to give the magnitude.
420
+
421
+ Examples
422
+ --------
423
+ >>> from scipy import ndimage, datasets
424
+ >>> import matplotlib.pyplot as plt
425
+ >>> import numpy as np
426
+ >>> ascent = datasets.ascent()
427
+ >>> prewitt_h = ndimage.prewitt(ascent, axis=0)
428
+ >>> prewitt_v = ndimage.prewitt(ascent, axis=1)
429
+ >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2)
430
+ >>> magnitude *= 255 / np.max(magnitude) # Normalization
431
+ >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8))
432
+ >>> plt.gray()
433
+ >>> axes[0, 0].imshow(ascent)
434
+ >>> axes[0, 1].imshow(prewitt_h)
435
+ >>> axes[1, 0].imshow(prewitt_v)
436
+ >>> axes[1, 1].imshow(magnitude)
437
+ >>> titles = ["original", "horizontal", "vertical", "magnitude"]
438
+ >>> for i, ax in enumerate(axes.ravel()):
439
+ ... ax.set_title(titles[i])
440
+ ... ax.axis("off")
441
+ >>> plt.show()
442
+
443
+ """
444
+ input = numpy.asarray(input)
445
+ axis = normalize_axis_index(axis, input.ndim)
446
+ output = _ni_support._get_output(output, input)
447
+ modes = _ni_support._normalize_sequence(mode, input.ndim)
448
+ correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
449
+ axes = [ii for ii in range(input.ndim) if ii != axis]
450
+ for ii in axes:
451
+ correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
452
+ return output
453
+
454
+
455
+ @_ni_docstrings.docfiller
456
+ def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
457
+ """Calculate a Sobel filter.
458
+
459
+ Parameters
460
+ ----------
461
+ %(input)s
462
+ %(axis)s
463
+ %(output)s
464
+ %(mode_multiple)s
465
+ %(cval)s
466
+
467
+ Returns
468
+ -------
469
+ sobel : ndarray
470
+ Filtered array. Has the same shape as `input`.
471
+
472
+ Notes
473
+ -----
474
+ This function computes the axis-specific Sobel gradient.
475
+ The horizontal edges can be emphasised with the horizontal transform (axis=0),
476
+ the vertical edges with the vertical transform (axis=1) and so on for higher
477
+ dimensions. These can be combined to give the magnitude.
478
+
479
+ Examples
480
+ --------
481
+ >>> from scipy import ndimage, datasets
482
+ >>> import matplotlib.pyplot as plt
483
+ >>> import numpy as np
484
+ >>> ascent = datasets.ascent().astype('int32')
485
+ >>> sobel_h = ndimage.sobel(ascent, 0) # horizontal gradient
486
+ >>> sobel_v = ndimage.sobel(ascent, 1) # vertical gradient
487
+ >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2)
488
+ >>> magnitude *= 255.0 / np.max(magnitude) # normalization
489
+ >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8))
490
+ >>> plt.gray() # show the filtered result in grayscale
491
+ >>> axs[0, 0].imshow(ascent)
492
+ >>> axs[0, 1].imshow(sobel_h)
493
+ >>> axs[1, 0].imshow(sobel_v)
494
+ >>> axs[1, 1].imshow(magnitude)
495
+ >>> titles = ["original", "horizontal", "vertical", "magnitude"]
496
+ >>> for i, ax in enumerate(axs.ravel()):
497
+ ... ax.set_title(titles[i])
498
+ ... ax.axis("off")
499
+ >>> plt.show()
500
+
501
+ """
502
+ input = numpy.asarray(input)
503
+ axis = normalize_axis_index(axis, input.ndim)
504
+ output = _ni_support._get_output(output, input)
505
+ modes = _ni_support._normalize_sequence(mode, input.ndim)
506
+ correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
507
+ axes = [ii for ii in range(input.ndim) if ii != axis]
508
+ for ii in axes:
509
+ correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
510
+ return output
511
+
512
+
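
# Editor's note (not part of the diff): the sobel filter above is separable --
# a [-1, 0, 1] derivative along the chosen axis and a [1, 2, 1] smoothing along
# the remaining axes -- so in 2-D it should match a single correlation with the
# classic 3x3 Sobel kernel. Illustrative check, not part of the library's tests.
import numpy as np
from scipy import ndimage

rng = np.random.default_rng(1)
a = rng.standard_normal((10, 12))

k = np.outer([1, 2, 1], [-1, 0, 1])  # rows smooth (axis 0), columns differentiate (axis 1)
assert np.allclose(ndimage.sobel(a, axis=1), ndimage.correlate(a, k))
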
513
+ @_ni_docstrings.docfiller
514
+ def generic_laplace(input, derivative2, output=None, mode="reflect",
515
+ cval=0.0,
516
+ extra_arguments=(),
517
+ extra_keywords=None):
518
+ """
519
+ N-D Laplace filter using a provided second derivative function.
520
+
521
+ Parameters
522
+ ----------
523
+ %(input)s
524
+ derivative2 : callable
525
+ Callable with the following signature::
526
+
527
+ derivative2(input, axis, output, mode, cval,
528
+ *extra_arguments, **extra_keywords)
529
+
530
+ See `extra_arguments`, `extra_keywords` below.
531
+ %(output)s
532
+ %(mode_multiple)s
533
+ %(cval)s
534
+ %(extra_keywords)s
535
+ %(extra_arguments)s
536
+
537
+ Returns
538
+ -------
539
+ generic_laplace : ndarray
540
+ Filtered array. Has the same shape as `input`.
541
+
542
+ """
543
+ if extra_keywords is None:
544
+ extra_keywords = {}
545
+ input = numpy.asarray(input)
546
+ output = _ni_support._get_output(output, input)
547
+ axes = list(range(input.ndim))
548
+ if len(axes) > 0:
549
+ modes = _ni_support._normalize_sequence(mode, len(axes))
550
+ derivative2(input, axes[0], output, modes[0], cval,
551
+ *extra_arguments, **extra_keywords)
552
+ for ii in range(1, len(axes)):
553
+ tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
554
+ *extra_arguments, **extra_keywords)
555
+ output += tmp
556
+ else:
557
+ output[...] = input[...]
558
+ return output
559
+
560
+
561
+ @_ni_docstrings.docfiller
562
+ def laplace(input, output=None, mode="reflect", cval=0.0):
563
+ """N-D Laplace filter based on approximate second derivatives.
564
+
565
+ Parameters
566
+ ----------
567
+ %(input)s
568
+ %(output)s
569
+ %(mode_multiple)s
570
+ %(cval)s
571
+
572
+ Returns
573
+ -------
574
+ laplace : ndarray
575
+ Filtered array. Has the same shape as `input`.
576
+
577
+ Examples
578
+ --------
579
+ >>> from scipy import ndimage, datasets
580
+ >>> import matplotlib.pyplot as plt
581
+ >>> fig = plt.figure()
582
+ >>> plt.gray() # show the filtered result in grayscale
583
+ >>> ax1 = fig.add_subplot(121) # left side
584
+ >>> ax2 = fig.add_subplot(122) # right side
585
+ >>> ascent = datasets.ascent()
586
+ >>> result = ndimage.laplace(ascent)
587
+ >>> ax1.imshow(ascent)
588
+ >>> ax2.imshow(result)
589
+ >>> plt.show()
590
+ """
591
+ def derivative2(input, axis, output, mode, cval):
592
+ return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
593
+ return generic_laplace(input, derivative2, output, mode, cval)
594
+
595
+
596
+ @_ni_docstrings.docfiller
597
+ def gaussian_laplace(input, sigma, output=None, mode="reflect",
598
+ cval=0.0, **kwargs):
599
+ """Multidimensional Laplace filter using Gaussian second derivatives.
600
+
601
+ Parameters
602
+ ----------
603
+ %(input)s
604
+ sigma : scalar or sequence of scalars
605
+ The standard deviations of the Gaussian filter are given for
606
+ each axis as a sequence, or as a single number, in which case
607
+ it is equal for all axes.
608
+ %(output)s
609
+ %(mode_multiple)s
610
+ %(cval)s
611
+ Extra keyword arguments will be passed to gaussian_filter().
612
+
613
+ Returns
614
+ -------
615
+ gaussian_laplace : ndarray
616
+ Filtered array. Has the same shape as `input`.
617
+
618
+ Examples
619
+ --------
620
+ >>> from scipy import ndimage, datasets
621
+ >>> import matplotlib.pyplot as plt
622
+ >>> ascent = datasets.ascent()
623
+
624
+ >>> fig = plt.figure()
625
+ >>> plt.gray() # show the filtered result in grayscale
626
+ >>> ax1 = fig.add_subplot(121) # left side
627
+ >>> ax2 = fig.add_subplot(122) # right side
628
+
629
+ >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
630
+ >>> ax1.imshow(result)
631
+
632
+ >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
633
+ >>> ax2.imshow(result)
634
+ >>> plt.show()
635
+ """
636
+ input = numpy.asarray(input)
637
+
638
+ def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
639
+ order = [0] * input.ndim
640
+ order[axis] = 2
641
+ return gaussian_filter(input, sigma, order, output, mode, cval,
642
+ **kwargs)
643
+
644
+ return generic_laplace(input, derivative2, output, mode, cval,
645
+ extra_arguments=(sigma,),
646
+ extra_keywords=kwargs)
647
+
648
+
649
+ @_ni_docstrings.docfiller
650
+ def generic_gradient_magnitude(input, derivative, output=None,
651
+ mode="reflect", cval=0.0,
652
+ extra_arguments=(), extra_keywords=None):
653
+ """Gradient magnitude using a provided gradient function.
654
+
655
+ Parameters
656
+ ----------
657
+ %(input)s
658
+ derivative : callable
659
+ Callable with the following signature::
660
+
661
+ derivative(input, axis, output, mode, cval,
662
+ *extra_arguments, **extra_keywords)
663
+
664
+ See `extra_arguments`, `extra_keywords` below.
665
+ `derivative` can assume that `input` and `output` are ndarrays.
666
+ Note that the output from `derivative` is modified inplace;
667
+ be careful to copy important inputs before returning them.
668
+ %(output)s
669
+ %(mode_multiple)s
670
+ %(cval)s
671
+ %(extra_keywords)s
672
+ %(extra_arguments)s
673
+
674
+ Returns
675
+ -------
676
+ generic_gradient_magnitude : ndarray
677
+ Filtered array. Has the same shape as `input`.
678
+
679
+ """
680
+ if extra_keywords is None:
681
+ extra_keywords = {}
682
+ input = numpy.asarray(input)
683
+ output = _ni_support._get_output(output, input)
684
+ axes = list(range(input.ndim))
685
+ if len(axes) > 0:
686
+ modes = _ni_support._normalize_sequence(mode, len(axes))
687
+ derivative(input, axes[0], output, modes[0], cval,
688
+ *extra_arguments, **extra_keywords)
689
+ numpy.multiply(output, output, output)
690
+ for ii in range(1, len(axes)):
691
+ tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
692
+ *extra_arguments, **extra_keywords)
693
+ numpy.multiply(tmp, tmp, tmp)
694
+ output += tmp
695
+ # This allows the sqrt to work with a different default casting
696
+ numpy.sqrt(output, output, casting='unsafe')
697
+ else:
698
+ output[...] = input[...]
699
+ return output
700
+
701
+
702
+ @_ni_docstrings.docfiller
703
+ def gaussian_gradient_magnitude(input, sigma, output=None,
704
+ mode="reflect", cval=0.0, **kwargs):
705
+ """Multidimensional gradient magnitude using Gaussian derivatives.
706
+
707
+ Parameters
708
+ ----------
709
+ %(input)s
710
+ sigma : scalar or sequence of scalars
711
+ The standard deviations of the Gaussian filter are given for
712
+ each axis as a sequence, or as a single number, in which case
713
+ it is equal for all axes.
714
+ %(output)s
715
+ %(mode_multiple)s
716
+ %(cval)s
717
+ Extra keyword arguments will be passed to gaussian_filter().
718
+
719
+ Returns
720
+ -------
721
+ gaussian_gradient_magnitude : ndarray
722
+ Filtered array. Has the same shape as `input`.
723
+
724
+ Examples
725
+ --------
726
+ >>> from scipy import ndimage, datasets
727
+ >>> import matplotlib.pyplot as plt
728
+ >>> fig = plt.figure()
729
+ >>> plt.gray() # show the filtered result in grayscale
730
+ >>> ax1 = fig.add_subplot(121) # left side
731
+ >>> ax2 = fig.add_subplot(122) # right side
732
+ >>> ascent = datasets.ascent()
733
+ >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
734
+ >>> ax1.imshow(ascent)
735
+ >>> ax2.imshow(result)
736
+ >>> plt.show()
737
+ """
738
+ input = numpy.asarray(input)
739
+
740
+ def derivative(input, axis, output, mode, cval, sigma, **kwargs):
741
+ order = [0] * input.ndim
742
+ order[axis] = 1
743
+ return gaussian_filter(input, sigma, order, output, mode,
744
+ cval, **kwargs)
745
+
746
+ return generic_gradient_magnitude(input, derivative, output, mode,
747
+ cval, extra_arguments=(sigma,),
748
+ extra_keywords=kwargs)
749
+
750
+
751
+ def _correlate_or_convolve(input, weights, output, mode, cval, origin,
752
+ convolution):
753
+ input = numpy.asarray(input)
754
+ weights = numpy.asarray(weights)
755
+ complex_input = input.dtype.kind == 'c'
756
+ complex_weights = weights.dtype.kind == 'c'
757
+ if complex_input or complex_weights:
758
+ if complex_weights and not convolution:
759
+ # As for numpy.correlate, conjugate weights rather than input.
760
+ weights = weights.conj()
761
+ kwargs = dict(
762
+ mode=mode, origin=origin, convolution=convolution
763
+ )
764
+ output = _ni_support._get_output(output, input, complex_output=True)
765
+
766
+ return _complex_via_real_components(_correlate_or_convolve, input,
767
+ weights, output, cval, **kwargs)
768
+
769
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
770
+ weights = numpy.asarray(weights, dtype=numpy.float64)
771
+ wshape = [ii for ii in weights.shape if ii > 0]
772
+ if len(wshape) != input.ndim:
773
+ raise RuntimeError('filter weights array has incorrect shape.')
774
+ if convolution:
775
+ weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
776
+ for ii in range(len(origins)):
777
+ origins[ii] = -origins[ii]
778
+ if not weights.shape[ii] & 1:
779
+ origins[ii] -= 1
780
+ for origin, lenw in zip(origins, wshape):
781
+ if _invalid_origin(origin, lenw):
782
+ raise ValueError('Invalid origin; origin must satisfy '
783
+ '-(weights.shape[k] // 2) <= origin[k] <= '
784
+ '(weights.shape[k]-1) // 2')
785
+
786
+ if not weights.flags.contiguous:
787
+ weights = weights.copy()
788
+ output = _ni_support._get_output(output, input)
789
+ temp_needed = numpy.may_share_memory(input, output)
790
+ if temp_needed:
791
+ # input and output arrays cannot share memory
792
+ temp = output
793
+ output = _ni_support._get_output(output.dtype, input)
794
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
795
+ raise RuntimeError("A sequence of modes is not supported")
796
+ mode = _ni_support._extend_mode_to_code(mode)
797
+ _nd_image.correlate(input, weights, output, mode, cval, origins)
798
+ if temp_needed:
799
+ temp[...] = output
800
+ output = temp
801
+ return output
802
+
803
+
804
+ @_ni_docstrings.docfiller
805
+ def correlate(input, weights, output=None, mode='reflect', cval=0.0,
806
+ origin=0):
807
+ """
808
+ Multidimensional correlation.
809
+
810
+ The array is correlated with the given kernel.
811
+
812
+ Parameters
813
+ ----------
814
+ %(input)s
815
+ weights : ndarray
816
+ array of weights, same number of dimensions as input
817
+ %(output)s
818
+ %(mode_reflect)s
819
+ %(cval)s
820
+ %(origin_multiple)s
821
+
822
+ Returns
823
+ -------
824
+ result : ndarray
825
+ The result of correlation of `input` with `weights`.
826
+
827
+ See Also
828
+ --------
829
+ convolve : Convolve an image with a kernel.
830
+
831
+ Examples
832
+ --------
833
+ Correlation is the process of moving a filter mask, often referred to
834
+ as a kernel, over the image and computing the sum of products at each location.
835
+
836
+ >>> from scipy.ndimage import correlate
837
+ >>> import numpy as np
838
+ >>> input_img = np.arange(25).reshape(5,5)
839
+ >>> print(input_img)
840
+ [[ 0 1 2 3 4]
841
+ [ 5 6 7 8 9]
842
+ [10 11 12 13 14]
843
+ [15 16 17 18 19]
844
+ [20 21 22 23 24]]
845
+
846
+ Define a kernel (weights) for correlation. In this example, it sums the
847
+ center element and its four immediate neighbours (up, down, left and right).
848
+
849
+ >>> weights = [[0, 1, 0],
850
+ ... [1, 1, 1],
851
+ ... [0, 1, 0]]
852
+
853
+ We can calculate a correlation result:
854
+ For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
855
+
856
+ >>> correlate(input_img, weights)
857
+ array([[ 6, 10, 15, 20, 24],
858
+ [ 26, 30, 35, 40, 44],
859
+ [ 51, 55, 60, 65, 69],
860
+ [ 76, 80, 85, 90, 94],
861
+ [ 96, 100, 105, 110, 114]])
862
+
863
+ """
864
+ return _correlate_or_convolve(input, weights, output, mode, cval,
865
+ origin, False)
866
+
867
+
868
+ @_ni_docstrings.docfiller
869
+ def convolve(input, weights, output=None, mode='reflect', cval=0.0,
870
+ origin=0):
871
+ """
872
+ Multidimensional convolution.
873
+
874
+ The array is convolved with the given kernel.
875
+
876
+ Parameters
877
+ ----------
878
+ %(input)s
879
+ weights : array_like
880
+ Array of weights, same number of dimensions as input
881
+ %(output)s
882
+ %(mode_reflect)s
883
+ cval : scalar, optional
884
+ Value to fill past edges of input if `mode` is 'constant'. Default
885
+ is 0.0
886
+ origin : int, optional
887
+ Controls the origin of the input signal, which is where the
888
+ filter is centered to produce the first element of the output.
889
+ Positive values shift the filter to the right, and negative values
890
+ shift the filter to the left. Default is 0.
891
+
892
+ Returns
893
+ -------
894
+ result : ndarray
895
+ The result of convolution of `input` with `weights`.
896
+
897
+ See Also
898
+ --------
899
+ correlate : Correlate an image with a kernel.
900
+
901
+ Notes
902
+ -----
903
+ Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
904
+ W is the `weights` kernel,
905
+ j is the N-D spatial index over :math:`W`,
906
+ I is the `input` and k is the coordinate of the center of
907
+ W, specified by `origin` in the input parameters.
908
+
909
+ Examples
910
+ --------
911
+ Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
912
+ because in this case borders (i.e., where the `weights` kernel, centered
913
+ on any one value, extends beyond an edge of `input`) are treated as zeros.
914
+
915
+ >>> import numpy as np
916
+ >>> a = np.array([[1, 2, 0, 0],
917
+ ... [5, 3, 0, 4],
918
+ ... [0, 0, 0, 7],
919
+ ... [9, 3, 0, 0]])
920
+ >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
921
+ >>> from scipy import ndimage
922
+ >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
923
+ array([[11, 10, 7, 4],
924
+ [10, 3, 11, 11],
925
+ [15, 12, 14, 7],
926
+ [12, 3, 7, 0]])
927
+
928
+ Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
929
+ with 1.0's (and then extracting only the original region of the result).
930
+
931
+ >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
932
+ array([[13, 11, 8, 7],
933
+ [11, 3, 11, 14],
934
+ [16, 12, 14, 10],
935
+ [15, 6, 10, 5]])
936
+
937
+ With ``mode='reflect'`` (the default), outer values are reflected at the
938
+ edge of `input` to fill in missing values.
939
+
940
+ >>> b = np.array([[2, 0, 0],
941
+ ... [1, 0, 0],
942
+ ... [0, 0, 0]])
943
+ >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
944
+ >>> ndimage.convolve(b, k, mode='reflect')
945
+ array([[5, 0, 0],
946
+ [3, 0, 0],
947
+ [1, 0, 0]])
948
+
949
+ This includes diagonally at the corners.
950
+
951
+ >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
952
+ >>> ndimage.convolve(b, k)
953
+ array([[4, 2, 0],
954
+ [3, 2, 0],
955
+ [1, 1, 0]])
956
+
957
+ With ``mode='nearest'``, the single value nearest to an edge in
958
+ `input` is repeated as many times as needed to match the overlapping
959
+ `weights`.
960
+
961
+ >>> c = np.array([[2, 0, 1],
962
+ ... [1, 0, 0],
963
+ ... [0, 0, 0]])
964
+ >>> k = np.array([[0, 1, 0],
965
+ ... [0, 1, 0],
966
+ ... [0, 1, 0],
967
+ ... [0, 1, 0],
968
+ ... [0, 1, 0]])
969
+ >>> ndimage.convolve(c, k, mode='nearest')
970
+ array([[7, 0, 3],
971
+ [5, 0, 2],
972
+ [3, 0, 1]])
973
+
974
+ """
975
+ return _correlate_or_convolve(input, weights, output, mode, cval,
976
+ origin, True)
977
+
978
+
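
# Editor's note (not part of the diff): convolution is correlation with the
# kernel flipped along every axis (plus an origin adjustment for even-sized
# kernels, handled in _correlate_or_convolve above). For the odd-sized kernel
# from the convolve docstring this is a direct identity.
import numpy as np
from scipy import ndimage

a = np.array([[1, 2, 0, 0],
              [5, 3, 0, 4],
              [0, 0, 0, 7],
              [9, 3, 0, 0]])
k = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])

lhs = ndimage.convolve(a, k, mode='constant', cval=0.0)
rhs = ndimage.correlate(a, k[::-1, ::-1], mode='constant', cval=0.0)
assert np.array_equal(lhs, rhs)
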
979
+ @_ni_docstrings.docfiller
980
+ def uniform_filter1d(input, size, axis=-1, output=None,
981
+ mode="reflect", cval=0.0, origin=0):
982
+ """Calculate a 1-D uniform filter along the given axis.
983
+
984
+ The lines of the array along the given axis are filtered with a
985
+ uniform filter of given size.
986
+
987
+ Parameters
988
+ ----------
989
+ %(input)s
990
+ size : int
991
+ length of uniform filter
992
+ %(axis)s
993
+ %(output)s
994
+ %(mode_reflect)s
995
+ %(cval)s
996
+ %(origin)s
997
+
998
+ Returns
999
+ -------
1000
+ result : ndarray
1001
+ Filtered array. Has same shape as `input`.
1002
+
1003
+ Examples
1004
+ --------
1005
+ >>> from scipy.ndimage import uniform_filter1d
1006
+ >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
1007
+ array([4, 3, 4, 1, 4, 6, 6, 3])
1008
+ """
1009
+ input = numpy.asarray(input)
1010
+ axis = normalize_axis_index(axis, input.ndim)
1011
+ if size < 1:
1012
+ raise RuntimeError('incorrect filter size')
1013
+ complex_output = input.dtype.kind == 'c'
1014
+ output = _ni_support._get_output(output, input,
1015
+ complex_output=complex_output)
1016
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
1017
+ raise ValueError('invalid origin')
1018
+ mode = _ni_support._extend_mode_to_code(mode)
1019
+ if not complex_output:
1020
+ _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
1021
+ origin)
1022
+ else:
1023
+ _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
1024
+ numpy.real(cval), origin)
1025
+ _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
1026
+ numpy.imag(cval), origin)
1027
+ return output
1028
+
1029
+
1030
+ @_ni_docstrings.docfiller
1031
+ def uniform_filter(input, size=3, output=None, mode="reflect",
1032
+ cval=0.0, origin=0, *, axes=None):
1033
+ """Multidimensional uniform filter.
1034
+
1035
+ Parameters
1036
+ ----------
1037
+ %(input)s
1038
+ size : int or sequence of ints, optional
1039
+ The sizes of the uniform filter are given for each axis as a
1040
+ sequence, or as a single number, in which case the size is
1041
+ equal for all axes.
1042
+ %(output)s
1043
+ %(mode_multiple)s
1044
+ %(cval)s
1045
+ %(origin_multiple)s
1046
+ axes : tuple of int or None, optional
1047
+ If None, `input` is filtered along all axes. Otherwise,
1048
+ `input` is filtered along the specified axes. When `axes` is
1049
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1050
+ must match the length of `axes`. The ith entry in any of these tuples
1051
+ corresponds to the ith entry in `axes`.
1052
+
1053
+ Returns
1054
+ -------
1055
+ uniform_filter : ndarray
1056
+ Filtered array. Has the same shape as `input`.
1057
+
1058
+ Notes
1059
+ -----
1060
+ The multidimensional filter is implemented as a sequence of
1061
+ 1-D uniform filters. The intermediate arrays are stored
1062
+ in the same data type as the output. Therefore, for output types
1063
+ with a limited precision, the results may be imprecise because
1064
+ intermediate results may be stored with insufficient precision.
1065
+
1066
+ Examples
1067
+ --------
1068
+ >>> from scipy import ndimage, datasets
1069
+ >>> import matplotlib.pyplot as plt
1070
+ >>> fig = plt.figure()
1071
+ >>> plt.gray() # show the filtered result in grayscale
1072
+ >>> ax1 = fig.add_subplot(121) # left side
1073
+ >>> ax2 = fig.add_subplot(122) # right side
1074
+ >>> ascent = datasets.ascent()
1075
+ >>> result = ndimage.uniform_filter(ascent, size=20)
1076
+ >>> ax1.imshow(ascent)
1077
+ >>> ax2.imshow(result)
1078
+ >>> plt.show()
1079
+ """
1080
+ input = numpy.asarray(input)
1081
+ output = _ni_support._get_output(output, input,
1082
+ complex_output=input.dtype.kind == 'c')
1083
+ axes = _ni_support._check_axes(axes, input.ndim)
1084
+ num_axes = len(axes)
1085
+ sizes = _ni_support._normalize_sequence(size, num_axes)
1086
+ origins = _ni_support._normalize_sequence(origin, num_axes)
1087
+ modes = _ni_support._normalize_sequence(mode, num_axes)
1088
+ axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
1089
+ for ii in range(num_axes) if sizes[ii] > 1]
1090
+ if len(axes) > 0:
1091
+ for axis, size, origin, mode in axes:
1092
+ uniform_filter1d(input, int(size), axis, output, mode,
1093
+ cval, origin)
1094
+ input = output
1095
+ else:
1096
+ output[...] = input[...]
1097
+ return output
1098
+
1099
+
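
# Editor's note (not part of the diff): for float input, uniform_filter1d is a
# plain moving average; with the default mode='reflect' the boundary handling
# corresponds to numpy's 'symmetric' padding. Illustrative check only.
import numpy as np
from scipy.ndimage import uniform_filter1d

x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
size = 3

padded = np.pad(x, size // 2, mode='symmetric')
manual = np.convolve(padded, np.ones(size) / size, mode='valid')
assert np.allclose(uniform_filter1d(x, size), manual)
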
1100
+ @_ni_docstrings.docfiller
1101
+ def minimum_filter1d(input, size, axis=-1, output=None,
1102
+ mode="reflect", cval=0.0, origin=0):
1103
+ """Calculate a 1-D minimum filter along the given axis.
1104
+
1105
+ The lines of the array along the given axis are filtered with a
1106
+ minimum filter of given size.
1107
+
1108
+ Parameters
1109
+ ----------
1110
+ %(input)s
1111
+ size : int
1112
+ length along which to calculate 1D minimum
1113
+ %(axis)s
1114
+ %(output)s
1115
+ %(mode_reflect)s
1116
+ %(cval)s
1117
+ %(origin)s
1118
+
1119
+ Returns
1120
+ -------
1121
+ result : ndarray.
1122
+ Filtered image. Has the same shape as `input`.
1123
+
1124
+ Notes
1125
+ -----
1126
+ This function implements the MINLIST algorithm [1]_, as described by
1127
+ Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
1128
+ the `input` length, regardless of filter size.
1129
+
1130
+ References
1131
+ ----------
1132
+ .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
1133
+ .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
1134
+
1135
+
1136
+ Examples
1137
+ --------
1138
+ >>> from scipy.ndimage import minimum_filter1d
1139
+ >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
1140
+ array([2, 0, 0, 0, 1, 1, 0, 0])
1141
+ """
1142
+ input = numpy.asarray(input)
1143
+ if numpy.iscomplexobj(input):
1144
+ raise TypeError('Complex type not supported')
1145
+ axis = normalize_axis_index(axis, input.ndim)
1146
+ if size < 1:
1147
+ raise RuntimeError('incorrect filter size')
1148
+ output = _ni_support._get_output(output, input)
1149
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
1150
+ raise ValueError('invalid origin')
1151
+ mode = _ni_support._extend_mode_to_code(mode)
1152
+ _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
1153
+ origin, 1)
1154
+ return output
1155
+
1156
+
1157
+ @_ni_docstrings.docfiller
1158
+ def maximum_filter1d(input, size, axis=-1, output=None,
1159
+ mode="reflect", cval=0.0, origin=0):
1160
+ """Calculate a 1-D maximum filter along the given axis.
1161
+
1162
+ The lines of the array along the given axis are filtered with a
1163
+ maximum filter of given size.
1164
+
1165
+ Parameters
1166
+ ----------
1167
+ %(input)s
1168
+ size : int
1169
+ Length along which to calculate the 1-D maximum.
1170
+ %(axis)s
1171
+ %(output)s
1172
+ %(mode_reflect)s
1173
+ %(cval)s
1174
+ %(origin)s
1175
+
1176
+ Returns
1177
+ -------
1178
+ maximum1d : ndarray, None
1179
+ Maximum-filtered array with same shape as input.
1180
+ None if `output` is not None
1181
+
1182
+ Notes
1183
+ -----
1184
+ This function implements the MAXLIST algorithm [1]_, as described by
1185
+ Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
1186
+ the `input` length, regardless of filter size.
1187
+
1188
+ References
1189
+ ----------
1190
+ .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
1191
+ .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
1192
+
1193
+ Examples
1194
+ --------
1195
+ >>> from scipy.ndimage import maximum_filter1d
1196
+ >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
1197
+ array([8, 8, 8, 4, 9, 9, 9, 9])
1198
+ """
1199
+ input = numpy.asarray(input)
1200
+ if numpy.iscomplexobj(input):
1201
+ raise TypeError('Complex type not supported')
1202
+ axis = normalize_axis_index(axis, input.ndim)
1203
+ if size < 1:
1204
+ raise RuntimeError('incorrect filter size')
1205
+ output = _ni_support._get_output(output, input)
1206
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
1207
+ raise ValueError('invalid origin')
1208
+ mode = _ni_support._extend_mode_to_code(mode)
1209
+ _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
1210
+ origin, 0)
1211
+ return output
1212
+
1213
+
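Both 1-D extremum filters are thin wrappers around the same `min_or_max_filter1d` routine (the final flag selects min or max), so the usual min/max duality holds. A quick sanity-check sketch:

>>> import numpy as np
>>> from scipy.ndimage import minimum_filter1d, maximum_filter1d
>>> x = np.array([2, 8, 0, 4, 1, 9, 9, 0])
>>> np.array_equal(minimum_filter1d(x, 3), -maximum_filter1d(-x, 3))
True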
1214
+ def _min_or_max_filter(input, size, footprint, structure, output, mode,
1215
+ cval, origin, minimum, axes=None):
1216
+ if (size is not None) and (footprint is not None):
1217
+ warnings.warn("ignoring size because footprint is set",
1218
+ UserWarning, stacklevel=3)
1219
+ if structure is None:
1220
+ if footprint is None:
1221
+ if size is None:
1222
+ raise RuntimeError("no footprint provided")
1223
+ separable = True
1224
+ else:
1225
+ footprint = numpy.asarray(footprint, dtype=bool)
1226
+ if not footprint.any():
1227
+ raise ValueError("All-zero footprint is not supported.")
1228
+ if footprint.all():
1229
+ size = footprint.shape
1230
+ footprint = None
1231
+ separable = True
1232
+ else:
1233
+ separable = False
1234
+ else:
1235
+ structure = numpy.asarray(structure, dtype=numpy.float64)
1236
+ separable = False
1237
+ if footprint is None:
1238
+ footprint = numpy.ones(structure.shape, bool)
1239
+ else:
1240
+ footprint = numpy.asarray(footprint, dtype=bool)
1241
+ input = numpy.asarray(input)
1242
+ if numpy.iscomplexobj(input):
1243
+ raise TypeError('Complex type not supported')
1244
+ output = _ni_support._get_output(output, input)
1245
+ temp_needed = numpy.may_share_memory(input, output)
1246
+ if temp_needed:
1247
+ # input and output arrays cannot share memory
1248
+ temp = output
1249
+ output = _ni_support._get_output(output.dtype, input)
1250
+ axes = _ni_support._check_axes(axes, input.ndim)
1251
+ num_axes = len(axes)
1252
+ if separable:
1253
+ origins = _ni_support._normalize_sequence(origin, num_axes)
1254
+ sizes = _ni_support._normalize_sequence(size, num_axes)
1255
+ modes = _ni_support._normalize_sequence(mode, num_axes)
1256
+ axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
1257
+ for ii in range(len(axes)) if sizes[ii] > 1]
1258
+ if minimum:
1259
+ filter_ = minimum_filter1d
1260
+ else:
1261
+ filter_ = maximum_filter1d
1262
+ if len(axes) > 0:
1263
+ for axis, size, origin, mode in axes:
1264
+ filter_(input, int(size), axis, output, mode, cval, origin)
1265
+ input = output
1266
+ else:
1267
+ output[...] = input[...]
1268
+ else:
1269
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
1270
+ if num_axes < input.ndim:
1271
+ if footprint.ndim != num_axes:
1272
+ raise RuntimeError("footprint array has incorrect shape")
1273
+ footprint = numpy.expand_dims(
1274
+ footprint,
1275
+ tuple(ax for ax in range(input.ndim) if ax not in axes)
1276
+ )
1277
+ fshape = [ii for ii in footprint.shape if ii > 0]
1278
+ if len(fshape) != input.ndim:
1279
+ raise RuntimeError('footprint array has incorrect shape.')
1280
+ for origin, lenf in zip(origins, fshape):
1281
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
1282
+ raise ValueError('invalid origin')
1283
+ if not footprint.flags.contiguous:
1284
+ footprint = footprint.copy()
1285
+ if structure is not None:
1286
+ if len(structure.shape) != input.ndim:
1287
+ raise RuntimeError('structure array has incorrect shape')
1288
+ if num_axes != structure.ndim:
1289
+ structure = numpy.expand_dims(
1290
+ structure,
1291
+ tuple(ax for ax in range(structure.ndim) if ax not in axes)
1292
+ )
1293
+ if not structure.flags.contiguous:
1294
+ structure = structure.copy()
1295
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
1296
+ raise RuntimeError(
1297
+ "A sequence of modes is not supported for non-separable "
1298
+ "footprints")
1299
+ mode = _ni_support._extend_mode_to_code(mode)
1300
+ _nd_image.min_or_max_filter(input, footprint, structure, output,
1301
+ mode, cval, origins, minimum)
1302
+ if temp_needed:
1303
+ temp[...] = output
1304
+ output = temp
1305
+ return output
1306
+
1307
+
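`_min_or_max_filter` converts an all-True footprint into a plain `size`, so passing `footprint=np.ones(shape, bool)` and passing `size=shape` take the same separable path and give identical results. A minimal sketch (illustrative array values):

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.random.default_rng(0).integers(0, 10, size=(6, 6))
>>> fp = np.ones((3, 3), dtype=bool)  # footprint.all() is True
>>> np.array_equal(ndimage.minimum_filter(a, footprint=fp),
...                ndimage.minimum_filter(a, size=3))
True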
1308
+ @_ni_docstrings.docfiller
1309
+ def minimum_filter(input, size=None, footprint=None, output=None,
1310
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1311
+ """Calculate a multidimensional minimum filter.
1312
+
1313
+ Parameters
1314
+ ----------
1315
+ %(input)s
1316
+ %(size_foot)s
1317
+ %(output)s
1318
+ %(mode_multiple)s
1319
+ %(cval)s
1320
+ %(origin_multiple)s
1321
+ axes : tuple of int or None, optional
1322
+ If None, `input` is filtered along all axes. Otherwise,
1323
+ `input` is filtered along the specified axes. When `axes` is
1324
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1325
+ must match the length of `axes`. The ith entry in any of these tuples
1326
+ corresponds to the ith entry in `axes`.
1327
+
1328
+ Returns
1329
+ -------
1330
+ minimum_filter : ndarray
1331
+ Filtered array. Has the same shape as `input`.
1332
+
1333
+ Notes
1334
+ -----
1335
+ A sequence of modes (one per axis) is only supported when the footprint is
1336
+ separable. Otherwise, a single mode string must be provided.
1337
+
1338
+ Examples
1339
+ --------
1340
+ >>> from scipy import ndimage, datasets
1341
+ >>> import matplotlib.pyplot as plt
1342
+ >>> fig = plt.figure()
1343
+ >>> plt.gray() # show the filtered result in grayscale
1344
+ >>> ax1 = fig.add_subplot(121) # left side
1345
+ >>> ax2 = fig.add_subplot(122) # right side
1346
+ >>> ascent = datasets.ascent()
1347
+ >>> result = ndimage.minimum_filter(ascent, size=20)
1348
+ >>> ax1.imshow(ascent)
1349
+ >>> ax2.imshow(result)
1350
+ >>> plt.show()
1351
+ """
1352
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
1353
+ cval, origin, 1, axes)
1354
+
1355
+
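A short sketch of the `axes` keyword documented above: filtering only the last two axes of a 3-D array is equivalent to filtering each 2-D slice on its own (default `reflect` mode assumed):

>>> import numpy as np
>>> from scipy import ndimage
>>> vol = np.arange(4 * 5 * 6, dtype=float).reshape(4, 5, 6)
>>> out = ndimage.minimum_filter(vol, size=3, axes=(1, 2))
>>> np.array_equal(out[0], ndimage.minimum_filter(vol[0], size=3))
True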
1356
+ @_ni_docstrings.docfiller
1357
+ def maximum_filter(input, size=None, footprint=None, output=None,
1358
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1359
+ """Calculate a multidimensional maximum filter.
1360
+
1361
+ Parameters
1362
+ ----------
1363
+ %(input)s
1364
+ %(size_foot)s
1365
+ %(output)s
1366
+ %(mode_multiple)s
1367
+ %(cval)s
1368
+ %(origin_multiple)s
1369
+ axes : tuple of int or None, optional
1370
+ If None, `input` is filtered along all axes. Otherwise,
1371
+ `input` is filtered along the specified axes. When `axes` is
1372
+ specified, any tuples used for `size`, `origin`, and/or `mode`
1373
+ must match the length of `axes`. The ith entry in any of these tuples
1374
+ corresponds to the ith entry in `axes`.
1375
+
1376
+ Returns
1377
+ -------
1378
+ maximum_filter : ndarray
1379
+ Filtered array. Has the same shape as `input`.
1380
+
1381
+ Notes
1382
+ -----
1383
+ A sequence of modes (one per axis) is only supported when the footprint is
1384
+ separable. Otherwise, a single mode string must be provided.
1385
+
1386
+ Examples
1387
+ --------
1388
+ >>> from scipy import ndimage, datasets
1389
+ >>> import matplotlib.pyplot as plt
1390
+ >>> fig = plt.figure()
1391
+ >>> plt.gray() # show the filtered result in grayscale
1392
+ >>> ax1 = fig.add_subplot(121) # left side
1393
+ >>> ax2 = fig.add_subplot(122) # right side
1394
+ >>> ascent = datasets.ascent()
1395
+ >>> result = ndimage.maximum_filter(ascent, size=20)
1396
+ >>> ax1.imshow(ascent)
1397
+ >>> ax2.imshow(result)
1398
+ >>> plt.show()
1399
+ """
1400
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
1401
+ cval, origin, 0, axes)
1402
+
1403
+
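The Notes above also mean that with a plain `size` (a separable footprint) a sequence of modes, one per filtered axis, is accepted. A minimal sketch of such a call:

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.arange(16, dtype=float).reshape(4, 4)
>>> ndimage.maximum_filter(a, size=3, mode=['reflect', 'nearest']).shape
(4, 4)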
1404
+ @_ni_docstrings.docfiller
1405
+ def _rank_filter(input, rank, size=None, footprint=None, output=None,
1406
+ mode="reflect", cval=0.0, origin=0, operation='rank',
1407
+ axes=None):
1408
+ if (size is not None) and (footprint is not None):
1409
+ warnings.warn("ignoring size because footprint is set",
1410
+ UserWarning, stacklevel=3)
1411
+ input = numpy.asarray(input)
1412
+ if numpy.iscomplexobj(input):
1413
+ raise TypeError('Complex type not supported')
1414
+ axes = _ni_support._check_axes(axes, input.ndim)
1415
+ num_axes = len(axes)
1416
+ origins = _ni_support._normalize_sequence(origin, num_axes)
1417
+ if footprint is None:
1418
+ if size is None:
1419
+ raise RuntimeError("no footprint or filter size provided")
1420
+ sizes = _ni_support._normalize_sequence(size, num_axes)
1421
+ footprint = numpy.ones(sizes, dtype=bool)
1422
+ else:
1423
+ footprint = numpy.asarray(footprint, dtype=bool)
1424
+ if num_axes < input.ndim:
1425
+ # set origin = 0 for any axes not being filtered
1426
+ origins_temp = [0,] * input.ndim
1427
+ for o, ax in zip(origins, axes):
1428
+ origins_temp[ax] = o
1429
+ origins = origins_temp
1430
+
1431
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
1432
+ # set mode = 'constant' for any axes not being filtered
1433
+ modes = _ni_support._normalize_sequence(mode, num_axes)
1434
+ modes_temp = ['constant'] * input.ndim
1435
+ for m, ax in zip(modes, axes):
1436
+ modes_temp[ax] = m
1437
+ mode = modes_temp
1438
+
1439
+ # insert singleton dimension along any non-filtered axes
1440
+ if footprint.ndim != num_axes:
1441
+ raise RuntimeError("footprint array has incorrect shape")
1442
+ footprint = numpy.expand_dims(
1443
+ footprint,
1444
+ tuple(ax for ax in range(input.ndim) if ax not in axes)
1445
+ )
1446
+ fshape = [ii for ii in footprint.shape if ii > 0]
1447
+ if len(fshape) != input.ndim:
1448
+ raise RuntimeError('footprint array has incorrect shape.')
1449
+ for origin, lenf in zip(origins, fshape):
1450
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
1451
+ raise ValueError('invalid origin')
1452
+ if not footprint.flags.contiguous:
1453
+ footprint = footprint.copy()
1454
+ filter_size = numpy.where(footprint, 1, 0).sum()
1455
+ if operation == 'median':
1456
+ rank = filter_size // 2
1457
+ elif operation == 'percentile':
1458
+ percentile = rank
1459
+ if percentile < 0.0:
1460
+ percentile += 100.0
1461
+ if percentile < 0 or percentile > 100:
1462
+ raise RuntimeError('invalid percentile')
1463
+ if percentile == 100.0:
1464
+ rank = filter_size - 1
1465
+ else:
1466
+ rank = int(float(filter_size) * percentile / 100.0)
1467
+ if rank < 0:
1468
+ rank += filter_size
1469
+ if rank < 0 or rank >= filter_size:
1470
+ raise RuntimeError('rank not within filter footprint size')
1471
+ if rank == 0:
1472
+ return minimum_filter(input, None, footprint, output, mode, cval,
1473
+ origins, axes=None)
1474
+ elif rank == filter_size - 1:
1475
+ return maximum_filter(input, None, footprint, output, mode, cval,
1476
+ origins, axes=None)
1477
+ else:
1478
+ output = _ni_support._get_output(output, input)
1479
+ temp_needed = numpy.may_share_memory(input, output)
1480
+ if temp_needed:
1481
+ # input and output arrays cannot share memory
1482
+ temp = output
1483
+ output = _ni_support._get_output(output.dtype, input)
1484
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
1485
+ raise RuntimeError(
1486
+ "A sequence of modes is not supported by non-separable rank "
1487
+ "filters")
1488
+ mode = _ni_support._extend_mode_to_code(mode)
1489
+ _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
1490
+ origins)
1491
+ if temp_needed:
1492
+ temp[...] = output
1493
+ output = temp
1494
+ return output
1495
+
1496
+
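The percentile branch above maps a percentile to an integer rank with `int(filter_size * percentile / 100.0)`. For a 3x3 footprint (`filter_size == 9`), `percentile=50` gives rank `int(4.5) == 4`, which is the median position `9 // 2`, so the two public wrappers agree. A quick check (a sketch, float input assumed):

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.random.default_rng(1).random((5, 5))
>>> np.allclose(ndimage.percentile_filter(a, 50, size=3),
...             ndimage.median_filter(a, size=3))
True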
1497
+ @_ni_docstrings.docfiller
1498
+ def rank_filter(input, rank, size=None, footprint=None, output=None,
1499
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1500
+ """Calculate a multidimensional rank filter.
1501
+
1502
+ Parameters
1503
+ ----------
1504
+ %(input)s
1505
+ rank : int
1506
+ The rank parameter may be less than zero, i.e., rank = -1
1507
+ indicates the largest element.
1508
+ %(size_foot)s
1509
+ %(output)s
1510
+ %(mode_reflect)s
1511
+ %(cval)s
1512
+ %(origin_multiple)s
1513
+ axes : tuple of int or None, optional
1514
+ If None, `input` is filtered along all axes. Otherwise,
1515
+ `input` is filtered along the specified axes.
1516
+
1517
+ Returns
1518
+ -------
1519
+ rank_filter : ndarray
1520
+ Filtered array. Has the same shape as `input`.
1521
+
1522
+ Examples
1523
+ --------
1524
+ >>> from scipy import ndimage, datasets
1525
+ >>> import matplotlib.pyplot as plt
1526
+ >>> fig = plt.figure()
1527
+ >>> plt.gray() # show the filtered result in grayscale
1528
+ >>> ax1 = fig.add_subplot(121) # left side
1529
+ >>> ax2 = fig.add_subplot(122) # right side
1530
+ >>> ascent = datasets.ascent()
1531
+ >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
1532
+ >>> ax1.imshow(ascent)
1533
+ >>> ax2.imshow(result)
1534
+ >>> plt.show()
1535
+ """
1536
+ rank = operator.index(rank)
1537
+ return _rank_filter(input, rank, size, footprint, output, mode, cval,
1538
+ origin, 'rank', axes=axes)
1539
+
1540
+
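As the dispatch in `_rank_filter` shows, the extreme ranks are routed to the dedicated extremum filters, so `rank=0` and `rank=-1` reproduce `minimum_filter` and `maximum_filter`. A minimal sketch:

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.random.default_rng(2).integers(0, 100, size=(6, 6))
>>> np.array_equal(ndimage.rank_filter(a, 0, size=3),
...                ndimage.minimum_filter(a, size=3))
True
>>> np.array_equal(ndimage.rank_filter(a, -1, size=3),
...                ndimage.maximum_filter(a, size=3))
True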
1541
+ @_ni_docstrings.docfiller
1542
+ def median_filter(input, size=None, footprint=None, output=None,
1543
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
1544
+ """
1545
+ Calculate a multidimensional median filter.
1546
+
1547
+ Parameters
1548
+ ----------
1549
+ %(input)s
1550
+ %(size_foot)s
1551
+ %(output)s
1552
+ %(mode_reflect)s
1553
+ %(cval)s
1554
+ %(origin_multiple)s
1555
+ axes : tuple of int or None, optional
1556
+ If None, `input` is filtered along all axes. Otherwise,
1557
+ `input` is filtered along the specified axes.
1558
+
1559
+ Returns
1560
+ -------
1561
+ median_filter : ndarray
1562
+ Filtered array. Has the same shape as `input`.
1563
+
1564
+ See Also
1565
+ --------
1566
+ scipy.signal.medfilt2d
1567
+
1568
+ Notes
1569
+ -----
1570
+ For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
1571
+ the specialised function `scipy.signal.medfilt2d` may be faster. It is
1572
+ however limited to constant mode with ``cval=0``.
1573
+
1574
+ Examples
1575
+ --------
1576
+ >>> from scipy import ndimage, datasets
1577
+ >>> import matplotlib.pyplot as plt
1578
+ >>> fig = plt.figure()
1579
+ >>> plt.gray() # show the filtered result in grayscale
1580
+ >>> ax1 = fig.add_subplot(121) # left side
1581
+ >>> ax2 = fig.add_subplot(122) # right side
1582
+ >>> ascent = datasets.ascent()
1583
+ >>> result = ndimage.median_filter(ascent, size=20)
1584
+ >>> ax1.imshow(ascent)
1585
+ >>> ax2.imshow(result)
1586
+ >>> plt.show()
1587
+ """
1588
+ return _rank_filter(input, 0, size, footprint, output, mode, cval,
1589
+ origin, 'median', axes=axes)
1590
+
1591
+
1592
+ @_ni_docstrings.docfiller
1593
+ def percentile_filter(input, percentile, size=None, footprint=None,
1594
+ output=None, mode="reflect", cval=0.0, origin=0, *,
1595
+ axes=None):
1596
+ """Calculate a multidimensional percentile filter.
1597
+
1598
+ Parameters
1599
+ ----------
1600
+ %(input)s
1601
+ percentile : scalar
1602
+ The percentile parameter may be less than zero, i.e.,
1603
+ percentile = -20 equals percentile = 80
1604
+ %(size_foot)s
1605
+ %(output)s
1606
+ %(mode_reflect)s
1607
+ %(cval)s
1608
+ %(origin_multiple)s
1609
+ axes : tuple of int or None, optional
1610
+ If None, `input` is filtered along all axes. Otherwise,
1611
+ `input` is filtered along the specified axes.
1612
+
1613
+ Returns
1614
+ -------
1615
+ percentile_filter : ndarray
1616
+ Filtered array. Has the same shape as `input`.
1617
+
1618
+ Examples
1619
+ --------
1620
+ >>> from scipy import ndimage, datasets
1621
+ >>> import matplotlib.pyplot as plt
1622
+ >>> fig = plt.figure()
1623
+ >>> plt.gray() # show the filtered result in grayscale
1624
+ >>> ax1 = fig.add_subplot(121) # left side
1625
+ >>> ax2 = fig.add_subplot(122) # right side
1626
+ >>> ascent = datasets.ascent()
1627
+ >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
1628
+ >>> ax1.imshow(ascent)
1629
+ >>> ax2.imshow(result)
1630
+ >>> plt.show()
1631
+ """
1632
+ return _rank_filter(input, percentile, size, footprint, output, mode,
1633
+ cval, origin, 'percentile', axes=axes)
1634
+
1635
+
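The negative-percentile convention documented above ("percentile = -20 equals percentile = 80") comes straight from the `percentile += 100.0` branch in `_rank_filter`. A quick sketch:

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.random.default_rng(3).random((5, 5))
>>> np.allclose(ndimage.percentile_filter(a, -20, size=3),
...             ndimage.percentile_filter(a, 80, size=3))
True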
1636
+ @_ni_docstrings.docfiller
1637
+ def generic_filter1d(input, function, filter_size, axis=-1,
1638
+ output=None, mode="reflect", cval=0.0, origin=0,
1639
+ extra_arguments=(), extra_keywords=None):
1640
+ """Calculate a 1-D filter along the given axis.
1641
+
1642
+ `generic_filter1d` iterates over the lines of the array, calling the
1643
+ given function at each line. The arguments passed to the function are the
1644
+ input line, and the output line. The input and output lines are 1-D
1645
+ double arrays. The input line is extended appropriately according
1646
+ to the filter size and origin. The output line must be modified
1647
+ in-place with the result.
1648
+
1649
+ Parameters
1650
+ ----------
1651
+ %(input)s
1652
+ function : {callable, scipy.LowLevelCallable}
1653
+ Function to apply along given axis.
1654
+ filter_size : scalar
1655
+ Length of the filter.
1656
+ %(axis)s
1657
+ %(output)s
1658
+ %(mode_reflect)s
1659
+ %(cval)s
1660
+ %(origin)s
1661
+ %(extra_arguments)s
1662
+ %(extra_keywords)s
1663
+
1664
+ Returns
1665
+ -------
1666
+ generic_filter1d : ndarray
1667
+ Filtered array. Has the same shape as `input`.
1668
+
1669
+ Notes
1670
+ -----
1671
+ This function also accepts low-level callback functions with one of
1672
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
1673
+
1674
+ .. code:: c
1675
+
1676
+ int function(double *input_line, npy_intp input_length,
1677
+ double *output_line, npy_intp output_length,
1678
+ void *user_data)
1679
+ int function(double *input_line, intptr_t input_length,
1680
+ double *output_line, intptr_t output_length,
1681
+ void *user_data)
1682
+
1683
+ The calling function iterates over the lines of the input and output
1684
+ arrays, calling the callback function at each line. The current line
1685
+ is extended according to the border conditions set by the calling
1686
+ function, and the result is copied into the array that is passed
1687
+ through ``input_line``. The length of the input line (after extension)
1688
+ is passed through ``input_length``. The callback function should apply
1689
+ the filter and store the result in the array passed through
1690
+ ``output_line``. The length of the output line is passed through
1691
+ ``output_length``. ``user_data`` is the data pointer provided
1692
+ to `scipy.LowLevelCallable` as-is.
1693
+
1694
+ The callback function must return an integer error status that is zero
1695
+ if something went wrong and one otherwise. If an error occurs, you should
1696
+ normally set the python error status with an informative message
1697
+ before returning, otherwise a default error message is set by the
1698
+ calling function.
1699
+
1700
+ In addition, some other low-level function pointer specifications
1701
+ are accepted, but these are for backward compatibility only and should
1702
+ not be used in new code.
1703
+
1704
+ """
1705
+ if extra_keywords is None:
1706
+ extra_keywords = {}
1707
+ input = numpy.asarray(input)
1708
+ if numpy.iscomplexobj(input):
1709
+ raise TypeError('Complex type not supported')
1710
+ output = _ni_support._get_output(output, input)
1711
+ if filter_size < 1:
1712
+ raise RuntimeError('invalid filter size')
1713
+ axis = normalize_axis_index(axis, input.ndim)
1714
+ if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
1715
+ filter_size):
1716
+ raise ValueError('invalid origin')
1717
+ mode = _ni_support._extend_mode_to_code(mode)
1718
+ _nd_image.generic_filter1d(input, function, filter_size, axis, output,
1719
+ mode, cval, origin, extra_arguments,
1720
+ extra_keywords)
1721
+ return output
1722
+
1723
+
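The docstring above has no doctest, so here is a minimal sketch of the pure-Python callback protocol: the callable receives the border-extended input line and must write its result into the output line in place. The `running_mean` helper below is a hypothetical example; for `filter_size=3` it should match `uniform_filter1d`:

>>> import numpy as np
>>> from scipy.ndimage import generic_filter1d, uniform_filter1d
>>> def running_mean(in_line, out_line):
...     # in_line is extended by size // 2 == 1 sample on each side
...     out_line[...] = (in_line[:-2] + in_line[1:-1] + in_line[2:]) / 3.0
...
>>> x = np.arange(8, dtype=float)
>>> np.allclose(generic_filter1d(x, running_mean, 3), uniform_filter1d(x, 3))
True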
1724
+ @_ni_docstrings.docfiller
1725
+ def generic_filter(input, function, size=None, footprint=None,
1726
+ output=None, mode="reflect", cval=0.0, origin=0,
1727
+ extra_arguments=(), extra_keywords=None):
1728
+ """Calculate a multidimensional filter using the given function.
1729
+
1730
+ At each element the provided function is called. The input values
1731
+ within the filter footprint at that element are passed to the function
1732
+ as a 1-D array of double values.
1733
+
1734
+ Parameters
1735
+ ----------
1736
+ %(input)s
1737
+ function : {callable, scipy.LowLevelCallable}
1738
+ Function to apply at each element.
1739
+ %(size_foot)s
1740
+ %(output)s
1741
+ %(mode_reflect)s
1742
+ %(cval)s
1743
+ %(origin_multiple)s
1744
+ %(extra_arguments)s
1745
+ %(extra_keywords)s
1746
+
1747
+ Returns
1748
+ -------
1749
+ generic_filter : ndarray
1750
+ Filtered array. Has the same shape as `input`.
1751
+
1752
+ Notes
1753
+ -----
1754
+ This function also accepts low-level callback functions with one of
1755
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
1756
+
1757
+ .. code:: c
1758
+
1759
+ int callback(double *buffer, npy_intp filter_size,
1760
+ double *return_value, void *user_data)
1761
+ int callback(double *buffer, intptr_t filter_size,
1762
+ double *return_value, void *user_data)
1763
+
1764
+ The calling function iterates over the elements of the input and
1765
+ output arrays, calling the callback function at each element. The
1766
+ elements within the footprint of the filter at the current element are
1767
+ passed through the ``buffer`` parameter, and the number of elements
1768
+ within the footprint through ``filter_size``. The calculated value is
1769
+ returned in ``return_value``. ``user_data`` is the data pointer provided
1770
+ to `scipy.LowLevelCallable` as-is.
1771
+
1772
+ The callback function must return an integer error status that is zero
1773
+ if something went wrong and one otherwise. If an error occurs, you should
1774
+ normally set the python error status with an informative message
1775
+ before returning, otherwise a default error message is set by the
1776
+ calling function.
1777
+
1778
+ In addition, some other low-level function pointer specifications
1779
+ are accepted, but these are for backward compatibility only and should
1780
+ not be used in new code.
1781
+
1782
+ Examples
1783
+ --------
1784
+ Import the necessary modules and load the example image used for
1785
+ filtering.
1786
+
1787
+ >>> import numpy as np
1788
+ >>> from scipy import datasets
1789
+ >>> from scipy.ndimage import generic_filter
1790
+ >>> import matplotlib.pyplot as plt
1791
+ >>> ascent = datasets.ascent()
1792
+
1793
+ Compute a maximum filter with kernel size 10 by passing a simple NumPy
1794
+ aggregation function as argument to `function`.
1795
+
1796
+ >>> maximum_filter_result = generic_filter(ascent, np.amax, [10, 10])
1797
+
1798
+ While a maximum filter could also directly be obtained using
1799
+ `maximum_filter`, `generic_filter` allows a generic Python function or
1800
+ `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
1801
+ range between maximum and minimum value as an example for a kernel size
1802
+ of 5.
1803
+
1804
+ >>> def custom_filter(image):
1805
+ ... return np.amax(image) - np.amin(image)
1806
+ >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])
1807
+
1808
+ Plot the original and filtered images.
1809
+
1810
+ >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
1811
+ >>> plt.gray() # show the filtered result in grayscale
1812
+ >>> top, middle, bottom = axes
1813
+ >>> for ax in axes:
1814
+ ... ax.set_axis_off() # remove coordinate system
1815
+ >>> top.imshow(ascent)
1816
+ >>> top.set_title("Original image")
1817
+ >>> middle.imshow(maximum_filter_result)
1818
+ >>> middle.set_title("Maximum filter, Kernel: 10x10")
1819
+ >>> bottom.imshow(custom_filter_result)
1820
+ >>> bottom.set_title("Custom filter, Kernel: 5x5")
1821
+ >>> fig.tight_layout()
1822
+
1823
+ """
1824
+ if (size is not None) and (footprint is not None):
1825
+ warnings.warn("ignoring size because footprint is set",
1826
+ UserWarning, stacklevel=2)
1827
+ if extra_keywords is None:
1828
+ extra_keywords = {}
1829
+ input = numpy.asarray(input)
1830
+ if numpy.iscomplexobj(input):
1831
+ raise TypeError('Complex type not supported')
1832
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
1833
+ if footprint is None:
1834
+ if size is None:
1835
+ raise RuntimeError("no footprint or filter size provided")
1836
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
1837
+ footprint = numpy.ones(sizes, dtype=bool)
1838
+ else:
1839
+ footprint = numpy.asarray(footprint, dtype=bool)
1840
+ fshape = [ii for ii in footprint.shape if ii > 0]
1841
+ if len(fshape) != input.ndim:
1842
+ raise RuntimeError('filter footprint array has incorrect shape.')
1843
+ for origin, lenf in zip(origins, fshape):
1844
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
1845
+ raise ValueError('invalid origin')
1846
+ if not footprint.flags.contiguous:
1847
+ footprint = footprint.copy()
1848
+ output = _ni_support._get_output(output, input)
1849
+ mode = _ni_support._extend_mode_to_code(mode)
1850
+ _nd_image.generic_filter(input, function, footprint, output, mode,
1851
+ cval, origins, extra_arguments, extra_keywords)
1852
+ return output
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_fourier.py ADDED
@@ -0,0 +1,307 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import numpy
32
+ from scipy._lib._util import normalize_axis_index
33
+ from . import _ni_support
34
+ from . import _nd_image
35
+
36
+ __all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
37
+ 'fourier_shift']
38
+
39
+
40
+ def _get_output_fourier(output, input):
41
+ if output is None:
42
+ if input.dtype.type in [numpy.complex64, numpy.complex128,
43
+ numpy.float32]:
44
+ output = numpy.zeros(input.shape, dtype=input.dtype)
45
+ else:
46
+ output = numpy.zeros(input.shape, dtype=numpy.float64)
47
+ elif type(output) is type:
48
+ if output not in [numpy.complex64, numpy.complex128,
49
+ numpy.float32, numpy.float64]:
50
+ raise RuntimeError("output type not supported")
51
+ output = numpy.zeros(input.shape, dtype=output)
52
+ elif output.shape != input.shape:
53
+ raise RuntimeError("output shape not correct")
54
+ return output
55
+
56
+
57
+ def _get_output_fourier_complex(output, input):
58
+ if output is None:
59
+ if input.dtype.type in [numpy.complex64, numpy.complex128]:
60
+ output = numpy.zeros(input.shape, dtype=input.dtype)
61
+ else:
62
+ output = numpy.zeros(input.shape, dtype=numpy.complex128)
63
+ elif type(output) is type:
64
+ if output not in [numpy.complex64, numpy.complex128]:
65
+ raise RuntimeError("output type not supported")
66
+ output = numpy.zeros(input.shape, dtype=output)
67
+ elif output.shape != input.shape:
68
+ raise RuntimeError("output shape not correct")
69
+ return output
70
+
71
+
72
+ def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
73
+ """
74
+ Multidimensional Gaussian Fourier filter.
75
+
76
+ The array is multiplied with the Fourier transform of a Gaussian
77
+ kernel.
78
+
79
+ Parameters
80
+ ----------
81
+ input : array_like
82
+ The input array.
83
+ sigma : float or sequence
84
+ The sigma of the Gaussian kernel. If a float, `sigma` is the same for
85
+ all axes. If a sequence, `sigma` has to contain one value for each
86
+ axis.
87
+ n : int, optional
88
+ If `n` is negative (default), then the input is assumed to be the
89
+ result of a complex fft.
90
+ If `n` is larger than or equal to zero, the input is assumed to be the
91
+ result of a real fft, and `n` gives the length of the array before
92
+ transformation along the real transform direction.
93
+ axis : int, optional
94
+ The axis of the real transform.
95
+ output : ndarray, optional
96
+ If given, the result of filtering the input is placed in this array.
97
+
98
+ Returns
99
+ -------
100
+ fourier_gaussian : ndarray
101
+ The filtered input.
102
+
103
+ Examples
104
+ --------
105
+ >>> from scipy import ndimage, datasets
106
+ >>> import numpy.fft
107
+ >>> import matplotlib.pyplot as plt
108
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
109
+ >>> plt.gray() # show the filtered result in grayscale
110
+ >>> ascent = datasets.ascent()
111
+ >>> input_ = numpy.fft.fft2(ascent)
112
+ >>> result = ndimage.fourier_gaussian(input_, sigma=4)
113
+ >>> result = numpy.fft.ifft2(result)
114
+ >>> ax1.imshow(ascent)
115
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
116
+ >>> plt.show()
117
+ """
118
+ input = numpy.asarray(input)
119
+ output = _get_output_fourier(output, input)
120
+ axis = normalize_axis_index(axis, input.ndim)
121
+ sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
122
+ sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
123
+ if not sigmas.flags.contiguous:
124
+ sigmas = sigmas.copy()
125
+
126
+ _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
127
+ return output
128
+
129
+
130
+ def fourier_uniform(input, size, n=-1, axis=-1, output=None):
131
+ """
132
+ Multidimensional uniform Fourier filter.
133
+
134
+ The array is multiplied with the Fourier transform of a box of given
135
+ size.
136
+
137
+ Parameters
138
+ ----------
139
+ input : array_like
140
+ The input array.
141
+ size : float or sequence
142
+ The size of the box used for filtering.
143
+ If a float, `size` is the same for all axes. If a sequence, `size` has
144
+ to contain one value for each axis.
145
+ n : int, optional
146
+ If `n` is negative (default), then the input is assumed to be the
147
+ result of a complex fft.
148
+ If `n` is larger than or equal to zero, the input is assumed to be the
149
+ result of a real fft, and `n` gives the length of the array before
150
+ transformation along the real transform direction.
151
+ axis : int, optional
152
+ The axis of the real transform.
153
+ output : ndarray, optional
154
+ If given, the result of filtering the input is placed in this array.
155
+
156
+ Returns
157
+ -------
158
+ fourier_uniform : ndarray
159
+ The filtered input.
160
+
161
+ Examples
162
+ --------
163
+ >>> from scipy import ndimage, datasets
164
+ >>> import numpy.fft
165
+ >>> import matplotlib.pyplot as plt
166
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
167
+ >>> plt.gray() # show the filtered result in grayscale
168
+ >>> ascent = datasets.ascent()
169
+ >>> input_ = numpy.fft.fft2(ascent)
170
+ >>> result = ndimage.fourier_uniform(input_, size=20)
171
+ >>> result = numpy.fft.ifft2(result)
172
+ >>> ax1.imshow(ascent)
173
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
174
+ >>> plt.show()
175
+ """
176
+ input = numpy.asarray(input)
177
+ output = _get_output_fourier(output, input)
178
+ axis = normalize_axis_index(axis, input.ndim)
179
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
180
+ sizes = numpy.asarray(sizes, dtype=numpy.float64)
181
+ if not sizes.flags.contiguous:
182
+ sizes = sizes.copy()
183
+ _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
184
+ return output
185
+
186
+
187
+ def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
188
+ """
189
+ Multidimensional ellipsoid Fourier filter.
190
+
191
+ The array is multiplied with the Fourier transform of an ellipsoid of
192
+ given sizes.
193
+
194
+ Parameters
195
+ ----------
196
+ input : array_like
197
+ The input array.
198
+ size : float or sequence
199
+ The size of the box used for filtering.
200
+ If a float, `size` is the same for all axes. If a sequence, `size` has
201
+ to contain one value for each axis.
202
+ n : int, optional
203
+ If `n` is negative (default), then the input is assumed to be the
204
+ result of a complex fft.
205
+ If `n` is larger than or equal to zero, the input is assumed to be the
206
+ result of a real fft, and `n` gives the length of the array before
207
+ transformation along the real transform direction.
208
+ axis : int, optional
209
+ The axis of the real transform.
210
+ output : ndarray, optional
211
+ If given, the result of filtering the input is placed in this array.
212
+
213
+ Returns
214
+ -------
215
+ fourier_ellipsoid : ndarray
216
+ The filtered input.
217
+
218
+ Notes
219
+ -----
220
+ This function is implemented for arrays of rank 1, 2, or 3.
221
+
222
+ Examples
223
+ --------
224
+ >>> from scipy import ndimage, datasets
225
+ >>> import numpy.fft
226
+ >>> import matplotlib.pyplot as plt
227
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
228
+ >>> plt.gray() # show the filtered result in grayscale
229
+ >>> ascent = datasets.ascent()
230
+ >>> input_ = numpy.fft.fft2(ascent)
231
+ >>> result = ndimage.fourier_ellipsoid(input_, size=20)
232
+ >>> result = numpy.fft.ifft2(result)
233
+ >>> ax1.imshow(ascent)
234
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
235
+ >>> plt.show()
236
+ """
237
+ input = numpy.asarray(input)
238
+ if input.ndim > 3:
239
+ raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")
240
+ output = _get_output_fourier(output, input)
241
+ if output.size == 0:
242
+ # The C code has a bug that can result in a segfault with arrays
243
+ # that have size 0 (gh-17270), so check here.
244
+ return output
245
+ axis = normalize_axis_index(axis, input.ndim)
246
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
247
+ sizes = numpy.asarray(sizes, dtype=numpy.float64)
248
+ if not sizes.flags.contiguous:
249
+ sizes = sizes.copy()
250
+ _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
251
+ return output
252
+
253
+
254
+ def fourier_shift(input, shift, n=-1, axis=-1, output=None):
255
+ """
256
+ Multidimensional Fourier shift filter.
257
+
258
+ The array is multiplied with the Fourier transform of a shift operation.
259
+
260
+ Parameters
261
+ ----------
262
+ input : array_like
263
+ The input array.
264
+ shift : float or sequence
265
+ The shift along the axes.
266
+ If a float, `shift` is the same for all axes. If a sequence, `shift`
267
+ has to contain one value for each axis.
268
+ n : int, optional
269
+ If `n` is negative (default), then the input is assumed to be the
270
+ result of a complex fft.
271
+ If `n` is larger than or equal to zero, the input is assumed to be the
272
+ result of a real fft, and `n` gives the length of the array before
273
+ transformation along the real transform direction.
274
+ axis : int, optional
275
+ The axis of the real transform.
276
+ output : ndarray, optional
277
+ If given, the result of shifting the input is placed in this array.
278
+
279
+ Returns
280
+ -------
281
+ fourier_shift : ndarray
282
+ The shifted input.
283
+
284
+ Examples
285
+ --------
286
+ >>> from scipy import ndimage, datasets
287
+ >>> import matplotlib.pyplot as plt
288
+ >>> import numpy.fft
289
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
290
+ >>> plt.gray() # show the filtered result in grayscale
291
+ >>> ascent = datasets.ascent()
292
+ >>> input_ = numpy.fft.fft2(ascent)
293
+ >>> result = ndimage.fourier_shift(input_, shift=200)
294
+ >>> result = numpy.fft.ifft2(result)
295
+ >>> ax1.imshow(ascent)
296
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
297
+ >>> plt.show()
298
+ """
299
+ input = numpy.asarray(input)
300
+ output = _get_output_fourier_complex(output, input)
301
+ axis = normalize_axis_index(axis, input.ndim)
302
+ shifts = _ni_support._normalize_sequence(shift, input.ndim)
303
+ shifts = numpy.asarray(shifts, dtype=numpy.float64)
304
+ if not shifts.flags.contiguous:
305
+ shifts = shifts.copy()
306
+ _nd_image.fourier_shift(input, shifts, n, axis, output)
307
+ return output
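All four functions above take the `n`/`axis` pair so they can also be applied to the output of a real transform. A minimal sketch (illustrative values): the real-FFT path, with `n` set to the original length, matches the full complex-FFT path.

>>> import numpy as np
>>> from scipy import ndimage
>>> x = np.zeros(16)
>>> x[4] = 1.0
>>> via_rfft = np.fft.irfft(ndimage.fourier_shift(np.fft.rfft(x), shift=3, n=16), n=16)
>>> via_fft = np.fft.ifft(ndimage.fourier_shift(np.fft.fft(x), shift=3)).real
>>> np.allclose(via_rfft, via_fft)
True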
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py ADDED
@@ -0,0 +1,1010 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import itertools
32
+ import warnings
33
+
34
+ import numpy
35
+ from scipy._lib._util import normalize_axis_index
36
+
37
+ from scipy import special
38
+ from . import _ni_support
39
+ from . import _nd_image
40
+ from ._ni_docstrings import docfiller
41
+
42
+
43
+ __all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
44
+ 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
45
+
46
+
47
+ @docfiller
48
+ def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
49
+ mode='mirror'):
50
+ """
51
+ Calculate a 1-D spline filter along the given axis.
52
+
53
+ The lines of the array along the given axis are filtered by a
54
+ spline filter. The order of the spline must be >= 2 and <= 5.
55
+
56
+ Parameters
57
+ ----------
58
+ %(input)s
59
+ order : int, optional
60
+ The order of the spline, default is 3.
61
+ axis : int, optional
62
+ The axis along which the spline filter is applied. Default is the last
63
+ axis.
64
+ output : ndarray or dtype, optional
65
+ The array in which to place the output, or the dtype of the returned
66
+ array. Default is ``numpy.float64``.
67
+ %(mode_interp_mirror)s
68
+
69
+ Returns
70
+ -------
71
+ spline_filter1d : ndarray
72
+ The filtered input.
73
+
74
+ See Also
75
+ --------
76
+ spline_filter : Multidimensional spline filter.
77
+
78
+ Notes
79
+ -----
80
+ All of the interpolation functions in `ndimage` do spline interpolation of
81
+ the input image. If using B-splines of `order > 1`, the input image
82
+ values have to be converted to B-spline coefficients first, which is
83
+ done by applying this 1-D filter sequentially along all
84
+ axes of the input. All functions that require B-spline coefficients
85
+ will automatically filter their inputs, a behavior controllable with
86
+ the `prefilter` keyword argument. For functions that accept a `mode`
87
+ parameter, the result will only be correct if it matches the `mode`
88
+ used when filtering.
89
+
90
+ For complex-valued `input`, this function processes the real and imaginary
91
+ components independently.
92
+
93
+ .. versionadded:: 1.6.0
94
+ Complex-valued support added.
95
+
96
+ Examples
97
+ --------
98
+ We can filter an image using a 1-D spline along the given axis:
99
+
100
+ >>> from scipy.ndimage import spline_filter1d
101
+ >>> import numpy as np
102
+ >>> import matplotlib.pyplot as plt
103
+ >>> orig_img = np.eye(20) # create an image
104
+ >>> orig_img[10, :] = 1.0
105
+ >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
106
+ >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
107
+ >>> f, ax = plt.subplots(1, 3, sharex=True)
108
+ >>> for ind, data in enumerate([[orig_img, "original image"],
109
+ ... [sp_filter_axis_0, "spline filter (axis=0)"],
110
+ ... [sp_filter_axis_1, "spline filter (axis=1)"]]):
111
+ ... ax[ind].imshow(data[0], cmap='gray_r')
112
+ ... ax[ind].set_title(data[1])
113
+ >>> plt.tight_layout()
114
+ >>> plt.show()
115
+
116
+ """
117
+ if order < 0 or order > 5:
118
+ raise RuntimeError('spline order not supported')
119
+ input = numpy.asarray(input)
120
+ complex_output = numpy.iscomplexobj(input)
121
+ output = _ni_support._get_output(output, input,
122
+ complex_output=complex_output)
123
+ if complex_output:
124
+ spline_filter1d(input.real, order, axis, output.real, mode)
125
+ spline_filter1d(input.imag, order, axis, output.imag, mode)
126
+ return output
127
+ if order in [0, 1]:
128
+ output[...] = numpy.array(input)
129
+ else:
130
+ mode = _ni_support._extend_mode_to_code(mode)
131
+ axis = normalize_axis_index(axis, input.ndim)
132
+ _nd_image.spline_filter1d(input, order, axis, output, mode)
133
+ return output
134
+
135
+ @docfiller
136
+ def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
137
+ """
138
+ Multidimensional spline filter.
139
+
140
+ Parameters
141
+ ----------
142
+ %(input)s
143
+ order : int, optional
144
+ The order of the spline, default is 3.
145
+ output : ndarray or dtype, optional
146
+ The array in which to place the output, or the dtype of the returned
147
+ array. Default is ``numpy.float64``.
148
+ %(mode_interp_mirror)s
149
+
150
+ Returns
151
+ -------
152
+ spline_filter : ndarray
153
+ Filtered array. Has the same shape as `input`.
154
+
155
+ See Also
156
+ --------
157
+ spline_filter1d : Calculate a 1-D spline filter along the given axis.
158
+
159
+ Notes
160
+ -----
161
+ The multidimensional filter is implemented as a sequence of
162
+ 1-D spline filters. The intermediate arrays are stored
163
+ in the same data type as the output. Therefore, for output types
164
+ with a limited precision, the results may be imprecise because
165
+ intermediate results may be stored with insufficient precision.
166
+
167
+ For complex-valued `input`, this function processes the real and imaginary
168
+ components independently.
169
+
170
+ .. versionadded:: 1.6.0
171
+ Complex-valued support added.
172
+
173
+ Examples
174
+ --------
175
+ We can filter an image using multidimensional splines:
176
+
177
+ >>> from scipy.ndimage import spline_filter
178
+ >>> import numpy as np
179
+ >>> import matplotlib.pyplot as plt
180
+ >>> orig_img = np.eye(20) # create an image
181
+ >>> orig_img[10, :] = 1.0
182
+ >>> sp_filter = spline_filter(orig_img, order=3)
183
+ >>> f, ax = plt.subplots(1, 2, sharex=True)
184
+ >>> for ind, data in enumerate([[orig_img, "original image"],
185
+ ... [sp_filter, "spline filter"]]):
186
+ ... ax[ind].imshow(data[0], cmap='gray_r')
187
+ ... ax[ind].set_title(data[1])
188
+ >>> plt.tight_layout()
189
+ >>> plt.show()
190
+
191
+ """
192
+ if order < 2 or order > 5:
193
+ raise RuntimeError('spline order not supported')
194
+ input = numpy.asarray(input)
195
+ complex_output = numpy.iscomplexobj(input)
196
+ output = _ni_support._get_output(output, input,
197
+ complex_output=complex_output)
198
+ if complex_output:
199
+ spline_filter(input.real, order, output.real, mode)
200
+ spline_filter(input.imag, order, output.imag, mode)
201
+ return output
202
+ if order not in [0, 1] and input.ndim > 0:
203
+ for axis in range(input.ndim):
204
+ spline_filter1d(input, order, axis, output=output, mode=mode)
205
+ input = output
206
+ else:
207
+ output[...] = input[...]
208
+ return output
209
+
210
+
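As the Notes say (and the loop above implements), the multidimensional filter is just `spline_filter1d` applied along each axis in turn. A minimal sketch of that equivalence (default `mirror` mode assumed):

>>> import numpy as np
>>> from scipy.ndimage import spline_filter, spline_filter1d
>>> a = np.random.default_rng(4).random((6, 6))
>>> step = spline_filter1d(a, order=3, axis=0)
>>> step = spline_filter1d(step, order=3, axis=1)
>>> np.allclose(spline_filter(a, order=3), step)
True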
211
+ def _prepad_for_spline_filter(input, mode, cval):
212
+ if mode in ['nearest', 'grid-constant']:
213
+ npad = 12
214
+ if mode == 'grid-constant':
215
+ padded = numpy.pad(input, npad, mode='constant',
216
+ constant_values=cval)
217
+ elif mode == 'nearest':
218
+ padded = numpy.pad(input, npad, mode='edge')
219
+ else:
220
+ # other modes have exact boundary conditions implemented so
221
+ # no prepadding is needed
222
+ npad = 0
223
+ padded = input
224
+ return padded, npad
225
+
226
+
227
+ @docfiller
228
+ def geometric_transform(input, mapping, output_shape=None,
229
+ output=None, order=3,
230
+ mode='constant', cval=0.0, prefilter=True,
231
+ extra_arguments=(), extra_keywords={}):
232
+ """
233
+ Apply an arbitrary geometric transform.
234
+
235
+ The given mapping function is used to find, for each point in the
236
+ output, the corresponding coordinates in the input. The value of the
237
+ input at those coordinates is determined by spline interpolation of
238
+ the requested order.
239
+
240
+ Parameters
241
+ ----------
242
+ %(input)s
243
+ mapping : {callable, scipy.LowLevelCallable}
244
+ A callable object that accepts a tuple of length equal to the output
245
+ array rank, and returns the corresponding input coordinates as a tuple
246
+ of length equal to the input array rank.
247
+ output_shape : tuple of ints, optional
248
+ Shape tuple.
249
+ %(output)s
250
+ order : int, optional
251
+ The order of the spline interpolation, default is 3.
252
+ The order has to be in the range 0-5.
253
+ %(mode_interp_constant)s
254
+ %(cval)s
255
+ %(prefilter)s
256
+ extra_arguments : tuple, optional
257
+ Extra arguments passed to `mapping`.
258
+ extra_keywords : dict, optional
259
+ Extra keywords passed to `mapping`.
260
+
261
+ Returns
262
+ -------
263
+ output : ndarray
264
+ The filtered input.
265
+
266
+ See Also
267
+ --------
268
+ map_coordinates, affine_transform, spline_filter1d
269
+
270
+
271
+ Notes
272
+ -----
273
+ This function also accepts low-level callback functions with one of
274
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
275
+
276
+ .. code:: c
277
+
278
+ int mapping(npy_intp *output_coordinates, double *input_coordinates,
279
+ int output_rank, int input_rank, void *user_data)
280
+ int mapping(intptr_t *output_coordinates, double *input_coordinates,
281
+ int output_rank, int input_rank, void *user_data)
282
+
283
+ The calling function iterates over the elements of the output array,
284
+ calling the callback function at each element. The coordinates of the
285
+ current output element are passed through ``output_coordinates``. The
286
+ callback function must return the coordinates at which the input must
287
+ be interpolated in ``input_coordinates``. The rank of the input and
288
+ output arrays are given by ``input_rank`` and ``output_rank``
289
+ respectively. ``user_data`` is the data pointer provided
290
+ to `scipy.LowLevelCallable` as-is.
291
+
292
+ The callback function must return an integer error status that is zero
293
+ if something went wrong and one otherwise. If an error occurs, you should
294
+ normally set the Python error status with an informative message
295
+ before returning, otherwise a default error message is set by the
296
+ calling function.
297
+
298
+ In addition, some other low-level function pointer specifications
299
+ are accepted, but these are for backward compatibility only and should
300
+ not be used in new code.
301
+
302
+ For complex-valued `input`, this function transforms the real and imaginary
303
+ components independently.
304
+
305
+ .. versionadded:: 1.6.0
306
+ Complex-valued support added.
307
+
308
+ Examples
309
+ --------
310
+ >>> import numpy as np
311
+ >>> from scipy.ndimage import geometric_transform
312
+ >>> a = np.arange(12.).reshape((4, 3))
313
+ >>> def shift_func(output_coords):
314
+ ... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
315
+ ...
316
+ >>> geometric_transform(a, shift_func)
317
+ array([[ 0. , 0. , 0. ],
318
+ [ 0. , 1.362, 2.738],
319
+ [ 0. , 4.812, 6.187],
320
+ [ 0. , 8.263, 9.637]])
321
+
322
+ >>> b = [1, 2, 3, 4, 5]
323
+ >>> def shift_func(output_coords):
324
+ ... return (output_coords[0] - 3,)
325
+ ...
326
+ >>> geometric_transform(b, shift_func, mode='constant')
327
+ array([0, 0, 0, 1, 2])
328
+ >>> geometric_transform(b, shift_func, mode='nearest')
329
+ array([1, 1, 1, 1, 2])
330
+ >>> geometric_transform(b, shift_func, mode='reflect')
331
+ array([3, 2, 1, 1, 2])
332
+ >>> geometric_transform(b, shift_func, mode='wrap')
333
+ array([2, 3, 4, 1, 2])
334
+
335
+ """
336
+ if order < 0 or order > 5:
337
+ raise RuntimeError('spline order not supported')
338
+ input = numpy.asarray(input)
339
+ if output_shape is None:
340
+ output_shape = input.shape
341
+ if input.ndim < 1 or len(output_shape) < 1:
342
+ raise RuntimeError('input and output rank must be > 0')
343
+ complex_output = numpy.iscomplexobj(input)
344
+ output = _ni_support._get_output(output, input, shape=output_shape,
345
+ complex_output=complex_output)
346
+ if complex_output:
347
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter,
348
+ output_shape=output_shape,
349
+ extra_arguments=extra_arguments,
350
+ extra_keywords=extra_keywords)
351
+ geometric_transform(input.real, mapping, output=output.real,
352
+ cval=numpy.real(cval), **kwargs)
353
+ geometric_transform(input.imag, mapping, output=output.imag,
354
+ cval=numpy.imag(cval), **kwargs)
355
+ return output
356
+
357
+ if prefilter and order > 1:
358
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
359
+ filtered = spline_filter(padded, order, output=numpy.float64,
360
+ mode=mode)
361
+ else:
362
+ npad = 0
363
+ filtered = input
364
+ mode = _ni_support._extend_mode_to_code(mode)
365
+ _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
366
+ order, mode, cval, npad, extra_arguments,
367
+ extra_keywords)
368
+ return output
369
+
370
+
371
+ @docfiller
372
+ def map_coordinates(input, coordinates, output=None, order=3,
373
+ mode='constant', cval=0.0, prefilter=True):
374
+ """
375
+ Map the input array to new coordinates by interpolation.
376
+
377
+ The array of coordinates is used to find, for each point in the output,
378
+ the corresponding coordinates in the input. The value of the input at
379
+ those coordinates is determined by spline interpolation of the
380
+ requested order.
381
+
382
+ The shape of the output is derived from that of the coordinate
383
+ array by dropping the first axis. The values of the array along
384
+ the first axis are the coordinates in the input array at which the
385
+ output value is found.
386
+
387
+ Parameters
388
+ ----------
389
+ %(input)s
390
+ coordinates : array_like
391
+ The coordinates at which `input` is evaluated.
392
+ %(output)s
393
+ order : int, optional
394
+ The order of the spline interpolation, default is 3.
395
+ The order has to be in the range 0-5.
396
+ %(mode_interp_constant)s
397
+ %(cval)s
398
+ %(prefilter)s
399
+
400
+ Returns
401
+ -------
402
+ map_coordinates : ndarray
403
+ The result of transforming the input. The shape of the output is
404
+ derived from that of `coordinates` by dropping the first axis.
405
+
406
+ See Also
407
+ --------
408
+ spline_filter, geometric_transform, scipy.interpolate
409
+
410
+ Notes
411
+ -----
412
+ For complex-valued `input`, this function maps the real and imaginary
413
+ components independently.
414
+
415
+ .. versionadded:: 1.6.0
416
+ Complex-valued support added.
417
+
418
+ Examples
419
+ --------
420
+ >>> from scipy import ndimage
421
+ >>> import numpy as np
422
+ >>> a = np.arange(12.).reshape((4, 3))
423
+ >>> a
424
+ array([[ 0., 1., 2.],
425
+ [ 3., 4., 5.],
426
+ [ 6., 7., 8.],
427
+ [ 9., 10., 11.]])
428
+ >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
429
+ array([ 2., 7.])
430
+
431
+ Above, the interpolated value of a[0.5, 0.5] gives output[0], while
432
+ a[2, 1] is output[1].
433
+
434
+ >>> inds = np.array([[0.5, 2], [0.5, 4]])
435
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
436
+ array([ 2. , -33.3])
437
+ >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
438
+ array([ 2., 8.])
439
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
440
+ array([ True, False], dtype=bool)
441
+
442
+ """
443
+ if order < 0 or order > 5:
444
+ raise RuntimeError('spline order not supported')
445
+ input = numpy.asarray(input)
446
+ coordinates = numpy.asarray(coordinates)
447
+ if numpy.iscomplexobj(coordinates):
448
+ raise TypeError('Complex type not supported')
449
+ output_shape = coordinates.shape[1:]
450
+ if input.ndim < 1 or len(output_shape) < 1:
451
+ raise RuntimeError('input and output rank must be > 0')
452
+ if coordinates.shape[0] != input.ndim:
453
+ raise RuntimeError('invalid shape for coordinate array')
454
+ complex_output = numpy.iscomplexobj(input)
455
+ output = _ni_support._get_output(output, input, shape=output_shape,
456
+ complex_output=complex_output)
457
+ if complex_output:
458
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
459
+ map_coordinates(input.real, coordinates, output=output.real,
460
+ cval=numpy.real(cval), **kwargs)
461
+ map_coordinates(input.imag, coordinates, output=output.imag,
462
+ cval=numpy.imag(cval), **kwargs)
463
+ return output
464
+ if prefilter and order > 1:
465
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
466
+ filtered = spline_filter(padded, order, output=numpy.float64,
467
+ mode=mode)
468
+ else:
469
+ npad = 0
470
+ filtered = input
471
+ mode = _ni_support._extend_mode_to_code(mode)
472
+ _nd_image.geometric_transform(filtered, None, coordinates, None, None,
473
+ output, order, mode, cval, npad, None, None)
474
+ return output
475
+
476
+
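An illustrative check (not part of the diff) of the complex-valued behaviour described in the Notes above: mapping a complex array should agree with mapping its real and imaginary parts separately, which is exactly what the complex_output branch does.

import numpy as np
from scipy import ndimage

a = np.arange(12.).reshape(4, 3) + 1j * np.arange(12.)[::-1].reshape(4, 3)
coords = [[0.5, 2.0], [0.5, 1.0]]

full = ndimage.map_coordinates(a, coords, order=1)
split = (ndimage.map_coordinates(a.real, coords, order=1)
         + 1j * ndimage.map_coordinates(a.imag, coords, order=1))
assert np.allclose(full, split)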
477
+ @docfiller
478
+ def affine_transform(input, matrix, offset=0.0, output_shape=None,
479
+ output=None, order=3,
480
+ mode='constant', cval=0.0, prefilter=True):
481
+ """
482
+ Apply an affine transformation.
483
+
484
+ Given an output image pixel index vector ``o``, the pixel value
485
+ is determined from the input image at position
486
+ ``np.dot(matrix, o) + offset``.
487
+
488
+ This does 'pull' (or 'backward') resampling, transforming the output space
489
+ to the input to locate data. Affine transformations are often described in
490
+ the 'push' (or 'forward') direction, transforming input to output. If you
491
+ have a matrix for the 'push' transformation, use its inverse
492
+ (:func:`numpy.linalg.inv`) in this function.
493
+
494
+ Parameters
495
+ ----------
496
+ %(input)s
497
+ matrix : ndarray
498
+ The inverse coordinate transformation matrix, mapping output
499
+ coordinates to input coordinates. If ``ndim`` is the number of
500
+ dimensions of ``input``, the given matrix must have one of the
501
+ following shapes:
502
+
503
+ - ``(ndim, ndim)``: the linear transformation matrix for each
504
+ output coordinate.
505
+ - ``(ndim,)``: assume that the 2-D transformation matrix is
506
+ diagonal, with the diagonal specified by the given value. A more
507
+ efficient algorithm is then used that exploits the separability
508
+ of the problem.
509
+ - ``(ndim + 1, ndim + 1)``: assume that the transformation is
510
+ specified using homogeneous coordinates [1]_. In this case, any
511
+ value passed to ``offset`` is ignored.
512
+ - ``(ndim, ndim + 1)``: as above, but the bottom row of a
513
+ homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
514
+ and may be omitted.
515
+
516
+ offset : float or sequence, optional
517
+ The offset into the array where the transform is applied. If a float,
518
+ `offset` is the same for each axis. If a sequence, `offset` should
519
+ contain one value for each axis.
520
+ output_shape : tuple of ints, optional
521
+ Shape tuple.
522
+ %(output)s
523
+ order : int, optional
524
+ The order of the spline interpolation, default is 3.
525
+ The order has to be in the range 0-5.
526
+ %(mode_interp_constant)s
527
+ %(cval)s
528
+ %(prefilter)s
529
+
530
+ Returns
531
+ -------
532
+ affine_transform : ndarray
533
+ The transformed input.
534
+
535
+ Notes
536
+ -----
537
+ The given matrix and offset are used to find for each point in the
538
+ output the corresponding coordinates in the input by an affine
539
+ transformation. The value of the input at those coordinates is
540
+ determined by spline interpolation of the requested order. Points
541
+ outside the boundaries of the input are filled according to the given
542
+ mode.
543
+
544
+ .. versionchanged:: 0.18.0
545
+ Previously, the exact interpretation of the affine transformation
546
+ depended on whether the matrix was supplied as a 1-D or a
547
+ 2-D array. If a 1-D array was supplied
548
+ to the matrix parameter, the output pixel value at index ``o``
549
+ was determined from the input image at position
550
+ ``matrix * (o + offset)``.
551
+
552
+ For complex-valued `input`, this function transforms the real and imaginary
553
+ components independently.
554
+
555
+ .. versionadded:: 1.6.0
556
+ Complex-valued support added.
557
+
558
+ References
559
+ ----------
560
+ .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
561
+ """
562
+ if order < 0 or order > 5:
563
+ raise RuntimeError('spline order not supported')
564
+ input = numpy.asarray(input)
565
+ if output_shape is None:
566
+ if isinstance(output, numpy.ndarray):
567
+ output_shape = output.shape
568
+ else:
569
+ output_shape = input.shape
570
+ if input.ndim < 1 or len(output_shape) < 1:
571
+ raise RuntimeError('input and output rank must be > 0')
572
+ complex_output = numpy.iscomplexobj(input)
573
+ output = _ni_support._get_output(output, input, shape=output_shape,
574
+ complex_output=complex_output)
575
+ if complex_output:
576
+ kwargs = dict(offset=offset, output_shape=output_shape, order=order,
577
+ mode=mode, prefilter=prefilter)
578
+ affine_transform(input.real, matrix, output=output.real,
579
+ cval=numpy.real(cval), **kwargs)
580
+ affine_transform(input.imag, matrix, output=output.imag,
581
+ cval=numpy.imag(cval), **kwargs)
582
+ return output
583
+ if prefilter and order > 1:
584
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
585
+ filtered = spline_filter(padded, order, output=numpy.float64,
586
+ mode=mode)
587
+ else:
588
+ npad = 0
589
+ filtered = input
590
+ mode = _ni_support._extend_mode_to_code(mode)
591
+ matrix = numpy.asarray(matrix, dtype=numpy.float64)
592
+ if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
593
+ raise RuntimeError('no proper affine matrix provided')
594
+ if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
595
+ (matrix.shape[0] in [input.ndim, input.ndim + 1])):
596
+ if matrix.shape[0] == input.ndim + 1:
597
+ exptd = [0] * input.ndim + [1]
598
+ if not numpy.all(matrix[input.ndim] == exptd):
599
+ msg = ('Expected homogeneous transformation matrix with '
600
+ 'shape {} for image shape {}, but bottom row was '
601
+ 'not equal to {}'.format(matrix.shape, input.shape, exptd))
602
+ raise ValueError(msg)
603
+ # assume input is homogeneous coordinate transformation matrix
604
+ offset = matrix[:input.ndim, input.ndim]
605
+ matrix = matrix[:input.ndim, :input.ndim]
606
+ if matrix.shape[0] != input.ndim:
607
+ raise RuntimeError('affine matrix has wrong number of rows')
608
+ if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
609
+ raise RuntimeError('affine matrix has wrong number of columns')
610
+ if not matrix.flags.contiguous:
611
+ matrix = matrix.copy()
612
+ offset = _ni_support._normalize_sequence(offset, input.ndim)
613
+ offset = numpy.asarray(offset, dtype=numpy.float64)
614
+ if offset.ndim != 1 or offset.shape[0] < 1:
615
+ raise RuntimeError('no proper offset provided')
616
+ if not offset.flags.contiguous:
617
+ offset = offset.copy()
618
+ if matrix.ndim == 1:
619
+ warnings.warn(
620
+ "The behavior of affine_transform with a 1-D "
621
+ "array supplied for the matrix parameter has changed in "
622
+ "SciPy 0.18.0.",
623
+ stacklevel=2
624
+ )
625
+ _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
626
+ mode, cval, npad, False)
627
+ else:
628
+ _nd_image.geometric_transform(filtered, None, None, matrix, offset,
629
+ output, order, mode, cval, npad, None,
630
+ None)
631
+ return output
632
+
633
+
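A small editorial sketch of the matrix shapes accepted above: a (2, 2) matrix with an explicit `offset` should agree with a single (3, 3) homogeneous matrix whose last column carries the offset, since the homogeneous branch simply splits it back apart.

import numpy as np
from scipy import ndimage

img = np.arange(25.).reshape(5, 5)

matrix = np.array([[0., 1.],
                   [1., 0.]])        # swap the two axes
offset = [1., 0.]

homogeneous = np.eye(3)
homogeneous[:2, :2] = matrix
homogeneous[:2, 2] = offset          # bottom row stays [0, 0, 1]

out_a = ndimage.affine_transform(img, matrix, offset=offset, order=1)
out_b = ndimage.affine_transform(img, homogeneous, order=1)
assert np.allclose(out_a, out_b)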
634
+ @docfiller
635
+ def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
636
+ prefilter=True):
637
+ """
638
+ Shift an array.
639
+
640
+ The array is shifted using spline interpolation of the requested order.
641
+ Points outside the boundaries of the input are filled according to the
642
+ given mode.
643
+
644
+ Parameters
645
+ ----------
646
+ %(input)s
647
+ shift : float or sequence
648
+ The shift along the axes. If a float, `shift` is the same for each
649
+ axis. If a sequence, `shift` should contain one value for each axis.
650
+ %(output)s
651
+ order : int, optional
652
+ The order of the spline interpolation, default is 3.
653
+ The order has to be in the range 0-5.
654
+ %(mode_interp_constant)s
655
+ %(cval)s
656
+ %(prefilter)s
657
+
658
+ Returns
659
+ -------
660
+ shift : ndarray
661
+ The shifted input.
662
+
663
+ See Also
664
+ --------
665
+ affine_transform : Affine transformations
666
+
667
+ Notes
668
+ -----
669
+ For complex-valued `input`, this function shifts the real and imaginary
670
+ components independently.
671
+
672
+ .. versionadded:: 1.6.0
673
+ Complex-valued support added.
674
+
675
+ Examples
676
+ --------
677
+ Import the necessary modules and an exemplary image.
678
+
679
+ >>> from scipy.ndimage import shift
680
+ >>> import matplotlib.pyplot as plt
681
+ >>> from scipy import datasets
682
+ >>> image = datasets.ascent()
683
+
684
+ Shift the image vertically by 20 pixels.
685
+
686
+ >>> image_shifted_vertically = shift(image, (20, 0))
687
+
688
+ Shift the image vertically by -200 pixels and horizontally by 100 pixels.
689
+
690
+ >>> image_shifted_both_directions = shift(image, (-200, 100))
691
+
692
+ Plot the original and the shifted images.
693
+
694
+ >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
695
+ >>> plt.gray() # show the filtered result in grayscale
696
+ >>> top, middle, bottom = axes
697
+ >>> for ax in axes:
698
+ ... ax.set_axis_off() # remove coordinate system
699
+ >>> top.imshow(image)
700
+ >>> top.set_title("Original image")
701
+ >>> middle.imshow(image_shifted_vertically)
702
+ >>> middle.set_title("Vertically shifted image")
703
+ >>> bottom.imshow(image_shifted_both_directions)
704
+ >>> bottom.set_title("Image shifted in both directions")
705
+ >>> fig.tight_layout()
706
+ """
707
+ if order < 0 or order > 5:
708
+ raise RuntimeError('spline order not supported')
709
+ input = numpy.asarray(input)
710
+ if input.ndim < 1:
711
+ raise RuntimeError('input and output rank must be > 0')
712
+ complex_output = numpy.iscomplexobj(input)
713
+ output = _ni_support._get_output(output, input,
714
+ complex_output=complex_output)
715
+ if complex_output:
716
+ # import under different name to avoid confusion with shift parameter
717
+ from scipy.ndimage._interpolation import shift as _shift
718
+
719
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
720
+ _shift(input.real, shift, output=output.real, cval=numpy.real(cval),
721
+ **kwargs)
722
+ _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
723
+ **kwargs)
724
+ return output
725
+ if prefilter and order > 1:
726
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
727
+ filtered = spline_filter(padded, order, output=numpy.float64,
728
+ mode=mode)
729
+ else:
730
+ npad = 0
731
+ filtered = input
732
+ mode = _ni_support._extend_mode_to_code(mode)
733
+ shift = _ni_support._normalize_sequence(shift, input.ndim)
734
+ shift = [-ii for ii in shift]
735
+ shift = numpy.asarray(shift, dtype=numpy.float64)
736
+ if not shift.flags.contiguous:
737
+ shift = shift.copy()
738
+ _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
739
+ npad, False)
740
+ return output
741
+
742
+
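A quick illustration (not part of the uploaded source) of the subpixel behaviour: with `order=1` a half-sample shift averages neighbouring samples, and the leading element comes from the constant padding (which here coincides with the first sample).

import numpy as np
from scipy import ndimage

x = np.array([0., 1., 4., 9., 16.])
shifted = ndimage.shift(x, 0.5, order=1, cval=0.0)
# output[i] samples input[i - 0.5], i.e. the midpoint of each pair
assert np.allclose(shifted, [0.0, 0.5, 2.5, 6.5, 12.5])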
743
+ @docfiller
744
+ def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
745
+ prefilter=True, *, grid_mode=False):
746
+ """
747
+ Zoom an array.
748
+
749
+ The array is zoomed using spline interpolation of the requested order.
750
+
751
+ Parameters
752
+ ----------
753
+ %(input)s
754
+ zoom : float or sequence
755
+ The zoom factor along the axes. If a float, `zoom` is the same for each
756
+ axis. If a sequence, `zoom` should contain one value for each axis.
757
+ %(output)s
758
+ order : int, optional
759
+ The order of the spline interpolation, default is 3.
760
+ The order has to be in the range 0-5.
761
+ %(mode_interp_constant)s
762
+ %(cval)s
763
+ %(prefilter)s
764
+ grid_mode : bool, optional
765
+ If False, the distance from the pixel centers is zoomed. Otherwise, the
766
+ distance including the full pixel extent is used. For example, a 1d
767
+ signal of length 5 is considered to have length 4 when `grid_mode` is
768
+ False, but length 5 when `grid_mode` is True. See the following
769
+ visual illustration:
770
+
771
+ .. code-block:: text
772
+
773
+ | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
774
+ |<-------------------------------------->|
775
+ vs.
776
+ |<----------------------------------------------->|
777
+
778
+ The starting point of the arrow in the diagram above corresponds to
779
+ coordinate location 0 in each mode.
780
+
781
+ Returns
782
+ -------
783
+ zoom : ndarray
784
+ The zoomed input.
785
+
786
+ Notes
787
+ -----
788
+ For complex-valued `input`, this function zooms the real and imaginary
789
+ components independently.
790
+
791
+ .. versionadded:: 1.6.0
792
+ Complex-valued support added.
793
+
794
+ Examples
795
+ --------
796
+ >>> from scipy import ndimage, datasets
797
+ >>> import matplotlib.pyplot as plt
798
+
799
+ >>> fig = plt.figure()
800
+ >>> ax1 = fig.add_subplot(121) # left side
801
+ >>> ax2 = fig.add_subplot(122) # right side
802
+ >>> ascent = datasets.ascent()
803
+ >>> result = ndimage.zoom(ascent, 3.0)
804
+ >>> ax1.imshow(ascent, vmin=0, vmax=255)
805
+ >>> ax2.imshow(result, vmin=0, vmax=255)
806
+ >>> plt.show()
807
+
808
+ >>> print(ascent.shape)
809
+ (512, 512)
810
+
811
+ >>> print(result.shape)
812
+ (1536, 1536)
813
+ """
814
+ if order < 0 or order > 5:
815
+ raise RuntimeError('spline order not supported')
816
+ input = numpy.asarray(input)
817
+ if input.ndim < 1:
818
+ raise RuntimeError('input and output rank must be > 0')
819
+ zoom = _ni_support._normalize_sequence(zoom, input.ndim)
820
+ output_shape = tuple(
821
+ [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
822
+ complex_output = numpy.iscomplexobj(input)
823
+ output = _ni_support._get_output(output, input, shape=output_shape,
824
+ complex_output=complex_output)
825
+ if complex_output:
826
+ # import under different name to avoid confusion with zoom parameter
827
+ from scipy.ndimage._interpolation import zoom as _zoom
828
+
829
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
830
+ _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
831
+ **kwargs)
832
+ _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
833
+ **kwargs)
834
+ return output
835
+ if prefilter and order > 1:
836
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
837
+ filtered = spline_filter(padded, order, output=numpy.float64,
838
+ mode=mode)
839
+ else:
840
+ npad = 0
841
+ filtered = input
842
+ if grid_mode:
843
+ # warn about modes that may have surprising behavior
844
+ suggest_mode = None
845
+ if mode == 'constant':
846
+ suggest_mode = 'grid-constant'
847
+ elif mode == 'wrap':
848
+ suggest_mode = 'grid-wrap'
849
+ if suggest_mode is not None:
850
+ warnings.warn(
851
+ ("It is recommended to use mode = {} instead of {} when "
852
+ "grid_mode is True.").format(suggest_mode, mode),
853
+ stacklevel=2
854
+ )
855
+ mode = _ni_support._extend_mode_to_code(mode)
856
+
857
+ zoom_div = numpy.array(output_shape)
858
+ zoom_nominator = numpy.array(input.shape)
859
+ if not grid_mode:
860
+ zoom_div -= 1
861
+ zoom_nominator -= 1
862
+
863
+ # Zooming to infinite values is unpredictable, so just choose
864
+ # zoom factor 1 instead
865
+ zoom = numpy.divide(zoom_nominator, zoom_div,
866
+ out=numpy.ones_like(input.shape, dtype=numpy.float64),
867
+ where=zoom_div != 0)
868
+ zoom = numpy.ascontiguousarray(zoom)
869
+ _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
870
+ grid_mode)
871
+ return output
872
+
873
+
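An editorial sketch of the two conventions behind the ``zoom_nominator / zoom_div`` computation above: with `grid_mode=False` the factor is ``(n - 1) / (m - 1)`` and the end samples stay fixed, while `grid_mode=True` uses ``n / m`` over the full pixel extent.

import numpy as np
from scipy import ndimage

x = np.array([0., 1., 2., 3.])

# grid_mode=False: the ramp is resampled between the original end points.
centres = ndimage.zoom(x, 2, order=1, grid_mode=False)
assert np.allclose(centres, np.linspace(0., 3., 8))

# grid_mode=True: uniform half-sample steps over the full extent; edge
# values depend on the padding, hence the recommended 'grid-constant'.
extent = ndimage.zoom(x, 2, order=1, grid_mode=True, mode='grid-constant')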
874
+ @docfiller
875
+ def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
876
+ mode='constant', cval=0.0, prefilter=True):
877
+ """
878
+ Rotate an array.
879
+
880
+ The array is rotated in the plane defined by the two axes given by the
881
+ `axes` parameter using spline interpolation of the requested order.
882
+
883
+ Parameters
884
+ ----------
885
+ %(input)s
886
+ angle : float
887
+ The rotation angle in degrees.
888
+ axes : tuple of 2 ints, optional
889
+ The two axes that define the plane of rotation. Default is the first
890
+ two axes.
891
+ reshape : bool, optional
892
+ If `reshape` is true, the output shape is adapted so that the input
893
+ array is contained completely in the output. Default is True.
894
+ %(output)s
895
+ order : int, optional
896
+ The order of the spline interpolation, default is 3.
897
+ The order has to be in the range 0-5.
898
+ %(mode_interp_constant)s
899
+ %(cval)s
900
+ %(prefilter)s
901
+
902
+ Returns
903
+ -------
904
+ rotate : ndarray
905
+ The rotated input.
906
+
907
+ Notes
908
+ -----
909
+ For complex-valued `input`, this function rotates the real and imaginary
910
+ components independently.
911
+
912
+ .. versionadded:: 1.6.0
913
+ Complex-valued support added.
914
+
915
+ Examples
916
+ --------
917
+ >>> from scipy import ndimage, datasets
918
+ >>> import matplotlib.pyplot as plt
919
+ >>> fig = plt.figure(figsize=(10, 3))
920
+ >>> ax1, ax2, ax3 = fig.subplots(1, 3)
921
+ >>> img = datasets.ascent()
922
+ >>> img_45 = ndimage.rotate(img, 45, reshape=False)
923
+ >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
924
+ >>> ax1.imshow(img, cmap='gray')
925
+ >>> ax1.set_axis_off()
926
+ >>> ax2.imshow(img_45, cmap='gray')
927
+ >>> ax2.set_axis_off()
928
+ >>> ax3.imshow(full_img_45, cmap='gray')
929
+ >>> ax3.set_axis_off()
930
+ >>> fig.set_layout_engine('tight')
931
+ >>> plt.show()
932
+ >>> print(img.shape)
933
+ (512, 512)
934
+ >>> print(img_45.shape)
935
+ (512, 512)
936
+ >>> print(full_img_45.shape)
937
+ (724, 724)
938
+
939
+ """
940
+ input_arr = numpy.asarray(input)
941
+ ndim = input_arr.ndim
942
+
943
+ if ndim < 2:
944
+ raise ValueError('input array should be at least 2D')
945
+
946
+ axes = list(axes)
947
+
948
+ if len(axes) != 2:
949
+ raise ValueError('axes should contain exactly two values')
950
+
951
+ if not all([float(ax).is_integer() for ax in axes]):
952
+ raise ValueError('axes should contain only integer values')
953
+
954
+ if axes[0] < 0:
955
+ axes[0] += ndim
956
+ if axes[1] < 0:
957
+ axes[1] += ndim
958
+ if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
959
+ raise ValueError('invalid rotation plane specified')
960
+
961
+ axes.sort()
962
+
963
+ c, s = special.cosdg(angle), special.sindg(angle)
964
+
965
+ rot_matrix = numpy.array([[c, s],
966
+ [-s, c]])
967
+
968
+ img_shape = numpy.asarray(input_arr.shape)
969
+ in_plane_shape = img_shape[axes]
970
+ if reshape:
971
+ # Compute transformed input bounds
972
+ iy, ix = in_plane_shape
973
+ out_bounds = rot_matrix @ [[0, 0, iy, iy],
974
+ [0, ix, 0, ix]]
975
+ # Compute the shape of the transformed input plane
976
+ out_plane_shape = (numpy.ptp(out_bounds, axis=1) + 0.5).astype(int)
977
+ else:
978
+ out_plane_shape = img_shape[axes]
979
+
980
+ out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
981
+ in_center = (in_plane_shape - 1) / 2
982
+ offset = in_center - out_center
983
+
984
+ output_shape = img_shape
985
+ output_shape[axes] = out_plane_shape
986
+ output_shape = tuple(output_shape)
987
+
988
+ complex_output = numpy.iscomplexobj(input_arr)
989
+ output = _ni_support._get_output(output, input_arr, shape=output_shape,
990
+ complex_output=complex_output)
991
+
992
+ if ndim <= 2:
993
+ affine_transform(input_arr, rot_matrix, offset, output_shape, output,
994
+ order, mode, cval, prefilter)
995
+ else:
996
+ # If ndim > 2, the rotation is applied over all the planes
997
+ # parallel to axes
998
+ planes_coord = itertools.product(
999
+ *[[slice(None)] if ax in axes else range(img_shape[ax])
1000
+ for ax in range(ndim)])
1001
+
1002
+ out_plane_shape = tuple(out_plane_shape)
1003
+
1004
+ for coordinates in planes_coord:
1005
+ ia = input_arr[coordinates]
1006
+ oa = output[coordinates]
1007
+ affine_transform(ia, rot_matrix, offset, out_plane_shape,
1008
+ oa, order, mode, cval, prefilter)
1009
+
1010
+ return output
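To close out this file, an illustrative check (not from the diff) of the plane-wise loop in `rotate` for ``ndim > 2``: rotating a stack in the plane of its last two axes should match rotating each 2-D slice on its own.

import numpy as np
from scipy import ndimage

stack = np.random.default_rng(0).random((3, 16, 16))
rotated = ndimage.rotate(stack, 30, axes=(1, 2), reshape=False, order=1)
per_slice = np.stack([ndimage.rotate(s, 30, reshape=False, order=1)
                      for s in stack])
assert np.allclose(rotated, per_slice)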
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_measurements.py ADDED
@@ -0,0 +1,1681 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import numpy
32
+ import numpy as np
33
+ from . import _ni_support
34
+ from . import _ni_label
35
+ from . import _nd_image
36
+ from . import _morphology
37
+
38
+ __all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
39
+ 'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
40
+ 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
41
+ 'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
42
+
43
+
44
+ def label(input, structure=None, output=None):
45
+ """
46
+ Label features in an array.
47
+
48
+ Parameters
49
+ ----------
50
+ input : array_like
51
+ An array-like object to be labeled. Any non-zero values in `input` are
52
+ counted as features and zero values are considered the background.
53
+ structure : array_like, optional
54
+ A structuring element that defines feature connections.
55
+ `structure` must be centrosymmetric
56
+ (see Notes).
57
+ If no structuring element is provided,
58
+ one is automatically generated with a squared connectivity equal to
59
+ one. That is, for a 2-D `input` array, the default structuring element
60
+ is::
61
+
62
+ [[0,1,0],
63
+ [1,1,1],
64
+ [0,1,0]]
65
+
66
+ output : (None, data-type, array_like), optional
67
+ If `output` is a data type, it specifies the type of the resulting
68
+ labeled feature array.
69
+ If `output` is an array-like object, then `output` will be updated
70
+ with the labeled features from this function. This function can
71
+ operate in-place, by passing output=input.
72
+ Note that the output must be able to store the largest label, or this
73
+ function will raise an Exception.
74
+
75
+ Returns
76
+ -------
77
+ label : ndarray or int
78
+ An integer ndarray where each unique feature in `input` has a unique
79
+ label in the returned array.
80
+ num_features : int
81
+ How many objects were found.
82
+
83
+ If `output` is None, this function returns a tuple of
84
+ (`labeled_array`, `num_features`).
85
+
86
+ If `output` is a ndarray, then it will be updated with values in
87
+ `labeled_array` and only `num_features` will be returned by this
88
+ function.
89
+
90
+ See Also
91
+ --------
92
+ find_objects : generate a list of slices for the labeled features (or
93
+ objects); useful for finding features' position or
94
+ dimensions
95
+
96
+ Notes
97
+ -----
98
+ A centrosymmetric matrix is a matrix that is symmetric about the center.
99
+ See [1]_ for more information.
100
+
101
+ The `structure` matrix must be centrosymmetric to ensure
102
+ two-way connections.
103
+ For instance, if the `structure` matrix is not centrosymmetric
104
+ and is defined as::
105
+
106
+ [[0,1,0],
107
+ [1,1,0],
108
+ [0,0,0]]
109
+
110
+ and the `input` is::
111
+
112
+ [[1,2],
113
+ [0,3]]
114
+
115
+ then the structure matrix would indicate the
116
+ entry 2 in the input is connected to 1,
117
+ but 1 is not connected to 2.
118
+
119
+ References
120
+ ----------
121
+ .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
122
+ matrices, their basic properties, eigenvalues, and
123
+ eigenvectors." The American Mathematical Monthly 92.10
124
+ (1985): 711-717.
125
+
126
+ Examples
127
+ --------
128
+ Create an image with some features, then label it using the default
129
+ (cross-shaped) structuring element:
130
+
131
+ >>> from scipy.ndimage import label, generate_binary_structure
132
+ >>> import numpy as np
133
+ >>> a = np.array([[0,0,1,1,0,0],
134
+ ... [0,0,0,1,0,0],
135
+ ... [1,1,0,0,1,0],
136
+ ... [0,0,0,1,0,0]])
137
+ >>> labeled_array, num_features = label(a)
138
+
139
+ Each of the 4 features are labeled with a different integer:
140
+
141
+ >>> num_features
142
+ 4
143
+ >>> labeled_array
144
+ array([[0, 0, 1, 1, 0, 0],
145
+ [0, 0, 0, 1, 0, 0],
146
+ [2, 2, 0, 0, 3, 0],
147
+ [0, 0, 0, 4, 0, 0]])
148
+
149
+ Generate a structuring element that will consider features connected even
150
+ if they touch diagonally:
151
+
152
+ >>> s = generate_binary_structure(2,2)
153
+
154
+ or,
155
+
156
+ >>> s = [[1,1,1],
157
+ ... [1,1,1],
158
+ ... [1,1,1]]
159
+
160
+ Label the image using the new structuring element:
161
+
162
+ >>> labeled_array, num_features = label(a, structure=s)
163
+
164
+ Show the 2 labeled features (note that features 1, 3, and 4 from above are
165
+ now considered a single feature):
166
+
167
+ >>> num_features
168
+ 2
169
+ >>> labeled_array
170
+ array([[0, 0, 1, 1, 0, 0],
171
+ [0, 0, 0, 1, 0, 0],
172
+ [2, 2, 0, 0, 1, 0],
173
+ [0, 0, 0, 1, 0, 0]])
174
+
175
+ """
176
+ input = numpy.asarray(input)
177
+ if numpy.iscomplexobj(input):
178
+ raise TypeError('Complex type not supported')
179
+ if structure is None:
180
+ structure = _morphology.generate_binary_structure(input.ndim, 1)
181
+ structure = numpy.asarray(structure, dtype=bool)
182
+ if structure.ndim != input.ndim:
183
+ raise RuntimeError('structure and input must have equal rank')
184
+ for ii in structure.shape:
185
+ if ii != 3:
186
+ raise ValueError('structure dimensions must be equal to 3')
187
+
188
+ # Use 32 bits if it's large enough for this image.
189
+ # _ni_label.label() needs two entries for background and
190
+ # foreground tracking
191
+ need_64bits = input.size >= (2**31 - 2)
192
+
193
+ if isinstance(output, numpy.ndarray):
194
+ if output.shape != input.shape:
195
+ raise ValueError("output shape not correct")
196
+ caller_provided_output = True
197
+ else:
198
+ caller_provided_output = False
199
+ if output is None:
200
+ output = np.empty(input.shape, np.intp if need_64bits else np.int32)
201
+ else:
202
+ output = np.empty(input.shape, output)
203
+
204
+ # handle scalars, 0-D arrays
205
+ if input.ndim == 0 or input.size == 0:
206
+ if input.ndim == 0:
207
+ # scalar
208
+ maxlabel = 1 if (input != 0) else 0
209
+ output[...] = maxlabel
210
+ else:
211
+ # 0-D
212
+ maxlabel = 0
213
+ if caller_provided_output:
214
+ return maxlabel
215
+ else:
216
+ return output, maxlabel
217
+
218
+ try:
219
+ max_label = _ni_label._label(input, structure, output)
220
+ except _ni_label.NeedMoreBits as e:
221
+ # Make another attempt with enough bits, then try to cast to the
222
+ # new type.
223
+ tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
224
+ max_label = _ni_label._label(input, structure, tmp_output)
225
+ output[...] = tmp_output[...]
226
+ if not np.all(output == tmp_output):
227
+ # refuse to return bad results
228
+ raise RuntimeError(
229
+ "insufficient bit-depth in requested output type"
230
+ ) from e
231
+
232
+ if caller_provided_output:
233
+ # result was written in-place
234
+ return max_label
235
+ else:
236
+ return output, max_label
237
+
238
+
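A brief editorial sketch of the `output` path exercised above: when a caller-provided array is passed, the labels are written in place and only the feature count is returned.

import numpy as np
from scipy import ndimage

a = np.array([[0, 1, 0, 2],
              [0, 1, 0, 2]])

out = np.empty(a.shape, dtype=np.int32)   # must be able to hold the largest label
n = ndimage.label(a, output=out)
assert n == 2 and out.max() == 2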
239
+ def find_objects(input, max_label=0):
240
+ """
241
+ Find objects in a labeled array.
242
+
243
+ Parameters
244
+ ----------
245
+ input : ndarray of ints
246
+ Array containing objects defined by different labels. Labels with
247
+ value 0 are ignored.
248
+ max_label : int, optional
249
+ Maximum label to be searched for in `input`. If max_label is not
250
+ given, the positions of all objects are returned.
251
+
252
+ Returns
253
+ -------
254
+ object_slices : list of tuples
255
+ A list of tuples, with each tuple containing N slices (with N the
256
+ dimension of the input array). Slices correspond to the minimal
257
+ parallelepiped that contains the object. If a number is missing,
258
+ None is returned instead of a slice. The label ``l`` corresponds to
259
+ the index ``l-1`` in the returned list.
260
+
261
+ See Also
262
+ --------
263
+ label, center_of_mass
264
+
265
+ Notes
266
+ -----
267
+ This function is very useful for isolating a volume of interest inside
268
+ a 3-D array that cannot be "seen through".
269
+
270
+ Examples
271
+ --------
272
+ >>> from scipy import ndimage
273
+ >>> import numpy as np
274
+ >>> a = np.zeros((6,6), dtype=int)
275
+ >>> a[2:4, 2:4] = 1
276
+ >>> a[4, 4] = 1
277
+ >>> a[:2, :3] = 2
278
+ >>> a[0, 5] = 3
279
+ >>> a
280
+ array([[2, 2, 2, 0, 0, 3],
281
+ [2, 2, 2, 0, 0, 0],
282
+ [0, 0, 1, 1, 0, 0],
283
+ [0, 0, 1, 1, 0, 0],
284
+ [0, 0, 0, 0, 1, 0],
285
+ [0, 0, 0, 0, 0, 0]])
286
+ >>> ndimage.find_objects(a)
287
+ [(slice(2, 5, None), slice(2, 5, None)),
288
+ (slice(0, 2, None), slice(0, 3, None)),
289
+ (slice(0, 1, None), slice(5, 6, None))]
290
+ >>> ndimage.find_objects(a, max_label=2)
291
+ [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
292
+ >>> ndimage.find_objects(a == 1, max_label=2)
293
+ [(slice(2, 5, None), slice(2, 5, None)), None]
294
+
295
+ >>> loc = ndimage.find_objects(a)[0]
296
+ >>> a[loc]
297
+ array([[1, 1, 0],
298
+ [1, 1, 0],
299
+ [0, 0, 1]])
300
+
301
+ """
302
+ input = numpy.asarray(input)
303
+ if numpy.iscomplexobj(input):
304
+ raise TypeError('Complex type not supported')
305
+
306
+ if max_label < 1:
307
+ max_label = input.max()
308
+
309
+ return _nd_image.find_objects(input, max_label)
310
+
311
+
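An illustrative pairing (not part of the diff) of `label` and `find_objects`: each returned slice tuple is the tight bounding box of one feature and can be used to crop it.

import numpy as np
from scipy import ndimage

img = np.zeros((8, 8), dtype=int)
img[1:3, 1:4] = 1
img[5:7, 5:8] = 1

labeled, n = ndimage.label(img)
for sl in ndimage.find_objects(labeled):
    assert labeled[sl].shape == (2, 3)   # both blobs are 2 x 3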
312
+ def value_indices(arr, *, ignore_value=None):
313
+ """
314
+ Find indices of each distinct value in given array.
315
+
316
+ Parameters
317
+ ----------
318
+ arr : ndarray of ints
319
+ Array containing integer values.
320
+ ignore_value : int, optional
321
+ This value will be ignored in searching the `arr` array. If not
322
+ given, all values found will be included in output. Default
323
+ is None.
324
+
325
+ Returns
326
+ -------
327
+ indices : dictionary
328
+ A Python dictionary of array indices for each distinct value. The
329
+ dictionary is keyed by the distinct values; the entries are array
330
+ index tuples covering all occurrences of the value within the
331
+ array.
332
+
333
+ This dictionary can occupy significant memory, usually several times
334
+ the size of the input array.
335
+
336
+ See Also
337
+ --------
338
+ label, maximum, median, minimum_position, extrema, sum, mean, variance,
339
+ standard_deviation, numpy.where, numpy.unique
340
+
341
+ Notes
342
+ -----
343
+ For a small array with few distinct values, one might use
344
+ `numpy.unique()` to find all possible values, and ``(arr == val)`` to
345
+ locate each value within that array. However, for large arrays,
346
+ with many distinct values, this can become extremely inefficient,
347
+ as locating each value would require a new search through the entire
348
+ array. Using this function, there is essentially one search, with
349
+ the indices saved for all distinct values.
350
+
351
+ This is useful when matching a categorical image (e.g. a segmentation
352
+ or classification) to an associated image of other data, allowing
353
+ any per-class statistic(s) to then be calculated. This provides a
354
+ more flexible alternative to functions like ``scipy.ndimage.mean()``
355
+ and ``scipy.ndimage.variance()``.
356
+
357
+ Some other closely related functionality, with different strengths and
358
+ weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
359
+ the `scikit-image <https://scikit-image.org/>`_ function
360
+ ``skimage.measure.regionprops()``.
361
+
362
+ Note for IDL users: this provides functionality equivalent to IDL's
363
+ REVERSE_INDICES option (as per the IDL documentation for the
364
+ `HISTOGRAM <https://www.l3harrisgeospatial.com/docs/histogram.html>`_
365
+ function).
366
+
367
+ .. versionadded:: 1.10.0
368
+
369
+ Examples
370
+ --------
371
+ >>> import numpy as np
372
+ >>> from scipy import ndimage
373
+ >>> a = np.zeros((6, 6), dtype=int)
374
+ >>> a[2:4, 2:4] = 1
375
+ >>> a[4, 4] = 1
376
+ >>> a[:2, :3] = 2
377
+ >>> a[0, 5] = 3
378
+ >>> a
379
+ array([[2, 2, 2, 0, 0, 3],
380
+ [2, 2, 2, 0, 0, 0],
381
+ [0, 0, 1, 1, 0, 0],
382
+ [0, 0, 1, 1, 0, 0],
383
+ [0, 0, 0, 0, 1, 0],
384
+ [0, 0, 0, 0, 0, 0]])
385
+ >>> val_indices = ndimage.value_indices(a)
386
+
387
+ The dictionary `val_indices` will have an entry for each distinct
388
+ value in the input array.
389
+
390
+ >>> val_indices.keys()
391
+ dict_keys([0, 1, 2, 3])
392
+
393
+ The entry for each value is an index tuple, locating the elements
394
+ with that value.
395
+
396
+ >>> ndx1 = val_indices[1]
397
+ >>> ndx1
398
+ (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
399
+
400
+ This can be used to index into the original array, or any other
401
+ array with the same shape.
402
+
403
+ >>> a[ndx1]
404
+ array([1, 1, 1, 1, 1])
405
+
406
+ If the zeros were to be ignored, then the resulting dictionary
407
+ would no longer have an entry for zero.
408
+
409
+ >>> val_indices = ndimage.value_indices(a, ignore_value=0)
410
+ >>> val_indices.keys()
411
+ dict_keys([1, 2, 3])
412
+
413
+ """
414
+ # Cope with ignore_value being None, without too much extra complexity
415
+ # in the C code. If not None, the value is passed in as a numpy array
416
+ # with the same dtype as arr.
417
+ ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype)
418
+ ignoreIsNone = (ignore_value is None)
419
+ if not ignoreIsNone:
420
+ ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
421
+
422
+ val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr)
423
+ return val_indices
424
+
425
+
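A short editorial sketch of the per-class workflow described in the Notes: one pass over a categorical array yields index tuples that can then index any co-registered data array.

import numpy as np
from scipy import ndimage

classes = np.array([[1, 1, 2],
                    [1, 2, 2]])
data = np.array([[10., 20., 30.],
                 [40., 50., 60.]])

means = {int(val): float(data[idx].mean())
         for val, idx in ndimage.value_indices(classes).items()}
assert means == {1: 70.0 / 3.0, 2: 140.0 / 3.0}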
426
+ def labeled_comprehension(input, labels, index, func, out_dtype, default,
427
+ pass_positions=False):
428
+ """
429
+ Roughly equivalent to [func(input[labels == i]) for i in index].
430
+
431
+ Sequentially applies an arbitrary function (that works on array_like input)
432
+ to subsets of an N-D image array specified by `labels` and `index`.
433
+ The option exists to provide the function with positional parameters as the
434
+ second argument.
435
+
436
+ Parameters
437
+ ----------
438
+ input : array_like
439
+ Data from which to select `labels` to process.
440
+ labels : array_like or None
441
+ Labels to objects in `input`.
442
+ If not None, array must be same shape as `input`.
443
+ If None, `func` is applied to raveled `input`.
444
+ index : int, sequence of ints or None
445
+ Subset of `labels` to which to apply `func`.
446
+ If a scalar, a single value is returned.
447
+ If None, `func` is applied to all non-zero values of `labels`.
448
+ func : callable
449
+ Python function to apply to `labels` from `input`.
450
+ out_dtype : dtype
451
+ Dtype to use for `result`.
452
+ default : int, float or None
453
+ Default return value when an element of `index` does not exist
454
+ in `labels`.
455
+ pass_positions : bool, optional
456
+ If True, pass linear indices to `func` as a second argument.
457
+ Default is False.
458
+
459
+ Returns
460
+ -------
461
+ result : ndarray
462
+ Result of applying `func` to each of `labels` to `input` in `index`.
463
+
464
+ Examples
465
+ --------
466
+ >>> import numpy as np
467
+ >>> a = np.array([[1, 2, 0, 0],
468
+ ... [5, 3, 0, 4],
469
+ ... [0, 0, 0, 7],
470
+ ... [9, 3, 0, 0]])
471
+ >>> from scipy import ndimage
472
+ >>> lbl, nlbl = ndimage.label(a)
473
+ >>> lbls = np.arange(1, nlbl+1)
474
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
475
+ array([ 2.75, 5.5 , 6. ])
476
+
477
+ Falling back to `default`:
478
+
479
+ >>> lbls = np.arange(1, nlbl+2)
480
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
481
+ array([ 2.75, 5.5 , 6. , -1. ])
482
+
483
+ Passing positions:
484
+
485
+ >>> def fn(val, pos):
486
+ ... print("fn says: %s : %s" % (val, pos))
487
+ ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
488
+ ...
489
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
490
+ fn says: [1 2 5 3] : [0 1 4 5]
491
+ fn says: [4 7] : [ 7 11]
492
+ fn says: [9 3] : [12 13]
493
+ array([ 11., 11., -12., 0.])
494
+
495
+ """
496
+
497
+ as_scalar = numpy.isscalar(index)
498
+ input = numpy.asarray(input)
499
+
500
+ if pass_positions:
501
+ positions = numpy.arange(input.size).reshape(input.shape)
502
+
503
+ if labels is None:
504
+ if index is not None:
505
+ raise ValueError("index without defined labels")
506
+ if not pass_positions:
507
+ return func(input.ravel())
508
+ else:
509
+ return func(input.ravel(), positions.ravel())
510
+
511
+ try:
512
+ input, labels = numpy.broadcast_arrays(input, labels)
513
+ except ValueError as e:
514
+ raise ValueError("input and labels must have the same shape "
515
+ "(excepting dimensions with width 1)") from e
516
+
517
+ if index is None:
518
+ if not pass_positions:
519
+ return func(input[labels > 0])
520
+ else:
521
+ return func(input[labels > 0], positions[labels > 0])
522
+
523
+ index = numpy.atleast_1d(index)
524
+ if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
525
+ raise ValueError(f"Cannot convert index values from <{index.dtype}> to "
526
+ f"<{labels.dtype}> (labels' type) without loss of precision")
527
+
528
+ index = index.astype(labels.dtype)
529
+
530
+ # optimization: find min/max in index,
531
+ # and select those parts of labels, input, and positions
532
+ lo = index.min()
533
+ hi = index.max()
534
+ mask = (labels >= lo) & (labels <= hi)
535
+
536
+ # this also ravels the arrays
537
+ labels = labels[mask]
538
+ input = input[mask]
539
+ if pass_positions:
540
+ positions = positions[mask]
541
+
542
+ # sort everything by labels
543
+ label_order = labels.argsort()
544
+ labels = labels[label_order]
545
+ input = input[label_order]
546
+ if pass_positions:
547
+ positions = positions[label_order]
548
+
549
+ index_order = index.argsort()
550
+ sorted_index = index[index_order]
551
+
552
+ def do_map(inputs, output):
553
+ """labels must be sorted"""
554
+ nidx = sorted_index.size
555
+
556
+ # Find boundaries for each stretch of constant labels
557
+ # This could be faster, but we already paid N log N to sort labels.
558
+ lo = numpy.searchsorted(labels, sorted_index, side='left')
559
+ hi = numpy.searchsorted(labels, sorted_index, side='right')
560
+
561
+ for i, l, h in zip(range(nidx), lo, hi):
562
+ if l == h:
563
+ continue
564
+ output[i] = func(*[inp[l:h] for inp in inputs])
565
+
566
+ temp = numpy.empty(index.shape, out_dtype)
567
+ temp[:] = default
568
+ if not pass_positions:
569
+ do_map([input], temp)
570
+ else:
571
+ do_map([input, positions], temp)
572
+
573
+ output = numpy.zeros(index.shape, out_dtype)
574
+ output[index_order] = temp
575
+ if as_scalar:
576
+ output = output[0]
577
+
578
+ return output
579
+
580
+
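An illustrative use (not from the diff) of `labeled_comprehension` with a statistic the dedicated reducers do not provide directly, here a per-label 90th percentile.

import numpy as np
from scipy import ndimage

data = np.array([[1, 2, 0, 0],
                 [5, 3, 0, 4],
                 [0, 0, 0, 7],
                 [9, 3, 0, 0]])
lbl, nlbl = ndimage.label(data)
p90 = ndimage.labeled_comprehension(
    data, lbl, np.arange(1, nlbl + 1),
    lambda v: np.percentile(v, 90), float, np.nan)
assert np.allclose(p90, [4.4, 6.7, 8.4])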
581
+ def _safely_castable_to_int(dt):
582
+ """Test whether the NumPy data type `dt` can be safely cast to an int."""
583
+ int_size = np.dtype(int).itemsize
584
+ safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
585
+ (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
586
+ return safe
587
+
588
+
589
+ def _stats(input, labels=None, index=None, centered=False):
590
+ """Count, sum, and optionally compute (sum - centre)^2 of input by label
591
+
592
+ Parameters
593
+ ----------
594
+ input : array_like, N-D
595
+ The input data to be analyzed.
596
+ labels : array_like (N-D), optional
597
+ The labels of the data in `input`. This array must be broadcast
598
+ compatible with `input`; typically, it is the same shape as `input`.
599
+ If `labels` is None, all nonzero values in `input` are treated as
600
+ the single labeled group.
601
+ index : label or sequence of labels, optional
602
+ These are the labels of the groups for which the stats are computed.
603
+ If `index` is None, the stats are computed for the single group where
604
+ `labels` is greater than 0.
605
+ centered : bool, optional
606
+ If True, the centered sum of squares for each labeled group is
607
+ also returned. Default is False.
608
+
609
+ Returns
610
+ -------
611
+ counts : int or ndarray of ints
612
+ The number of elements in each labeled group.
613
+ sums : scalar or ndarray of scalars
614
+ The sums of the values in each labeled group.
615
+ sums_c : scalar or ndarray of scalars, optional
616
+ The sums of mean-centered squares of the values in each labeled group.
617
+ This is only returned if `centered` is True.
618
+
619
+ """
620
+ def single_group(vals):
621
+ if centered:
622
+ vals_c = vals - vals.mean()
623
+ return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
624
+ else:
625
+ return vals.size, vals.sum()
626
+
627
+ if labels is None:
628
+ return single_group(input)
629
+
630
+ # ensure input and labels match sizes
631
+ input, labels = numpy.broadcast_arrays(input, labels)
632
+
633
+ if index is None:
634
+ return single_group(input[labels > 0])
635
+
636
+ if numpy.isscalar(index):
637
+ return single_group(input[labels == index])
638
+
639
+ def _sum_centered(labels):
640
+ # `labels` is expected to be an ndarray with the same shape as `input`.
641
+ # It must contain the label indices (which are not necessarily the labels
642
+ # themselves).
643
+ means = sums / counts
644
+ centered_input = input - means[labels]
645
+ # bincount expects 1-D inputs, so we ravel the arguments.
646
+ bc = numpy.bincount(labels.ravel(),
647
+ weights=(centered_input *
648
+ centered_input.conjugate()).ravel())
649
+ return bc
650
+
651
+ # Remap labels to unique integers if necessary, or if the largest
652
+ # label is larger than the number of values.
653
+
654
+ if (not _safely_castable_to_int(labels.dtype) or
655
+ labels.min() < 0 or labels.max() > labels.size):
656
+ # Use numpy.unique to generate the label indices. `new_labels` will
657
+ # be 1-D, but it should be interpreted as the flattened N-D array of
658
+ # label indices.
659
+ unique_labels, new_labels = numpy.unique(labels, return_inverse=True)
660
+ new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D
661
+ counts = numpy.bincount(new_labels)
662
+ sums = numpy.bincount(new_labels, weights=input.ravel())
663
+ if centered:
664
+ # Compute the sum of the mean-centered squares.
665
+ # We must reshape new_labels to the N-D shape of `input` before
666
+ # passing it _sum_centered.
667
+ sums_c = _sum_centered(new_labels.reshape(labels.shape))
668
+ idxs = numpy.searchsorted(unique_labels, index)
669
+ # make all of idxs valid
670
+ idxs[idxs >= unique_labels.size] = 0
671
+ found = (unique_labels[idxs] == index)
672
+ else:
673
+ # labels are an integer type allowed by bincount, and there aren't too
674
+ # many, so call bincount directly.
675
+ counts = numpy.bincount(labels.ravel())
676
+ sums = numpy.bincount(labels.ravel(), weights=input.ravel())
677
+ if centered:
678
+ sums_c = _sum_centered(labels)
679
+ # make sure all index values are valid
680
+ idxs = numpy.asanyarray(index, numpy.int_).copy()
681
+ found = (idxs >= 0) & (idxs < counts.size)
682
+ idxs[~found] = 0
683
+
684
+ counts = counts[idxs]
685
+ counts[~found] = 0
686
+ sums = sums[idxs]
687
+ sums[~found] = 0
688
+
689
+ if not centered:
690
+ return (counts, sums)
691
+ else:
692
+ sums_c = sums_c[idxs]
693
+ sums_c[~found] = 0
694
+ return (counts, sums, sums_c)
695
+
696
+
697
+ def sum(input, labels=None, index=None):
698
+ """
699
+ Calculate the sum of the values of the array.
700
+
701
+ Notes
702
+ -----
703
+ This is an alias for `ndimage.sum_labels` kept for backwards compatibility
704
+ reasons, for new code please prefer `sum_labels`. See the `sum_labels`
705
+ docstring for more details.
706
+
707
+ """
708
+ return sum_labels(input, labels, index)
709
+
710
+
711
+ def sum_labels(input, labels=None, index=None):
712
+ """
713
+ Calculate the sum of the values of the array.
714
+
715
+ Parameters
716
+ ----------
717
+ input : array_like
718
+ Values of `input` inside the regions defined by `labels`
719
+ are summed together.
720
+ labels : array_like of ints, optional
721
+ Assign labels to the values of the array. Has to have the same shape as
722
+ `input`.
723
+ index : array_like, optional
724
+ A single label number or a sequence of label numbers of
725
+ the objects to be measured.
726
+
727
+ Returns
728
+ -------
729
+ sum : ndarray or scalar
730
+ An array of the sums of values of `input` inside the regions defined
731
+ by `labels` with the same shape as `index`. If 'index' is None or scalar,
732
+ a scalar is returned.
733
+
734
+ See Also
735
+ --------
736
+ mean, median
737
+
738
+ Examples
739
+ --------
740
+ >>> from scipy import ndimage
741
+ >>> input = [0,1,2,3]
742
+ >>> labels = [1,1,2,2]
743
+ >>> ndimage.sum_labels(input, labels, index=[1,2])
744
+ [1.0, 5.0]
745
+ >>> ndimage.sum_labels(input, labels, index=1)
746
+ 1
747
+ >>> ndimage.sum_labels(input, labels)
748
+ 6
749
+
750
+
751
+ """
752
+ count, sum = _stats(input, labels, index)
753
+ return sum
754
+
755
+
756
+ def mean(input, labels=None, index=None):
757
+ """
758
+ Calculate the mean of the values of an array at labels.
759
+
760
+ Parameters
761
+ ----------
762
+ input : array_like
763
+ Array on which to compute the mean of elements over distinct
764
+ regions.
765
+ labels : array_like, optional
766
+ Array of labels of same shape, or broadcastable to the same shape as
767
+ `input`. All elements sharing the same label form one region over
768
+ which the mean of the elements is computed.
769
+ index : int or sequence of ints, optional
770
+ Labels of the objects over which the mean is to be computed.
771
+ Default is None, in which case the mean for all values where label is
772
+ greater than 0 is calculated.
773
+
774
+ Returns
775
+ -------
776
+ out : list
777
+ Sequence of same length as `index`, with the mean of the different
778
+ regions labeled by the labels in `index`.
779
+
780
+ See Also
781
+ --------
782
+ variance, standard_deviation, minimum, maximum, sum, label
783
+
784
+ Examples
785
+ --------
786
+ >>> from scipy import ndimage
787
+ >>> import numpy as np
788
+ >>> a = np.arange(25).reshape((5,5))
789
+ >>> labels = np.zeros_like(a)
790
+ >>> labels[3:5,3:5] = 1
791
+ >>> index = np.unique(labels)
792
+ >>> labels
793
+ array([[0, 0, 0, 0, 0],
794
+ [0, 0, 0, 0, 0],
795
+ [0, 0, 0, 0, 0],
796
+ [0, 0, 0, 1, 1],
797
+ [0, 0, 0, 1, 1]])
798
+ >>> index
799
+ array([0, 1])
800
+ >>> ndimage.mean(a, labels=labels, index=index)
801
+ [10.285714285714286, 21.0]
802
+
803
+ """
804
+
805
+ count, sum = _stats(input, labels, index)
806
+ return sum / numpy.asanyarray(count).astype(numpy.float64)
807
+
808
+
809
+ def variance(input, labels=None, index=None):
810
+ """
811
+ Calculate the variance of the values of an N-D image array, optionally at
812
+ specified sub-regions.
813
+
814
+ Parameters
815
+ ----------
816
+ input : array_like
817
+ Nd-image data to process.
818
+ labels : array_like, optional
819
+ Labels defining sub-regions in `input`.
820
+ If not None, must be same shape as `input`.
821
+ index : int or sequence of ints, optional
822
+ `labels` to include in output. If None (default), all values where
823
+ `labels` is non-zero are used.
824
+
825
+ Returns
826
+ -------
827
+ variance : float or ndarray
828
+ Values of variance, for each sub-region if `labels` and `index` are
829
+ specified.
830
+
831
+ See Also
832
+ --------
833
+ label, standard_deviation, maximum, minimum, extrema
834
+
835
+ Examples
836
+ --------
837
+ >>> import numpy as np
838
+ >>> a = np.array([[1, 2, 0, 0],
839
+ ... [5, 3, 0, 4],
840
+ ... [0, 0, 0, 7],
841
+ ... [9, 3, 0, 0]])
842
+ >>> from scipy import ndimage
843
+ >>> ndimage.variance(a)
844
+ 7.609375
845
+
846
+ Features to process can be specified using `labels` and `index`:
847
+
848
+ >>> lbl, nlbl = ndimage.label(a)
849
+ >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
850
+ array([ 2.1875, 2.25 , 9. ])
851
+
852
+ If no index is given, all non-zero `labels` are processed:
853
+
854
+ >>> ndimage.variance(a, lbl)
855
+ 6.1875
856
+
857
+ """
858
+ count, sum, sum_c_sq = _stats(input, labels, index, centered=True)
859
+ return sum_c_sq / np.asanyarray(count).astype(float)
860
+
861
+
862
+ def standard_deviation(input, labels=None, index=None):
863
+ """
864
+ Calculate the standard deviation of the values of an N-D image array,
865
+ optionally at specified sub-regions.
866
+
867
+ Parameters
868
+ ----------
869
+ input : array_like
870
+ N-D image data to process.
871
+ labels : array_like, optional
872
+ Labels to identify sub-regions in `input`.
873
+ If not None, must be same shape as `input`.
874
+ index : int or sequence of ints, optional
875
+ `labels` to include in output. If None (default), all values where
876
+ `labels` is non-zero are used.
877
+
878
+ Returns
879
+ -------
880
+ standard_deviation : float or ndarray
881
+ Values of standard deviation, for each sub-region if `labels` and
882
+ `index` are specified.
883
+
884
+ See Also
885
+ --------
886
+ label, variance, maximum, minimum, extrema
887
+
888
+ Examples
889
+ --------
890
+ >>> import numpy as np
891
+ >>> a = np.array([[1, 2, 0, 0],
892
+ ... [5, 3, 0, 4],
893
+ ... [0, 0, 0, 7],
894
+ ... [9, 3, 0, 0]])
895
+ >>> from scipy import ndimage
896
+ >>> ndimage.standard_deviation(a)
897
+ 2.7585095613392387
898
+
899
+ Features to process can be specified using `labels` and `index`:
900
+
901
+ >>> lbl, nlbl = ndimage.label(a)
902
+ >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
903
+ array([ 1.479, 1.5 , 3. ])
904
+
905
+ If no index is given, all non-zero `labels` are processed:
906
+
907
+ >>> ndimage.standard_deviation(a, lbl)
908
+ 2.4874685927665499
909
+
910
+ """
911
+ return numpy.sqrt(variance(input, labels, index))
912
+
913
+
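A consistency sketch (editorial) tying the reducers above back to the `_stats` bookkeeping: `mean` is sum over count, and `variance` is the centred sum of squares over count, i.e. the population variance per label.

import numpy as np
from scipy import ndimage

data = np.array([[1., 2., 0.],
                 [0., 3., 5.]])
labels = np.array([[1, 1, 0],
                   [0, 2, 2]])
index = [1, 2]

s = ndimage.sum_labels(data, labels, index)
m = ndimage.mean(data, labels, index)
v = ndimage.variance(data, labels, index)
assert np.allclose(m, s / 2.0)                       # two samples per label
assert np.allclose(v, [np.var([1., 2.]), np.var([3., 5.])])
assert np.allclose(ndimage.standard_deviation(data, labels, index), np.sqrt(v))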
914
+ def _select(input, labels=None, index=None, find_min=False, find_max=False,
915
+ find_min_positions=False, find_max_positions=False,
916
+ find_median=False):
917
+ """Returns min, max, or both, plus their positions (if requested), and
918
+ median."""
919
+
920
+ input = numpy.asanyarray(input)
921
+
922
+ find_positions = find_min_positions or find_max_positions
923
+ positions = None
924
+ if find_positions:
925
+ positions = numpy.arange(input.size).reshape(input.shape)
926
+
927
+ def single_group(vals, positions):
928
+ result = []
929
+ if find_min:
930
+ result += [vals.min()]
931
+ if find_min_positions:
932
+ result += [positions[vals == vals.min()][0]]
933
+ if find_max:
934
+ result += [vals.max()]
935
+ if find_max_positions:
936
+ result += [positions[vals == vals.max()][0]]
937
+ if find_median:
938
+ result += [numpy.median(vals)]
939
+ return result
940
+
941
+ if labels is None:
942
+ return single_group(input, positions)
943
+
944
+ # ensure input and labels match sizes
945
+ input, labels = numpy.broadcast_arrays(input, labels)
946
+
947
+ if index is None:
948
+ mask = (labels > 0)
949
+ masked_positions = None
950
+ if find_positions:
951
+ masked_positions = positions[mask]
952
+ return single_group(input[mask], masked_positions)
953
+
954
+ if numpy.isscalar(index):
955
+ mask = (labels == index)
956
+ masked_positions = None
957
+ if find_positions:
958
+ masked_positions = positions[mask]
959
+ return single_group(input[mask], masked_positions)
960
+
961
+ # remap labels to unique integers if necessary, or if the largest
962
+ # label is larger than the number of values.
963
+ if (not _safely_castable_to_int(labels.dtype) or
964
+ labels.min() < 0 or labels.max() > labels.size):
965
+ # remap labels, and indexes
966
+ unique_labels, labels = numpy.unique(labels, return_inverse=True)
967
+ idxs = numpy.searchsorted(unique_labels, index)
968
+
969
+ # make all of idxs valid
970
+ idxs[idxs >= unique_labels.size] = 0
971
+ found = (unique_labels[idxs] == index)
972
+ else:
973
+ # labels are an integer type, and there aren't too many
974
+ idxs = numpy.asanyarray(index, numpy.int_).copy()
975
+ found = (idxs >= 0) & (idxs <= labels.max())
976
+
977
+ idxs[~ found] = labels.max() + 1
978
+
979
+ if find_median:
980
+ order = numpy.lexsort((input.ravel(), labels.ravel()))
981
+ else:
982
+ order = input.ravel().argsort()
983
+ input = input.ravel()[order]
984
+ labels = labels.ravel()[order]
985
+ if find_positions:
986
+ positions = positions.ravel()[order]
987
+
988
+ result = []
989
+ if find_min:
990
+ mins = numpy.zeros(labels.max() + 2, input.dtype)
991
+ mins[labels[::-1]] = input[::-1]
992
+ result += [mins[idxs]]
993
+ if find_min_positions:
994
+ minpos = numpy.zeros(labels.max() + 2, int)
995
+ minpos[labels[::-1]] = positions[::-1]
996
+ result += [minpos[idxs]]
997
+ if find_max:
998
+ maxs = numpy.zeros(labels.max() + 2, input.dtype)
999
+ maxs[labels] = input
1000
+ result += [maxs[idxs]]
1001
+ if find_max_positions:
1002
+ maxpos = numpy.zeros(labels.max() + 2, int)
1003
+ maxpos[labels] = positions
1004
+ result += [maxpos[idxs]]
1005
+ if find_median:
1006
+ locs = numpy.arange(len(labels))
1007
+ lo = numpy.zeros(labels.max() + 2, numpy.int_)
1008
+ lo[labels[::-1]] = locs[::-1]
1009
+ hi = numpy.zeros(labels.max() + 2, numpy.int_)
1010
+ hi[labels] = locs
1011
+ lo = lo[idxs]
1012
+ hi = hi[idxs]
1013
+ # lo is an index to the lowest value in input for each label,
1014
+ # hi is an index to the largest value.
1015
+ # move them to be either the same ((hi - lo) % 2 == 0) or next
1016
+ # to each other ((hi - lo) % 2 == 1), then average.
1017
+ step = (hi - lo) // 2
1018
+ lo += step
1019
+ hi -= step
1020
+ if (np.issubdtype(input.dtype, np.integer)
1021
+ or np.issubdtype(input.dtype, np.bool_)):
1022
+ # avoid integer overflow or boolean addition (gh-12836)
1023
+ result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
1024
+ else:
1025
+ result += [(input[lo] + input[hi]) / 2.0]
1026
+
1027
+ return result
1028
+
1029
+
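# --- illustrative note (not part of the scipy diff above) ----------------
# A minimal sketch of the scatter-by-label trick that _select relies on:
# after sorting the flattened input, the assignment `out[labels] = values`
# keeps, for each label, the value written last (NumPy writes duplicated
# fancy indices in order).  Scattering the sorted values in reverse order
# therefore leaves each label's minimum, and scattering them forward
# leaves each label's maximum.  The `demo_` names are hypothetical.
import numpy as np

demo_values = np.array([7, 1, 5, 3, 2, 9])
demo_labels = np.array([1, 1, 2, 2, 1, 2])

order = demo_values.argsort()              # sort once, globally
v = demo_values[order]
l = demo_labels[order]

mins = np.zeros(demo_labels.max() + 1, demo_values.dtype)
mins[l[::-1]] = v[::-1]                    # last write per label = smallest value
maxs = np.zeros(demo_labels.max() + 1, demo_values.dtype)
maxs[l] = v                                # last write per label = largest value

print(mins[1:], maxs[1:])                  # -> [1 3] [7 9]
# For the median, _select instead lexsorts by (label, value), records the
# first (lo) and last (hi) position of each label, walks lo and hi toward
# the middle, and averages input[lo] and input[hi].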
1030
+ def minimum(input, labels=None, index=None):
1031
+ """
1032
+ Calculate the minimum of the values of an array over labeled regions.
1033
+
1034
+ Parameters
1035
+ ----------
1036
+ input : array_like
1037
+ Array_like of values. For each region specified by `labels`, the
1038
+ minimal value of `input` over the region is computed.
1039
+ labels : array_like, optional
1040
+ An array_like of integers marking different regions over which the
1041
+ minimum value of `input` is to be computed. `labels` must have the
1042
+ same shape as `input`. If `labels` is not specified, the minimum
1043
+ over the whole array is returned.
1044
+ index : array_like, optional
1045
+ A list of region labels that are taken into account for computing the
1046
+ minima. If index is None, the minimum over all elements where `labels`
1047
+ is non-zero is returned.
1048
+
1049
+ Returns
1050
+ -------
1051
+ minimum : float or list of floats
1052
+ List of minima of `input` over the regions determined by `labels` and
1053
+ whose index is in `index`. If `index` or `labels` are not specified, a
1054
+ float is returned: the minimal value of `input` if `labels` is None,
1055
+ and the minimal value of elements where `labels` is greater than zero
1056
+ if `index` is None.
1057
+
1058
+ See Also
1059
+ --------
1060
+ label, maximum, median, minimum_position, extrema, sum, mean, variance,
1061
+ standard_deviation
1062
+
1063
+ Notes
1064
+ -----
1065
+ The function returns a Python list and not a NumPy array; use
1066
+ `np.array` to convert the list to an array.
1067
+
1068
+ Examples
1069
+ --------
1070
+ >>> from scipy import ndimage
1071
+ >>> import numpy as np
1072
+ >>> a = np.array([[1, 2, 0, 0],
1073
+ ... [5, 3, 0, 4],
1074
+ ... [0, 0, 0, 7],
1075
+ ... [9, 3, 0, 0]])
1076
+ >>> labels, labels_nb = ndimage.label(a)
1077
+ >>> labels
1078
+ array([[1, 1, 0, 0],
1079
+ [1, 1, 0, 2],
1080
+ [0, 0, 0, 2],
1081
+ [3, 3, 0, 0]])
1082
+ >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
1083
+ [1.0, 4.0, 3.0]
1084
+ >>> ndimage.minimum(a)
1085
+ 0.0
1086
+ >>> ndimage.minimum(a, labels=labels)
1087
+ 1.0
1088
+
1089
+ """
1090
+ return _select(input, labels, index, find_min=True)[0]
1091
+
1092
+
1093
+ def maximum(input, labels=None, index=None):
1094
+ """
1095
+ Calculate the maximum of the values of an array over labeled regions.
1096
+
1097
+ Parameters
1098
+ ----------
1099
+ input : array_like
1100
+ Array_like of values. For each region specified by `labels`, the
1101
+ maximal value of `input` over the region is computed.
1102
+ labels : array_like, optional
1103
+ An array of integers marking different regions over which the
1104
+ maximum value of `input` is to be computed. `labels` must have the
1105
+ same shape as `input`. If `labels` is not specified, the maximum
1106
+ over the whole array is returned.
1107
+ index : array_like, optional
1108
+ A list of region labels that are taken into account for computing the
1109
+ maxima. If index is None, the maximum over all elements where `labels`
1110
+ is non-zero is returned.
1111
+
1112
+ Returns
1113
+ -------
1114
+ output : float or list of floats
1115
+ List of maxima of `input` over the regions determined by `labels` and
1116
+ whose index is in `index`. If `index` or `labels` are not specified, a
1117
+ float is returned: the maximal value of `input` if `labels` is None,
1118
+ and the maximal value of elements where `labels` is greater than zero
1119
+ if `index` is None.
1120
+
1121
+ See Also
1122
+ --------
1123
+ label, minimum, median, maximum_position, extrema, sum, mean, variance,
1124
+ standard_deviation
1125
+
1126
+ Notes
1127
+ -----
1128
+ The function returns a Python list and not a NumPy array; use
1129
+ `np.array` to convert the list to an array.
1130
+
1131
+ Examples
1132
+ --------
1133
+ >>> import numpy as np
1134
+ >>> a = np.arange(16).reshape((4,4))
1135
+ >>> a
1136
+ array([[ 0, 1, 2, 3],
1137
+ [ 4, 5, 6, 7],
1138
+ [ 8, 9, 10, 11],
1139
+ [12, 13, 14, 15]])
1140
+ >>> labels = np.zeros_like(a)
1141
+ >>> labels[:2,:2] = 1
1142
+ >>> labels[2:, 1:3] = 2
1143
+ >>> labels
1144
+ array([[1, 1, 0, 0],
1145
+ [1, 1, 0, 0],
1146
+ [0, 2, 2, 0],
1147
+ [0, 2, 2, 0]])
1148
+ >>> from scipy import ndimage
1149
+ >>> ndimage.maximum(a)
1150
+ 15.0
1151
+ >>> ndimage.maximum(a, labels=labels, index=[1,2])
1152
+ [5.0, 14.0]
1153
+ >>> ndimage.maximum(a, labels=labels)
1154
+ 14.0
1155
+
1156
+ >>> b = np.array([[1, 2, 0, 0],
1157
+ ... [5, 3, 0, 4],
1158
+ ... [0, 0, 0, 7],
1159
+ ... [9, 3, 0, 0]])
1160
+ >>> labels, labels_nb = ndimage.label(b)
1161
+ >>> labels
1162
+ array([[1, 1, 0, 0],
1163
+ [1, 1, 0, 2],
1164
+ [0, 0, 0, 2],
1165
+ [3, 3, 0, 0]])
1166
+ >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
1167
+ [5.0, 7.0, 9.0]
1168
+
1169
+ """
1170
+ return _select(input, labels, index, find_max=True)[0]
1171
+
1172
+
1173
+ def median(input, labels=None, index=None):
1174
+ """
1175
+ Calculate the median of the values of an array over labeled regions.
1176
+
1177
+ Parameters
1178
+ ----------
1179
+ input : array_like
1180
+ Array_like of values. For each region specified by `labels`, the
1181
+ median value of `input` over the region is computed.
1182
+ labels : array_like, optional
1183
+ An array_like of integers marking different regions over which the
1184
+ median value of `input` is to be computed. `labels` must have the
1185
+ same shape as `input`. If `labels` is not specified, the median
1186
+ over the whole array is returned.
1187
+ index : array_like, optional
1188
+ A list of region labels that are taken into account for computing the
1189
+ medians. If index is None, the median over all elements where `labels`
1190
+ is non-zero is returned.
1191
+
1192
+ Returns
1193
+ -------
1194
+ median : float or list of floats
1195
+ List of medians of `input` over the regions determined by `labels` and
1196
+ whose index is in `index`. If `index` or `labels` are not specified, a
1197
+ float is returned: the median value of `input` if `labels` is None,
1198
+ and the median value of elements where `labels` is greater than zero
1199
+ if `index` is None.
1200
+
1201
+ See Also
1202
+ --------
1203
+ label, minimum, maximum, extrema, sum, mean, variance, standard_deviation
1204
+
1205
+ Notes
1206
+ -----
1207
+ The function returns a Python list and not a NumPy array; use
1208
+ `np.array` to convert the list to an array.
1209
+
1210
+ Examples
1211
+ --------
1212
+ >>> from scipy import ndimage
1213
+ >>> import numpy as np
1214
+ >>> a = np.array([[1, 2, 0, 1],
1215
+ ... [5, 3, 0, 4],
1216
+ ... [0, 0, 0, 7],
1217
+ ... [9, 3, 0, 0]])
1218
+ >>> labels, labels_nb = ndimage.label(a)
1219
+ >>> labels
1220
+ array([[1, 1, 0, 2],
1221
+ [1, 1, 0, 2],
1222
+ [0, 0, 0, 2],
1223
+ [3, 3, 0, 0]])
1224
+ >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
1225
+ [2.5, 4.0, 6.0]
1226
+ >>> ndimage.median(a)
1227
+ 1.0
1228
+ >>> ndimage.median(a, labels=labels)
1229
+ 3.0
1230
+
1231
+ """
1232
+ return _select(input, labels, index, find_median=True)[0]
1233
+
1234
+
1235
+ def minimum_position(input, labels=None, index=None):
1236
+ """
1237
+ Find the positions of the minimums of the values of an array at labels.
1238
+
1239
+ Parameters
1240
+ ----------
1241
+ input : array_like
1242
+ Array_like of values.
1243
+ labels : array_like, optional
1244
+ An array of integers marking different regions over which the
1245
+ position of the minimum value of `input` is to be computed.
1246
+ `labels` must have the same shape as `input`. If `labels` is not
1247
+ specified, the location of the first minimum over the whole
1248
+ array is returned.
1249
+
1250
+ The `labels` argument only works when `index` is specified.
1251
+ index : array_like, optional
1252
+ A list of region labels that are taken into account for finding the
1253
+ location of the minima. If `index` is None, the ``first`` minimum
1254
+ over all elements where `labels` is non-zero is returned.
1255
+
1256
+ The `index` argument only works when `labels` is specified.
1257
+
1258
+ Returns
1259
+ -------
1260
+ output : list of tuples of ints
1261
+ Tuple of ints or list of tuples of ints that specify the location
1262
+ of minima of `input` over the regions determined by `labels` and
1263
+ whose index is in `index`.
1264
+
1265
+ If `index` or `labels` are not specified, a tuple of ints is
1266
+ returned specifying the location of the first minimal value of `input`.
1267
+
1268
+ See Also
1269
+ --------
1270
+ label, minimum, median, maximum_position, extrema, sum, mean, variance,
1271
+ standard_deviation
1272
+
1273
+ Examples
1274
+ --------
1275
+ >>> import numpy as np
1276
+ >>> a = np.array([[10, 20, 30],
1277
+ ... [40, 80, 100],
1278
+ ... [1, 100, 200]])
1279
+ >>> b = np.array([[1, 2, 0, 1],
1280
+ ... [5, 3, 0, 4],
1281
+ ... [0, 0, 0, 7],
1282
+ ... [9, 3, 0, 0]])
1283
+
1284
+ >>> from scipy import ndimage
1285
+
1286
+ >>> ndimage.minimum_position(a)
1287
+ (2, 0)
1288
+ >>> ndimage.minimum_position(b)
1289
+ (0, 2)
1290
+
1291
+ Features to process can be specified using `labels` and `index`:
1292
+
1293
+ >>> label, pos = ndimage.label(a)
1294
+ >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
1295
+ [(2, 0)]
1296
+
1297
+ >>> label, pos = ndimage.label(b)
1298
+ >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
1299
+ [(0, 0), (0, 3), (3, 1)]
1300
+
1301
+ """
1302
+ dims = numpy.array(numpy.asarray(input).shape)
1303
+ # see numpy.unravel_index to understand this line.
1304
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
1305
+
1306
+ result = _select(input, labels, index, find_min_positions=True)[0]
1307
+
1308
+ if numpy.isscalar(result):
1309
+ return tuple((result // dim_prod) % dims)
1310
+
1311
+ return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
1312
+
1313
+
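# --- illustrative note (not part of the scipy diff above) ----------------
# minimum_position (and maximum_position below) converts the flat indices
# returned by _select back to N-D coordinates with `(flat // dim_prod) % dims`,
# which is equivalent to numpy.unravel_index.  A tiny sketch with
# hypothetical values, for illustration only:
import numpy as np

dims = np.array([4, 4])                                  # shape of the input array
dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]     # -> [4, 1]
flat = 13                                                # flat index of some element

coords = tuple((flat // dim_prod) % dims)                # -> (3, 1)
assert coords == np.unravel_index(flat, tuple(dims))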
1314
+ def maximum_position(input, labels=None, index=None):
1315
+ """
1316
+ Find the positions of the maximums of the values of an array at labels.
1317
+
1318
+ For each region specified by `labels`, the position of the maximum
1319
+ value of `input` within the region is returned.
1320
+
1321
+ Parameters
1322
+ ----------
1323
+ input : array_like
1324
+ Array_like of values.
1325
+ labels : array_like, optional
1326
+ An array of integers marking different regions over which the
1327
+ position of the maximum value of `input` is to be computed.
1328
+ `labels` must have the same shape as `input`. If `labels` is not
1329
+ specified, the location of the first maximum over the whole
1330
+ array is returned.
1331
+
1332
+ The `labels` argument only works when `index` is specified.
1333
+ index : array_like, optional
1334
+ A list of region labels that are taken into account for finding the
1335
+ location of the maxima. If `index` is None, the first maximum
1336
+ over all elements where `labels` is non-zero is returned.
1337
+
1338
+ The `index` argument only works when `labels` is specified.
1339
+
1340
+ Returns
1341
+ -------
1342
+ output : list of tuples of ints
1343
+ List of tuples of ints that specify the location of maxima of
1344
+ `input` over the regions determined by `labels` and whose index
1345
+ is in `index`.
1346
+
1347
+ If `index` or `labels` are not specified, a tuple of ints is
1348
+ returned specifying the location of the ``first`` maximal value
1349
+ of `input`.
1350
+
1351
+ See Also
1352
+ --------
1353
+ label, minimum, median, minimum_position, extrema, sum, mean, variance,
1354
+ standard_deviation
1355
+
1356
+ Examples
1357
+ --------
1358
+ >>> from scipy import ndimage
1359
+ >>> import numpy as np
1360
+ >>> a = np.array([[1, 2, 0, 0],
1361
+ ... [5, 3, 0, 4],
1362
+ ... [0, 0, 0, 7],
1363
+ ... [9, 3, 0, 0]])
1364
+ >>> ndimage.maximum_position(a)
1365
+ (3, 0)
1366
+
1367
+ Features to process can be specified using `labels` and `index`:
1368
+
1369
+ >>> lbl = np.array([[0, 1, 2, 3],
1370
+ ... [0, 1, 2, 3],
1371
+ ... [0, 1, 2, 3],
1372
+ ... [0, 1, 2, 3]])
1373
+ >>> ndimage.maximum_position(a, lbl, 1)
1374
+ (1, 1)
1375
+
1376
+ If no index is given, non-zero `labels` are processed:
1377
+
1378
+ >>> ndimage.maximum_position(a, lbl)
1379
+ (2, 3)
1380
+
1381
+ If there are no maxima, the position of the first element is returned:
1382
+
1383
+ >>> ndimage.maximum_position(a, lbl, 2)
1384
+ (0, 2)
1385
+
1386
+ """
1387
+ dims = numpy.array(numpy.asarray(input).shape)
1388
+ # see numpy.unravel_index to understand this line.
1389
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
1390
+
1391
+ result = _select(input, labels, index, find_max_positions=True)[0]
1392
+
1393
+ if numpy.isscalar(result):
1394
+ return tuple((result // dim_prod) % dims)
1395
+
1396
+ return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
1397
+
1398
+
1399
+ def extrema(input, labels=None, index=None):
1400
+ """
1401
+ Calculate the minimums and maximums of the values of an array
1402
+ at labels, along with their positions.
1403
+
1404
+ Parameters
1405
+ ----------
1406
+ input : ndarray
1407
+ N-D image data to process.
1408
+ labels : ndarray, optional
1409
+ Labels of features in input.
1410
+ If not None, must be same shape as `input`.
1411
+ index : int or sequence of ints, optional
1412
+ Labels to include in output. If None (default), all values where
1413
+ `labels` is non-zero are used.
1414
+
1415
+ Returns
1416
+ -------
1417
+ minimums, maximums : int or ndarray
1418
+ Values of minimums and maximums in each feature.
1419
+ min_positions, max_positions : tuple or list of tuples
1420
+ Each tuple gives the N-D coordinates of the corresponding minimum
1421
+ or maximum.
1422
+
1423
+ See Also
1424
+ --------
1425
+ maximum, minimum, maximum_position, minimum_position, center_of_mass
1426
+
1427
+ Examples
1428
+ --------
1429
+ >>> import numpy as np
1430
+ >>> a = np.array([[1, 2, 0, 0],
1431
+ ... [5, 3, 0, 4],
1432
+ ... [0, 0, 0, 7],
1433
+ ... [9, 3, 0, 0]])
1434
+ >>> from scipy import ndimage
1435
+ >>> ndimage.extrema(a)
1436
+ (0, 9, (0, 2), (3, 0))
1437
+
1438
+ Features to process can be specified using `labels` and `index`:
1439
+
1440
+ >>> lbl, nlbl = ndimage.label(a)
1441
+ >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
1442
+ (array([1, 4, 3]),
1443
+ array([5, 7, 9]),
1444
+ [(0, 0), (1, 3), (3, 1)],
1445
+ [(1, 0), (2, 3), (3, 0)])
1446
+
1447
+ If no index is given, non-zero `labels` are processed:
1448
+
1449
+ >>> ndimage.extrema(a, lbl)
1450
+ (1, 9, (0, 0), (3, 0))
1451
+
1452
+ """
1453
+ dims = numpy.array(numpy.asarray(input).shape)
1454
+ # see numpy.unravel_index to understand this line.
1455
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
1456
+
1457
+ minimums, min_positions, maximums, max_positions = _select(input, labels,
1458
+ index,
1459
+ find_min=True,
1460
+ find_max=True,
1461
+ find_min_positions=True,
1462
+ find_max_positions=True)
1463
+
1464
+ if numpy.isscalar(minimums):
1465
+ return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
1466
+ tuple((max_positions // dim_prod) % dims))
1467
+
1468
+ min_positions = [
1469
+ tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims
1470
+ ]
1471
+ max_positions = [
1472
+ tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims
1473
+ ]
1474
+
1475
+ return minimums, maximums, min_positions, max_positions
1476
+
1477
+
1478
+ def center_of_mass(input, labels=None, index=None):
1479
+ """
1480
+ Calculate the center of mass of the values of an array at labels.
1481
+
1482
+ Parameters
1483
+ ----------
1484
+ input : ndarray
1485
+ Data from which to calculate center-of-mass. The masses can either
1486
+ be positive or negative.
1487
+ labels : ndarray, optional
1488
+ Labels for objects in `input`, as generated by `ndimage.label`.
1489
+ Only used with `index`. Dimensions must be the same as `input`.
1490
+ index : int or sequence of ints, optional
1491
+ Labels for which to calculate centers-of-mass. If not specified,
1492
+ the combined center of mass of all labels greater than zero
1493
+ will be calculated. Only used with `labels`.
1494
+
1495
+ Returns
1496
+ -------
1497
+ center_of_mass : tuple, or list of tuples
1498
+ Coordinates of centers-of-mass.
1499
+
1500
+ Examples
1501
+ --------
1502
+ >>> import numpy as np
1503
+ >>> a = np.array(([0,0,0,0],
1504
+ ... [0,1,1,0],
1505
+ ... [0,1,1,0],
1506
+ ... [0,1,1,0]))
1507
+ >>> from scipy import ndimage
1508
+ >>> ndimage.center_of_mass(a)
1509
+ (2.0, 1.5)
1510
+
1511
+ Calculation of multiple objects in an image
1512
+
1513
+ >>> b = np.array(([0,1,1,0],
1514
+ ... [0,1,0,0],
1515
+ ... [0,0,0,0],
1516
+ ... [0,0,1,1],
1517
+ ... [0,0,1,1]))
1518
+ >>> lbl = ndimage.label(b)[0]
1519
+ >>> ndimage.center_of_mass(b, lbl, [1,2])
1520
+ [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]
1521
+
1522
+ Negative masses are also accepted, which can occur for example when
1523
+ bias is removed from measured data due to random noise.
1524
+
1525
+ >>> c = np.array(([-1,0,0,0],
1526
+ ... [0,-1,-1,0],
1527
+ ... [0,1,-1,0],
1528
+ ... [0,1,1,0]))
1529
+ >>> ndimage.center_of_mass(c)
1530
+ (-4.0, 1.0)
1531
+
1532
+ If there are division by zero issues, the function does not raise an
1533
+ error but rather issues a RuntimeWarning before returning inf and/or NaN.
1534
+
1535
+ >>> d = np.array([-1, 1])
1536
+ >>> ndimage.center_of_mass(d)
1537
+ (inf,)
1538
+ """
1539
+ normalizer = sum(input, labels, index)
1540
+ grids = numpy.ogrid[[slice(0, i) for i in input.shape]]
1541
+
1542
+ results = [sum(input * grids[dir].astype(float), labels, index) / normalizer
1543
+ for dir in range(input.ndim)]
1544
+
1545
+ if numpy.isscalar(results[0]):
1546
+ return tuple(results)
1547
+
1548
+ return [tuple(v) for v in numpy.array(results).T]
1549
+
1550
+
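# --- illustrative note (not part of the scipy diff above) ----------------
# center_of_mass above is the intensity-weighted mean of the coordinate
# grids: com_d = sum(input * coord_d) / sum(input) for each axis d.  A
# sketch checking that against the function (array values are arbitrary):
import numpy as np
from scipy import ndimage

a = np.array([[0., 0., 0., 0.],
              [0., 1., 1., 0.],
              [0., 1., 1., 0.],
              [0., 1., 1., 0.]])

rows, cols = np.indices(a.shape)
com = (np.sum(a * rows) / a.sum(), np.sum(a * cols) / a.sum())
assert np.allclose(com, ndimage.center_of_mass(a))   # -> (2.0, 1.5)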
1551
+ def histogram(input, min, max, bins, labels=None, index=None):
1552
+ """
1553
+ Calculate the histogram of the values of an array, optionally at labels.
1554
+
1555
+ Histogram calculates the frequency of values in an array within bins
1556
+ determined by `min`, `max`, and `bins`. The `labels` and `index`
1557
+ keywords can limit the scope of the histogram to specified sub-regions
1558
+ within the array.
1559
+
1560
+ Parameters
1561
+ ----------
1562
+ input : array_like
1563
+ Data for which to calculate histogram.
1564
+ min, max : int
1565
+ Minimum and maximum values of range of histogram bins.
1566
+ bins : int
1567
+ Number of bins.
1568
+ labels : array_like, optional
1569
+ Labels for objects in `input`.
1570
+ If not None, must be same shape as `input`.
1571
+ index : int or sequence of ints, optional
1572
+ Label or labels for which to calculate histogram. If None, all values
1573
+ where `labels` is greater than zero are used.
1574
+
1575
+ Returns
1576
+ -------
1577
+ hist : ndarray
1578
+ Histogram counts.
1579
+
1580
+ Examples
1581
+ --------
1582
+ >>> import numpy as np
1583
+ >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ],
1584
+ ... [ 0. , 0.7778, 0. , 0. ],
1585
+ ... [ 0. , 0. , 0. , 0. ],
1586
+ ... [ 0. , 0. , 0.7181, 0.2787],
1587
+ ... [ 0. , 0. , 0.6573, 0.3094]])
1588
+ >>> from scipy import ndimage
1589
+ >>> ndimage.histogram(a, 0, 1, 10)
1590
+ array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0])
1591
+
1592
+ With labels and no indices, non-zero elements are counted:
1593
+
1594
+ >>> lbl, nlbl = ndimage.label(a)
1595
+ >>> ndimage.histogram(a, 0, 1, 10, lbl)
1596
+ array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])
1597
+
1598
+ Indices can be used to count only certain objects:
1599
+
1600
+ >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
1601
+ array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
1602
+
1603
+ """
1604
+ _bins = numpy.linspace(min, max, bins + 1)
1605
+
1606
+ def _hist(vals):
1607
+ return numpy.histogram(vals, _bins)[0]
1608
+
1609
+ return labeled_comprehension(input, labels, index, _hist, object, None,
1610
+ pass_positions=False)
1611
+
1612
+
1613
+ def watershed_ift(input, markers, structure=None, output=None):
1614
+ """
1615
+ Apply watershed from markers using image foresting transform algorithm.
1616
+
1617
+ Parameters
1618
+ ----------
1619
+ input : array_like
1620
+ Input.
1621
+ markers : array_like
1622
+ Markers are points within each watershed that form the beginning
1623
+ of the process. Negative markers are considered background markers
1624
+ which are processed after the other markers.
1625
+ structure : structure element, optional
1626
+ A structuring element defining the connectivity of the object can be
1627
+ provided. If None, an element is generated with a square
1628
+ connectivity equal to one.
1629
+ output : ndarray, optional
1630
+ An output array can optionally be provided. The same shape as input.
1631
+
1632
+ Returns
1633
+ -------
1634
+ watershed_ift : ndarray
1635
+ Output. Same shape as `input`.
1636
+
1637
+ References
1638
+ ----------
1639
+ .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
1640
+ foresting transform: theory, algorithms, and applications",
1641
+ Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.
1642
+
1643
+ """
1644
+ input = numpy.asarray(input)
1645
+ if input.dtype.type not in [numpy.uint8, numpy.uint16]:
1646
+ raise TypeError('only 8 and 16 unsigned inputs are supported')
1647
+
1648
+ if structure is None:
1649
+ structure = _morphology.generate_binary_structure(input.ndim, 1)
1650
+ structure = numpy.asarray(structure, dtype=bool)
1651
+ if structure.ndim != input.ndim:
1652
+ raise RuntimeError('structure and input must have equal rank')
1653
+ for ii in structure.shape:
1654
+ if ii != 3:
1655
+ raise RuntimeError('structure dimensions must be equal to 3')
1656
+
1657
+ if not structure.flags.contiguous:
1658
+ structure = structure.copy()
1659
+ markers = numpy.asarray(markers)
1660
+ if input.shape != markers.shape:
1661
+ raise RuntimeError('input and markers must have equal shape')
1662
+
1663
+ integral_types = [numpy.int8,
1664
+ numpy.int16,
1665
+ numpy.int32,
1666
+ numpy.int64,
1667
+ numpy.intc,
1668
+ numpy.intp]
1669
+
1670
+ if markers.dtype.type not in integral_types:
1671
+ raise RuntimeError('marker should be of integer type')
1672
+
1673
+ if isinstance(output, numpy.ndarray):
1674
+ if output.dtype.type not in integral_types:
1675
+ raise RuntimeError('output should be of integer type')
1676
+ else:
1677
+ output = markers.dtype
1678
+
1679
+ output = _ni_support._get_output(output, input)
1680
+ _nd_image.watershed_ift(input, markers, structure, output)
1681
+ return output
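# --- illustrative note (not part of the scipy diff above) ----------------
# The watershed_ift docstring above has no Examples section; a minimal,
# hedged usage sketch (the image and marker values are chosen only for
# illustration):
import numpy as np
from scipy import ndimage

# a "cost" image: two dark basins on a bright background (uint8 required)
img = np.full((7, 7), 200, dtype=np.uint8)
img[1:3, 1:3] = 10
img[4:6, 4:6] = 10

# integer markers: positive seeds inside each basin, plus a negative
# background marker, which is processed after the other markers
markers = np.zeros((7, 7), dtype=np.int16)
markers[1, 1] = 1
markers[5, 5] = 2
markers[0, 0] = -1

labels = ndimage.watershed_ift(img, markers)
# each pixel of `labels` now carries the marker value of the basin it was
# flooded from (1, 2, or -1 for the background here)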
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_morphology.py ADDED
@@ -0,0 +1,2520 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ import warnings
32
+ import operator
33
+
34
+ import numpy
35
+ from . import _ni_support
36
+ from . import _nd_image
37
+ from . import _filters
38
+
39
+ __all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
40
+ 'binary_dilation', 'binary_opening', 'binary_closing',
41
+ 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
42
+ 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
43
+ 'morphological_gradient', 'morphological_laplace', 'white_tophat',
44
+ 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
45
+ 'distance_transform_edt']
46
+
47
+
48
+ def _center_is_true(structure, origin):
49
+ structure = numpy.array(structure)
50
+ coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
51
+ origin)])
52
+ return bool(structure[coor])
53
+
54
+
55
+ def iterate_structure(structure, iterations, origin=None):
56
+ """
57
+ Iterate a structure by dilating it with itself.
58
+
59
+ Parameters
60
+ ----------
61
+ structure : array_like
62
+ Structuring element (an array of bools, for example), to be dilated with
63
+ itself.
64
+ iterations : int
65
+ number of dilations performed on the structure with itself
66
+ origin : optional
67
+ If origin is None, only the iterated structure is returned. If
68
+ not, a tuple of the iterated structure and the modified origin is
69
+ returned.
70
+
71
+ Returns
72
+ -------
73
+ iterate_structure : ndarray of bools
74
+ A new structuring element obtained by dilating `structure`
75
+ (`iterations` - 1) times with itself.
76
+
77
+ See Also
78
+ --------
79
+ generate_binary_structure
80
+
81
+ Examples
82
+ --------
83
+ >>> from scipy import ndimage
84
+ >>> struct = ndimage.generate_binary_structure(2, 1)
85
+ >>> struct.astype(int)
86
+ array([[0, 1, 0],
87
+ [1, 1, 1],
88
+ [0, 1, 0]])
89
+ >>> ndimage.iterate_structure(struct, 2).astype(int)
90
+ array([[0, 0, 1, 0, 0],
91
+ [0, 1, 1, 1, 0],
92
+ [1, 1, 1, 1, 1],
93
+ [0, 1, 1, 1, 0],
94
+ [0, 0, 1, 0, 0]])
95
+ >>> ndimage.iterate_structure(struct, 3).astype(int)
96
+ array([[0, 0, 0, 1, 0, 0, 0],
97
+ [0, 0, 1, 1, 1, 0, 0],
98
+ [0, 1, 1, 1, 1, 1, 0],
99
+ [1, 1, 1, 1, 1, 1, 1],
100
+ [0, 1, 1, 1, 1, 1, 0],
101
+ [0, 0, 1, 1, 1, 0, 0],
102
+ [0, 0, 0, 1, 0, 0, 0]])
103
+
104
+ """
105
+ structure = numpy.asarray(structure)
106
+ if iterations < 2:
107
+ return structure.copy()
108
+ ni = iterations - 1
109
+ shape = [ii + ni * (ii - 1) for ii in structure.shape]
110
+ pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
111
+ slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None)
112
+ for ii in range(len(shape)))
113
+ out = numpy.zeros(shape, bool)
114
+ out[slc] = structure != 0
115
+ out = binary_dilation(out, structure, iterations=ni)
116
+ if origin is None:
117
+ return out
118
+ else:
119
+ origin = _ni_support._normalize_sequence(origin, structure.ndim)
120
+ origin = [iterations * o for o in origin]
121
+ return out, origin
122
+
123
+
124
+ def generate_binary_structure(rank, connectivity):
125
+ """
126
+ Generate a binary structure for binary morphological operations.
127
+
128
+ Parameters
129
+ ----------
130
+ rank : int
131
+ Number of dimensions of the array to which the structuring element
132
+ will be applied, as returned by `np.ndim`.
133
+ connectivity : int
134
+ `connectivity` determines which elements of the output array belong
135
+ to the structure, i.e., are considered as neighbors of the central
136
+ element. Elements up to a squared distance of `connectivity` from
137
+ the center are considered neighbors. `connectivity` may range from 1
138
+ (no diagonal elements are neighbors) to `rank` (all elements are
139
+ neighbors).
140
+
141
+ Returns
142
+ -------
143
+ output : ndarray of bools
144
+ Structuring element which may be used for binary morphological
145
+ operations, with `rank` dimensions and all dimensions equal to 3.
146
+
147
+ See Also
148
+ --------
149
+ iterate_structure, binary_dilation, binary_erosion
150
+
151
+ Notes
152
+ -----
153
+ `generate_binary_structure` can only create structuring elements with
154
+ dimensions equal to 3, i.e., minimal dimensions. For larger structuring
155
+ elements, that are useful e.g., for eroding large objects, one may either
156
+ use `iterate_structure`, or create directly custom arrays with
157
+ numpy functions such as `numpy.ones`.
158
+
159
+ Examples
160
+ --------
161
+ >>> from scipy import ndimage
162
+ >>> import numpy as np
163
+ >>> struct = ndimage.generate_binary_structure(2, 1)
164
+ >>> struct
165
+ array([[False, True, False],
166
+ [ True, True, True],
167
+ [False, True, False]], dtype=bool)
168
+ >>> a = np.zeros((5,5))
169
+ >>> a[2, 2] = 1
170
+ >>> a
171
+ array([[ 0., 0., 0., 0., 0.],
172
+ [ 0., 0., 0., 0., 0.],
173
+ [ 0., 0., 1., 0., 0.],
174
+ [ 0., 0., 0., 0., 0.],
175
+ [ 0., 0., 0., 0., 0.]])
176
+ >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
177
+ >>> b
178
+ array([[ 0., 0., 0., 0., 0.],
179
+ [ 0., 0., 1., 0., 0.],
180
+ [ 0., 1., 1., 1., 0.],
181
+ [ 0., 0., 1., 0., 0.],
182
+ [ 0., 0., 0., 0., 0.]])
183
+ >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
184
+ array([[ 0., 0., 1., 0., 0.],
185
+ [ 0., 1., 1., 1., 0.],
186
+ [ 1., 1., 1., 1., 1.],
187
+ [ 0., 1., 1., 1., 0.],
188
+ [ 0., 0., 1., 0., 0.]])
189
+ >>> struct = ndimage.generate_binary_structure(2, 2)
190
+ >>> struct
191
+ array([[ True, True, True],
192
+ [ True, True, True],
193
+ [ True, True, True]], dtype=bool)
194
+ >>> struct = ndimage.generate_binary_structure(3, 1)
195
+ >>> struct # no diagonal elements
196
+ array([[[False, False, False],
197
+ [False, True, False],
198
+ [False, False, False]],
199
+ [[False, True, False],
200
+ [ True, True, True],
201
+ [False, True, False]],
202
+ [[False, False, False],
203
+ [False, True, False],
204
+ [False, False, False]]], dtype=bool)
205
+
206
+ """
207
+ if connectivity < 1:
208
+ connectivity = 1
209
+ if rank < 1:
210
+ return numpy.array(True, dtype=bool)
211
+ output = numpy.fabs(numpy.indices([3] * rank) - 1)
212
+ output = numpy.add.reduce(output, 0)
213
+ return output <= connectivity
214
+
215
+
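# --- illustrative note (not part of the scipy diff above) ----------------
# generate_binary_structure builds the element by thresholding the
# Manhattan distance of every cell of a 3**rank cube from its centre.
# A sketch of the same construction for rank=2, connectivity=1:
import numpy as np

rank, connectivity = 2, 1
offsets = np.indices([3] * rank) - 1        # per-axis offsets in {-1, 0, 1}
manhattan = np.abs(offsets).sum(axis=0)     # distance from the centre cell
struct = manhattan <= connectivity          # the cross-shaped element

print(struct.astype(int))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]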
216
+ def _binary_erosion(input, structure, iterations, mask, output,
217
+ border_value, origin, invert, brute_force):
218
+ try:
219
+ iterations = operator.index(iterations)
220
+ except TypeError as e:
221
+ raise TypeError('iterations parameter should be an integer') from e
222
+
223
+ input = numpy.asarray(input)
224
+ if numpy.iscomplexobj(input):
225
+ raise TypeError('Complex type not supported')
226
+ if structure is None:
227
+ structure = generate_binary_structure(input.ndim, 1)
228
+ else:
229
+ structure = numpy.asarray(structure, dtype=bool)
230
+ if structure.ndim != input.ndim:
231
+ raise RuntimeError('structure and input must have same dimensionality')
232
+ if not structure.flags.contiguous:
233
+ structure = structure.copy()
234
+ if numpy.prod(structure.shape, axis=0) < 1:
235
+ raise RuntimeError('structure must not be empty')
236
+ if mask is not None:
237
+ mask = numpy.asarray(mask)
238
+ if mask.shape != input.shape:
239
+ raise RuntimeError('mask and input must have equal sizes')
240
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
241
+ cit = _center_is_true(structure, origin)
242
+ if isinstance(output, numpy.ndarray):
243
+ if numpy.iscomplexobj(output):
244
+ raise TypeError('Complex output type not supported')
245
+ else:
246
+ output = bool
247
+ output = _ni_support._get_output(output, input)
248
+ temp_needed = numpy.may_share_memory(input, output)
249
+ if temp_needed:
250
+ # input and output arrays cannot share memory
251
+ temp = output
252
+ output = _ni_support._get_output(output.dtype, input)
253
+ if iterations == 1:
254
+ _nd_image.binary_erosion(input, structure, mask, output,
255
+ border_value, origin, invert, cit, 0)
256
+ elif cit and not brute_force:
257
+ changed, coordinate_list = _nd_image.binary_erosion(
258
+ input, structure, mask, output,
259
+ border_value, origin, invert, cit, 1)
260
+ structure = structure[tuple([slice(None, None, -1)] *
261
+ structure.ndim)]
262
+ for ii in range(len(origin)):
263
+ origin[ii] = -origin[ii]
264
+ if not structure.shape[ii] & 1:
265
+ origin[ii] -= 1
266
+ if mask is not None:
267
+ mask = numpy.asarray(mask, dtype=numpy.int8)
268
+ if not structure.flags.contiguous:
269
+ structure = structure.copy()
270
+ _nd_image.binary_erosion2(output, structure, mask, iterations - 1,
271
+ origin, invert, coordinate_list)
272
+ else:
273
+ tmp_in = numpy.empty_like(input, dtype=bool)
274
+ tmp_out = output
275
+ if iterations >= 1 and not iterations & 1:
276
+ tmp_in, tmp_out = tmp_out, tmp_in
277
+ changed = _nd_image.binary_erosion(
278
+ input, structure, mask, tmp_out,
279
+ border_value, origin, invert, cit, 0)
280
+ ii = 1
281
+ while ii < iterations or (iterations < 1 and changed):
282
+ tmp_in, tmp_out = tmp_out, tmp_in
283
+ changed = _nd_image.binary_erosion(
284
+ tmp_in, structure, mask, tmp_out,
285
+ border_value, origin, invert, cit, 0)
286
+ ii += 1
287
+ if temp_needed:
288
+ temp[...] = output
289
+ output = temp
290
+ return output
291
+
292
+
293
+ def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
294
+ border_value=0, origin=0, brute_force=False):
295
+ """
296
+ Multidimensional binary erosion with a given structuring element.
297
+
298
+ Binary erosion is a mathematical morphology operation used for image
299
+ processing.
300
+
301
+ Parameters
302
+ ----------
303
+ input : array_like
304
+ Binary image to be eroded. Non-zero (True) elements form
305
+ the subset to be eroded.
306
+ structure : array_like, optional
307
+ Structuring element used for the erosion. Non-zero elements are
308
+ considered True. If no structuring element is provided, an element
309
+ is generated with a square connectivity equal to one.
310
+ iterations : int, optional
311
+ The erosion is repeated `iterations` times (one, by default).
312
+ If iterations is less than 1, the erosion is repeated until the
313
+ result does not change anymore.
314
+ mask : array_like, optional
315
+ If a mask is given, only those elements with a True value at
316
+ the corresponding mask element are modified at each iteration.
317
+ output : ndarray, optional
318
+ Array of the same shape as input, into which the output is placed.
319
+ By default, a new array is created.
320
+ border_value : int (cast to 0 or 1), optional
321
+ Value at the border in the output array.
322
+ origin : int or tuple of ints, optional
323
+ Placement of the filter, by default 0.
324
+ brute_force : boolean, optional
325
+ Memory condition: if False, only the pixels whose value was changed in
326
+ the last iteration are tracked as candidates to be updated (eroded) in
327
+ the current iteration; if True all pixels are considered as candidates
328
+ for erosion, regardless of what happened in the previous iteration.
329
+ False by default.
330
+
331
+ Returns
332
+ -------
333
+ binary_erosion : ndarray of bools
334
+ Erosion of the input by the structuring element.
335
+
336
+ See Also
337
+ --------
338
+ grey_erosion, binary_dilation, binary_closing, binary_opening,
339
+ generate_binary_structure
340
+
341
+ Notes
342
+ -----
343
+ Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
344
+ structuring element for shrinking the shapes in an image. The binary
345
+ erosion of an image by a structuring element is the locus of the points
346
+ where a superimposition of the structuring element centered on the point
347
+ is entirely contained in the set of non-zero elements of the image.
348
+
349
+ References
350
+ ----------
351
+ .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
352
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
353
+
354
+ Examples
355
+ --------
356
+ >>> from scipy import ndimage
357
+ >>> import numpy as np
358
+ >>> a = np.zeros((7,7), dtype=int)
359
+ >>> a[1:6, 2:5] = 1
360
+ >>> a
361
+ array([[0, 0, 0, 0, 0, 0, 0],
362
+ [0, 0, 1, 1, 1, 0, 0],
363
+ [0, 0, 1, 1, 1, 0, 0],
364
+ [0, 0, 1, 1, 1, 0, 0],
365
+ [0, 0, 1, 1, 1, 0, 0],
366
+ [0, 0, 1, 1, 1, 0, 0],
367
+ [0, 0, 0, 0, 0, 0, 0]])
368
+ >>> ndimage.binary_erosion(a).astype(a.dtype)
369
+ array([[0, 0, 0, 0, 0, 0, 0],
370
+ [0, 0, 0, 0, 0, 0, 0],
371
+ [0, 0, 0, 1, 0, 0, 0],
372
+ [0, 0, 0, 1, 0, 0, 0],
373
+ [0, 0, 0, 1, 0, 0, 0],
374
+ [0, 0, 0, 0, 0, 0, 0],
375
+ [0, 0, 0, 0, 0, 0, 0]])
376
+ >>> #Erosion removes objects smaller than the structure
377
+ >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
378
+ array([[0, 0, 0, 0, 0, 0, 0],
379
+ [0, 0, 0, 0, 0, 0, 0],
380
+ [0, 0, 0, 0, 0, 0, 0],
381
+ [0, 0, 0, 0, 0, 0, 0],
382
+ [0, 0, 0, 0, 0, 0, 0],
383
+ [0, 0, 0, 0, 0, 0, 0],
384
+ [0, 0, 0, 0, 0, 0, 0]])
385
+
386
+ """
387
+ return _binary_erosion(input, structure, iterations, mask,
388
+ output, border_value, origin, 0, brute_force)
389
+
390
+
391
+ def binary_dilation(input, structure=None, iterations=1, mask=None,
392
+ output=None, border_value=0, origin=0,
393
+ brute_force=False):
394
+ """
395
+ Multidimensional binary dilation with the given structuring element.
396
+
397
+ Parameters
398
+ ----------
399
+ input : array_like
400
+ Binary array_like to be dilated. Non-zero (True) elements form
401
+ the subset to be dilated.
402
+ structure : array_like, optional
403
+ Structuring element used for the dilation. Non-zero elements are
404
+ considered True. If no structuring element is provided an element
405
+ is generated with a square connectivity equal to one.
406
+ iterations : int, optional
407
+ The dilation is repeated `iterations` times (one, by default).
408
+ If iterations is less than 1, the dilation is repeated until the
409
+ result does not change anymore. Only an integer of iterations is
410
+ accepted.
411
+ mask : array_like, optional
412
+ If a mask is given, only those elements with a True value at
413
+ the corresponding mask element are modified at each iteration.
414
+ output : ndarray, optional
415
+ Array of the same shape as input, into which the output is placed.
416
+ By default, a new array is created.
417
+ border_value : int (cast to 0 or 1), optional
418
+ Value at the border in the output array.
419
+ origin : int or tuple of ints, optional
420
+ Placement of the filter, by default 0.
421
+ brute_force : boolean, optional
422
+ Memory condition: if False, only the pixels whose value was changed in
423
+ the last iteration are tracked as candidates to be updated (dilated)
424
+ in the current iteration; if True all pixels are considered as
425
+ candidates for dilation, regardless of what happened in the previous
426
+ iteration. False by default.
427
+
428
+ Returns
429
+ -------
430
+ binary_dilation : ndarray of bools
431
+ Dilation of the input by the structuring element.
432
+
433
+ See Also
434
+ --------
435
+ grey_dilation, binary_erosion, binary_closing, binary_opening,
436
+ generate_binary_structure
437
+
438
+ Notes
439
+ -----
440
+ Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
441
+ structuring element for expanding the shapes in an image. The binary
442
+ dilation of an image by a structuring element is the locus of the points
443
+ covered by the structuring element, when its center lies within the
444
+ non-zero points of the image.
445
+
446
+ References
447
+ ----------
448
+ .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
449
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
450
+
451
+ Examples
452
+ --------
453
+ >>> from scipy import ndimage
454
+ >>> import numpy as np
455
+ >>> a = np.zeros((5, 5))
456
+ >>> a[2, 2] = 1
457
+ >>> a
458
+ array([[ 0., 0., 0., 0., 0.],
459
+ [ 0., 0., 0., 0., 0.],
460
+ [ 0., 0., 1., 0., 0.],
461
+ [ 0., 0., 0., 0., 0.],
462
+ [ 0., 0., 0., 0., 0.]])
463
+ >>> ndimage.binary_dilation(a)
464
+ array([[False, False, False, False, False],
465
+ [False, False, True, False, False],
466
+ [False, True, True, True, False],
467
+ [False, False, True, False, False],
468
+ [False, False, False, False, False]], dtype=bool)
469
+ >>> ndimage.binary_dilation(a).astype(a.dtype)
470
+ array([[ 0., 0., 0., 0., 0.],
471
+ [ 0., 0., 1., 0., 0.],
472
+ [ 0., 1., 1., 1., 0.],
473
+ [ 0., 0., 1., 0., 0.],
474
+ [ 0., 0., 0., 0., 0.]])
475
+ >>> # 3x3 structuring element with connectivity 1, used by default
476
+ >>> struct1 = ndimage.generate_binary_structure(2, 1)
477
+ >>> struct1
478
+ array([[False, True, False],
479
+ [ True, True, True],
480
+ [False, True, False]], dtype=bool)
481
+ >>> # 3x3 structuring element with connectivity 2
482
+ >>> struct2 = ndimage.generate_binary_structure(2, 2)
483
+ >>> struct2
484
+ array([[ True, True, True],
485
+ [ True, True, True],
486
+ [ True, True, True]], dtype=bool)
487
+ >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
488
+ array([[ 0., 0., 0., 0., 0.],
489
+ [ 0., 0., 1., 0., 0.],
490
+ [ 0., 1., 1., 1., 0.],
491
+ [ 0., 0., 1., 0., 0.],
492
+ [ 0., 0., 0., 0., 0.]])
493
+ >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
494
+ array([[ 0., 0., 0., 0., 0.],
495
+ [ 0., 1., 1., 1., 0.],
496
+ [ 0., 1., 1., 1., 0.],
497
+ [ 0., 1., 1., 1., 0.],
498
+ [ 0., 0., 0., 0., 0.]])
499
+ >>> ndimage.binary_dilation(a, structure=struct1,\\
500
+ ... iterations=2).astype(a.dtype)
501
+ array([[ 0., 0., 1., 0., 0.],
502
+ [ 0., 1., 1., 1., 0.],
503
+ [ 1., 1., 1., 1., 1.],
504
+ [ 0., 1., 1., 1., 0.],
505
+ [ 0., 0., 1., 0., 0.]])
506
+
507
+ """
508
+ input = numpy.asarray(input)
509
+ if structure is None:
510
+ structure = generate_binary_structure(input.ndim, 1)
511
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
512
+ structure = numpy.asarray(structure)
513
+ structure = structure[tuple([slice(None, None, -1)] *
514
+ structure.ndim)]
515
+ for ii in range(len(origin)):
516
+ origin[ii] = -origin[ii]
517
+ if not structure.shape[ii] & 1:
518
+ origin[ii] -= 1
519
+
520
+ return _binary_erosion(input, structure, iterations, mask,
521
+ output, border_value, origin, 1, brute_force)
522
+
523
+
524
+ def binary_opening(input, structure=None, iterations=1, output=None,
525
+ origin=0, mask=None, border_value=0, brute_force=False):
526
+ """
527
+ Multidimensional binary opening with the given structuring element.
528
+
529
+ The *opening* of an input image by a structuring element is the
530
+ *dilation* of the *erosion* of the image by the structuring element.
531
+
532
+ Parameters
533
+ ----------
534
+ input : array_like
535
+ Binary array_like to be opened. Non-zero (True) elements form
536
+ the subset to be opened.
537
+ structure : array_like, optional
538
+ Structuring element used for the opening. Non-zero elements are
539
+ considered True. If no structuring element is provided an element
540
+ is generated with a square connectivity equal to one (i.e., only
541
+ nearest neighbors are connected to the center, diagonally-connected
542
+ elements are not considered neighbors).
543
+ iterations : int, optional
544
+ The erosion step of the opening, then the dilation step are each
545
+ repeated `iterations` times (one, by default). If `iterations` is
546
+ less than 1, each operation is repeated until the result does
547
+ not change anymore. Only an integer of iterations is accepted.
548
+ output : ndarray, optional
549
+ Array of the same shape as input, into which the output is placed.
550
+ By default, a new array is created.
551
+ origin : int or tuple of ints, optional
552
+ Placement of the filter, by default 0.
553
+ mask : array_like, optional
554
+ If a mask is given, only those elements with a True value at
555
+ the corresponding mask element are modified at each iteration.
556
+
557
+ .. versionadded:: 1.1.0
558
+ border_value : int (cast to 0 or 1), optional
559
+ Value at the border in the output array.
560
+
561
+ .. versionadded:: 1.1.0
562
+ brute_force : boolean, optional
563
+ Memory condition: if False, only the pixels whose value was changed in
564
+ the last iteration are tracked as candidates to be updated in the
565
+ current iteration; if true all pixels are considered as candidates for
566
+ update, regardless of what happened in the previous iteration.
567
+ False by default.
568
+
569
+ .. versionadded:: 1.1.0
570
+
571
+ Returns
572
+ -------
573
+ binary_opening : ndarray of bools
574
+ Opening of the input by the structuring element.
575
+
576
+ See Also
577
+ --------
578
+ grey_opening, binary_closing, binary_erosion, binary_dilation,
579
+ generate_binary_structure
580
+
581
+ Notes
582
+ -----
583
+ *Opening* [1]_ is a mathematical morphology operation [2]_ that
584
+ consists in the succession of an erosion and a dilation of the
585
+ input with the same structuring element. Opening, therefore, removes
586
+ objects smaller than the structuring element.
587
+
588
+ Together with *closing* (`binary_closing`), opening can be used for
589
+ noise removal.
590
+
591
+ References
592
+ ----------
593
+ .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
594
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
595
+
596
+ Examples
597
+ --------
598
+ >>> from scipy import ndimage
599
+ >>> import numpy as np
600
+ >>> a = np.zeros((5,5), dtype=int)
601
+ >>> a[1:4, 1:4] = 1; a[4, 4] = 1
602
+ >>> a
603
+ array([[0, 0, 0, 0, 0],
604
+ [0, 1, 1, 1, 0],
605
+ [0, 1, 1, 1, 0],
606
+ [0, 1, 1, 1, 0],
607
+ [0, 0, 0, 0, 1]])
608
+ >>> # Opening removes small objects
609
+ >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
610
+ array([[0, 0, 0, 0, 0],
611
+ [0, 1, 1, 1, 0],
612
+ [0, 1, 1, 1, 0],
613
+ [0, 1, 1, 1, 0],
614
+ [0, 0, 0, 0, 0]])
615
+ >>> # Opening can also smooth corners
616
+ >>> ndimage.binary_opening(a).astype(int)
617
+ array([[0, 0, 0, 0, 0],
618
+ [0, 0, 1, 0, 0],
619
+ [0, 1, 1, 1, 0],
620
+ [0, 0, 1, 0, 0],
621
+ [0, 0, 0, 0, 0]])
622
+ >>> # Opening is the dilation of the erosion of the input
623
+ >>> ndimage.binary_erosion(a).astype(int)
624
+ array([[0, 0, 0, 0, 0],
625
+ [0, 0, 0, 0, 0],
626
+ [0, 0, 1, 0, 0],
627
+ [0, 0, 0, 0, 0],
628
+ [0, 0, 0, 0, 0]])
629
+ >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
630
+ array([[0, 0, 0, 0, 0],
631
+ [0, 0, 1, 0, 0],
632
+ [0, 1, 1, 1, 0],
633
+ [0, 0, 1, 0, 0],
634
+ [0, 0, 0, 0, 0]])
635
+
636
+ """
637
+ input = numpy.asarray(input)
638
+ if structure is None:
639
+ rank = input.ndim
640
+ structure = generate_binary_structure(rank, 1)
641
+
642
+ tmp = binary_erosion(input, structure, iterations, mask, None,
643
+ border_value, origin, brute_force)
644
+ return binary_dilation(tmp, structure, iterations, mask, output,
645
+ border_value, origin, brute_force)
646
+
647
+
648
+ def binary_closing(input, structure=None, iterations=1, output=None,
649
+ origin=0, mask=None, border_value=0, brute_force=False):
650
+ """
651
+ Multidimensional binary closing with the given structuring element.
652
+
653
+ The *closing* of an input image by a structuring element is the
654
+ *erosion* of the *dilation* of the image by the structuring element.
655
+
656
+ Parameters
657
+ ----------
658
+ input : array_like
659
+ Binary array_like to be closed. Non-zero (True) elements form
660
+ the subset to be closed.
661
+ structure : array_like, optional
662
+ Structuring element used for the closing. Non-zero elements are
663
+ considered True. If no structuring element is provided an element
664
+ is generated with a square connectivity equal to one (i.e., only
665
+ nearest neighbors are connected to the center, diagonally-connected
666
+ elements are not considered neighbors).
667
+ iterations : int, optional
668
+ The dilation step of the closing, then the erosion step are each
669
+ repeated `iterations` times (one, by default). If iterations is
670
+ less than 1, each operations is repeated until the result does
671
+ not change anymore. Only an integer of iterations is accepted.
672
+ output : ndarray, optional
673
+ Array of the same shape as input, into which the output is placed.
674
+ By default, a new array is created.
675
+ origin : int or tuple of ints, optional
676
+ Placement of the filter, by default 0.
677
+ mask : array_like, optional
678
+ If a mask is given, only those elements with a True value at
679
+ the corresponding mask element are modified at each iteration.
680
+
681
+ .. versionadded:: 1.1.0
682
+ border_value : int (cast to 0 or 1), optional
683
+ Value at the border in the output array.
684
+
685
+ .. versionadded:: 1.1.0
686
+ brute_force : boolean, optional
687
+ Memory condition: if False, only the pixels whose value was changed in
688
+ the last iteration are tracked as candidates to be updated in the
689
+ current iteration; if true al pixels are considered as candidates for
690
+ update, regardless of what happened in the previous iteration.
691
+ False by default.
692
+
693
+ .. versionadded:: 1.1.0
694
+
695
+ Returns
696
+ -------
697
+ binary_closing : ndarray of bools
698
+ Closing of the input by the structuring element.
699
+
700
+ See Also
701
+ --------
702
+ grey_closing, binary_opening, binary_dilation, binary_erosion,
703
+ generate_binary_structure
704
+
705
+ Notes
706
+ -----
707
+ *Closing* [1]_ is a mathematical morphology operation [2]_ that
708
+ consists in the succession of a dilation and an erosion of the
709
+ input with the same structuring element. Closing therefore fills
710
+ holes smaller than the structuring element.
711
+
712
+ Together with *opening* (`binary_opening`), closing can be used for
713
+ noise removal.
714
+
715
+ References
716
+ ----------
717
+ .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
718
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
719
+
720
+ Examples
721
+ --------
722
+ >>> from scipy import ndimage
723
+ >>> import numpy as np
724
+ >>> a = np.zeros((5,5), dtype=int)
725
+ >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
726
+ >>> a
727
+ array([[0, 0, 0, 0, 0],
728
+ [0, 1, 1, 1, 0],
729
+ [0, 1, 0, 1, 0],
730
+ [0, 1, 1, 1, 0],
731
+ [0, 0, 0, 0, 0]])
732
+ >>> # Closing removes small holes
733
+ >>> ndimage.binary_closing(a).astype(int)
734
+ array([[0, 0, 0, 0, 0],
735
+ [0, 1, 1, 1, 0],
736
+ [0, 1, 1, 1, 0],
737
+ [0, 1, 1, 1, 0],
738
+ [0, 0, 0, 0, 0]])
739
+ >>> # Closing is the erosion of the dilation of the input
740
+ >>> ndimage.binary_dilation(a).astype(int)
741
+ array([[0, 1, 1, 1, 0],
742
+ [1, 1, 1, 1, 1],
743
+ [1, 1, 1, 1, 1],
744
+ [1, 1, 1, 1, 1],
745
+ [0, 1, 1, 1, 0]])
746
+ >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
747
+ array([[0, 0, 0, 0, 0],
748
+ [0, 1, 1, 1, 0],
749
+ [0, 1, 1, 1, 0],
750
+ [0, 1, 1, 1, 0],
751
+ [0, 0, 0, 0, 0]])
752
+
753
+
754
+ >>> a = np.zeros((7,7), dtype=int)
755
+ >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
756
+ >>> a
757
+ array([[0, 0, 0, 0, 0, 0, 0],
758
+ [0, 0, 1, 0, 1, 0, 0],
759
+ [0, 0, 1, 0, 1, 0, 0],
760
+ [0, 0, 1, 1, 1, 0, 0],
761
+ [0, 0, 1, 1, 1, 0, 0],
762
+ [0, 0, 1, 1, 1, 0, 0],
763
+ [0, 0, 0, 0, 0, 0, 0]])
764
+ >>> # In addition to removing holes, closing can also
765
+ >>> # coarsen boundaries with fine hollows.
766
+ >>> ndimage.binary_closing(a).astype(int)
767
+ array([[0, 0, 0, 0, 0, 0, 0],
768
+ [0, 0, 1, 0, 1, 0, 0],
769
+ [0, 0, 1, 1, 1, 0, 0],
770
+ [0, 0, 1, 1, 1, 0, 0],
771
+ [0, 0, 1, 1, 1, 0, 0],
772
+ [0, 0, 1, 1, 1, 0, 0],
773
+ [0, 0, 0, 0, 0, 0, 0]])
774
+ >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
775
+ array([[0, 0, 0, 0, 0, 0, 0],
776
+ [0, 0, 1, 1, 1, 0, 0],
777
+ [0, 0, 1, 1, 1, 0, 0],
778
+ [0, 0, 1, 1, 1, 0, 0],
779
+ [0, 0, 1, 1, 1, 0, 0],
780
+ [0, 0, 1, 1, 1, 0, 0],
781
+ [0, 0, 0, 0, 0, 0, 0]])
782
+
783
+ """
784
+ input = numpy.asarray(input)
785
+ if structure is None:
786
+ rank = input.ndim
787
+ structure = generate_binary_structure(rank, 1)
788
+
789
+ tmp = binary_dilation(input, structure, iterations, mask, None,
790
+ border_value, origin, brute_force)
791
+ return binary_erosion(tmp, structure, iterations, mask, output,
792
+ border_value, origin, brute_force)
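A small check (illustrative, not from the source) that `iterations` applies to the dilation step first and then to the erosion step, as the docstring above states; the example array is hypothetical:

import numpy as np
from scipy import ndimage

a = np.zeros((9, 9), dtype=bool)       # hypothetical example array
a[2:7, 2:7] = True
a[3:6, 4] = False                      # a narrow slot to be closed

closed = ndimage.binary_closing(a, iterations=2)
manual = ndimage.binary_erosion(
    ndimage.binary_dilation(a, iterations=2), iterations=2)
assert np.array_equal(closed, manual)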
793
+
794
+
795
+ def binary_hit_or_miss(input, structure1=None, structure2=None,
796
+ output=None, origin1=0, origin2=None):
797
+ """
798
+ Multidimensional binary hit-or-miss transform.
799
+
800
+ The hit-or-miss transform finds the locations of a given pattern
801
+ inside the input image.
802
+
803
+ Parameters
804
+ ----------
805
+ input : array_like (cast to booleans)
806
+ Binary image where a pattern is to be detected.
807
+ structure1 : array_like (cast to booleans), optional
808
+ Part of the structuring element to be fitted to the foreground
809
+ (non-zero elements) of `input`. If no value is provided, a
810
+ structure of square connectivity 1 is chosen.
811
+ structure2 : array_like (cast to booleans), optional
812
+ Second part of the structuring element that has to completely miss
813
+ the foreground. If no value is provided, the complement of
814
+ `structure1` is taken.
815
+ output : ndarray, optional
816
+ Array of the same shape as input, into which the output is placed.
817
+ By default, a new array is created.
818
+ origin1 : int or tuple of ints, optional
819
+ Placement of the first part of the structuring element `structure1`,
820
+ by default 0 for a centered structure.
821
+ origin2 : int or tuple of ints, optional
822
+ Placement of the second part of the structuring element `structure2`,
823
+ by default 0 for a centered structure. If a value is provided for
824
+ `origin1` and not for `origin2`, then `origin2` is set to `origin1`.
825
+
826
+ Returns
827
+ -------
828
+ binary_hit_or_miss : ndarray
829
+ Hit-or-miss transform of `input` with the given structuring
830
+ element (`structure1`, `structure2`).
831
+
832
+ See Also
833
+ --------
834
+ binary_erosion
835
+
836
+ References
837
+ ----------
838
+ .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform
839
+
840
+ Examples
841
+ --------
842
+ >>> from scipy import ndimage
843
+ >>> import numpy as np
844
+ >>> a = np.zeros((7,7), dtype=int)
845
+ >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
846
+ >>> a
847
+ array([[0, 0, 0, 0, 0, 0, 0],
848
+ [0, 1, 0, 0, 0, 0, 0],
849
+ [0, 0, 1, 1, 0, 0, 0],
850
+ [0, 0, 1, 1, 0, 0, 0],
851
+ [0, 0, 0, 0, 1, 1, 0],
852
+ [0, 0, 0, 0, 1, 1, 0],
853
+ [0, 0, 0, 0, 0, 0, 0]])
854
+ >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
855
+ >>> structure1
856
+ array([[1, 0, 0],
857
+ [0, 1, 1],
858
+ [0, 1, 1]])
859
+ >>> # Find the matches of structure1 in the array a
860
+ >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
861
+ array([[0, 0, 0, 0, 0, 0, 0],
862
+ [0, 0, 0, 0, 0, 0, 0],
863
+ [0, 0, 1, 0, 0, 0, 0],
864
+ [0, 0, 0, 0, 0, 0, 0],
865
+ [0, 0, 0, 0, 1, 0, 0],
866
+ [0, 0, 0, 0, 0, 0, 0],
867
+ [0, 0, 0, 0, 0, 0, 0]])
868
+ >>> # Change the origin of the filter
869
+ >>> # origin1=1 is equivalent to origin1=(1,1) here
870
+ >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
871
+ ... origin1=1).astype(int)
872
+ array([[0, 0, 0, 0, 0, 0, 0],
873
+ [0, 0, 0, 0, 0, 0, 0],
874
+ [0, 0, 0, 0, 0, 0, 0],
875
+ [0, 0, 0, 1, 0, 0, 0],
876
+ [0, 0, 0, 0, 0, 0, 0],
877
+ [0, 0, 0, 0, 0, 1, 0],
878
+ [0, 0, 0, 0, 0, 0, 0]])
879
+
880
+ """
881
+ input = numpy.asarray(input)
882
+ if structure1 is None:
883
+ structure1 = generate_binary_structure(input.ndim, 1)
884
+ if structure2 is None:
885
+ structure2 = numpy.logical_not(structure1)
886
+ origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
887
+ if origin2 is None:
888
+ origin2 = origin1
889
+ else:
890
+ origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
891
+
892
+ tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
893
+ 0, False)
894
+ inplace = isinstance(output, numpy.ndarray)
895
+ result = _binary_erosion(input, structure2, 1, None, output, 0,
896
+ origin2, 1, False)
897
+ if inplace:
898
+ numpy.logical_not(output, output)
899
+ numpy.logical_and(tmp1, output, output)
900
+ else:
901
+ numpy.logical_not(result, result)
902
+ return numpy.logical_and(tmp1, result)
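The body above combines two erosions with a logical AND; a hedged reformulation of the same idea, using the docstring's example array (border effects do not matter for this interior pattern):

import numpy as np
from scipy import ndimage

a = np.zeros((7, 7), dtype=int)
a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1     # docstring example array
s1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
s2 = np.logical_not(s1)                            # default second structure

hits = ndimage.binary_hit_or_miss(a, structure1=s1)
# Foreground must contain s1 AND background must contain s2.
manual = np.logical_and(ndimage.binary_erosion(a, s1),
                        ndimage.binary_erosion(~a.astype(bool), s2))
assert np.array_equal(hits, manual)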
903
+
904
+
905
+ def binary_propagation(input, structure=None, mask=None,
906
+ output=None, border_value=0, origin=0):
907
+ """
908
+ Multidimensional binary propagation with the given structuring element.
909
+
910
+ Parameters
911
+ ----------
912
+ input : array_like
913
+ Binary image to be propagated inside `mask`.
914
+ structure : array_like, optional
915
+ Structuring element used in the successive dilations. The output
916
+ may depend on the structuring element, especially if `mask` has
917
+ several connected components. If no structuring element is
918
+ provided, an element is generated with a squared connectivity equal
919
+ to one.
920
+ mask : array_like, optional
921
+ Binary mask defining the region into which `input` is allowed to
922
+ propagate.
923
+ output : ndarray, optional
924
+ Array of the same shape as input, into which the output is placed.
925
+ By default, a new array is created.
926
+ border_value : int (cast to 0 or 1), optional
927
+ Value at the border in the output array.
928
+ origin : int or tuple of ints, optional
929
+ Placement of the filter, by default 0.
930
+
931
+ Returns
932
+ -------
933
+ binary_propagation : ndarray
934
+ Binary propagation of `input` inside `mask`.
935
+
936
+ Notes
937
+ -----
938
+ This function is functionally equivalent to calling binary_dilation
939
+ with the number of iterations less than one: iterative dilation until
940
+ the result does not change anymore.
941
+
942
+ The succession of an erosion and propagation inside the original image
943
+ can be used instead of an *opening* for deleting small objects while
944
+ keeping the contours of larger objects untouched.
945
+
946
+ References
947
+ ----------
948
+ .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
949
+ .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
950
+ image processing", 1998
951
+ ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
952
+
953
+ Examples
954
+ --------
955
+ >>> from scipy import ndimage
956
+ >>> import numpy as np
957
+ >>> input = np.zeros((8, 8), dtype=int)
958
+ >>> input[2, 2] = 1
959
+ >>> mask = np.zeros((8, 8), dtype=int)
960
+ >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
961
+ >>> input
962
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
963
+ [0, 0, 0, 0, 0, 0, 0, 0],
964
+ [0, 0, 1, 0, 0, 0, 0, 0],
965
+ [0, 0, 0, 0, 0, 0, 0, 0],
966
+ [0, 0, 0, 0, 0, 0, 0, 0],
967
+ [0, 0, 0, 0, 0, 0, 0, 0],
968
+ [0, 0, 0, 0, 0, 0, 0, 0],
969
+ [0, 0, 0, 0, 0, 0, 0, 0]])
970
+ >>> mask
971
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
972
+ [0, 1, 1, 1, 0, 0, 0, 0],
973
+ [0, 1, 1, 1, 0, 0, 0, 0],
974
+ [0, 1, 1, 1, 0, 0, 0, 0],
975
+ [0, 0, 0, 0, 1, 0, 0, 0],
976
+ [0, 0, 0, 0, 0, 0, 0, 0],
977
+ [0, 0, 0, 0, 0, 0, 1, 1],
978
+ [0, 0, 0, 0, 0, 0, 1, 1]])
979
+ >>> ndimage.binary_propagation(input, mask=mask).astype(int)
980
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
981
+ [0, 1, 1, 1, 0, 0, 0, 0],
982
+ [0, 1, 1, 1, 0, 0, 0, 0],
983
+ [0, 1, 1, 1, 0, 0, 0, 0],
984
+ [0, 0, 0, 0, 0, 0, 0, 0],
985
+ [0, 0, 0, 0, 0, 0, 0, 0],
986
+ [0, 0, 0, 0, 0, 0, 0, 0],
987
+ [0, 0, 0, 0, 0, 0, 0, 0]])
988
+ >>> ndimage.binary_propagation(input, mask=mask,\\
989
+ ... structure=np.ones((3,3))).astype(int)
990
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
991
+ [0, 1, 1, 1, 0, 0, 0, 0],
992
+ [0, 1, 1, 1, 0, 0, 0, 0],
993
+ [0, 1, 1, 1, 0, 0, 0, 0],
994
+ [0, 0, 0, 0, 1, 0, 0, 0],
995
+ [0, 0, 0, 0, 0, 0, 0, 0],
996
+ [0, 0, 0, 0, 0, 0, 0, 0],
997
+ [0, 0, 0, 0, 0, 0, 0, 0]])
998
+
999
+ >>> # Comparison between opening and erosion+propagation
1000
+ >>> a = np.zeros((6,6), dtype=int)
1001
+ >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
1002
+ >>> a
1003
+ array([[1, 0, 0, 0, 0, 0],
1004
+ [0, 0, 0, 0, 0, 0],
1005
+ [0, 0, 1, 1, 1, 0],
1006
+ [0, 0, 1, 1, 1, 0],
1007
+ [0, 0, 1, 1, 1, 0],
1008
+ [0, 0, 0, 0, 0, 1]])
1009
+ >>> ndimage.binary_opening(a).astype(int)
1010
+ array([[0, 0, 0, 0, 0, 0],
1011
+ [0, 0, 0, 0, 0, 0],
1012
+ [0, 0, 0, 1, 0, 0],
1013
+ [0, 0, 1, 1, 1, 0],
1014
+ [0, 0, 0, 1, 0, 0],
1015
+ [0, 0, 0, 0, 0, 0]])
1016
+ >>> b = ndimage.binary_erosion(a)
1017
+ >>> b.astype(int)
1018
+ array([[0, 0, 0, 0, 0, 0],
1019
+ [0, 0, 0, 0, 0, 0],
1020
+ [0, 0, 0, 0, 0, 0],
1021
+ [0, 0, 0, 1, 0, 0],
1022
+ [0, 0, 0, 0, 0, 0],
1023
+ [0, 0, 0, 0, 0, 0]])
1024
+ >>> ndimage.binary_propagation(b, mask=a).astype(int)
1025
+ array([[0, 0, 0, 0, 0, 0],
1026
+ [0, 0, 0, 0, 0, 0],
1027
+ [0, 0, 1, 1, 1, 0],
1028
+ [0, 0, 1, 1, 1, 0],
1029
+ [0, 0, 1, 1, 1, 0],
1030
+ [0, 0, 0, 0, 0, 0]])
1031
+
1032
+ """
1033
+ return binary_dilation(input, structure, -1, mask, output,
1034
+ border_value, origin)
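As the one-line body above suggests, propagation is simply dilation repeated until the result stops changing; a quick illustrative check with hypothetical toy arrays:

import numpy as np
from scipy import ndimage

region = np.zeros((6, 6), dtype=bool)
region[1:5, 1:5] = True                # region the seed may grow into
seed = np.zeros_like(region)
seed[2, 2] = True

assert np.array_equal(
    ndimage.binary_propagation(seed, mask=region),
    ndimage.binary_dilation(seed, mask=region, iterations=-1))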
1035
+
1036
+
1037
+ def binary_fill_holes(input, structure=None, output=None, origin=0):
1038
+ """
1039
+ Fill the holes in binary objects.
1040
+
1041
+
1042
+ Parameters
1043
+ ----------
1044
+ input : array_like
1045
+ N-D binary array with holes to be filled
1046
+ structure : array_like, optional
1047
+ Structuring element used in the computation; large-size elements
1048
+ make computations faster but may miss holes separated from the
1049
+ background by thin regions. The default element (with a square
1050
+ connectivity equal to one) yields the intuitive result where all
1051
+ holes in the input have been filled.
1052
+ output : ndarray, optional
1053
+ Array of the same shape as input, into which the output is placed.
1054
+ By default, a new array is created.
1055
+ origin : int, tuple of ints, optional
1056
+ Position of the structuring element.
1057
+
1058
+ Returns
1059
+ -------
1060
+ out : ndarray
1061
+ Transformation of the initial image `input` where holes have been
1062
+ filled.
1063
+
1064
+ See Also
1065
+ --------
1066
+ binary_dilation, binary_propagation, label
1067
+
1068
+ Notes
1069
+ -----
1070
+ The algorithm used in this function consists of invading the complement
1071
+ of the shapes in `input` from the outer boundary of the image,
1072
+ using binary dilations. Holes are not connected to the boundary and are
1073
+ therefore not invaded. The result is the complementary subset of the
1074
+ invaded region.
1075
+
1076
+ References
1077
+ ----------
1078
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
1079
+
1080
+
1081
+ Examples
1082
+ --------
1083
+ >>> from scipy import ndimage
1084
+ >>> import numpy as np
1085
+ >>> a = np.zeros((5, 5), dtype=int)
1086
+ >>> a[1:4, 1:4] = 1
1087
+ >>> a[2,2] = 0
1088
+ >>> a
1089
+ array([[0, 0, 0, 0, 0],
1090
+ [0, 1, 1, 1, 0],
1091
+ [0, 1, 0, 1, 0],
1092
+ [0, 1, 1, 1, 0],
1093
+ [0, 0, 0, 0, 0]])
1094
+ >>> ndimage.binary_fill_holes(a).astype(int)
1095
+ array([[0, 0, 0, 0, 0],
1096
+ [0, 1, 1, 1, 0],
1097
+ [0, 1, 1, 1, 0],
1098
+ [0, 1, 1, 1, 0],
1099
+ [0, 0, 0, 0, 0]])
1100
+ >>> # Too big structuring element
1101
+ >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
1102
+ array([[0, 0, 0, 0, 0],
1103
+ [0, 1, 1, 1, 0],
1104
+ [0, 1, 0, 1, 0],
1105
+ [0, 1, 1, 1, 0],
1106
+ [0, 0, 0, 0, 0]])
1107
+
1108
+ """
1109
+ mask = numpy.logical_not(input)
1110
+ tmp = numpy.zeros(mask.shape, bool)
1111
+ inplace = isinstance(output, numpy.ndarray)
1112
+ if inplace:
1113
+ binary_dilation(tmp, structure, -1, mask, output, 1, origin)
1114
+ numpy.logical_not(output, output)
1115
+ else:
1116
+ output = binary_dilation(tmp, structure, -1, mask, None, 1,
1117
+ origin)
1118
+ numpy.logical_not(output, output)
1119
+ return output
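A sketch of the invasion idea described in the Notes above, seeding explicitly from in-array border pixels rather than through the `border_value=1` trick used in the code; the toy array is illustrative only:

import numpy as np
from scipy import ndimage

a = np.zeros((5, 5), dtype=bool)
a[1:4, 1:4] = True
a[2, 2] = False                        # the hole

background = ~a
seed = np.zeros_like(a)
seed[0, :] = seed[-1, :] = seed[:, 0] = seed[:, -1] = True
seed &= background                     # background pixels on the border
outside = ndimage.binary_propagation(seed, mask=background)
filled = ~outside                      # not reached => object or enclosed hole
assert np.array_equal(filled, ndimage.binary_fill_holes(a))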
1120
+
1121
+
1122
+ def grey_erosion(input, size=None, footprint=None, structure=None,
1123
+ output=None, mode="reflect", cval=0.0, origin=0):
1124
+ """
1125
+ Calculate a greyscale erosion, using either a structuring element,
1126
+ or a footprint corresponding to a flat structuring element.
1127
+
1128
+ Grayscale erosion is a mathematical morphology operation. For the
1129
+ simple case of a full and flat structuring element, it can be viewed
1130
+ as a minimum filter over a sliding window.
1131
+
1132
+ Parameters
1133
+ ----------
1134
+ input : array_like
1135
+ Array over which the grayscale erosion is to be computed.
1136
+ size : tuple of ints
1137
+ Shape of a flat and full structuring element used for the grayscale
1138
+ erosion. Optional if `footprint` or `structure` is provided.
1139
+ footprint : array of ints, optional
1140
+ Positions of non-infinite elements of a flat structuring element
1141
+ used for the grayscale erosion. Non-zero values give the set of
1142
+ neighbors of the center over which the minimum is chosen.
1143
+ structure : array of ints, optional
1144
+ Structuring element used for the grayscale erosion. `structure`
1145
+ may be a non-flat structuring element.
1146
+ output : array, optional
1147
+ An array used for storing the output of the erosion may be provided.
1148
+ mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
1149
+ The `mode` parameter determines how the array borders are
1150
+ handled, where `cval` is the value when mode is equal to
1151
+ 'constant'. Default is 'reflect'
1152
+ cval : scalar, optional
1153
+ Value to fill past edges of input if `mode` is 'constant'. Default
1154
+ is 0.0.
1155
+ origin : scalar, optional
1156
+ The `origin` parameter controls the placement of the filter.
1157
+ Default 0
1158
+
1159
+ Returns
1160
+ -------
1161
+ output : ndarray
1162
+ Grayscale erosion of `input`.
1163
+
1164
+ See Also
1165
+ --------
1166
+ binary_erosion, grey_dilation, grey_opening, grey_closing
1167
+ generate_binary_structure, minimum_filter
1168
+
1169
+ Notes
1170
+ -----
1171
+ The grayscale erosion of an image input by a structuring element s defined
1172
+ over a domain E is given by:
1173
+
1174
+ (input - s)(x) = min {input(y) - s(x-y), for y in E}
1175
+
1176
+ In particular, for structuring elements defined as
1177
+ s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
1178
+ input image inside a sliding window defined by E.
1179
+
1180
+ Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
1181
+
1182
+ References
1183
+ ----------
1184
+ .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
1185
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
1186
+
1187
+ Examples
1188
+ --------
1189
+ >>> from scipy import ndimage
1190
+ >>> import numpy as np
1191
+ >>> a = np.zeros((7,7), dtype=int)
1192
+ >>> a[1:6, 1:6] = 3
1193
+ >>> a[4,4] = 2; a[2,3] = 1
1194
+ >>> a
1195
+ array([[0, 0, 0, 0, 0, 0, 0],
1196
+ [0, 3, 3, 3, 3, 3, 0],
1197
+ [0, 3, 3, 1, 3, 3, 0],
1198
+ [0, 3, 3, 3, 3, 3, 0],
1199
+ [0, 3, 3, 3, 2, 3, 0],
1200
+ [0, 3, 3, 3, 3, 3, 0],
1201
+ [0, 0, 0, 0, 0, 0, 0]])
1202
+ >>> ndimage.grey_erosion(a, size=(3,3))
1203
+ array([[0, 0, 0, 0, 0, 0, 0],
1204
+ [0, 0, 0, 0, 0, 0, 0],
1205
+ [0, 0, 1, 1, 1, 0, 0],
1206
+ [0, 0, 1, 1, 1, 0, 0],
1207
+ [0, 0, 3, 2, 2, 0, 0],
1208
+ [0, 0, 0, 0, 0, 0, 0],
1209
+ [0, 0, 0, 0, 0, 0, 0]])
1210
+ >>> footprint = ndimage.generate_binary_structure(2, 1)
1211
+ >>> footprint
1212
+ array([[False, True, False],
1213
+ [ True, True, True],
1214
+ [False, True, False]], dtype=bool)
1215
+ >>> # Diagonally-connected elements are not considered neighbors
1216
+ >>> ndimage.grey_erosion(a, footprint=footprint)
1217
+ array([[0, 0, 0, 0, 0, 0, 0],
1218
+ [0, 0, 0, 0, 0, 0, 0],
1219
+ [0, 0, 1, 1, 1, 0, 0],
1220
+ [0, 0, 3, 1, 2, 0, 0],
1221
+ [0, 0, 3, 2, 2, 0, 0],
1222
+ [0, 0, 0, 0, 0, 0, 0],
1223
+ [0, 0, 0, 0, 0, 0, 0]])
1224
+
1225
+ """
1226
+ if size is None and footprint is None and structure is None:
1227
+ raise ValueError("size, footprint, or structure must be specified")
1228
+
1229
+ return _filters._min_or_max_filter(input, size, footprint, structure,
1230
+ output, mode, cval, origin, 1)
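For a flat, full structuring element the call above reduces to a plain minimum filter, as the Notes state; a quick check on an arbitrary (illustrative) array:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(0).integers(0, 10, size=(6, 6))  # arbitrary data
assert np.array_equal(ndimage.grey_erosion(a, size=(3, 3)),
                      ndimage.minimum_filter(a, size=3))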
1231
+
1232
+
1233
+ def grey_dilation(input, size=None, footprint=None, structure=None,
1234
+ output=None, mode="reflect", cval=0.0, origin=0):
1235
+ """
1236
+ Calculate a greyscale dilation, using either a structuring element,
1237
+ or a footprint corresponding to a flat structuring element.
1238
+
1239
+ Grayscale dilation is a mathematical morphology operation. For the
1240
+ simple case of a full and flat structuring element, it can be viewed
1241
+ as a maximum filter over a sliding window.
1242
+
1243
+ Parameters
1244
+ ----------
1245
+ input : array_like
1246
+ Array over which the grayscale dilation is to be computed.
1247
+ size : tuple of ints
1248
+ Shape of a flat and full structuring element used for the grayscale
1249
+ dilation. Optional if `footprint` or `structure` is provided.
1250
+ footprint : array of ints, optional
1251
+ Positions of non-infinite elements of a flat structuring element
1252
+ used for the grayscale dilation. Non-zero values give the set of
1253
+ neighbors of the center over which the maximum is chosen.
1254
+ structure : array of ints, optional
1255
+ Structuring element used for the grayscale dilation. `structure`
1256
+ may be a non-flat structuring element.
1257
+ output : array, optional
1258
+ An array used for storing the output of the dilation may be provided.
1259
+ mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
1260
+ The `mode` parameter determines how the array borders are
1261
+ handled, where `cval` is the value when mode is equal to
1262
+ 'constant'. Default is 'reflect'
1263
+ cval : scalar, optional
1264
+ Value to fill past edges of input if `mode` is 'constant'. Default
1265
+ is 0.0.
1266
+ origin : scalar, optional
1267
+ The `origin` parameter controls the placement of the filter.
1268
+ Default 0
1269
+
1270
+ Returns
1271
+ -------
1272
+ grey_dilation : ndarray
1273
+ Grayscale dilation of `input`.
1274
+
1275
+ See Also
1276
+ --------
1277
+ binary_dilation, grey_erosion, grey_closing, grey_opening
1278
+ generate_binary_structure, maximum_filter
1279
+
1280
+ Notes
1281
+ -----
1282
+ The grayscale dilation of an image input by a structuring element s defined
1283
+ over a domain E is given by:
1284
+
1285
+ (input+s)(x) = max {input(y) + s(x-y), for y in E}
1286
+
1287
+ In particular, for structuring elements defined as
1288
+ s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
1289
+ input image inside a sliding window defined by E.
1290
+
1291
+ Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
1292
+
1293
+ References
1294
+ ----------
1295
+ .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
1296
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
1297
+
1298
+ Examples
1299
+ --------
1300
+ >>> from scipy import ndimage
1301
+ >>> import numpy as np
1302
+ >>> a = np.zeros((7,7), dtype=int)
1303
+ >>> a[2:5, 2:5] = 1
1304
+ >>> a[4,4] = 2; a[2,3] = 3
1305
+ >>> a
1306
+ array([[0, 0, 0, 0, 0, 0, 0],
1307
+ [0, 0, 0, 0, 0, 0, 0],
1308
+ [0, 0, 1, 3, 1, 0, 0],
1309
+ [0, 0, 1, 1, 1, 0, 0],
1310
+ [0, 0, 1, 1, 2, 0, 0],
1311
+ [0, 0, 0, 0, 0, 0, 0],
1312
+ [0, 0, 0, 0, 0, 0, 0]])
1313
+ >>> ndimage.grey_dilation(a, size=(3,3))
1314
+ array([[0, 0, 0, 0, 0, 0, 0],
1315
+ [0, 1, 3, 3, 3, 1, 0],
1316
+ [0, 1, 3, 3, 3, 1, 0],
1317
+ [0, 1, 3, 3, 3, 2, 0],
1318
+ [0, 1, 1, 2, 2, 2, 0],
1319
+ [0, 1, 1, 2, 2, 2, 0],
1320
+ [0, 0, 0, 0, 0, 0, 0]])
1321
+ >>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
1322
+ array([[0, 0, 0, 0, 0, 0, 0],
1323
+ [0, 1, 3, 3, 3, 1, 0],
1324
+ [0, 1, 3, 3, 3, 1, 0],
1325
+ [0, 1, 3, 3, 3, 2, 0],
1326
+ [0, 1, 1, 2, 2, 2, 0],
1327
+ [0, 1, 1, 2, 2, 2, 0],
1328
+ [0, 0, 0, 0, 0, 0, 0]])
1329
+ >>> s = ndimage.generate_binary_structure(2,1)
1330
+ >>> s
1331
+ array([[False, True, False],
1332
+ [ True, True, True],
1333
+ [False, True, False]], dtype=bool)
1334
+ >>> ndimage.grey_dilation(a, footprint=s)
1335
+ array([[0, 0, 0, 0, 0, 0, 0],
1336
+ [0, 0, 1, 3, 1, 0, 0],
1337
+ [0, 1, 3, 3, 3, 1, 0],
1338
+ [0, 1, 1, 3, 2, 1, 0],
1339
+ [0, 1, 1, 2, 2, 2, 0],
1340
+ [0, 0, 1, 1, 2, 0, 0],
1341
+ [0, 0, 0, 0, 0, 0, 0]])
1342
+ >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
1343
+ array([[1, 1, 1, 1, 1, 1, 1],
1344
+ [1, 2, 4, 4, 4, 2, 1],
1345
+ [1, 2, 4, 4, 4, 2, 1],
1346
+ [1, 2, 4, 4, 4, 3, 1],
1347
+ [1, 2, 2, 3, 3, 3, 1],
1348
+ [1, 2, 2, 3, 3, 3, 1],
1349
+ [1, 1, 1, 1, 1, 1, 1]])
1350
+
1351
+ """
1352
+ if size is None and footprint is None and structure is None:
1353
+ raise ValueError("size, footprint, or structure must be specified")
1354
+ if structure is not None:
1355
+ structure = numpy.asarray(structure)
1356
+ structure = structure[tuple([slice(None, None, -1)] *
1357
+ structure.ndim)]
1358
+ if footprint is not None:
1359
+ footprint = numpy.asarray(footprint)
1360
+ footprint = footprint[tuple([slice(None, None, -1)] *
1361
+ footprint.ndim)]
1362
+
1363
+ input = numpy.asarray(input)
1364
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
1365
+ for ii in range(len(origin)):
1366
+ origin[ii] = -origin[ii]
1367
+ if footprint is not None:
1368
+ sz = footprint.shape[ii]
1369
+ elif structure is not None:
1370
+ sz = structure.shape[ii]
1371
+ elif numpy.isscalar(size):
1372
+ sz = size
1373
+ else:
1374
+ sz = size[ii]
1375
+ if not sz & 1:
1376
+ origin[ii] -= 1
1377
+
1378
+ return _filters._min_or_max_filter(input, size, footprint, structure,
1379
+ output, mode, cval, origin, 0)
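Dually, for a flat, full structuring element the dilation above is a maximum filter; a quick check on an arbitrary (illustrative) array:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(1).integers(0, 10, size=(6, 6))  # arbitrary data
assert np.array_equal(ndimage.grey_dilation(a, size=(3, 3)),
                      ndimage.maximum_filter(a, size=3))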
1380
+
1381
+
1382
+ def grey_opening(input, size=None, footprint=None, structure=None,
1383
+ output=None, mode="reflect", cval=0.0, origin=0):
1384
+ """
1385
+ Multidimensional grayscale opening.
1386
+
1387
+ A grayscale opening consists of the succession of a grayscale erosion
1388
+ and a grayscale dilation.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ input : array_like
1393
+ Array over which the grayscale opening is to be computed.
1394
+ size : tuple of ints
1395
+ Shape of a flat and full structuring element used for the grayscale
1396
+ opening. Optional if `footprint` or `structure` is provided.
1397
+ footprint : array of ints, optional
1398
+ Positions of non-infinite elements of a flat structuring element
1399
+ used for the grayscale opening.
1400
+ structure : array of ints, optional
1401
+ Structuring element used for the grayscale opening. `structure`
1402
+ may be a non-flat structuring element.
1403
+ output : array, optional
1404
+ An array used for storing the output of the opening may be provided.
1405
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
1406
+ The `mode` parameter determines how the array borders are
1407
+ handled, where `cval` is the value when mode is equal to
1408
+ 'constant'. Default is 'reflect'
1409
+ cval : scalar, optional
1410
+ Value to fill past edges of input if `mode` is 'constant'. Default
1411
+ is 0.0.
1412
+ origin : scalar, optional
1413
+ The `origin` parameter controls the placement of the filter.
1414
+ Default 0
1415
+
1416
+ Returns
1417
+ -------
1418
+ grey_opening : ndarray
1419
+ Result of the grayscale opening of `input` with `structure`.
1420
+
1421
+ See Also
1422
+ --------
1423
+ binary_opening, grey_dilation, grey_erosion, grey_closing
1424
+ generate_binary_structure
1425
+
1426
+ Notes
1427
+ -----
1428
+ The action of a grayscale opening with a flat structuring element amounts
1429
+ to smoothing high local maxima, whereas binary opening erases small objects.
1430
+
1431
+ References
1432
+ ----------
1433
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
1434
+
1435
+ Examples
1436
+ --------
1437
+ >>> from scipy import ndimage
1438
+ >>> import numpy as np
1439
+ >>> a = np.arange(36).reshape((6,6))
1440
+ >>> a[3, 3] = 50
1441
+ >>> a
1442
+ array([[ 0, 1, 2, 3, 4, 5],
1443
+ [ 6, 7, 8, 9, 10, 11],
1444
+ [12, 13, 14, 15, 16, 17],
1445
+ [18, 19, 20, 50, 22, 23],
1446
+ [24, 25, 26, 27, 28, 29],
1447
+ [30, 31, 32, 33, 34, 35]])
1448
+ >>> ndimage.grey_opening(a, size=(3,3))
1449
+ array([[ 0, 1, 2, 3, 4, 4],
1450
+ [ 6, 7, 8, 9, 10, 10],
1451
+ [12, 13, 14, 15, 16, 16],
1452
+ [18, 19, 20, 22, 22, 22],
1453
+ [24, 25, 26, 27, 28, 28],
1454
+ [24, 25, 26, 27, 28, 28]])
1455
+ >>> # Note that the local maximum a[3,3] has disappeared
1456
+
1457
+ """
1458
+ if (size is not None) and (footprint is not None):
1459
+ warnings.warn("ignoring size because footprint is set",
1460
+ UserWarning, stacklevel=2)
1461
+ tmp = grey_erosion(input, size, footprint, structure, None, mode,
1462
+ cval, origin)
1463
+ return grey_dilation(tmp, size, footprint, structure, output, mode,
1464
+ cval, origin)
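A short illustrative check of the erosion-then-dilation composition above, plus the fact that opening never raises values (consistent with the "smoothing high local maxima" description in the Notes); the random array is only an example:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(2).integers(0, 20, size=(7, 7))  # arbitrary data
opened = ndimage.grey_opening(a, size=(3, 3))
assert np.array_equal(opened, ndimage.grey_dilation(
    ndimage.grey_erosion(a, size=(3, 3)), size=(3, 3)))
assert np.all(opened <= a)   # bright details are flattened, never amplified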
1465
+
1466
+
1467
+ def grey_closing(input, size=None, footprint=None, structure=None,
1468
+ output=None, mode="reflect", cval=0.0, origin=0):
1469
+ """
1470
+ Multidimensional grayscale closing.
1471
+
1472
+ A grayscale closing consists of the succession of a grayscale dilation
1473
+ and a grayscale erosion.
1474
+
1475
+ Parameters
1476
+ ----------
1477
+ input : array_like
1478
+ Array over which the grayscale closing is to be computed.
1479
+ size : tuple of ints
1480
+ Shape of a flat and full structuring element used for the grayscale
1481
+ closing. Optional if `footprint` or `structure` is provided.
1482
+ footprint : array of ints, optional
1483
+ Positions of non-infinite elements of a flat structuring element
1484
+ used for the grayscale closing.
1485
+ structure : array of ints, optional
1486
+ Structuring element used for the grayscale closing. `structure`
1487
+ may be a non-flat structuring element.
1488
+ output : array, optional
1489
+ An array used for storing the output of the closing may be provided.
1490
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
1491
+ The `mode` parameter determines how the array borders are
1492
+ handled, where `cval` is the value when mode is equal to
1493
+ 'constant'. Default is 'reflect'
1494
+ cval : scalar, optional
1495
+ Value to fill past edges of input if `mode` is 'constant'. Default
1496
+ is 0.0.
1497
+ origin : scalar, optional
1498
+ The `origin` parameter controls the placement of the filter.
1499
+ Default 0
1500
+
1501
+ Returns
1502
+ -------
1503
+ grey_closing : ndarray
1504
+ Result of the grayscale closing of `input` with `structure`.
1505
+
1506
+ See Also
1507
+ --------
1508
+ binary_closing, grey_dilation, grey_erosion, grey_opening,
1509
+ generate_binary_structure
1510
+
1511
+ Notes
1512
+ -----
1513
+ The action of a grayscale closing with a flat structuring element amounts
1514
+ to smoothing deep local minima, whereas binary closing fills small holes.
1515
+
1516
+ References
1517
+ ----------
1518
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
1519
+
1520
+ Examples
1521
+ --------
1522
+ >>> from scipy import ndimage
1523
+ >>> import numpy as np
1524
+ >>> a = np.arange(36).reshape((6,6))
1525
+ >>> a[3,3] = 0
1526
+ >>> a
1527
+ array([[ 0, 1, 2, 3, 4, 5],
1528
+ [ 6, 7, 8, 9, 10, 11],
1529
+ [12, 13, 14, 15, 16, 17],
1530
+ [18, 19, 20, 0, 22, 23],
1531
+ [24, 25, 26, 27, 28, 29],
1532
+ [30, 31, 32, 33, 34, 35]])
1533
+ >>> ndimage.grey_closing(a, size=(3,3))
1534
+ array([[ 7, 7, 8, 9, 10, 11],
1535
+ [ 7, 7, 8, 9, 10, 11],
1536
+ [13, 13, 14, 15, 16, 17],
1537
+ [19, 19, 20, 20, 22, 23],
1538
+ [25, 25, 26, 27, 28, 29],
1539
+ [31, 31, 32, 33, 34, 35]])
1540
+ >>> # Note that the local minimum a[3,3] has disappeared
1541
+
1542
+ """
1543
+ if (size is not None) and (footprint is not None):
1544
+ warnings.warn("ignoring size because footprint is set",
1545
+ UserWarning, stacklevel=2)
1546
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
1547
+ cval, origin)
1548
+ return grey_erosion(tmp, size, footprint, structure, output, mode,
1549
+ cval, origin)
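The dual check for closing (a sketch on an arbitrary, illustrative array):

import numpy as np
from scipy import ndimage

a = np.random.default_rng(3).integers(0, 20, size=(7, 7))  # arbitrary data
closed = ndimage.grey_closing(a, size=(3, 3))
assert np.array_equal(closed, ndimage.grey_erosion(
    ndimage.grey_dilation(a, size=(3, 3)), size=(3, 3)))
assert np.all(closed >= a)   # dark details are filled, never deepened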
1550
+
1551
+
1552
+ def morphological_gradient(input, size=None, footprint=None, structure=None,
1553
+ output=None, mode="reflect", cval=0.0, origin=0):
1554
+ """
1555
+ Multidimensional morphological gradient.
1556
+
1557
+ The morphological gradient is calculated as the difference between a
1558
+ dilation and an erosion of the input with a given structuring element.
1559
+
1560
+ Parameters
1561
+ ----------
1562
+ input : array_like
1563
+ Array over which to compute the morphological gradient.
1564
+ size : tuple of ints
1565
+ Shape of a flat and full structuring element used for the mathematical
1566
+ morphology operations. Optional if `footprint` or `structure` is
1567
+ provided. A larger `size` yields a more blurred gradient.
1568
+ footprint : array of ints, optional
1569
+ Positions of non-infinite elements of a flat structuring element
1570
+ used for the morphology operations. Larger footprints
1571
+ give a more blurred morphological gradient.
1572
+ structure : array of ints, optional
1573
+ Structuring element used for the morphology operations.
1574
+ `structure` may be a non-flat structuring element.
1575
+ output : array, optional
1576
+ An array used for storing the output of the morphological gradient
1577
+ may be provided.
1578
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
1579
+ The `mode` parameter determines how the array borders are
1580
+ handled, where `cval` is the value when mode is equal to
1581
+ 'constant'. Default is 'reflect'
1582
+ cval : scalar, optional
1583
+ Value to fill past edges of input if `mode` is 'constant'. Default
1584
+ is 0.0.
1585
+ origin : scalar, optional
1586
+ The `origin` parameter controls the placement of the filter.
1587
+ Default 0
1588
+
1589
+ Returns
1590
+ -------
1591
+ morphological_gradient : ndarray
1592
+ Morphological gradient of `input`.
1593
+
1594
+ See Also
1595
+ --------
1596
+ grey_dilation, grey_erosion, gaussian_gradient_magnitude
1597
+
1598
+ Notes
1599
+ -----
1600
+ For a flat structuring element, the morphological gradient
1601
+ computed at a given point corresponds to the maximal difference
1602
+ between elements of the input among the elements covered by the
1603
+ structuring element centered on the point.
1604
+
1605
+ References
1606
+ ----------
1607
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
1608
+
1609
+ Examples
1610
+ --------
1611
+ >>> from scipy import ndimage
1612
+ >>> import numpy as np
1613
+ >>> a = np.zeros((7,7), dtype=int)
1614
+ >>> a[2:5, 2:5] = 1
1615
+ >>> ndimage.morphological_gradient(a, size=(3,3))
1616
+ array([[0, 0, 0, 0, 0, 0, 0],
1617
+ [0, 1, 1, 1, 1, 1, 0],
1618
+ [0, 1, 1, 1, 1, 1, 0],
1619
+ [0, 1, 1, 0, 1, 1, 0],
1620
+ [0, 1, 1, 1, 1, 1, 0],
1621
+ [0, 1, 1, 1, 1, 1, 0],
1622
+ [0, 0, 0, 0, 0, 0, 0]])
1623
+ >>> # The morphological gradient is computed as the difference
1624
+ >>> # between a dilation and an erosion
1625
+ >>> ndimage.grey_dilation(a, size=(3,3)) -\\
1626
+ ... ndimage.grey_erosion(a, size=(3,3))
1627
+ array([[0, 0, 0, 0, 0, 0, 0],
1628
+ [0, 1, 1, 1, 1, 1, 0],
1629
+ [0, 1, 1, 1, 1, 1, 0],
1630
+ [0, 1, 1, 0, 1, 1, 0],
1631
+ [0, 1, 1, 1, 1, 1, 0],
1632
+ [0, 1, 1, 1, 1, 1, 0],
1633
+ [0, 0, 0, 0, 0, 0, 0]])
1634
+ >>> a = np.zeros((7,7), dtype=int)
1635
+ >>> a[2:5, 2:5] = 1
1636
+ >>> a[4,4] = 2; a[2,3] = 3
1637
+ >>> a
1638
+ array([[0, 0, 0, 0, 0, 0, 0],
1639
+ [0, 0, 0, 0, 0, 0, 0],
1640
+ [0, 0, 1, 3, 1, 0, 0],
1641
+ [0, 0, 1, 1, 1, 0, 0],
1642
+ [0, 0, 1, 1, 2, 0, 0],
1643
+ [0, 0, 0, 0, 0, 0, 0],
1644
+ [0, 0, 0, 0, 0, 0, 0]])
1645
+ >>> ndimage.morphological_gradient(a, size=(3,3))
1646
+ array([[0, 0, 0, 0, 0, 0, 0],
1647
+ [0, 1, 3, 3, 3, 1, 0],
1648
+ [0, 1, 3, 3, 3, 1, 0],
1649
+ [0, 1, 3, 2, 3, 2, 0],
1650
+ [0, 1, 1, 2, 2, 2, 0],
1651
+ [0, 1, 1, 2, 2, 2, 0],
1652
+ [0, 0, 0, 0, 0, 0, 0]])
1653
+
1654
+ """
1655
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
1656
+ cval, origin)
1657
+ if isinstance(output, numpy.ndarray):
1658
+ grey_erosion(input, size, footprint, structure, output, mode,
1659
+ cval, origin)
1660
+ return numpy.subtract(tmp, output, output)
1661
+ else:
1662
+ return (tmp - grey_erosion(input, size, footprint, structure,
1663
+ None, mode, cval, origin))
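For a flat, full structuring element the gradient computed above equals a maximum filter minus a minimum filter, matching the "maximal difference over the window" description in the Notes; a quick illustrative check:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(4).integers(0, 10, size=(6, 6))  # arbitrary data
assert np.array_equal(ndimage.morphological_gradient(a, size=(3, 3)),
                      ndimage.maximum_filter(a, size=3)
                      - ndimage.minimum_filter(a, size=3))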
1664
+
1665
+
1666
+ def morphological_laplace(input, size=None, footprint=None,
1667
+ structure=None, output=None,
1668
+ mode="reflect", cval=0.0, origin=0):
1669
+ """
1670
+ Multidimensional morphological laplace.
1671
+
1672
+ Parameters
1673
+ ----------
1674
+ input : array_like
1675
+ Input.
1676
+ size : int or sequence of ints, optional
1677
+ See `structure`.
1678
+ footprint : bool or ndarray, optional
1679
+ See `structure`.
1680
+ structure : array of ints, optional
1681
+ Either `size`, `footprint`, or the `structure` must be provided.
1682
+ output : ndarray, optional
1683
+ An output array can optionally be provided.
1684
+ mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
1685
+ The mode parameter determines how the array borders are handled.
1686
+ For 'constant' mode, values beyond borders are set to be `cval`.
1687
+ Default is 'reflect'.
1688
+ cval : scalar, optional
1689
+ Value to fill past edges of input if mode is 'constant'.
1690
+ Default is 0.0
1691
+ origin : scalar, optional
1692
+ The origin parameter controls the placement of the filter.
1693
+
1694
+ Returns
1695
+ -------
1696
+ morphological_laplace : ndarray
1697
+ Output
1698
+
1699
+ """
1700
+ tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
1701
+ cval, origin)
1702
+ if isinstance(output, numpy.ndarray):
1703
+ grey_erosion(input, size, footprint, structure, output, mode,
1704
+ cval, origin)
1705
+ numpy.add(tmp1, output, output)
1706
+ numpy.subtract(output, input, output)
1707
+ return numpy.subtract(output, input, output)
1708
+ else:
1709
+ tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
1710
+ cval, origin)
1711
+ numpy.add(tmp1, tmp2, tmp2)
1712
+ numpy.subtract(tmp2, input, tmp2)
1713
+ numpy.subtract(tmp2, input, tmp2)
1714
+ return tmp2
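`morphological_laplace` has no docstring example; reading the body above, when no output array is supplied the result is dilation + erosion - 2*input. A hedged check on an arbitrary, illustrative array:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(5).integers(0, 10, size=(6, 6))  # arbitrary data
lap = ndimage.morphological_laplace(a, size=(3, 3))
manual = (ndimage.grey_dilation(a, size=(3, 3))
          + ndimage.grey_erosion(a, size=(3, 3)) - 2 * a)
assert np.array_equal(lap, manual)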
1715
+
1716
+
1717
+ def white_tophat(input, size=None, footprint=None, structure=None,
1718
+ output=None, mode="reflect", cval=0.0, origin=0):
1719
+ """
1720
+ Multidimensional white tophat filter.
1721
+
1722
+ Parameters
1723
+ ----------
1724
+ input : array_like
1725
+ Input.
1726
+ size : tuple of ints
1727
+ Shape of a flat and full structuring element used for the filter.
1728
+ Optional if `footprint` or `structure` is provided.
1729
+ footprint : array of ints, optional
1730
+ Positions of elements of a flat structuring element
1731
+ used for the white tophat filter.
1732
+ structure : array of ints, optional
1733
+ Structuring element used for the filter. `structure`
1734
+ may be a non-flat structuring element.
1735
+ output : array, optional
1736
+ An array used for storing the output of the filter may be provided.
1737
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
1738
+ The `mode` parameter determines how the array borders are
1739
+ handled, where `cval` is the value when mode is equal to
1740
+ 'constant'. Default is 'reflect'
1741
+ cval : scalar, optional
1742
+ Value to fill past edges of input if `mode` is 'constant'.
1743
+ Default is 0.0.
1744
+ origin : scalar, optional
1745
+ The `origin` parameter controls the placement of the filter.
1746
+ Default is 0.
1747
+
1748
+ Returns
1749
+ -------
1750
+ output : ndarray
1751
+ Result of the filter of `input` with `structure`.
1752
+
1753
+ See Also
1754
+ --------
1755
+ black_tophat
1756
+
1757
+ Examples
1758
+ --------
1759
+ Subtract gray background from a bright peak.
1760
+
1761
+ >>> from scipy.ndimage import generate_binary_structure, white_tophat
1762
+ >>> import numpy as np
1763
+ >>> square = generate_binary_structure(rank=2, connectivity=3)
1764
+ >>> bright_on_gray = np.array([[2, 3, 3, 3, 2],
1765
+ ... [3, 4, 5, 4, 3],
1766
+ ... [3, 5, 9, 5, 3],
1767
+ ... [3, 4, 5, 4, 3],
1768
+ ... [2, 3, 3, 3, 2]])
1769
+ >>> white_tophat(input=bright_on_gray, structure=square)
1770
+ array([[0, 0, 0, 0, 0],
1771
+ [0, 0, 1, 0, 0],
1772
+ [0, 1, 5, 1, 0],
1773
+ [0, 0, 1, 0, 0],
1774
+ [0, 0, 0, 0, 0]])
1775
+
1776
+ """
1777
+ if (size is not None) and (footprint is not None):
1778
+ warnings.warn("ignoring size because footprint is set",
1779
+ UserWarning, stacklevel=2)
1780
+ tmp = grey_erosion(input, size, footprint, structure, None, mode,
1781
+ cval, origin)
1782
+ tmp = grey_dilation(tmp, size, footprint, structure, output, mode,
1783
+ cval, origin)
1784
+ if tmp is None:
1785
+ tmp = output
1786
+
1787
+ if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
1788
+ numpy.bitwise_xor(input, tmp, out=tmp)
1789
+ else:
1790
+ numpy.subtract(input, tmp, out=tmp)
1791
+ return tmp
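Since the two calls above compute a grayscale opening, the white tophat of a non-boolean input is simply the input minus its opening; a quick illustrative check:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(6).integers(0, 10, size=(6, 6))  # arbitrary data
assert np.array_equal(ndimage.white_tophat(a, size=(3, 3)),
                      a - ndimage.grey_opening(a, size=(3, 3)))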
1792
+
1793
+
1794
+ def black_tophat(input, size=None, footprint=None,
1795
+ structure=None, output=None, mode="reflect",
1796
+ cval=0.0, origin=0):
1797
+ """
1798
+ Multidimensional black tophat filter.
1799
+
1800
+ Parameters
1801
+ ----------
1802
+ input : array_like
1803
+ Input.
1804
+ size : tuple of ints, optional
1805
+ Shape of a flat and full structuring element used for the filter.
1806
+ Optional if `footprint` or `structure` is provided.
1807
+ footprint : array of ints, optional
1808
+ Positions of non-infinite elements of a flat structuring element
1809
+ used for the black tophat filter.
1810
+ structure : array of ints, optional
1811
+ Structuring element used for the filter. `structure`
1812
+ may be a non-flat structuring element.
1813
+ output : array, optional
1814
+ An array used for storing the output of the filter may be provided.
1815
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
1816
+ The `mode` parameter determines how the array borders are
1817
+ handled, where `cval` is the value when mode is equal to
1818
+ 'constant'. Default is 'reflect'
1819
+ cval : scalar, optional
1820
+ Value to fill past edges of input if `mode` is 'constant'. Default
1821
+ is 0.0.
1822
+ origin : scalar, optional
1823
+ The `origin` parameter controls the placement of the filter.
1824
+ Default 0
1825
+
1826
+ Returns
1827
+ -------
1828
+ black_tophat : ndarray
1829
+ Result of the filter of `input` with `structure`.
1830
+
1831
+ See Also
1832
+ --------
1833
+ white_tophat, grey_opening, grey_closing
1834
+
1835
+ Examples
1836
+ --------
1837
+ Change dark peak to bright peak and subtract background.
1838
+
1839
+ >>> from scipy.ndimage import generate_binary_structure, black_tophat
1840
+ >>> import numpy as np
1841
+ >>> square = generate_binary_structure(rank=2, connectivity=3)
1842
+ >>> dark_on_gray = np.array([[7, 6, 6, 6, 7],
1843
+ ... [6, 5, 4, 5, 6],
1844
+ ... [6, 4, 0, 4, 6],
1845
+ ... [6, 5, 4, 5, 6],
1846
+ ... [7, 6, 6, 6, 7]])
1847
+ >>> black_tophat(input=dark_on_gray, structure=square)
1848
+ array([[0, 0, 0, 0, 0],
1849
+ [0, 0, 1, 0, 0],
1850
+ [0, 1, 5, 1, 0],
1851
+ [0, 0, 1, 0, 0],
1852
+ [0, 0, 0, 0, 0]])
1853
+
1854
+ """
1855
+ if (size is not None) and (footprint is not None):
1856
+ warnings.warn("ignoring size because footprint is set",
1857
+ UserWarning, stacklevel=2)
1858
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
1859
+ cval, origin)
1860
+ tmp = grey_erosion(tmp, size, footprint, structure, output, mode,
1861
+ cval, origin)
1862
+ if tmp is None:
1863
+ tmp = output
1864
+
1865
+ if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
1866
+ numpy.bitwise_xor(tmp, input, out=tmp)
1867
+ else:
1868
+ numpy.subtract(tmp, input, out=tmp)
1869
+ return tmp
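Dually, for non-boolean input the black tophat is the grayscale closing minus the input:

import numpy as np
from scipy import ndimage

a = np.random.default_rng(7).integers(0, 10, size=(6, 6))  # arbitrary data
assert np.array_equal(ndimage.black_tophat(a, size=(3, 3)),
                      ndimage.grey_closing(a, size=(3, 3)) - a)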
1870
+
1871
+
1872
+ def distance_transform_bf(input, metric="euclidean", sampling=None,
1873
+ return_distances=True, return_indices=False,
1874
+ distances=None, indices=None):
1875
+ """
1876
+ Distance transform by a brute force algorithm.
1877
+
1878
+ This function calculates the distance transform of the `input`, by
1879
+ replacing each foreground (non-zero) element, with its
1880
+ shortest distance to the background (any zero-valued element).
1881
+
1882
+ In addition to the distance transform, the feature transform can
1883
+ be calculated. In this case the index of the closest background
1884
+ element to each foreground element is returned in a separate array.
1885
+
1886
+ Parameters
1887
+ ----------
1888
+ input : array_like
1889
+ Input
1890
+ metric : {'euclidean', 'taxicab', 'chessboard'}, optional
1891
+ 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
1892
+ The default is 'euclidean'.
1893
+ sampling : float, or sequence of float, optional
1894
+ This parameter is only used when `metric` is 'euclidean'.
1895
+ Spacing of elements along each dimension. If a sequence, must be of
1896
+ length equal to the input rank; if a single number, this is used for
1897
+ all axes. If not specified, a grid spacing of unity is implied.
1898
+ return_distances : bool, optional
1899
+ Whether to calculate the distance transform.
1900
+ Default is True.
1901
+ return_indices : bool, optional
1902
+ Whether to calculate the feature transform.
1903
+ Default is False.
1904
+ distances : ndarray, optional
1905
+ An output array to store the calculated distance transform, instead of
1906
+ returning it.
1907
+ `return_distances` must be True.
1908
+ It must be the same shape as `input`, and of type float64 if `metric`
1909
+ is 'euclidean', uint32 otherwise.
1910
+ indices : int32 ndarray, optional
1911
+ An output array to store the calculated feature transform, instead of
1912
+ returning it.
1913
+ `return_distances` must be True.
1914
+ Its shape must be `(input.ndim,) + input.shape`.
1915
+
1916
+ Returns
1917
+ -------
1918
+ distances : ndarray, optional
1919
+ The calculated distance transform. Returned only when
1920
+ `return_distances` is True and `distances` is not supplied.
1921
+ It will have the same shape as the input array.
1922
+ indices : int32 ndarray, optional
1923
+ The calculated feature transform. It has an input-shaped array for each
1924
+ dimension of the input. See distance_transform_edt documentation for an
1925
+ example.
1926
+ Returned only when `return_indices` is True and `indices` is not
1927
+ supplied.
1928
+
1929
+ See Also
1930
+ --------
1931
+ distance_transform_cdt : Faster distance transform for taxicab and
1932
+ chessboard metrics
1933
+ distance_transform_edt : Faster distance transform for euclidean metric
1934
+
1935
+ Notes
1936
+ -----
1937
+ This function employs a slow brute force algorithm. See also the
1938
+ function `distance_transform_cdt` for more efficient taxicab [1]_ and
1939
+ chessboard algorithms [2]_.
1940
+
1941
+ References
1942
+ ----------
1943
+ .. [1] Taxicab distance. Wikipedia, 2023.
1944
+ https://en.wikipedia.org/wiki/Taxicab_geometry
1945
+ .. [2] Chessboard distance. Wikipedia, 2023.
1946
+ https://en.wikipedia.org/wiki/Chebyshev_distance
1947
+
1948
+ Examples
1949
+ --------
1950
+ Import the necessary modules.
1951
+
1952
+ >>> import numpy as np
1953
+ >>> from scipy.ndimage import distance_transform_bf
1954
+ >>> import matplotlib.pyplot as plt
1955
+ >>> from mpl_toolkits.axes_grid1 import ImageGrid
1956
+
1957
+ First, we create a toy binary image.
1958
+
1959
+ >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
1960
+ ... # fill circular area with 1
1961
+ ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
1962
+ ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
1963
+ ... circle_shape = np.sqrt(circle) < radius
1964
+ ... image[circle_shape] = fillvalue
1965
+ ... return image
1966
+ >>> image = np.zeros((100, 100), dtype=np.uint8)
1967
+ >>> image[35:65, 20:80] = 1
1968
+ >>> image = add_circle(28, 65, 10, image)
1969
+ >>> image = add_circle(37, 30, 10, image)
1970
+ >>> image = add_circle(70, 45, 20, image)
1971
+ >>> image = add_circle(45, 80, 10, image)
1972
+
1973
+ Next, we set up the figure.
1974
+
1975
+ >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure
1976
+ >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3),
1977
+ ... label_mode="1", share_all=True,
1978
+ ... cbar_location="right", cbar_mode="each",
1979
+ ... cbar_size="7%", cbar_pad="2%")
1980
+ >>> for ax in grid:
1981
+ ... ax.axis('off') # remove axes from images
1982
+
1983
+ The top left image is the original binary image.
1984
+
1985
+ >>> binary_image = grid[0].imshow(image, cmap='gray')
1986
+ >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image)
1987
+ >>> cbar_binary_image.set_ticks([0, 1])
1988
+ >>> grid[0].set_title("Binary image: foreground in white")
1989
+
1990
+ The distance transform calculates the distance between foreground pixels
1991
+ and the image background according to a distance metric. Available metrics
1992
+ in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab``
1993
+ and ``chessboard``. The top right image contains the distance transform
1994
+ based on the ``euclidean`` metric.
1995
+
1996
+ >>> distance_transform_euclidean = distance_transform_bf(image)
1997
+ >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean,
1998
+ ... cmap='gray')
1999
+ >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform)
2000
+ >>> colorbar_ticks = [0, 10, 20]
2001
+ >>> cbar_euclidean.set_ticks(colorbar_ticks)
2002
+ >>> grid[1].set_title("Euclidean distance")
2003
+
2004
+ The lower left image contains the distance transform using the ``taxicab``
2005
+ metric.
2006
+
2007
+ >>> distance_transform_taxicab = distance_transform_bf(image,
2008
+ ... metric='taxicab')
2009
+ >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab,
2010
+ ... cmap='gray')
2011
+ >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation)
2012
+ >>> cbar_taxicab.set_ticks(colorbar_ticks)
2013
+ >>> grid[2].set_title("Taxicab distance")
2014
+
2015
+ Finally, the lower right image contains the distance transform using the
2016
+ ``chessboard`` metric.
2017
+
2018
+ >>> distance_transform_cb = distance_transform_bf(image,
2019
+ ... metric='chessboard')
2020
+ >>> chessboard_transformation = grid[3].imshow(distance_transform_cb,
2021
+ ... cmap='gray')
2022
+ >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation)
2023
+ >>> cbar_taxicab.set_ticks(colorbar_ticks)
2024
+ >>> grid[3].set_title("Chessboard distance")
2025
+ >>> plt.show()
2026
+
2027
+ """
2028
+ ft_inplace = isinstance(indices, numpy.ndarray)
2029
+ dt_inplace = isinstance(distances, numpy.ndarray)
2030
+ _distance_tranform_arg_check(
2031
+ dt_inplace, ft_inplace, return_distances, return_indices
2032
+ )
2033
+
2034
+ tmp1 = numpy.asarray(input) != 0
2035
+ struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
2036
+ tmp2 = binary_dilation(tmp1, struct)
2037
+ tmp2 = numpy.logical_xor(tmp1, tmp2)
2038
+ tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
2039
+ metric = metric.lower()
2040
+ if metric == 'euclidean':
2041
+ metric = 1
2042
+ elif metric in ['taxicab', 'cityblock', 'manhattan']:
2043
+ metric = 2
2044
+ elif metric == 'chessboard':
2045
+ metric = 3
2046
+ else:
2047
+ raise RuntimeError('distance metric not supported')
2048
+ if sampling is not None:
2049
+ sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
2050
+ sampling = numpy.asarray(sampling, dtype=numpy.float64)
2051
+ if not sampling.flags.contiguous:
2052
+ sampling = sampling.copy()
2053
+ if return_indices:
2054
+ ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
2055
+ else:
2056
+ ft = None
2057
+ if return_distances:
2058
+ if distances is None:
2059
+ if metric == 1:
2060
+ dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
2061
+ else:
2062
+ dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
2063
+ else:
2064
+ if distances.shape != tmp1.shape:
2065
+ raise RuntimeError('distances array has wrong shape')
2066
+ if metric == 1:
2067
+ if distances.dtype.type != numpy.float64:
2068
+ raise RuntimeError('distances array must be float64')
2069
+ else:
2070
+ if distances.dtype.type != numpy.uint32:
2071
+ raise RuntimeError('distances array must be uint32')
2072
+ dt = distances
2073
+ else:
2074
+ dt = None
2075
+
2076
+ _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
2077
+ if return_indices:
2078
+ if isinstance(indices, numpy.ndarray):
2079
+ if indices.dtype.type != numpy.int32:
2080
+ raise RuntimeError('indices array must be int32')
2081
+ if indices.shape != (tmp1.ndim,) + tmp1.shape:
2082
+ raise RuntimeError('indices array has wrong shape')
2083
+ tmp2 = indices
2084
+ else:
2085
+ tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
2086
+ ft = numpy.ravel(ft)
2087
+ for ii in range(tmp2.shape[0]):
2088
+ rtmp = numpy.ravel(tmp2[ii, ...])[ft]
2089
+ rtmp.shape = tmp1.shape
2090
+ tmp2[ii, ...] = rtmp
2091
+ ft = tmp2
2092
+
2093
+ # construct and return the result
2094
+ result = []
2095
+ if return_distances and not dt_inplace:
2096
+ result.append(dt)
2097
+ if return_indices and not ft_inplace:
2098
+ result.append(ft)
2099
+
2100
+ if len(result) == 2:
2101
+ return tuple(result)
2102
+ elif len(result) == 1:
2103
+ return result[0]
2104
+ else:
2105
+ return None
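A compact cross-check (no plotting; the toy array is illustrative): the brute-force Euclidean result should agree with the faster `distance_transform_edt`, while the other metrics return integer distances.

import numpy as np
from scipy import ndimage

a = np.zeros((5, 5), dtype=int)
a[1:4, 1:4] = 1                        # small foreground square

assert np.allclose(ndimage.distance_transform_bf(a),
                   ndimage.distance_transform_edt(a))
print(ndimage.distance_transform_bf(a, metric='taxicab').dtype)     # uint32
print(ndimage.distance_transform_bf(a, metric='chessboard').dtype)  # uint32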
2106
+
2107
+
2108
+ def distance_transform_cdt(input, metric='chessboard', return_distances=True,
2109
+ return_indices=False, distances=None, indices=None):
2110
+ """
2111
+ Distance transform for chamfer-type transforms.
2112
+
2113
+ This function calculates the distance transform of the `input`, by
2114
+ replacing each foreground (non-zero) element, with its
2115
+ shortest distance to the background (any zero-valued element).
2116
+
2117
+ In addition to the distance transform, the feature transform can
2118
+ be calculated. In this case the index of the closest background
2119
+ element to each foreground element is returned in a separate array.
2120
+
2121
+ Parameters
2122
+ ----------
2123
+ input : array_like
2124
+ Input. Values of 0 are treated as background.
2125
+ metric : {'chessboard', 'taxicab'} or array_like, optional
2126
+ The `metric` determines the type of chamfering that is done. If the
2127
+ `metric` is equal to 'taxicab' a structure is generated using
2128
+ `generate_binary_structure` with a squared distance equal to 1. If
2129
+ the `metric` is equal to 'chessboard', a structure is generated
2130
+ using `generate_binary_structure` with a squared distance equal to
2131
+ the dimensionality of the array. These choices correspond to the
2132
+ common interpretations of the 'taxicab' and the 'chessboard'
2133
+ distance metrics in two dimensions.
2134
+ A custom metric may be provided, in the form of a matrix where
2135
+ each dimension has a length of three.
2136
+ 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
2137
+ The default is 'chessboard'.
2138
+ return_distances : bool, optional
2139
+ Whether to calculate the distance transform.
2140
+ Default is True.
2141
+ return_indices : bool, optional
2142
+ Whether to calculate the feature transform.
2143
+ Default is False.
2144
+ distances : int32 ndarray, optional
2145
+ An output array to store the calculated distance transform, instead of
2146
+ returning it.
2147
+ `return_distances` must be True.
2148
+ It must be the same shape as `input`.
2149
+ indices : int32 ndarray, optional
2150
+ An output array to store the calculated feature transform, instead of
2151
+ returning it.
2152
+ `return_indices` must be True.
2153
+ Its shape must be `(input.ndim,) + input.shape`.
2154
+
2155
+ Returns
2156
+ -------
2157
+ distances : int32 ndarray, optional
2158
+ The calculated distance transform. Returned only when
2159
+ `return_distances` is True, and `distances` is not supplied.
2160
+ It will have the same shape as the input array.
2161
+ indices : int32 ndarray, optional
2162
+ The calculated feature transform. It has an input-shaped array for each
2163
+ dimension of the input. See distance_transform_edt documentation for an
2164
+ example.
2165
+ Returned only when `return_indices` is True, and `indices` is not
2166
+ supplied.
2167
+
2168
+ See Also
2169
+ --------
2170
+ distance_transform_edt : Fast distance transform for euclidean metric
2171
+ distance_transform_bf : Distance transform for different metrics using
2172
+ a slower brute force algorithm
2173
+
2174
+ Examples
2175
+ --------
2176
+ Import the necessary modules.
2177
+
2178
+ >>> import numpy as np
2179
+ >>> from scipy.ndimage import distance_transform_cdt
2180
+ >>> import matplotlib.pyplot as plt
2181
+ >>> from mpl_toolkits.axes_grid1 import ImageGrid
2182
+
2183
+ First, we create a toy binary image.
2184
+
2185
+ >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
2186
+ ... # fill circular area with 1
2187
+ ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
2188
+ ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
2189
+ ... circle_shape = np.sqrt(circle) < radius
2190
+ ... image[circle_shape] = fillvalue
2191
+ ... return image
2192
+ >>> image = np.zeros((100, 100), dtype=np.uint8)
2193
+ >>> image[35:65, 20:80] = 1
2194
+ >>> image = add_circle(28, 65, 10, image)
2195
+ >>> image = add_circle(37, 30, 10, image)
2196
+ >>> image = add_circle(70, 45, 20, image)
2197
+ >>> image = add_circle(45, 80, 10, image)
2198
+
2199
+ Next, we set up the figure.
2200
+
2201
+ >>> fig = plt.figure(figsize=(5, 15))
2202
+ >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3),
2203
+ ... label_mode="1", share_all=True,
2204
+ ... cbar_location="right", cbar_mode="each",
2205
+ ... cbar_size="7%", cbar_pad="2%")
2206
+ >>> for ax in grid:
2207
+ ... ax.axis('off')
2208
+ >>> top, middle, bottom = grid
2209
+ >>> colorbar_ticks = [0, 10, 20]
2210
+
2211
+ The top image contains the original binary image.
2212
+
2213
+ >>> binary_image = top.imshow(image, cmap='gray')
2214
+ >>> cbar_binary_image = top.cax.colorbar(binary_image)
2215
+ >>> cbar_binary_image.set_ticks([0, 1])
2216
+ >>> top.set_title("Binary image: foreground in white")
2217
+
2218
+ The middle image contains the distance transform using the ``taxicab``
2219
+ metric.
2220
+
2221
+ >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab")
2222
+ >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray')
2223
+ >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform)
2224
+ >>> cbar_taxicab.set_ticks(colorbar_ticks)
2225
+ >>> middle.set_title("Taxicab metric")
2226
+
2227
+ The bottom image contains the distance transform using the ``chessboard``
2228
+ metric.
2229
+
2230
+ >>> distance_chessboard = distance_transform_cdt(image,
2231
+ ... metric="chessboard")
2232
+ >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray')
2233
+ >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform)
2234
+ >>> cbar_chessboard.set_ticks(colorbar_ticks)
2235
+ >>> bottom.set_title("Chessboard metric")
2236
+ >>> plt.tight_layout()
2237
+ >>> plt.show()
2238
+
2239
+ """
2240
+ ft_inplace = isinstance(indices, numpy.ndarray)
2241
+ dt_inplace = isinstance(distances, numpy.ndarray)
2242
+ _distance_tranform_arg_check(
2243
+ dt_inplace, ft_inplace, return_distances, return_indices
2244
+ )
2245
+ input = numpy.asarray(input)
2246
+ if isinstance(metric, str):
2247
+ if metric in ['taxicab', 'cityblock', 'manhattan']:
2248
+ rank = input.ndim
2249
+ metric = generate_binary_structure(rank, 1)
2250
+ elif metric == 'chessboard':
2251
+ rank = input.ndim
2252
+ metric = generate_binary_structure(rank, rank)
2253
+ else:
2254
+ raise ValueError('invalid metric provided')
2255
+ else:
2256
+ try:
2257
+ metric = numpy.asarray(metric)
2258
+ except Exception as e:
2259
+ raise ValueError('invalid metric provided') from e
2260
+ for s in metric.shape:
2261
+ if s != 3:
2262
+ raise ValueError('metric sizes must be equal to 3')
2263
+
2264
+ if not metric.flags.contiguous:
2265
+ metric = metric.copy()
2266
+ if dt_inplace:
2267
+ if distances.dtype.type != numpy.int32:
2268
+ raise ValueError('distances must be of int32 type')
2269
+ if distances.shape != input.shape:
2270
+ raise ValueError('distances has wrong shape')
2271
+ dt = distances
2272
+ dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
2273
+ else:
2274
+ dt = numpy.where(input, -1, 0).astype(numpy.int32)
2275
+
2276
+ rank = dt.ndim
2277
+ if return_indices:
2278
+ sz = numpy.prod(dt.shape, axis=0)
2279
+ ft = numpy.arange(sz, dtype=numpy.int32)
2280
+ ft.shape = dt.shape
2281
+ else:
2282
+ ft = None
2283
+
2284
+ _nd_image.distance_transform_op(metric, dt, ft)
2285
+ dt = dt[tuple([slice(None, None, -1)] * rank)]
2286
+ if return_indices:
2287
+ ft = ft[tuple([slice(None, None, -1)] * rank)]
2288
+ _nd_image.distance_transform_op(metric, dt, ft)
2289
+ dt = dt[tuple([slice(None, None, -1)] * rank)]
2290
+ if return_indices:
2291
+ ft = ft[tuple([slice(None, None, -1)] * rank)]
2292
+ ft = numpy.ravel(ft)
2293
+ if ft_inplace:
2294
+ if indices.dtype.type != numpy.int32:
2295
+ raise ValueError('indices array must be int32')
2296
+ if indices.shape != (dt.ndim,) + dt.shape:
2297
+ raise ValueError('indices array has wrong shape')
2298
+ tmp = indices
2299
+ else:
2300
+ tmp = numpy.indices(dt.shape, dtype=numpy.int32)
2301
+ for ii in range(tmp.shape[0]):
2302
+ rtmp = numpy.ravel(tmp[ii, ...])[ft]
2303
+ rtmp.shape = dt.shape
2304
+ tmp[ii, ...] = rtmp
2305
+ ft = tmp
2306
+
2307
+ # construct and return the result
2308
+ result = []
2309
+ if return_distances and not dt_inplace:
2310
+ result.append(dt)
2311
+ if return_indices and not ft_inplace:
2312
+ result.append(ft)
2313
+
2314
+ if len(result) == 2:
2315
+ return tuple(result)
2316
+ elif len(result) == 1:
2317
+ return result[0]
2318
+ else:
2319
+ return None
2320
+
2321
+
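A minimal sketch (not part of the diff above) of the two-pass chamfer idea that distance_transform_cdt delegates to `_nd_image.distance_transform_op`: the array is initialised with a sentinel on the foreground, swept once forward and once over the reversed view. The helper name `chamfer_taxicab` is hypothetical and covers only the 2-D taxicab case; the real work above is done in C for arbitrary rank and footprints.

import numpy as np

def chamfer_taxicab(binary):
    # Hypothetical helper: exact city-block distances via two chamfer sweeps.
    a = np.asarray(binary, dtype=bool)
    rows, cols = a.shape
    big = rows + cols                 # upper bound; assumes at least one background pixel
    dt = np.where(a, big, 0)
    # forward sweep: take distances from the top/left neighbours
    for i in range(rows):
        for j in range(cols):
            if dt[i, j]:
                if i > 0:
                    dt[i, j] = min(dt[i, j], dt[i - 1, j] + 1)
                if j > 0:
                    dt[i, j] = min(dt[i, j], dt[i, j - 1] + 1)
    # backward sweep: same update applied to the reversed array
    for i in range(rows - 1, -1, -1):
        for j in range(cols - 1, -1, -1):
            if dt[i, j]:
                if i < rows - 1:
                    dt[i, j] = min(dt[i, j], dt[i + 1, j] + 1)
                if j < cols - 1:
                    dt[i, j] = min(dt[i, j], dt[i, j + 1] + 1)
    return dt

For any 2-D binary image with at least one zero, this should agree element-wise with distance_transform_cdt(img, metric="taxicab").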
2322
+ def distance_transform_edt(input, sampling=None, return_distances=True,
2323
+ return_indices=False, distances=None, indices=None):
2324
+ """
2325
+ Exact Euclidean distance transform.
2326
+
2327
+ This function calculates the distance transform of the `input`, by
2328
+ replacing each foreground (non-zero) element with its
2329
+ shortest distance to the background (any zero-valued element).
2330
+
2331
+ In addition to the distance transform, the feature transform can
2332
+ be calculated. In this case the index of the closest background
2333
+ element to each foreground element is returned in a separate array.
2334
+
2335
+ Parameters
2336
+ ----------
2337
+ input : array_like
2338
+ Input data to transform. Can be any type but will be converted
2339
+ into binary: 1 wherever input equates to True, 0 elsewhere.
2340
+ sampling : float, or sequence of float, optional
2341
+ Spacing of elements along each dimension. If a sequence, must be of
2342
+ length equal to the input rank; if a single number, this is used for
2343
+ all axes. If not specified, a grid spacing of unity is implied.
2344
+ return_distances : bool, optional
2345
+ Whether to calculate the distance transform.
2346
+ Default is True.
2347
+ return_indices : bool, optional
2348
+ Whether to calculate the feature transform.
2349
+ Default is False.
2350
+ distances : float64 ndarray, optional
2351
+ An output array to store the calculated distance transform, instead of
2352
+ returning it.
2353
+ `return_distances` must be True.
2354
+ It must be the same shape as `input`.
2355
+ indices : int32 ndarray, optional
2356
+ An output array to store the calculated feature transform, instead of
2357
+ returning it.
2358
+ `return_indices` must be True.
2359
+ Its shape must be `(input.ndim,) + input.shape`.
2360
+
2361
+ Returns
2362
+ -------
2363
+ distances : float64 ndarray, optional
2364
+ The calculated distance transform. Returned only when
2365
+ `return_distances` is True and `distances` is not supplied.
2366
+ It will have the same shape as the input array.
2367
+ indices : int32 ndarray, optional
2368
+ The calculated feature transform. It has an input-shaped array for each
2369
+ dimension of the input. See example below.
2370
+ Returned only when `return_indices` is True and `indices` is not
2371
+ supplied.
2372
+
2373
+ Notes
2374
+ -----
2375
+ The Euclidean distance transform gives values of the Euclidean
2376
+ distance::
2377
+
2378
+ n
2379
+ y_i = sqrt(sum (x[i]-b[i])**2)
2380
+ i
2381
+
2382
+ where b[i] is the background point (value 0) with the smallest
2383
+ Euclidean distance to input points x[i], and n is the
2384
+ number of dimensions.
2385
+
2386
+ Examples
2387
+ --------
2388
+ >>> from scipy import ndimage
2389
+ >>> import numpy as np
2390
+ >>> a = np.array(([0,1,1,1,1],
2391
+ ... [0,0,1,1,1],
2392
+ ... [0,1,1,1,1],
2393
+ ... [0,1,1,1,0],
2394
+ ... [0,1,1,0,0]))
2395
+ >>> ndimage.distance_transform_edt(a)
2396
+ array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
2397
+ [ 0. , 0. , 1. , 2. , 2. ],
2398
+ [ 0. , 1. , 1.4142, 1.4142, 1. ],
2399
+ [ 0. , 1. , 1.4142, 1. , 0. ],
2400
+ [ 0. , 1. , 1. , 0. , 0. ]])
2401
+
2402
+ With a sampling of 2 units along x, 1 along y:
2403
+
2404
+ >>> ndimage.distance_transform_edt(a, sampling=[2,1])
2405
+ array([[ 0. , 1. , 2. , 2.8284, 3.6056],
2406
+ [ 0. , 0. , 1. , 2. , 3. ],
2407
+ [ 0. , 1. , 2. , 2.2361, 2. ],
2408
+ [ 0. , 1. , 2. , 1. , 0. ],
2409
+ [ 0. , 1. , 1. , 0. , 0. ]])
2410
+
2411
+ Asking for indices as well:
2412
+
2413
+ >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
2414
+ >>> inds
2415
+ array([[[0, 0, 1, 1, 3],
2416
+ [1, 1, 1, 1, 3],
2417
+ [2, 2, 1, 3, 3],
2418
+ [3, 3, 4, 4, 3],
2419
+ [4, 4, 4, 4, 4]],
2420
+ [[0, 0, 1, 1, 4],
2421
+ [0, 1, 1, 1, 4],
2422
+ [0, 0, 1, 4, 4],
2423
+ [0, 0, 3, 3, 4],
2424
+ [0, 0, 3, 3, 4]]])
2425
+
2426
+ With arrays provided for inplace outputs:
2427
+
2428
+ >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
2429
+ >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
2430
+ array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
2431
+ [ 0. , 0. , 1. , 2. , 2. ],
2432
+ [ 0. , 1. , 1.4142, 1.4142, 1. ],
2433
+ [ 0. , 1. , 1.4142, 1. , 0. ],
2434
+ [ 0. , 1. , 1. , 0. , 0. ]])
2435
+ >>> indices
2436
+ array([[[0, 0, 1, 1, 3],
2437
+ [1, 1, 1, 1, 3],
2438
+ [2, 2, 1, 3, 3],
2439
+ [3, 3, 4, 4, 3],
2440
+ [4, 4, 4, 4, 4]],
2441
+ [[0, 0, 1, 1, 4],
2442
+ [0, 1, 1, 1, 4],
2443
+ [0, 0, 1, 4, 4],
2444
+ [0, 0, 3, 3, 4],
2445
+ [0, 0, 3, 3, 4]]])
2446
+
2447
+ """
2448
+ ft_inplace = isinstance(indices, numpy.ndarray)
2449
+ dt_inplace = isinstance(distances, numpy.ndarray)
2450
+ _distance_tranform_arg_check(
2451
+ dt_inplace, ft_inplace, return_distances, return_indices
2452
+ )
2453
+
2454
+ # calculate the feature transform
2455
+ input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
2456
+ if sampling is not None:
2457
+ sampling = _ni_support._normalize_sequence(sampling, input.ndim)
2458
+ sampling = numpy.asarray(sampling, dtype=numpy.float64)
2459
+ if not sampling.flags.contiguous:
2460
+ sampling = sampling.copy()
2461
+
2462
+ if ft_inplace:
2463
+ ft = indices
2464
+ if ft.shape != (input.ndim,) + input.shape:
2465
+ raise RuntimeError('indices array has wrong shape')
2466
+ if ft.dtype.type != numpy.int32:
2467
+ raise RuntimeError('indices array must be int32')
2468
+ else:
2469
+ ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32)
2470
+
2471
+ _nd_image.euclidean_feature_transform(input, sampling, ft)
2472
+ # if requested, calculate the distance transform
2473
+ if return_distances:
2474
+ dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
2475
+ dt = dt.astype(numpy.float64)
2476
+ if sampling is not None:
2477
+ for ii in range(len(sampling)):
2478
+ dt[ii, ...] *= sampling[ii]
2479
+ numpy.multiply(dt, dt, dt)
2480
+ if dt_inplace:
2481
+ dt = numpy.add.reduce(dt, axis=0)
2482
+ if distances.shape != dt.shape:
2483
+ raise RuntimeError('distances array has wrong shape')
2484
+ if distances.dtype.type != numpy.float64:
2485
+ raise RuntimeError('distances array must be float64')
2486
+ numpy.sqrt(dt, distances)
2487
+ else:
2488
+ dt = numpy.add.reduce(dt, axis=0)
2489
+ dt = numpy.sqrt(dt)
2490
+
2491
+ # construct and return the result
2492
+ result = []
2493
+ if return_distances and not dt_inplace:
2494
+ result.append(dt)
2495
+ if return_indices and not ft_inplace:
2496
+ result.append(ft)
2497
+
2498
+ if len(result) == 2:
2499
+ return tuple(result)
2500
+ elif len(result) == 1:
2501
+ return result[0]
2502
+ else:
2503
+ return None
2504
+
2505
+
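A minimal sketch (not part of the diff above) of the step implemented with `numpy.indices` in the function just shown: once `_nd_image.euclidean_feature_transform` has filled `ft` with the index of the nearest background element along each axis, the distance is simply the length of the offset vector to that index (here with the default unit sampling).

import numpy as np
from scipy import ndimage

a = np.array([[0, 1, 1, 1],
              [0, 0, 1, 1],
              [0, 1, 1, 1]])
edt, ft = ndimage.distance_transform_edt(a, return_indices=True)
offsets = ft - np.indices(a.shape)                    # per-axis offset to the nearest zero
manual = np.sqrt((offsets.astype(float) ** 2).sum(axis=0))
assert np.allclose(manual, edt)                       # matches the C computation above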
2506
+ def _distance_tranform_arg_check(distances_out, indices_out,
2507
+ return_distances, return_indices):
2508
+ """Raise a RuntimeError if the arguments are invalid"""
2509
+ error_msgs = []
2510
+ if (not return_distances) and (not return_indices):
2511
+ error_msgs.append(
2512
+ 'at least one of return_distances/return_indices must be True')
2513
+ if distances_out and not return_distances:
2514
+ error_msgs.append(
2515
+ 'return_distances must be True if distances is supplied'
2516
+ )
2517
+ if indices_out and not return_indices:
2518
+ error_msgs.append('return_indices must be True if indices is supplied')
2519
+ if error_msgs:
2520
+ raise RuntimeError(', '.join(error_msgs))
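A minimal sketch (not part of the diff above) of what this check rejects: disabling both return flags, or supplying an output array whose return flag is False, raises a RuntimeError carrying the messages assembled here.

import numpy as np
from scipy import ndimage

a = np.ones((4, 4), dtype=np.uint8)
a[0, 0] = 0
try:
    ndimage.distance_transform_edt(a, return_distances=False, return_indices=False)
except RuntimeError as exc:
    print(exc)   # "at least one of return_distances/return_indices must be True"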
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (147 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py ADDED
@@ -0,0 +1,208 @@
1
+ """Docstring components common to several ndimage functions."""
2
+ from scipy._lib import doccer
3
+
4
+ __all__ = ['docfiller']
5
+
6
+
7
+ _input_doc = (
8
+ """input : array_like
9
+ The input array.""")
10
+ _axis_doc = (
11
+ """axis : int, optional
12
+ The axis of `input` along which to calculate. Default is -1.""")
13
+ _output_doc = (
14
+ """output : array or dtype, optional
15
+ The array in which to place the output, or the dtype of the
16
+ returned array. By default an array of the same dtype as input
17
+ will be created.""")
18
+ _size_foot_doc = (
19
+ """size : scalar or tuple, optional
20
+ See footprint, below. Ignored if footprint is given.
21
+ footprint : array, optional
22
+ Either `size` or `footprint` must be defined. `size` gives
23
+ the shape that is taken from the input array, at every element
24
+ position, to define the input to the filter function.
25
+ `footprint` is a boolean array that specifies (implicitly) a
26
+ shape, but also which of the elements within this shape will get
27
+ passed to the filter function. Thus ``size=(n,m)`` is equivalent
28
+ to ``footprint=np.ones((n,m))``. We adjust `size` to the number
29
+ of dimensions of the input array, so that, if the input array is
30
+ shape (10,10,10), and `size` is 2, then the actual size used is
31
+ (2,2,2). When `footprint` is given, `size` is ignored.""")
32
+ _mode_reflect_doc = (
33
+ """mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
34
+ The `mode` parameter determines how the input array is extended
35
+ beyond its boundaries. Default is 'reflect'. Behavior for each valid
36
+ value is as follows:
37
+
38
+ 'reflect' (`d c b a | a b c d | d c b a`)
39
+ The input is extended by reflecting about the edge of the last
40
+ pixel. This mode is also sometimes referred to as half-sample
41
+ symmetric.
42
+
43
+ 'constant' (`k k k k | a b c d | k k k k`)
44
+ The input is extended by filling all values beyond the edge with
45
+ the same constant value, defined by the `cval` parameter.
46
+
47
+ 'nearest' (`a a a a | a b c d | d d d d`)
48
+ The input is extended by replicating the last pixel.
49
+
50
+ 'mirror' (`d c b | a b c d | c b a`)
51
+ The input is extended by reflecting about the center of the last
52
+ pixel. This mode is also sometimes referred to as whole-sample
53
+ symmetric.
54
+
55
+ 'wrap' (`a b c d | a b c d | a b c d`)
56
+ The input is extended by wrapping around to the opposite edge.
57
+
58
+ For consistency with the interpolation functions, the following mode
59
+ names can also be used:
60
+
61
+ 'grid-mirror'
62
+ This is a synonym for 'reflect'.
63
+
64
+ 'grid-constant'
65
+ This is a synonym for 'constant'.
66
+
67
+ 'grid-wrap'
68
+ This is a synonym for 'wrap'.""")
69
+
70
+ _mode_interp_constant_doc = (
71
+ """mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
72
+ 'mirror', 'grid-wrap', 'wrap'}, optional
73
+ The `mode` parameter determines how the input array is extended
74
+ beyond its boundaries. Default is 'constant'. Behavior for each valid
75
+ value is as follows (see additional plots and details on
76
+ :ref:`boundary modes <ndimage-interpolation-modes>`):
77
+
78
+ 'reflect' (`d c b a | a b c d | d c b a`)
79
+ The input is extended by reflecting about the edge of the last
80
+ pixel. This mode is also sometimes referred to as half-sample
81
+ symmetric.
82
+
83
+ 'grid-mirror'
84
+ This is a synonym for 'reflect'.
85
+
86
+ 'constant' (`k k k k | a b c d | k k k k`)
87
+ The input is extended by filling all values beyond the edge with
88
+ the same constant value, defined by the `cval` parameter. No
89
+ interpolation is performed beyond the edges of the input.
90
+
91
+ 'grid-constant' (`k k k k | a b c d | k k k k`)
92
+ The input is extended by filling all values beyond the edge with
93
+ the same constant value, defined by the `cval` parameter. Interpolation
94
+ occurs for samples outside the input's extent as well.
95
+
96
+ 'nearest' (`a a a a | a b c d | d d d d`)
97
+ The input is extended by replicating the last pixel.
98
+
99
+ 'mirror' (`d c b | a b c d | c b a`)
100
+ The input is extended by reflecting about the center of the last
101
+ pixel. This mode is also sometimes referred to as whole-sample
102
+ symmetric.
103
+
104
+ 'grid-wrap' (`a b c d | a b c d | a b c d`)
105
+ The input is extended by wrapping around to the opposite edge.
106
+
107
+ 'wrap' (`d b c d | a b c d | b c a b`)
108
+ The input is extended by wrapping around to the opposite edge, but in a
109
+ way such that the last point and initial point exactly overlap. In this
110
+ case it is not well defined which sample will be chosen at the point of
111
+ overlap.""")
112
+ _mode_interp_mirror_doc = (
113
+ _mode_interp_constant_doc.replace("Default is 'constant'",
114
+ "Default is 'mirror'")
115
+ )
116
+ assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
117
+ 'Default not replaced'
118
+
119
+ _mode_multiple_doc = (
120
+ """mode : str or sequence, optional
121
+ The `mode` parameter determines how the input array is extended
122
+ when the filter overlaps a border. By passing a sequence of modes
123
+ with length equal to the number of dimensions of the input array,
124
+ different modes can be specified along each axis. Default value is
125
+ 'reflect'. The valid values and their behavior is as follows:
126
+
127
+ 'reflect' (`d c b a | a b c d | d c b a`)
128
+ The input is extended by reflecting about the edge of the last
129
+ pixel. This mode is also sometimes referred to as half-sample
130
+ symmetric.
131
+
132
+ 'constant' (`k k k k | a b c d | k k k k`)
133
+ The input is extended by filling all values beyond the edge with
134
+ the same constant value, defined by the `cval` parameter.
135
+
136
+ 'nearest' (`a a a a | a b c d | d d d d`)
137
+ The input is extended by replicating the last pixel.
138
+
139
+ 'mirror' (`d c b | a b c d | c b a`)
140
+ The input is extended by reflecting about the center of the last
141
+ pixel. This mode is also sometimes referred to as whole-sample
142
+ symmetric.
143
+
144
+ 'wrap' (`a b c d | a b c d | a b c d`)
145
+ The input is extended by wrapping around to the opposite edge.
146
+
147
+ For consistency with the interpolation functions, the following mode
148
+ names can also be used:
149
+
150
+ 'grid-constant'
151
+ This is a synonym for 'constant'.
152
+
153
+ 'grid-mirror'
154
+ This is a synonym for 'reflect'.
155
+
156
+ 'grid-wrap'
157
+ This is a synonym for 'wrap'.""")
158
+ _cval_doc = (
159
+ """cval : scalar, optional
160
+ Value to fill past edges of input if `mode` is 'constant'. Default
161
+ is 0.0.""")
162
+ _origin_doc = (
163
+ """origin : int, optional
164
+ Controls the placement of the filter on the input array's pixels.
165
+ A value of 0 (the default) centers the filter over the pixel, with
166
+ positive values shifting the filter to the left, and negative ones
167
+ to the right.""")
168
+ _origin_multiple_doc = (
169
+ """origin : int or sequence, optional
170
+ Controls the placement of the filter on the input array's pixels.
171
+ A value of 0 (the default) centers the filter over the pixel, with
172
+ positive values shifting the filter to the left, and negative ones
173
+ to the right. By passing a sequence of origins with length equal to
174
+ the number of dimensions of the input array, different shifts can
175
+ be specified along each axis.""")
176
+ _extra_arguments_doc = (
177
+ """extra_arguments : sequence, optional
178
+ Sequence of extra positional arguments to pass to passed function.""")
179
+ _extra_keywords_doc = (
180
+ """extra_keywords : dict, optional
181
+ dict of extra keyword arguments to pass to passed function.""")
182
+ _prefilter_doc = (
183
+ """prefilter : bool, optional
184
+ Determines if the input array is prefiltered with `spline_filter`
185
+ before interpolation. The default is True, which will create a
186
+ temporary `float64` array of filtered values if `order > 1`. If
187
+ setting this to False, the output will be slightly blurred if
188
+ `order > 1`, unless the input is prefiltered, i.e. it is the result
189
+ of calling `spline_filter` on the original input.""")
190
+
191
+ docdict = {
192
+ 'input': _input_doc,
193
+ 'axis': _axis_doc,
194
+ 'output': _output_doc,
195
+ 'size_foot': _size_foot_doc,
196
+ 'mode_interp_constant': _mode_interp_constant_doc,
197
+ 'mode_interp_mirror': _mode_interp_mirror_doc,
198
+ 'mode_reflect': _mode_reflect_doc,
199
+ 'mode_multiple': _mode_multiple_doc,
200
+ 'cval': _cval_doc,
201
+ 'origin': _origin_doc,
202
+ 'origin_multiple': _origin_multiple_doc,
203
+ 'extra_arguments': _extra_arguments_doc,
204
+ 'extra_keywords': _extra_keywords_doc,
205
+ 'prefilter': _prefilter_doc
206
+ }
207
+
208
+ docfiller = doccer.filldoc(docdict)
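A minimal sketch (not part of the diff above) of how `docfiller` is consumed by the ndimage modules: applied as a decorator, it expands `%(key)s` placeholders in a docstring with the shared fragments defined in this file. The function `my_filter` is purely hypothetical.

from scipy.ndimage._ni_docstrings import docfiller

@docfiller
def my_filter(input, axis=-1, output=None):
    """Apply a hypothetical filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    """

print(my_filter.__doc__)   # placeholders replaced by the shared parameter text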
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (428 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py ADDED
@@ -0,0 +1,119 @@
1
+ # Copyright (C) 2003-2005 Peter J. Verveer
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ #
7
+ # 1. Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ #
10
+ # 2. Redistributions in binary form must reproduce the above
11
+ # copyright notice, this list of conditions and the following
12
+ # disclaimer in the documentation and/or other materials provided
13
+ # with the distribution.
14
+ #
15
+ # 3. The name of the author may not be used to endorse or promote
16
+ # products derived from this software without specific prior
17
+ # written permission.
18
+ #
19
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
20
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
+
31
+ from collections.abc import Iterable
32
+ import operator
33
+ import warnings
34
+ import numpy
35
+
36
+
37
+ def _extend_mode_to_code(mode):
38
+ """Convert an extension mode to the corresponding integer code.
39
+ """
40
+ if mode == 'nearest':
41
+ return 0
42
+ elif mode == 'wrap':
43
+ return 1
44
+ elif mode in ['reflect', 'grid-mirror']:
45
+ return 2
46
+ elif mode == 'mirror':
47
+ return 3
48
+ elif mode == 'constant':
49
+ return 4
50
+ elif mode == 'grid-wrap':
51
+ return 5
52
+ elif mode == 'grid-constant':
53
+ return 6
54
+ else:
55
+ raise RuntimeError('boundary mode not supported')
56
+
57
+
58
+ def _normalize_sequence(input, rank):
59
+ """If input is a scalar, create a sequence of length equal to the
60
+ rank by duplicating the input. If input is a sequence,
61
+ check if its length is equal to the length of array.
62
+ """
63
+ is_str = isinstance(input, str)
64
+ if not is_str and isinstance(input, Iterable):
65
+ normalized = list(input)
66
+ if len(normalized) != rank:
67
+ err = "sequence argument must have length equal to input rank"
68
+ raise RuntimeError(err)
69
+ else:
70
+ normalized = [input] * rank
71
+ return normalized
72
+
73
+
74
+ def _get_output(output, input, shape=None, complex_output=False):
75
+ if shape is None:
76
+ shape = input.shape
77
+ if output is None:
78
+ if not complex_output:
79
+ output = numpy.zeros(shape, dtype=input.dtype.name)
80
+ else:
81
+ complex_type = numpy.promote_types(input.dtype, numpy.complex64)
82
+ output = numpy.zeros(shape, dtype=complex_type)
83
+ elif isinstance(output, (type, numpy.dtype)):
84
+ # Classes (like `np.float32`) and dtypes are interpreted as dtype
85
+ if complex_output and numpy.dtype(output).kind != 'c':
86
+ warnings.warn("promoting specified output dtype to complex", stacklevel=3)
87
+ output = numpy.promote_types(output, numpy.complex64)
88
+ output = numpy.zeros(shape, dtype=output)
89
+ elif isinstance(output, str):
90
+ output = numpy.dtype(output)
91
+ if complex_output and output.kind != 'c':
92
+ raise RuntimeError("output must have complex dtype")
93
+ elif not issubclass(output.type, numpy.number):
94
+ raise RuntimeError("output must have numeric dtype")
95
+ output = numpy.zeros(shape, dtype=output)
96
+ elif output.shape != shape:
97
+ raise RuntimeError("output shape not correct")
98
+ elif complex_output and output.dtype.kind != 'c':
99
+ raise RuntimeError("output must have complex dtype")
100
+ return output
101
+
102
+
103
+ def _check_axes(axes, ndim):
104
+ if axes is None:
105
+ return tuple(range(ndim))
106
+ elif numpy.isscalar(axes):
107
+ axes = (operator.index(axes),)
108
+ elif isinstance(axes, Iterable):
109
+ for ax in axes:
110
+ axes = tuple(operator.index(ax) for ax in axes)
111
+ if ax < -ndim or ax > ndim - 1:
112
+ raise ValueError(f"specified axis: {ax} is out of range")
113
+ axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
114
+ else:
115
+ message = "axes must be an integer, iterable of integers, or None"
116
+ raise ValueError(message)
117
+ if len(tuple(set(axes))) != len(axes):
118
+ raise ValueError("axes must be unique")
119
+ return axes
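A minimal sketch (not part of the diff above) of the two helpers the rest of the package leans on most. Both are private, so this is an aid for reading the code above rather than something user code should call.

import numpy as np
from scipy.ndimage import _ni_support

print(_ni_support._normalize_sequence(3, 2))         # [3, 3]  (scalar broadcast to rank)
print(_ni_support._normalize_sequence((1, 2), 2))    # [1, 2]  (length checked against rank)

image = np.zeros((4, 5), dtype=np.float32)
out = _ni_support._get_output(np.float64, image)     # dtype request -> freshly allocated zeros
print(out.shape, out.dtype)                          # (4, 5) float64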
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/filters.py ADDED
@@ -0,0 +1,27 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'correlate1d', 'convolve1d', 'gaussian_filter1d',
10
+ 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
11
+ 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
12
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
13
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
14
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
15
+ 'rank_filter', 'median_filter', 'percentile_filter',
16
+ 'generic_filter1d', 'generic_filter'
17
+ ]
18
+
19
+
20
+ def __dir__():
21
+ return __all__
22
+
23
+
24
+ def __getattr__(name):
25
+ return _sub_module_deprecation(sub_package='ndimage', module='filters',
26
+ private_modules=['_filters'], all=__all__,
27
+ attribute=name)
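A minimal sketch (not part of the diff above): this shim, like the analogous `fourier`, `interpolation`, `measurements` and `morphology` files that follow, resolves attribute access through `_sub_module_deprecation`, which emits a DeprecationWarning and forwards to the public `scipy.ndimage` namespace.

import warnings
import numpy as np
from scipy.ndimage import filters   # deprecated shim

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    blurred = filters.gaussian_filter(np.arange(25.0).reshape(5, 5), sigma=1)
print(caught[0].category.__name__)  # DeprecationWarning
print(blurred.shape)                # (5, 5) -- same result as scipy.ndimage.gaussian_filter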
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/fourier.py ADDED
@@ -0,0 +1,21 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'fourier_gaussian', 'fourier_uniform',
10
+ 'fourier_ellipsoid', 'fourier_shift'
11
+ ]
12
+
13
+
14
+ def __dir__():
15
+ return __all__
16
+
17
+
18
+ def __getattr__(name):
19
+ return _sub_module_deprecation(sub_package='ndimage', module='fourier',
20
+ private_modules=['_fourier'], all=__all__,
21
+ attribute=name)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/interpolation.py ADDED
@@ -0,0 +1,23 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'spline_filter1d', 'spline_filter',
10
+ 'geometric_transform', 'map_coordinates',
11
+ 'affine_transform', 'shift', 'zoom', 'rotate',
12
+ 'docfiller'
13
+ ]
14
+
15
+
16
+ def __dir__():
17
+ return __all__
18
+
19
+
20
+ def __getattr__(name):
21
+ return _sub_module_deprecation(sub_package='ndimage', module='interpolation',
22
+ private_modules=['_interpolation'], all=__all__,
23
+ attribute=name)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/measurements.py ADDED
@@ -0,0 +1,24 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'label', 'find_objects', 'labeled_comprehension',
10
+ 'sum', 'mean', 'variance', 'standard_deviation',
11
+ 'minimum', 'maximum', 'median', 'minimum_position',
12
+ 'maximum_position', 'extrema', 'center_of_mass',
13
+ 'histogram', 'watershed_ift', 'sum_labels'
14
+ ]
15
+
16
+
17
+ def __dir__():
18
+ return __all__
19
+
20
+
21
+ def __getattr__(name):
22
+ return _sub_module_deprecation(sub_package='ndimage', module='measurements',
23
+ private_modules=['_measurements'], all=__all__,
24
+ attribute=name)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/morphology.py ADDED
@@ -0,0 +1,27 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'iterate_structure', 'generate_binary_structure',
10
+ 'binary_erosion', 'binary_dilation', 'binary_opening',
11
+ 'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
12
+ 'binary_fill_holes', 'grey_erosion', 'grey_dilation',
13
+ 'grey_opening', 'grey_closing', 'morphological_gradient',
14
+ 'morphological_laplace', 'white_tophat', 'black_tophat',
15
+ 'distance_transform_bf', 'distance_transform_cdt',
16
+ 'distance_transform_edt'
17
+ ]
18
+
19
+
20
+ def __dir__():
21
+ return __all__
22
+
23
+
24
+ def __getattr__(name):
25
+ return _sub_module_deprecation(sub_package='ndimage', module='morphology',
26
+ private_modules=['_morphology'], all=__all__,
27
+ attribute=name)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ from __future__ import annotations
2
+ import numpy
3
+
4
+ # list of numarray data types
5
+ integer_types: list[type] = [
6
+ numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
7
+ numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
8
+
9
+ float_types: list[type] = [numpy.float32, numpy.float64]
10
+
11
+ complex_types: list[type] = [numpy.complex64, numpy.complex128]
12
+
13
+ types: list[type] = integer_types + float_types
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (567 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc ADDED
Binary file (4.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc ADDED
Binary file (63.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc ADDED
Binary file (46.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc ADDED
Binary file (36.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc ADDED
Binary file (60 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc ADDED
Binary file (1.88 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py ADDED
@@ -0,0 +1,102 @@
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose
3
+
4
+ from scipy import ndimage
5
+ from scipy.ndimage import _ctest
6
+ from scipy.ndimage import _cytest
7
+ from scipy._lib._ccallback import LowLevelCallable
8
+
9
+ FILTER1D_FUNCTIONS = [
10
+ lambda filter_size: _ctest.filter1d(filter_size),
11
+ lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
12
+ lambda filter_size: LowLevelCallable(
13
+ _cytest.filter1d(filter_size, with_signature=True)
14
+ ),
15
+ lambda filter_size: LowLevelCallable.from_cython(
16
+ _cytest, "_filter1d",
17
+ _cytest.filter1d_capsule(filter_size),
18
+ ),
19
+ ]
20
+
21
+ FILTER2D_FUNCTIONS = [
22
+ lambda weights: _ctest.filter2d(weights),
23
+ lambda weights: _cytest.filter2d(weights, with_signature=False),
24
+ lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
25
+ lambda weights: LowLevelCallable.from_cython(_cytest,
26
+ "_filter2d",
27
+ _cytest.filter2d_capsule(weights),),
28
+ ]
29
+
30
+ TRANSFORM_FUNCTIONS = [
31
+ lambda shift: _ctest.transform(shift),
32
+ lambda shift: _cytest.transform(shift, with_signature=False),
33
+ lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
34
+ lambda shift: LowLevelCallable.from_cython(_cytest,
35
+ "_transform",
36
+ _cytest.transform_capsule(shift),),
37
+ ]
38
+
39
+
40
+ def test_generic_filter():
41
+ def filter2d(footprint_elements, weights):
42
+ return (weights*footprint_elements).sum()
43
+
44
+ def check(j):
45
+ func = FILTER2D_FUNCTIONS[j]
46
+
47
+ im = np.ones((20, 20))
48
+ im[:10,:10] = 0
49
+ footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
50
+ footprint_size = np.count_nonzero(footprint)
51
+ weights = np.ones(footprint_size)/footprint_size
52
+
53
+ res = ndimage.generic_filter(im, func(weights),
54
+ footprint=footprint)
55
+ std = ndimage.generic_filter(im, filter2d, footprint=footprint,
56
+ extra_arguments=(weights,))
57
+ assert_allclose(res, std, err_msg=f"#{j} failed")
58
+
59
+ for j, func in enumerate(FILTER2D_FUNCTIONS):
60
+ check(j)
61
+
62
+
63
+ def test_generic_filter1d():
64
+ def filter1d(input_line, output_line, filter_size):
65
+ for i in range(output_line.size):
66
+ output_line[i] = 0
67
+ for j in range(filter_size):
68
+ output_line[i] += input_line[i+j]
69
+ output_line /= filter_size
70
+
71
+ def check(j):
72
+ func = FILTER1D_FUNCTIONS[j]
73
+
74
+ im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
75
+ filter_size = 3
76
+
77
+ res = ndimage.generic_filter1d(im, func(filter_size),
78
+ filter_size)
79
+ std = ndimage.generic_filter1d(im, filter1d, filter_size,
80
+ extra_arguments=(filter_size,))
81
+ assert_allclose(res, std, err_msg=f"#{j} failed")
82
+
83
+ for j, func in enumerate(FILTER1D_FUNCTIONS):
84
+ check(j)
85
+
86
+
87
+ def test_geometric_transform():
88
+ def transform(output_coordinates, shift):
89
+ return output_coordinates[0] - shift, output_coordinates[1] - shift
90
+
91
+ def check(j):
92
+ func = TRANSFORM_FUNCTIONS[j]
93
+
94
+ im = np.arange(12).reshape(4, 3).astype(np.float64)
95
+ shift = 0.5
96
+
97
+ res = ndimage.geometric_transform(im, func(shift))
98
+ std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
99
+ assert_allclose(res, std, err_msg=f"#{j} failed")
100
+
101
+ for j, func in enumerate(TRANSFORM_FUNCTIONS):
102
+ check(j)
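A minimal sketch (not part of the diff above): the C and Cython callbacks exercised by these tests are drop-in replacements for an ordinary Python callable, which `generic_filter` also accepts; the LowLevelCallable variants exist purely to avoid the per-pixel Python call overhead.

import numpy as np
from scipy import ndimage

im = np.ones((20, 20))
im[:10, :10] = 0
footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

def footprint_mean(values):
    # values is the 1-D array of pixels selected by the footprint
    return values.mean()

res = ndimage.generic_filter(im, footprint_mean, footprint=footprint)
print(res.shape)   # (20, 20)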
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py ADDED
@@ -0,0 +1,66 @@
1
+ """ Testing data types for ndimage calls
2
+ """
3
+ import numpy as np
4
+ from numpy.testing import assert_array_almost_equal, assert_
5
+ import pytest
6
+
7
+ from scipy import ndimage
8
+
9
+
10
+ def test_map_coordinates_dts():
11
+ # check that ndimage accepts different data types for interpolation
12
+ data = np.array([[4, 1, 3, 2],
13
+ [7, 6, 8, 5],
14
+ [3, 5, 3, 6]])
15
+ shifted_data = np.array([[0, 0, 0, 0],
16
+ [0, 4, 1, 3],
17
+ [0, 7, 6, 8]])
18
+ idx = np.indices(data.shape)
19
+ dts = (np.uint8, np.uint16, np.uint32, np.uint64,
20
+ np.int8, np.int16, np.int32, np.int64,
21
+ np.intp, np.uintp, np.float32, np.float64)
22
+ for order in range(0, 6):
23
+ for data_dt in dts:
24
+ these_data = data.astype(data_dt)
25
+ for coord_dt in dts:
26
+ # affine mapping
27
+ mat = np.eye(2, dtype=coord_dt)
28
+ off = np.zeros((2,), dtype=coord_dt)
29
+ out = ndimage.affine_transform(these_data, mat, off)
30
+ assert_array_almost_equal(these_data, out)
31
+ # map coordinates
32
+ coords_m1 = idx.astype(coord_dt) - 1
33
+ coords_p10 = idx.astype(coord_dt) + 10
34
+ out = ndimage.map_coordinates(these_data, coords_m1, order=order)
35
+ assert_array_almost_equal(out, shifted_data)
36
+ # check constant fill works
37
+ out = ndimage.map_coordinates(these_data, coords_p10, order=order)
38
+ assert_array_almost_equal(out, np.zeros((3,4)))
39
+ # check shift and zoom
40
+ out = ndimage.shift(these_data, 1)
41
+ assert_array_almost_equal(out, shifted_data)
42
+ out = ndimage.zoom(these_data, 1)
43
+ assert_array_almost_equal(these_data, out)
44
+
45
+
46
+ @pytest.mark.xfail(True, reason="Broken on many platforms")
47
+ def test_uint64_max():
48
+ # Test interpolation respects uint64 max. Reported to fail at least on
49
+ # win32 (due to the 32 bit visual C compiler using signed int64 when
50
+ # converting between uint64 to double) and Debian on s390x.
51
+ # Interpolation is always done in double precision floating point, so
52
+ # we use the largest uint64 value for which int(float(big)) still fits
53
+ # in a uint64.
54
+ # This test was last enabled on macOS only, and there it started failing
55
+ # on arm64 as well (see gh-19117).
56
+ big = 2**64 - 1025
57
+ arr = np.array([big, big, big], dtype=np.uint64)
58
+ # Tests geometric transform (map_coordinates, affine_transform)
59
+ inds = np.indices(arr.shape) - 0.1
60
+ x = ndimage.map_coordinates(arr, inds)
61
+ assert_(x[1] == int(float(big)))
62
+ assert_(x[2] == int(float(big)))
63
+ # Tests zoom / shift
64
+ x = ndimage.shift(arr, 0.1)
65
+ assert_(x[1] == int(float(big)))
66
+ assert_(x[2] == int(float(big)))
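A minimal sketch (not part of the diff above) of the arithmetic behind the choice of `big` in the test: near 2**64 the float64 grid has a spacing of 2048, so 2**64 - 1025 is the largest uint64 whose rounded double still fits back into a uint64, while the next value already rounds up to 2**64.

big = 2**64 - 1025
assert float(big) == 2.0**64 - 2048        # rounds down to the nearest representable double
assert int(float(big)) <= 2**64 - 1        # still fits in a uint64
assert int(float(big + 1)) == 2**64        # one more rounds up and overflows uint64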
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py ADDED
@@ -0,0 +1,2189 @@
1
+ ''' Some tests for filters '''
2
+ import functools
3
+ import itertools
4
+ import math
5
+ import numpy
6
+
7
+ from numpy.testing import (assert_equal, assert_allclose,
8
+ assert_array_almost_equal,
9
+ assert_array_equal, assert_almost_equal,
10
+ suppress_warnings, assert_)
11
+ import pytest
12
+ from pytest import raises as assert_raises
13
+
14
+ from scipy import ndimage
15
+ from scipy.ndimage._filters import _gaussian_kernel1d
16
+
17
+ from . import types, float_types, complex_types
18
+
19
+
20
+ def sumsq(a, b):
21
+ return math.sqrt(((a - b)**2).sum())
22
+
23
+
24
+ def _complex_correlate(array, kernel, real_dtype, convolve=False,
25
+ mode="reflect", cval=0, ):
26
+ """Utility to perform a reference complex-valued convolutions.
27
+
28
+ When convolve==False, correlation is performed instead
29
+ """
30
+ array = numpy.asarray(array)
31
+ kernel = numpy.asarray(kernel)
32
+ complex_array = array.dtype.kind == 'c'
33
+ complex_kernel = kernel.dtype.kind == 'c'
34
+ if array.ndim == 1:
35
+ func = ndimage.convolve1d if convolve else ndimage.correlate1d
36
+ else:
37
+ func = ndimage.convolve if convolve else ndimage.correlate
38
+ if not convolve:
39
+ kernel = kernel.conj()
40
+ if complex_array and complex_kernel:
41
+ # use: real(cval) for array.real component
42
+ # imag(cval) for array.imag component
43
+ output = (
44
+ func(array.real, kernel.real, output=real_dtype,
45
+ mode=mode, cval=numpy.real(cval)) -
46
+ func(array.imag, kernel.imag, output=real_dtype,
47
+ mode=mode, cval=numpy.imag(cval)) +
48
+ 1j * func(array.imag, kernel.real, output=real_dtype,
49
+ mode=mode, cval=numpy.imag(cval)) +
50
+ 1j * func(array.real, kernel.imag, output=real_dtype,
51
+ mode=mode, cval=numpy.real(cval))
52
+ )
53
+ elif complex_array:
54
+ output = (
55
+ func(array.real, kernel, output=real_dtype, mode=mode,
56
+ cval=numpy.real(cval)) +
57
+ 1j * func(array.imag, kernel, output=real_dtype, mode=mode,
58
+ cval=numpy.imag(cval))
59
+ )
60
+ elif complex_kernel:
61
+ # real array so cval is real too
62
+ output = (
63
+ func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
64
+ 1j * func(array, kernel.imag, output=real_dtype, mode=mode,
65
+ cval=cval)
66
+ )
67
+ return output
68
+
69
+
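A minimal sketch (not part of the diff above) of the identity the helper relies on: a complex kernel applied to a complex array splits into four real-valued filter calls, (a_r + i*a_i)*(k_r + i*k_i) = (a_r*k_r - a_i*k_i) + i*(a_i*k_r + a_r*k_i), which is exactly the combination of `func(...)` terms built above.

import numpy as np
from scipy import ndimage

a = np.arange(8.0) + 1j * np.arange(8.0)[::-1]
k = np.array([1 - 1j, 2 + 0.5j])

direct = ndimage.convolve1d(a, k)
split = (ndimage.convolve1d(a.real, k.real, output=np.float64)
         - ndimage.convolve1d(a.imag, k.imag, output=np.float64)
         + 1j * (ndimage.convolve1d(a.imag, k.real, output=np.float64)
                 + ndimage.convolve1d(a.real, k.imag, output=np.float64)))
np.testing.assert_allclose(direct, split)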
70
+ def _cases_axes_tuple_length_mismatch():
71
+ # Generate combinations of filter function, valid kwargs, and
72
+ # keyword-value pairs for which the value will be given a mismatched
73
+ # (invalid) size
74
+ filter_func = ndimage.gaussian_filter
75
+ kwargs = dict(radius=3, mode='constant', sigma=1.0, order=0)
76
+ for key, val in kwargs.items():
77
+ yield filter_func, kwargs, key, val
78
+
79
+ filter_funcs = [ndimage.uniform_filter, ndimage.minimum_filter,
80
+ ndimage.maximum_filter]
81
+ kwargs = dict(size=3, mode='constant', origin=0)
82
+ for filter_func in filter_funcs:
83
+ for key, val in kwargs.items():
84
+ yield filter_func, kwargs, key, val
85
+
86
+
87
+ class TestNdimageFilters:
88
+
89
+ def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
90
+ # utility for validating complex-valued correlations
91
+ real_dtype = numpy.asarray([], dtype=type2).real.dtype
92
+ expected = _complex_correlate(
93
+ array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
94
+ )
95
+
96
+ if array.ndim == 1:
97
+ correlate = functools.partial(ndimage.correlate1d, axis=-1,
98
+ mode=mode, cval=cval)
99
+ convolve = functools.partial(ndimage.convolve1d, axis=-1,
100
+ mode=mode, cval=cval)
101
+ else:
102
+ correlate = functools.partial(ndimage.correlate, mode=mode,
103
+ cval=cval)
104
+ convolve = functools.partial(ndimage.convolve, mode=mode,
105
+ cval=cval)
106
+
107
+ # test correlate output dtype
108
+ output = correlate(array, kernel, output=type2)
109
+ assert_array_almost_equal(expected, output)
110
+ assert_equal(output.dtype.type, type2)
111
+
112
+ # test correlate with pre-allocated output
113
+ output = numpy.zeros_like(array, dtype=type2)
114
+ correlate(array, kernel, output=output)
115
+ assert_array_almost_equal(expected, output)
116
+
117
+ # test convolve output dtype
118
+ output = convolve(array, kernel, output=type2)
119
+ expected = _complex_correlate(
120
+ array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
121
+ )
122
+ assert_array_almost_equal(expected, output)
123
+ assert_equal(output.dtype.type, type2)
124
+
125
+ # convolve with pre-allocated output
126
+ convolve(array, kernel, output=output)
127
+ assert_array_almost_equal(expected, output)
128
+ assert_equal(output.dtype.type, type2)
129
+
130
+ # warns if the output is not a complex dtype
131
+ with pytest.warns(UserWarning,
132
+ match="promoting specified output dtype to complex"):
133
+ correlate(array, kernel, output=real_dtype)
134
+
135
+ with pytest.warns(UserWarning,
136
+ match="promoting specified output dtype to complex"):
137
+ convolve(array, kernel, output=real_dtype)
138
+
139
+ # raises if output array is provided, but is not complex-valued
140
+ output_real = numpy.zeros_like(array, dtype=real_dtype)
141
+ with assert_raises(RuntimeError):
142
+ correlate(array, kernel, output=output_real)
143
+
144
+ with assert_raises(RuntimeError):
145
+ convolve(array, kernel, output=output_real)
146
+
147
+ def test_correlate01(self):
148
+ array = numpy.array([1, 2])
149
+ weights = numpy.array([2])
150
+ expected = [2, 4]
151
+
152
+ output = ndimage.correlate(array, weights)
153
+ assert_array_almost_equal(output, expected)
154
+
155
+ output = ndimage.convolve(array, weights)
156
+ assert_array_almost_equal(output, expected)
157
+
158
+ output = ndimage.correlate1d(array, weights)
159
+ assert_array_almost_equal(output, expected)
160
+
161
+ output = ndimage.convolve1d(array, weights)
162
+ assert_array_almost_equal(output, expected)
163
+
164
+ def test_correlate01_overlap(self):
165
+ array = numpy.arange(256).reshape(16, 16)
166
+ weights = numpy.array([2])
167
+ expected = 2 * array
168
+
169
+ ndimage.correlate1d(array, weights, output=array)
170
+ assert_array_almost_equal(array, expected)
171
+
172
+ def test_correlate02(self):
173
+ array = numpy.array([1, 2, 3])
174
+ kernel = numpy.array([1])
175
+
176
+ output = ndimage.correlate(array, kernel)
177
+ assert_array_almost_equal(array, output)
178
+
179
+ output = ndimage.convolve(array, kernel)
180
+ assert_array_almost_equal(array, output)
181
+
182
+ output = ndimage.correlate1d(array, kernel)
183
+ assert_array_almost_equal(array, output)
184
+
185
+ output = ndimage.convolve1d(array, kernel)
186
+ assert_array_almost_equal(array, output)
187
+
188
+ def test_correlate03(self):
189
+ array = numpy.array([1])
190
+ weights = numpy.array([1, 1])
191
+ expected = [2]
192
+
193
+ output = ndimage.correlate(array, weights)
194
+ assert_array_almost_equal(output, expected)
195
+
196
+ output = ndimage.convolve(array, weights)
197
+ assert_array_almost_equal(output, expected)
198
+
199
+ output = ndimage.correlate1d(array, weights)
200
+ assert_array_almost_equal(output, expected)
201
+
202
+ output = ndimage.convolve1d(array, weights)
203
+ assert_array_almost_equal(output, expected)
204
+
205
+ def test_correlate04(self):
206
+ array = numpy.array([1, 2])
207
+ tcor = [2, 3]
208
+ tcov = [3, 4]
209
+ weights = numpy.array([1, 1])
210
+ output = ndimage.correlate(array, weights)
211
+ assert_array_almost_equal(output, tcor)
212
+ output = ndimage.convolve(array, weights)
213
+ assert_array_almost_equal(output, tcov)
214
+ output = ndimage.correlate1d(array, weights)
215
+ assert_array_almost_equal(output, tcor)
216
+ output = ndimage.convolve1d(array, weights)
217
+ assert_array_almost_equal(output, tcov)
218
+
219
+ def test_correlate05(self):
220
+ array = numpy.array([1, 2, 3])
221
+ tcor = [2, 3, 5]
222
+ tcov = [3, 5, 6]
223
+ kernel = numpy.array([1, 1])
224
+ output = ndimage.correlate(array, kernel)
225
+ assert_array_almost_equal(tcor, output)
226
+ output = ndimage.convolve(array, kernel)
227
+ assert_array_almost_equal(tcov, output)
228
+ output = ndimage.correlate1d(array, kernel)
229
+ assert_array_almost_equal(tcor, output)
230
+ output = ndimage.convolve1d(array, kernel)
231
+ assert_array_almost_equal(tcov, output)
232
+
233
+ def test_correlate06(self):
234
+ array = numpy.array([1, 2, 3])
235
+ tcor = [9, 14, 17]
236
+ tcov = [7, 10, 15]
237
+ weights = numpy.array([1, 2, 3])
238
+ output = ndimage.correlate(array, weights)
239
+ assert_array_almost_equal(output, tcor)
240
+ output = ndimage.convolve(array, weights)
241
+ assert_array_almost_equal(output, tcov)
242
+ output = ndimage.correlate1d(array, weights)
243
+ assert_array_almost_equal(output, tcor)
244
+ output = ndimage.convolve1d(array, weights)
245
+ assert_array_almost_equal(output, tcov)
246
+
247
+ def test_correlate07(self):
248
+ array = numpy.array([1, 2, 3])
249
+ expected = [5, 8, 11]
250
+ weights = numpy.array([1, 2, 1])
251
+ output = ndimage.correlate(array, weights)
252
+ assert_array_almost_equal(output, expected)
253
+ output = ndimage.convolve(array, weights)
254
+ assert_array_almost_equal(output, expected)
255
+ output = ndimage.correlate1d(array, weights)
256
+ assert_array_almost_equal(output, expected)
257
+ output = ndimage.convolve1d(array, weights)
258
+ assert_array_almost_equal(output, expected)
259
+
260
+ def test_correlate08(self):
261
+ array = numpy.array([1, 2, 3])
262
+ tcor = [1, 2, 5]
263
+ tcov = [3, 6, 7]
264
+ weights = numpy.array([1, 2, -1])
265
+ output = ndimage.correlate(array, weights)
266
+ assert_array_almost_equal(output, tcor)
267
+ output = ndimage.convolve(array, weights)
268
+ assert_array_almost_equal(output, tcov)
269
+ output = ndimage.correlate1d(array, weights)
270
+ assert_array_almost_equal(output, tcor)
271
+ output = ndimage.convolve1d(array, weights)
272
+ assert_array_almost_equal(output, tcov)
273
+
274
+ def test_correlate09(self):
275
+ array = []
276
+ kernel = numpy.array([1, 1])
277
+ output = ndimage.correlate(array, kernel)
278
+ assert_array_almost_equal(array, output)
279
+ output = ndimage.convolve(array, kernel)
280
+ assert_array_almost_equal(array, output)
281
+ output = ndimage.correlate1d(array, kernel)
282
+ assert_array_almost_equal(array, output)
283
+ output = ndimage.convolve1d(array, kernel)
284
+ assert_array_almost_equal(array, output)
285
+
286
+ def test_correlate10(self):
287
+ array = [[]]
288
+ kernel = numpy.array([[1, 1]])
289
+ output = ndimage.correlate(array, kernel)
290
+ assert_array_almost_equal(array, output)
291
+ output = ndimage.convolve(array, kernel)
292
+ assert_array_almost_equal(array, output)
293
+
294
+ def test_correlate11(self):
295
+ array = numpy.array([[1, 2, 3],
296
+ [4, 5, 6]])
297
+ kernel = numpy.array([[1, 1],
298
+ [1, 1]])
299
+ output = ndimage.correlate(array, kernel)
300
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
301
+ output = ndimage.convolve(array, kernel)
302
+ assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
303
+
304
+ def test_correlate12(self):
305
+ array = numpy.array([[1, 2, 3],
306
+ [4, 5, 6]])
307
+ kernel = numpy.array([[1, 0],
308
+ [0, 1]])
309
+ output = ndimage.correlate(array, kernel)
310
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
311
+ output = ndimage.convolve(array, kernel)
312
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
313
+
314
+ @pytest.mark.parametrize('dtype_array', types)
315
+ @pytest.mark.parametrize('dtype_kernel', types)
316
+ def test_correlate13(self, dtype_array, dtype_kernel):
317
+ kernel = numpy.array([[1, 0],
318
+ [0, 1]])
319
+ array = numpy.array([[1, 2, 3],
320
+ [4, 5, 6]], dtype_array)
321
+ output = ndimage.correlate(array, kernel, output=dtype_kernel)
322
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
323
+ assert_equal(output.dtype.type, dtype_kernel)
324
+
325
+ output = ndimage.convolve(array, kernel,
326
+ output=dtype_kernel)
327
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
328
+ assert_equal(output.dtype.type, dtype_kernel)
329
+
330
+ @pytest.mark.parametrize('dtype_array', types)
331
+ @pytest.mark.parametrize('dtype_output', types)
332
+ def test_correlate14(self, dtype_array, dtype_output):
333
+ kernel = numpy.array([[1, 0],
334
+ [0, 1]])
335
+ array = numpy.array([[1, 2, 3],
336
+ [4, 5, 6]], dtype_array)
337
+ output = numpy.zeros(array.shape, dtype_output)
338
+ ndimage.correlate(array, kernel, output=output)
339
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
340
+ assert_equal(output.dtype.type, dtype_output)
341
+
342
+ ndimage.convolve(array, kernel, output=output)
343
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
344
+ assert_equal(output.dtype.type, dtype_output)
345
+
346
+ @pytest.mark.parametrize('dtype_array', types)
347
+ def test_correlate15(self, dtype_array):
348
+ kernel = numpy.array([[1, 0],
349
+ [0, 1]])
350
+ array = numpy.array([[1, 2, 3],
351
+ [4, 5, 6]], dtype_array)
352
+ output = ndimage.correlate(array, kernel, output=numpy.float32)
353
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
354
+ assert_equal(output.dtype.type, numpy.float32)
355
+
356
+ output = ndimage.convolve(array, kernel, output=numpy.float32)
357
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
358
+ assert_equal(output.dtype.type, numpy.float32)
359
+
360
+ @pytest.mark.parametrize('dtype_array', types)
361
+ def test_correlate16(self, dtype_array):
362
+ kernel = numpy.array([[0.5, 0],
363
+ [0, 0.5]])
364
+ array = numpy.array([[1, 2, 3], [4, 5, 6]], dtype_array)
365
+ output = ndimage.correlate(array, kernel, output=numpy.float32)
366
+ assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
367
+ assert_equal(output.dtype.type, numpy.float32)
368
+
369
+ output = ndimage.convolve(array, kernel, output=numpy.float32)
370
+ assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
371
+ assert_equal(output.dtype.type, numpy.float32)
372
+
373
+ def test_correlate17(self):
374
+ array = numpy.array([1, 2, 3])
375
+ tcor = [3, 5, 6]
376
+ tcov = [2, 3, 5]
377
+ kernel = numpy.array([1, 1])
378
+ output = ndimage.correlate(array, kernel, origin=-1)
379
+ assert_array_almost_equal(tcor, output)
380
+ output = ndimage.convolve(array, kernel, origin=-1)
381
+ assert_array_almost_equal(tcov, output)
382
+ output = ndimage.correlate1d(array, kernel, origin=-1)
383
+ assert_array_almost_equal(tcor, output)
384
+ output = ndimage.convolve1d(array, kernel, origin=-1)
385
+ assert_array_almost_equal(tcov, output)
386
+
387
+ @pytest.mark.parametrize('dtype_array', types)
388
+ def test_correlate18(self, dtype_array):
389
+ kernel = numpy.array([[1, 0],
390
+ [0, 1]])
391
+ array = numpy.array([[1, 2, 3],
392
+ [4, 5, 6]], dtype_array)
393
+ output = ndimage.correlate(array, kernel,
394
+ output=numpy.float32,
395
+ mode='nearest', origin=-1)
396
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
397
+ assert_equal(output.dtype.type, numpy.float32)
398
+
399
+ output = ndimage.convolve(array, kernel,
400
+ output=numpy.float32,
401
+ mode='nearest', origin=-1)
402
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
403
+ assert_equal(output.dtype.type, numpy.float32)
404
+
405
+ def test_correlate_mode_sequence(self):
406
+ kernel = numpy.ones((2, 2))
407
+ array = numpy.ones((3, 3), float)
408
+ with assert_raises(RuntimeError):
409
+ ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
410
+ with assert_raises(RuntimeError):
411
+ ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
412
+
413
+ @pytest.mark.parametrize('dtype_array', types)
414
+ def test_correlate19(self, dtype_array):
415
+ kernel = numpy.array([[1, 0],
416
+ [0, 1]])
417
+ array = numpy.array([[1, 2, 3],
418
+ [4, 5, 6]], dtype_array)
419
+ output = ndimage.correlate(array, kernel,
420
+ output=numpy.float32,
421
+ mode='nearest', origin=[-1, 0])
422
+ assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
423
+ assert_equal(output.dtype.type, numpy.float32)
424
+
425
+ output = ndimage.convolve(array, kernel,
426
+ output=numpy.float32,
427
+ mode='nearest', origin=[-1, 0])
428
+ assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
429
+ assert_equal(output.dtype.type, numpy.float32)
430
+
431
+ @pytest.mark.parametrize('dtype_array', types)
432
+ @pytest.mark.parametrize('dtype_output', types)
433
+ def test_correlate20(self, dtype_array, dtype_output):
434
+ weights = numpy.array([1, 2, 1])
435
+ expected = [[5, 10, 15], [7, 14, 21]]
436
+ array = numpy.array([[1, 2, 3],
437
+ [2, 4, 6]], dtype_array)
438
+ output = numpy.zeros((2, 3), dtype_output)
439
+ ndimage.correlate1d(array, weights, axis=0, output=output)
440
+ assert_array_almost_equal(output, expected)
441
+ ndimage.convolve1d(array, weights, axis=0, output=output)
442
+ assert_array_almost_equal(output, expected)
443
+
444
+ def test_correlate21(self):
445
+ array = numpy.array([[1, 2, 3],
446
+ [2, 4, 6]])
447
+ expected = [[5, 10, 15], [7, 14, 21]]
448
+ weights = numpy.array([1, 2, 1])
449
+ output = ndimage.correlate1d(array, weights, axis=0)
450
+ assert_array_almost_equal(output, expected)
451
+ output = ndimage.convolve1d(array, weights, axis=0)
452
+ assert_array_almost_equal(output, expected)
453
+
454
+ @pytest.mark.parametrize('dtype_array', types)
455
+ @pytest.mark.parametrize('dtype_output', types)
456
+ def test_correlate22(self, dtype_array, dtype_output):
457
+ weights = numpy.array([1, 2, 1])
458
+ expected = [[6, 12, 18], [6, 12, 18]]
459
+ array = numpy.array([[1, 2, 3],
460
+ [2, 4, 6]], dtype_array)
461
+ output = numpy.zeros((2, 3), dtype_output)
462
+ ndimage.correlate1d(array, weights, axis=0,
463
+ mode='wrap', output=output)
464
+ assert_array_almost_equal(output, expected)
465
+ ndimage.convolve1d(array, weights, axis=0,
466
+ mode='wrap', output=output)
467
+ assert_array_almost_equal(output, expected)
468
+
469
+ @pytest.mark.parametrize('dtype_array', types)
470
+ @pytest.mark.parametrize('dtype_output', types)
471
+ def test_correlate23(self, dtype_array, dtype_output):
472
+ weights = numpy.array([1, 2, 1])
473
+ expected = [[5, 10, 15], [7, 14, 21]]
474
+ array = numpy.array([[1, 2, 3],
475
+ [2, 4, 6]], dtype_array)
476
+ output = numpy.zeros((2, 3), dtype_output)
477
+ ndimage.correlate1d(array, weights, axis=0,
478
+ mode='nearest', output=output)
479
+ assert_array_almost_equal(output, expected)
480
+ ndimage.convolve1d(array, weights, axis=0,
481
+ mode='nearest', output=output)
482
+ assert_array_almost_equal(output, expected)
483
+
484
+ @pytest.mark.parametrize('dtype_array', types)
485
+ @pytest.mark.parametrize('dtype_output', types)
486
+ def test_correlate24(self, dtype_array, dtype_output):
487
+ weights = numpy.array([1, 2, 1])
488
+ tcor = [[7, 14, 21], [8, 16, 24]]
489
+ tcov = [[4, 8, 12], [5, 10, 15]]
490
+ array = numpy.array([[1, 2, 3],
491
+ [2, 4, 6]], dtype_array)
492
+ output = numpy.zeros((2, 3), dtype_output)
493
+ ndimage.correlate1d(array, weights, axis=0,
494
+ mode='nearest', output=output, origin=-1)
495
+ assert_array_almost_equal(output, tcor)
496
+ ndimage.convolve1d(array, weights, axis=0,
497
+ mode='nearest', output=output, origin=-1)
498
+ assert_array_almost_equal(output, tcov)
499
+
500
+ @pytest.mark.parametrize('dtype_array', types)
501
+ @pytest.mark.parametrize('dtype_output', types)
502
+ def test_correlate25(self, dtype_array, dtype_output):
503
+ weights = numpy.array([1, 2, 1])
504
+ tcor = [[4, 8, 12], [5, 10, 15]]
505
+ tcov = [[7, 14, 21], [8, 16, 24]]
506
+ array = numpy.array([[1, 2, 3],
507
+ [2, 4, 6]], dtype_array)
508
+ output = numpy.zeros((2, 3), dtype_output)
509
+ ndimage.correlate1d(array, weights, axis=0,
510
+ mode='nearest', output=output, origin=1)
511
+ assert_array_almost_equal(output, tcor)
512
+ ndimage.convolve1d(array, weights, axis=0,
513
+ mode='nearest', output=output, origin=1)
514
+ assert_array_almost_equal(output, tcov)
515
+
516
+ def test_correlate26(self):
517
+ # test fix for gh-11661 (mirror extension of a length 1 signal)
518
+ y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
519
+ assert_array_equal(y, numpy.array(5.))
520
+
521
+ y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
522
+ assert_array_equal(y, numpy.array(5.))
523
+
524
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
525
+ @pytest.mark.parametrize('dtype_input', types)
526
+ @pytest.mark.parametrize('dtype_output', complex_types)
527
+ def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
528
+ dtype_output):
529
+ kernel = numpy.array([[1, 0],
530
+ [0, 1 + 1j]], dtype_kernel)
531
+ array = numpy.array([[1, 2, 3],
532
+ [4, 5, 6]], dtype_input)
533
+ self._validate_complex(array, kernel, dtype_output)
534
+
535
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
536
+ @pytest.mark.parametrize('dtype_input', types)
537
+ @pytest.mark.parametrize('dtype_output', complex_types)
538
+ @pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
539
+ def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
540
+ dtype_output, mode):
541
+ # test use of non-zero cval with complex inputs
542
+ # also verifies that mode 'grid-constant' does not segfault
543
+ kernel = numpy.array([[1, 0],
544
+ [0, 1 + 1j]], dtype_kernel)
545
+ array = numpy.array([[1, 2, 3],
546
+ [4, 5, 6]], dtype_input)
547
+ self._validate_complex(array, kernel, dtype_output, mode=mode,
548
+ cval=5.0)
549
+
550
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
551
+ @pytest.mark.parametrize('dtype_input', types)
552
+ def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
553
+ dtype_kernel):
554
+ # cannot give complex cval with a real image
555
+ kernel = numpy.array([[1, 0],
556
+ [0, 1 + 1j]], dtype_kernel)
557
+ array = numpy.array([[1, 2, 3],
558
+ [4, 5, 6]], dtype_input)
559
+ for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
560
+ ndimage.correlate1d]:
561
+ with pytest.raises(ValueError):
562
+ func(array, kernel, mode='constant', cval=5.0 + 1.0j,
563
+ output=numpy.complex64)
564
+
565
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
566
+ @pytest.mark.parametrize('dtype_input', types)
567
+ @pytest.mark.parametrize('dtype_output', complex_types)
568
+ def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
569
+ dtype_output):
570
+ kernel = numpy.array([1, 1 + 1j], dtype_kernel)
571
+ array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
572
+ self._validate_complex(array, kernel, dtype_output)
573
+
574
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
575
+ @pytest.mark.parametrize('dtype_input', types)
576
+ @pytest.mark.parametrize('dtype_output', complex_types)
577
+ def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
578
+ dtype_output):
579
+ kernel = numpy.array([1, 1 + 1j], dtype_kernel)
580
+ array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
581
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
582
+ cval=5.0)
583
+
584
+ @pytest.mark.parametrize('dtype_kernel', types)
585
+ @pytest.mark.parametrize('dtype_input', complex_types)
586
+ @pytest.mark.parametrize('dtype_output', complex_types)
587
+ def test_correlate_complex_input(self, dtype_input, dtype_kernel,
588
+ dtype_output):
589
+ kernel = numpy.array([[1, 0],
590
+ [0, 1]], dtype_kernel)
591
+ array = numpy.array([[1, 2j, 3],
592
+ [1 + 4j, 5, 6j]], dtype_input)
593
+ self._validate_complex(array, kernel, dtype_output)
594
+
595
+ @pytest.mark.parametrize('dtype_kernel', types)
596
+ @pytest.mark.parametrize('dtype_input', complex_types)
597
+ @pytest.mark.parametrize('dtype_output', complex_types)
598
+ def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
599
+ dtype_output):
600
+ kernel = numpy.array([1, 0, 1], dtype_kernel)
601
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
602
+ self._validate_complex(array, kernel, dtype_output)
603
+
604
+ @pytest.mark.parametrize('dtype_kernel', types)
605
+ @pytest.mark.parametrize('dtype_input', complex_types)
606
+ @pytest.mark.parametrize('dtype_output', complex_types)
607
+ def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
608
+ dtype_output):
609
+ kernel = numpy.array([1, 0, 1], dtype_kernel)
610
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
611
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
612
+ cval=5 - 3j)
613
+
614
+ @pytest.mark.parametrize('dtype', complex_types)
615
+ @pytest.mark.parametrize('dtype_output', complex_types)
616
+ def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
617
+ kernel = numpy.array([[1, 0],
618
+ [0, 1 + 1j]], dtype)
619
+ array = numpy.array([[1, 2j, 3],
620
+ [1 + 4j, 5, 6j]], dtype)
621
+ self._validate_complex(array, kernel, dtype_output)
622
+
623
+ @pytest.mark.parametrize('dtype', complex_types)
624
+ @pytest.mark.parametrize('dtype_output', complex_types)
625
+ def test_correlate_complex_input_and_kernel_cval(self, dtype,
626
+ dtype_output):
627
+ kernel = numpy.array([[1, 0],
628
+ [0, 1 + 1j]], dtype)
629
+ array = numpy.array([[1, 2, 3],
630
+ [4, 5, 6]], dtype)
631
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
632
+ cval=5.0 + 2.0j)
633
+
634
+ @pytest.mark.parametrize('dtype', complex_types)
635
+ @pytest.mark.parametrize('dtype_output', complex_types)
636
+ def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
637
+ kernel = numpy.array([1, 1 + 1j], dtype)
638
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
639
+ self._validate_complex(array, kernel, dtype_output)
640
+
641
+ @pytest.mark.parametrize('dtype', complex_types)
642
+ @pytest.mark.parametrize('dtype_output', complex_types)
643
+ def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
644
+ dtype_output):
645
+ kernel = numpy.array([1, 1 + 1j], dtype)
646
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
647
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
648
+ cval=5.0 + 2.0j)
649
+
650
+ def test_gauss01(self):
651
+ input = numpy.array([[1, 2, 3],
652
+ [2, 4, 6]], numpy.float32)
653
+ output = ndimage.gaussian_filter(input, 0)
654
+ assert_array_almost_equal(output, input)
655
+
656
+ def test_gauss02(self):
657
+ input = numpy.array([[1, 2, 3],
658
+ [2, 4, 6]], numpy.float32)
659
+ output = ndimage.gaussian_filter(input, 1.0)
660
+ assert_equal(input.dtype, output.dtype)
661
+ assert_equal(input.shape, output.shape)
662
+
663
+ def test_gauss03(self):
664
+ # single precision data
665
+ input = numpy.arange(100 * 100).astype(numpy.float32)
666
+ input.shape = (100, 100)
667
+ output = ndimage.gaussian_filter(input, [1.0, 1.0])
668
+
669
+ assert_equal(input.dtype, output.dtype)
670
+ assert_equal(input.shape, output.shape)
671
+
672
+ # input.sum() is 49995000.0. With single precision floats, we can't
673
+ # expect more than 8 digits of accuracy, so use decimal=0 in this test.
674
+ assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
675
+ decimal=0)
676
+ assert_(sumsq(input, output) > 1.0)
677
+
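# Editor's note: illustrative aside, not part of the scipy test file.
# Why only decimal=0 is asked for above: float32 carries a 24-bit mantissa
# (roughly 7 significant decimal digits), and the spacing between adjacent
# float32 values near 5e7 is already 4.0:
#     numpy.spacing(numpy.float32(49995000.0))  # -> 4.0
# so a single-precision accumulation of this sum can differ from the exact
# value by a few units; only integer-level agreement is a safe comparison.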
678
+ def test_gauss04(self):
679
+ input = numpy.arange(100 * 100).astype(numpy.float32)
680
+ input.shape = (100, 100)
681
+ otype = numpy.float64
682
+ output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
683
+ assert_equal(output.dtype.type, numpy.float64)
684
+ assert_equal(input.shape, output.shape)
685
+ assert_(sumsq(input, output) > 1.0)
686
+
687
+ def test_gauss05(self):
688
+ input = numpy.arange(100 * 100).astype(numpy.float32)
689
+ input.shape = (100, 100)
690
+ otype = numpy.float64
691
+ output = ndimage.gaussian_filter(input, [1.0, 1.0],
692
+ order=1, output=otype)
693
+ assert_equal(output.dtype.type, numpy.float64)
694
+ assert_equal(input.shape, output.shape)
695
+ assert_(sumsq(input, output) > 1.0)
696
+
697
+ def test_gauss06(self):
698
+ input = numpy.arange(100 * 100).astype(numpy.float32)
699
+ input.shape = (100, 100)
700
+ otype = numpy.float64
701
+ output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
702
+ output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
703
+ assert_array_almost_equal(output1, output2)
704
+
705
+ def test_gauss_memory_overlap(self):
706
+ input = numpy.arange(100 * 100).astype(numpy.float32)
707
+ input.shape = (100, 100)
708
+ output1 = ndimage.gaussian_filter(input, 1.0)
709
+ ndimage.gaussian_filter(input, 1.0, output=input)
710
+ assert_array_almost_equal(output1, input)
711
+
712
+ @pytest.mark.parametrize(('filter_func', 'extra_args', 'size0', 'size'),
713
+ [(ndimage.gaussian_filter, (), 0, 1.0),
714
+ (ndimage.uniform_filter, (), 1, 3),
715
+ (ndimage.minimum_filter, (), 1, 3),
716
+ (ndimage.maximum_filter, (), 1, 3),
717
+ (ndimage.median_filter, (), 1, 3),
718
+ (ndimage.rank_filter, (1,), 1, 3),
719
+ (ndimage.percentile_filter, (40,), 1, 3)])
720
+ @pytest.mark.parametrize(
721
+ 'axes',
722
+ tuple(itertools.combinations(range(-3, 3), 1))
723
+ + tuple(itertools.combinations(range(-3, 3), 2))
724
+ + ((0, 1, 2),))
725
+ def test_filter_axes(self, filter_func, extra_args, size0, size, axes):
726
+ # Note: `size` is called `sigma` in `gaussian_filter`
727
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
728
+ axes = numpy.array(axes)
729
+
730
+ if len(set(axes % array.ndim)) != len(axes):
731
+ # parametrized cases with duplicate axes raise an error
732
+ with pytest.raises(ValueError, match="axes must be unique"):
733
+ filter_func(array, *extra_args, size, axes=axes)
734
+ return
735
+ output = filter_func(array, *extra_args, size, axes=axes)
736
+
737
+ # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
738
+ all_sizes = (size if ax in (axes % array.ndim) else size0
739
+ for ax in range(array.ndim))
740
+ expected = filter_func(array, *extra_args, all_sizes)
741
+ assert_allclose(output, expected)
742
+
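# Editor's note: minimal standalone sketch, not part of the scipy test file,
# of the equivalence exercised above (it assumes a scipy version that
# supports the ``axes`` keyword, as this test file does):
#     >>> a = numpy.arange(24, dtype=float).reshape(4, 6)
#     >>> assert numpy.allclose(ndimage.uniform_filter(a, size=3, axes=(1,)),
#     ...                       ndimage.uniform_filter(a, size=(1, 3)))
# i.e. restricting the filter to ``axes`` matches a full N-D call whose size
# (or sigma, for gaussian_filter) is neutral on the remaining axes.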
743
+ kwargs_gauss = dict(radius=[4, 2, 3], order=[0, 1, 2],
744
+ mode=['reflect', 'nearest', 'constant'])
745
+ kwargs_other = dict(origin=(-1, 0, 1),
746
+ mode=['reflect', 'nearest', 'constant'])
747
+ kwargs_rank = dict(origin=(-1, 0, 1))
748
+
749
+ @pytest.mark.parametrize("filter_func, size0, size, kwargs",
750
+ [(ndimage.gaussian_filter, 0, 1.0, kwargs_gauss),
751
+ (ndimage.uniform_filter, 1, 3, kwargs_other),
752
+ (ndimage.maximum_filter, 1, 3, kwargs_other),
753
+ (ndimage.minimum_filter, 1, 3, kwargs_other),
754
+ (ndimage.median_filter, 1, 3, kwargs_rank),
755
+ (ndimage.rank_filter, 1, 3, kwargs_rank),
756
+ (ndimage.percentile_filter, 1, 3, kwargs_rank)])
757
+ @pytest.mark.parametrize('axes', itertools.combinations(range(-3, 3), 2))
758
+ def test_filter_axes_kwargs(self, filter_func, size0, size, kwargs, axes):
759
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
760
+
761
+ kwargs = {key: numpy.array(val) for key, val in kwargs.items()}
762
+ axes = numpy.array(axes)
763
+ n_axes = axes.size
764
+
765
+ if filter_func == ndimage.rank_filter:
766
+ args = (2,) # (rank,)
767
+ elif filter_func == ndimage.percentile_filter:
768
+ args = (30,) # (percentile,)
769
+ else:
770
+ args = ()
771
+
772
+ # form kwargs that specify only the axes in `axes`
773
+ reduced_kwargs = {key: val[axes] for key, val in kwargs.items()}
774
+ if len(set(axes % array.ndim)) != len(axes):
775
+ # parametrized cases with duplicate axes raise an error
776
+ with pytest.raises(ValueError, match="axes must be unique"):
777
+ filter_func(array, *args, [size]*n_axes, axes=axes,
778
+ **reduced_kwargs)
779
+ return
780
+
781
+ output = filter_func(array, *args, [size]*n_axes, axes=axes,
782
+ **reduced_kwargs)
783
+
784
+ # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
785
+ size_3d = numpy.full(array.ndim, fill_value=size0)
786
+ size_3d[axes] = size
787
+ if 'origin' in kwargs:
788
+ # origin should be zero on any axis that is not being filtered

789
+ origin = numpy.array([0, 0, 0])
790
+ origin[axes] = reduced_kwargs['origin']
791
+ kwargs['origin'] = origin
792
+ expected = filter_func(array, *args, size_3d, **kwargs)
793
+ assert_allclose(output, expected)
794
+
795
+ @pytest.mark.parametrize(
796
+ 'filter_func, args',
797
+ [(ndimage.gaussian_filter, (1.0,)), # args = (sigma,)
798
+ (ndimage.uniform_filter, (3,)), # args = (size,)
799
+ (ndimage.minimum_filter, (3,)), # args = (size,)
800
+ (ndimage.maximum_filter, (3,)), # args = (size,)
801
+ (ndimage.median_filter, (3,)), # args = (size,)
802
+ (ndimage.rank_filter, (2, 3)), # args = (rank, size)
803
+ (ndimage.percentile_filter, (30, 3))]) # args = (percentile, size)
804
+ @pytest.mark.parametrize(
805
+ 'axes', [(1.5,), (0, 1, 2, 3), (3,), (-4,)]
806
+ )
807
+ def test_filter_invalid_axes(self, filter_func, args, axes):
808
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
809
+ if any(isinstance(ax, float) for ax in axes):
810
+ error_class = TypeError
811
+ match = "cannot be interpreted as an integer"
812
+ else:
813
+ error_class = ValueError
814
+ match = "out of range"
815
+ with pytest.raises(error_class, match=match):
816
+ filter_func(array, *args, axes=axes)
817
+
818
+ @pytest.mark.parametrize(
819
+ 'filter_func, kwargs',
820
+ [(ndimage.minimum_filter, {}),
821
+ (ndimage.maximum_filter, {}),
822
+ (ndimage.median_filter, {}),
823
+ (ndimage.rank_filter, dict(rank=3)),
824
+ (ndimage.percentile_filter, dict(percentile=30))])
825
+ @pytest.mark.parametrize(
826
+ 'axes', [(0, ), (1, 2), (0, 1, 2)]
827
+ )
828
+ @pytest.mark.parametrize('separable_footprint', [False, True])
829
+ def test_filter_invalid_footprint_ndim(self, filter_func, kwargs, axes,
830
+ separable_footprint):
831
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
832
+ # create a footprint with one too many dimensions
833
+ footprint = numpy.ones((3,) * (len(axes) + 1))
834
+ if not separable_footprint:
835
+ footprint[(0,) * footprint.ndim] = 0
836
+ if (filter_func in [ndimage.minimum_filter, ndimage.maximum_filter]
837
+ and separable_footprint):
838
+ match = "sequence argument must have length equal to input rank"
839
+ else:
840
+ match = "footprint array has incorrect shape"
841
+ with pytest.raises(RuntimeError, match=match):
842
+ filter_func(array, **kwargs, footprint=footprint, axes=axes)
843
+
844
+ @pytest.mark.parametrize('n_mismatch', [1, 3])
845
+ @pytest.mark.parametrize('filter_func, kwargs, key, val',
846
+ _cases_axes_tuple_length_mismatch())
847
+ def test_filter_tuple_length_mismatch(self, n_mismatch, filter_func,
848
+ kwargs, key, val):
849
+ # Test for the intended RuntimeError when a kwarg has an invalid size
850
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
851
+ kwargs = dict(**kwargs, axes=(0, 1))
852
+ kwargs[key] = (val,) * n_mismatch
853
+ err_msg = "sequence argument must have length equal to input rank"
854
+ with pytest.raises(RuntimeError, match=err_msg):
855
+ filter_func(array, **kwargs)
856
+
857
+ @pytest.mark.parametrize('dtype', types + complex_types)
858
+ def test_prewitt01(self, dtype):
859
+ array = numpy.array([[3, 2, 5, 1, 4],
860
+ [5, 8, 3, 7, 1],
861
+ [5, 6, 9, 3, 5]], dtype)
862
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
863
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
864
+ output = ndimage.prewitt(array, 0)
865
+ assert_array_almost_equal(t, output)
866
+
867
+ @pytest.mark.parametrize('dtype', types + complex_types)
868
+ def test_prewitt02(self, dtype):
869
+ array = numpy.array([[3, 2, 5, 1, 4],
870
+ [5, 8, 3, 7, 1],
871
+ [5, 6, 9, 3, 5]], dtype)
872
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
873
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
874
+ output = numpy.zeros(array.shape, dtype)
875
+ ndimage.prewitt(array, 0, output)
876
+ assert_array_almost_equal(t, output)
877
+
878
+ @pytest.mark.parametrize('dtype', types + complex_types)
879
+ def test_prewitt03(self, dtype):
880
+ array = numpy.array([[3, 2, 5, 1, 4],
881
+ [5, 8, 3, 7, 1],
882
+ [5, 6, 9, 3, 5]], dtype)
883
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
884
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
885
+ output = ndimage.prewitt(array, 1)
886
+ assert_array_almost_equal(t, output)
887
+
888
+ @pytest.mark.parametrize('dtype', types + complex_types)
889
+ def test_prewitt04(self, dtype):
890
+ array = numpy.array([[3, 2, 5, 1, 4],
891
+ [5, 8, 3, 7, 1],
892
+ [5, 6, 9, 3, 5]], dtype)
893
+ t = ndimage.prewitt(array, -1)
894
+ output = ndimage.prewitt(array, 1)
895
+ assert_array_almost_equal(t, output)
896
+
897
+ @pytest.mark.parametrize('dtype', types + complex_types)
898
+ def test_sobel01(self, dtype):
899
+ array = numpy.array([[3, 2, 5, 1, 4],
900
+ [5, 8, 3, 7, 1],
901
+ [5, 6, 9, 3, 5]], dtype)
902
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
903
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
904
+ output = ndimage.sobel(array, 0)
905
+ assert_array_almost_equal(t, output)
906
+
907
+ @pytest.mark.parametrize('dtype', types + complex_types)
908
+ def test_sobel02(self, dtype):
909
+ array = numpy.array([[3, 2, 5, 1, 4],
910
+ [5, 8, 3, 7, 1],
911
+ [5, 6, 9, 3, 5]], dtype)
912
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
913
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
914
+ output = numpy.zeros(array.shape, dtype)
915
+ ndimage.sobel(array, 0, output)
916
+ assert_array_almost_equal(t, output)
917
+
918
+ @pytest.mark.parametrize('dtype', types + complex_types)
919
+ def test_sobel03(self, dtype):
920
+ array = numpy.array([[3, 2, 5, 1, 4],
921
+ [5, 8, 3, 7, 1],
922
+ [5, 6, 9, 3, 5]], dtype)
923
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
924
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
925
+ output = numpy.zeros(array.shape, dtype)
926
+ output = ndimage.sobel(array, 1)
927
+ assert_array_almost_equal(t, output)
928
+
929
+ @pytest.mark.parametrize('dtype', types + complex_types)
930
+ def test_sobel04(self, dtype):
931
+ array = numpy.array([[3, 2, 5, 1, 4],
932
+ [5, 8, 3, 7, 1],
933
+ [5, 6, 9, 3, 5]], dtype)
934
+ t = ndimage.sobel(array, -1)
935
+ output = ndimage.sobel(array, 1)
936
+ assert_array_almost_equal(t, output)
937
+
938
+ @pytest.mark.parametrize('dtype',
939
+ [numpy.int32, numpy.float32, numpy.float64,
940
+ numpy.complex64, numpy.complex128])
941
+ def test_laplace01(self, dtype):
942
+ array = numpy.array([[3, 2, 5, 1, 4],
943
+ [5, 8, 3, 7, 1],
944
+ [5, 6, 9, 3, 5]], dtype) * 100
945
+ tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
946
+ tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
947
+ output = ndimage.laplace(array)
948
+ assert_array_almost_equal(tmp1 + tmp2, output)
949
+
950
+ @pytest.mark.parametrize('dtype',
951
+ [numpy.int32, numpy.float32, numpy.float64,
952
+ numpy.complex64, numpy.complex128])
953
+ def test_laplace02(self, dtype):
954
+ array = numpy.array([[3, 2, 5, 1, 4],
955
+ [5, 8, 3, 7, 1],
956
+ [5, 6, 9, 3, 5]], dtype) * 100
957
+ tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
958
+ tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
959
+ output = numpy.zeros(array.shape, dtype)
960
+ ndimage.laplace(array, output=output)
961
+ assert_array_almost_equal(tmp1 + tmp2, output)
962
+
963
+ @pytest.mark.parametrize('dtype',
964
+ [numpy.int32, numpy.float32, numpy.float64,
965
+ numpy.complex64, numpy.complex128])
966
+ def test_gaussian_laplace01(self, dtype):
967
+ array = numpy.array([[3, 2, 5, 1, 4],
968
+ [5, 8, 3, 7, 1],
969
+ [5, 6, 9, 3, 5]], dtype) * 100
970
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
971
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
972
+ output = ndimage.gaussian_laplace(array, 1.0)
973
+ assert_array_almost_equal(tmp1 + tmp2, output)
974
+
975
+ @pytest.mark.parametrize('dtype',
976
+ [numpy.int32, numpy.float32, numpy.float64,
977
+ numpy.complex64, numpy.complex128])
978
+ def test_gaussian_laplace02(self, dtype):
979
+ array = numpy.array([[3, 2, 5, 1, 4],
980
+ [5, 8, 3, 7, 1],
981
+ [5, 6, 9, 3, 5]], dtype) * 100
982
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
983
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
984
+ output = numpy.zeros(array.shape, dtype)
985
+ ndimage.gaussian_laplace(array, 1.0, output)
986
+ assert_array_almost_equal(tmp1 + tmp2, output)
987
+
988
+ @pytest.mark.parametrize('dtype', types + complex_types)
989
+ def test_generic_laplace01(self, dtype):
990
+ def derivative2(input, axis, output, mode, cval, a, b):
991
+ sigma = [a, b / 2.0]
992
+ input = numpy.asarray(input)
993
+ order = [0] * input.ndim
994
+ order[axis] = 2
995
+ return ndimage.gaussian_filter(input, sigma, order,
996
+ output, mode, cval)
997
+ array = numpy.array([[3, 2, 5, 1, 4],
998
+ [5, 8, 3, 7, 1],
999
+ [5, 6, 9, 3, 5]], dtype)
1000
+ output = numpy.zeros(array.shape, dtype)
1001
+ tmp = ndimage.generic_laplace(array, derivative2,
1002
+ extra_arguments=(1.0,),
1003
+ extra_keywords={'b': 2.0})
1004
+ ndimage.gaussian_laplace(array, 1.0, output)
1005
+ assert_array_almost_equal(tmp, output)
1006
+
1007
+ @pytest.mark.parametrize('dtype',
1008
+ [numpy.int32, numpy.float32, numpy.float64,
1009
+ numpy.complex64, numpy.complex128])
1010
+ def test_gaussian_gradient_magnitude01(self, dtype):
1011
+ array = numpy.array([[3, 2, 5, 1, 4],
1012
+ [5, 8, 3, 7, 1],
1013
+ [5, 6, 9, 3, 5]], dtype) * 100
1014
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
1015
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
1016
+ output = ndimage.gaussian_gradient_magnitude(array, 1.0)
1017
+ expected = tmp1 * tmp1 + tmp2 * tmp2
1018
+ expected = numpy.sqrt(expected).astype(dtype)
1019
+ assert_array_almost_equal(expected, output)
1020
+
1021
+ @pytest.mark.parametrize('dtype',
1022
+ [numpy.int32, numpy.float32, numpy.float64,
1023
+ numpy.complex64, numpy.complex128])
1024
+ def test_gaussian_gradient_magnitude02(self, dtype):
1025
+ array = numpy.array([[3, 2, 5, 1, 4],
1026
+ [5, 8, 3, 7, 1],
1027
+ [5, 6, 9, 3, 5]], dtype) * 100
1028
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
1029
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
1030
+ output = numpy.zeros(array.shape, dtype)
1031
+ ndimage.gaussian_gradient_magnitude(array, 1.0, output)
1032
+ expected = tmp1 * tmp1 + tmp2 * tmp2
1033
+ expected = numpy.sqrt(expected).astype(dtype)
1034
+ assert_array_almost_equal(expected, output)
1035
+
1036
+ def test_generic_gradient_magnitude01(self):
1037
+ array = numpy.array([[3, 2, 5, 1, 4],
1038
+ [5, 8, 3, 7, 1],
1039
+ [5, 6, 9, 3, 5]], numpy.float64)
1040
+
1041
+ def derivative(input, axis, output, mode, cval, a, b):
1042
+ sigma = [a, b / 2.0]
1043
+ input = numpy.asarray(input)
1044
+ order = [0] * input.ndim
1045
+ order[axis] = 1
1046
+ return ndimage.gaussian_filter(input, sigma, order,
1047
+ output, mode, cval)
1048
+ tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
1049
+ tmp2 = ndimage.generic_gradient_magnitude(
1050
+ array, derivative, extra_arguments=(1.0,),
1051
+ extra_keywords={'b': 2.0})
1052
+ assert_array_almost_equal(tmp1, tmp2)
1053
+
1054
+ def test_uniform01(self):
1055
+ array = numpy.array([2, 4, 6])
1056
+ size = 2
1057
+ output = ndimage.uniform_filter1d(array, size, origin=-1)
1058
+ assert_array_almost_equal([3, 5, 6], output)
1059
+
1060
+ def test_uniform01_complex(self):
1061
+ array = numpy.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=numpy.complex128)
1062
+ size = 2
1063
+ output = ndimage.uniform_filter1d(array, size, origin=-1)
1064
+ assert_array_almost_equal([3, 5, 6], output.real)
1065
+ assert_array_almost_equal([1.5, 2.5, 3], output.imag)
1066
+
1067
+ def test_uniform02(self):
1068
+ array = numpy.array([1, 2, 3])
1069
+ filter_shape = [0]
1070
+ output = ndimage.uniform_filter(array, filter_shape)
1071
+ assert_array_almost_equal(array, output)
1072
+
1073
+ def test_uniform03(self):
1074
+ array = numpy.array([1, 2, 3])
1075
+ filter_shape = [1]
1076
+ output = ndimage.uniform_filter(array, filter_shape)
1077
+ assert_array_almost_equal(array, output)
1078
+
1079
+ def test_uniform04(self):
1080
+ array = numpy.array([2, 4, 6])
1081
+ filter_shape = [2]
1082
+ output = ndimage.uniform_filter(array, filter_shape)
1083
+ assert_array_almost_equal([2, 3, 5], output)
1084
+
1085
+ def test_uniform05(self):
1086
+ array = []
1087
+ filter_shape = [1]
1088
+ output = ndimage.uniform_filter(array, filter_shape)
1089
+ assert_array_almost_equal([], output)
1090
+
1091
+ @pytest.mark.parametrize('dtype_array', types)
1092
+ @pytest.mark.parametrize('dtype_output', types)
1093
+ def test_uniform06(self, dtype_array, dtype_output):
1094
+ filter_shape = [2, 2]
1095
+ array = numpy.array([[4, 8, 12],
1096
+ [16, 20, 24]], dtype_array)
1097
+ output = ndimage.uniform_filter(
1098
+ array, filter_shape, output=dtype_output)
1099
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
1100
+ assert_equal(output.dtype.type, dtype_output)
1101
+
1102
+ @pytest.mark.parametrize('dtype_array', complex_types)
1103
+ @pytest.mark.parametrize('dtype_output', complex_types)
1104
+ def test_uniform06_complex(self, dtype_array, dtype_output):
1105
+ filter_shape = [2, 2]
1106
+ array = numpy.array([[4, 8 + 5j, 12],
1107
+ [16, 20, 24]], dtype_array)
1108
+ output = ndimage.uniform_filter(
1109
+ array, filter_shape, output=dtype_output)
1110
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
1111
+ assert_equal(output.dtype.type, dtype_output)
1112
+
1113
+ def test_minimum_filter01(self):
1114
+ array = numpy.array([1, 2, 3, 4, 5])
1115
+ filter_shape = numpy.array([2])
1116
+ output = ndimage.minimum_filter(array, filter_shape)
1117
+ assert_array_almost_equal([1, 1, 2, 3, 4], output)
1118
+
1119
+ def test_minimum_filter02(self):
1120
+ array = numpy.array([1, 2, 3, 4, 5])
1121
+ filter_shape = numpy.array([3])
1122
+ output = ndimage.minimum_filter(array, filter_shape)
1123
+ assert_array_almost_equal([1, 1, 2, 3, 4], output)
1124
+
1125
+ def test_minimum_filter03(self):
1126
+ array = numpy.array([3, 2, 5, 1, 4])
1127
+ filter_shape = numpy.array([2])
1128
+ output = ndimage.minimum_filter(array, filter_shape)
1129
+ assert_array_almost_equal([3, 2, 2, 1, 1], output)
1130
+
1131
+ def test_minimum_filter04(self):
1132
+ array = numpy.array([3, 2, 5, 1, 4])
1133
+ filter_shape = numpy.array([3])
1134
+ output = ndimage.minimum_filter(array, filter_shape)
1135
+ assert_array_almost_equal([2, 2, 1, 1, 1], output)
1136
+
1137
+ def test_minimum_filter05(self):
1138
+ array = numpy.array([[3, 2, 5, 1, 4],
1139
+ [7, 6, 9, 3, 5],
1140
+ [5, 8, 3, 7, 1]])
1141
+ filter_shape = numpy.array([2, 3])
1142
+ output = ndimage.minimum_filter(array, filter_shape)
1143
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
1144
+ [2, 2, 1, 1, 1],
1145
+ [5, 3, 3, 1, 1]], output)
1146
+
1147
+ def test_minimum_filter05_overlap(self):
1148
+ array = numpy.array([[3, 2, 5, 1, 4],
1149
+ [7, 6, 9, 3, 5],
1150
+ [5, 8, 3, 7, 1]])
1151
+ filter_shape = numpy.array([2, 3])
1152
+ ndimage.minimum_filter(array, filter_shape, output=array)
1153
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
1154
+ [2, 2, 1, 1, 1],
1155
+ [5, 3, 3, 1, 1]], array)
1156
+
1157
+ def test_minimum_filter06(self):
1158
+ array = numpy.array([[3, 2, 5, 1, 4],
1159
+ [7, 6, 9, 3, 5],
1160
+ [5, 8, 3, 7, 1]])
1161
+ footprint = [[1, 1, 1], [1, 1, 1]]
1162
+ output = ndimage.minimum_filter(array, footprint=footprint)
1163
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
1164
+ [2, 2, 1, 1, 1],
1165
+ [5, 3, 3, 1, 1]], output)
1166
+ # separable footprint should allow mode sequence
1167
+ output2 = ndimage.minimum_filter(array, footprint=footprint,
1168
+ mode=['reflect', 'reflect'])
1169
+ assert_array_almost_equal(output2, output)
1170
+
1171
+ def test_minimum_filter07(self):
1172
+ array = numpy.array([[3, 2, 5, 1, 4],
1173
+ [7, 6, 9, 3, 5],
1174
+ [5, 8, 3, 7, 1]])
1175
+ footprint = [[1, 0, 1], [1, 1, 0]]
1176
+ output = ndimage.minimum_filter(array, footprint=footprint)
1177
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
1178
+ [2, 3, 1, 3, 1],
1179
+ [5, 5, 3, 3, 1]], output)
1180
+ with assert_raises(RuntimeError):
1181
+ ndimage.minimum_filter(array, footprint=footprint,
1182
+ mode=['reflect', 'constant'])
1183
+
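# Editor's note: illustrative aside, not part of the scipy test file.
# The mode-sequence distinction above: an all-ones rectangular footprint is
# separable, so the minimum/maximum filter can run as successive 1-D passes
# with one boundary mode per axis.  A footprint containing zeros (as here) is
# applied in a single non-separable pass, so only a single mode is accepted
# and a sequence of modes raises RuntimeError.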
1184
+ def test_minimum_filter08(self):
1185
+ array = numpy.array([[3, 2, 5, 1, 4],
1186
+ [7, 6, 9, 3, 5],
1187
+ [5, 8, 3, 7, 1]])
1188
+ footprint = [[1, 0, 1], [1, 1, 0]]
1189
+ output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
1190
+ assert_array_almost_equal([[3, 1, 3, 1, 1],
1191
+ [5, 3, 3, 1, 1],
1192
+ [3, 3, 1, 1, 1]], output)
1193
+
1194
+ def test_minimum_filter09(self):
1195
+ array = numpy.array([[3, 2, 5, 1, 4],
1196
+ [7, 6, 9, 3, 5],
1197
+ [5, 8, 3, 7, 1]])
1198
+ footprint = [[1, 0, 1], [1, 1, 0]]
1199
+ output = ndimage.minimum_filter(array, footprint=footprint,
1200
+ origin=[-1, 0])
1201
+ assert_array_almost_equal([[2, 3, 1, 3, 1],
1202
+ [5, 5, 3, 3, 1],
1203
+ [5, 3, 3, 1, 1]], output)
1204
+
1205
+ def test_maximum_filter01(self):
1206
+ array = numpy.array([1, 2, 3, 4, 5])
1207
+ filter_shape = numpy.array([2])
1208
+ output = ndimage.maximum_filter(array, filter_shape)
1209
+ assert_array_almost_equal([1, 2, 3, 4, 5], output)
1210
+
1211
+ def test_maximum_filter02(self):
1212
+ array = numpy.array([1, 2, 3, 4, 5])
1213
+ filter_shape = numpy.array([3])
1214
+ output = ndimage.maximum_filter(array, filter_shape)
1215
+ assert_array_almost_equal([2, 3, 4, 5, 5], output)
1216
+
1217
+ def test_maximum_filter03(self):
1218
+ array = numpy.array([3, 2, 5, 1, 4])
1219
+ filter_shape = numpy.array([2])
1220
+ output = ndimage.maximum_filter(array, filter_shape)
1221
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
1222
+
1223
+ def test_maximum_filter04(self):
1224
+ array = numpy.array([3, 2, 5, 1, 4])
1225
+ filter_shape = numpy.array([3])
1226
+ output = ndimage.maximum_filter(array, filter_shape)
1227
+ assert_array_almost_equal([3, 5, 5, 5, 4], output)
1228
+
1229
+ def test_maximum_filter05(self):
1230
+ array = numpy.array([[3, 2, 5, 1, 4],
1231
+ [7, 6, 9, 3, 5],
1232
+ [5, 8, 3, 7, 1]])
1233
+ filter_shape = numpy.array([2, 3])
1234
+ output = ndimage.maximum_filter(array, filter_shape)
1235
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
1236
+ [7, 9, 9, 9, 5],
1237
+ [8, 9, 9, 9, 7]], output)
1238
+
1239
+ def test_maximum_filter06(self):
1240
+ array = numpy.array([[3, 2, 5, 1, 4],
1241
+ [7, 6, 9, 3, 5],
1242
+ [5, 8, 3, 7, 1]])
1243
+ footprint = [[1, 1, 1], [1, 1, 1]]
1244
+ output = ndimage.maximum_filter(array, footprint=footprint)
1245
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
1246
+ [7, 9, 9, 9, 5],
1247
+ [8, 9, 9, 9, 7]], output)
1248
+ # separable footprint should allow mode sequence
1249
+ output2 = ndimage.maximum_filter(array, footprint=footprint,
1250
+ mode=['reflect', 'reflect'])
1251
+ assert_array_almost_equal(output2, output)
1252
+
1253
+ def test_maximum_filter07(self):
1254
+ array = numpy.array([[3, 2, 5, 1, 4],
1255
+ [7, 6, 9, 3, 5],
1256
+ [5, 8, 3, 7, 1]])
1257
+ footprint = [[1, 0, 1], [1, 1, 0]]
1258
+ output = ndimage.maximum_filter(array, footprint=footprint)
1259
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
1260
+ [7, 7, 9, 9, 5],
1261
+ [7, 9, 8, 9, 7]], output)
1262
+ # non-separable footprint should not allow mode sequence
1263
+ with assert_raises(RuntimeError):
1264
+ ndimage.maximum_filter(array, footprint=footprint,
1265
+ mode=['reflect', 'reflect'])
1266
+
1267
+ def test_maximum_filter08(self):
1268
+ array = numpy.array([[3, 2, 5, 1, 4],
1269
+ [7, 6, 9, 3, 5],
1270
+ [5, 8, 3, 7, 1]])
1271
+ footprint = [[1, 0, 1], [1, 1, 0]]
1272
+ output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
1273
+ assert_array_almost_equal([[7, 9, 9, 5, 5],
1274
+ [9, 8, 9, 7, 5],
1275
+ [8, 8, 7, 7, 7]], output)
1276
+
1277
+ def test_maximum_filter09(self):
1278
+ array = numpy.array([[3, 2, 5, 1, 4],
1279
+ [7, 6, 9, 3, 5],
1280
+ [5, 8, 3, 7, 1]])
1281
+ footprint = [[1, 0, 1], [1, 1, 0]]
1282
+ output = ndimage.maximum_filter(array, footprint=footprint,
1283
+ origin=[-1, 0])
1284
+ assert_array_almost_equal([[7, 7, 9, 9, 5],
1285
+ [7, 9, 8, 9, 7],
1286
+ [8, 8, 8, 7, 7]], output)
1287
+
1288
+ @pytest.mark.parametrize(
1289
+ 'axes', tuple(itertools.combinations(range(-3, 3), 2))
1290
+ )
1291
+ @pytest.mark.parametrize(
1292
+ 'filter_func, kwargs',
1293
+ [(ndimage.minimum_filter, {}),
1294
+ (ndimage.maximum_filter, {}),
1295
+ (ndimage.median_filter, {}),
1296
+ (ndimage.rank_filter, dict(rank=3)),
1297
+ (ndimage.percentile_filter, dict(percentile=60))]
1298
+ )
1299
+ def test_minmax_nonseparable_axes(self, filter_func, axes, kwargs):
1300
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float32).reshape(6, 8, 12)
1301
+ # use a 2D triangular footprint because it is non-separable
1302
+ footprint = numpy.tri(5)
1303
+ axes = numpy.array(axes)
1304
+
1305
+ if len(set(axes % array.ndim)) != len(axes):
1306
+ # parametrized cases with duplicate axes raise an error
1307
+ with pytest.raises(ValueError):
1308
+ filter_func(array, footprint=footprint, axes=axes, **kwargs)
1309
+ return
1310
+ output = filter_func(array, footprint=footprint, axes=axes, **kwargs)
1311
+
1312
+ missing_axis = tuple(set(range(3)) - set(axes % array.ndim))[0]
1313
+ footprint_3d = numpy.expand_dims(footprint, missing_axis)
1314
+ expected = filter_func(array, footprint=footprint_3d, **kwargs)
1315
+ assert_allclose(output, expected)
1316
+
1317
+ def test_rank01(self):
1318
+ array = numpy.array([1, 2, 3, 4, 5])
1319
+ output = ndimage.rank_filter(array, 1, size=2)
1320
+ assert_array_almost_equal(array, output)
1321
+ output = ndimage.percentile_filter(array, 100, size=2)
1322
+ assert_array_almost_equal(array, output)
1323
+ output = ndimage.median_filter(array, 2)
1324
+ assert_array_almost_equal(array, output)
1325
+
1326
+ def test_rank02(self):
1327
+ array = numpy.array([1, 2, 3, 4, 5])
1328
+ output = ndimage.rank_filter(array, 1, size=[3])
1329
+ assert_array_almost_equal(array, output)
1330
+ output = ndimage.percentile_filter(array, 50, size=3)
1331
+ assert_array_almost_equal(array, output)
1332
+ output = ndimage.median_filter(array, (3,))
1333
+ assert_array_almost_equal(array, output)
1334
+
1335
+ def test_rank03(self):
1336
+ array = numpy.array([3, 2, 5, 1, 4])
1337
+ output = ndimage.rank_filter(array, 1, size=[2])
1338
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
1339
+ output = ndimage.percentile_filter(array, 100, size=2)
1340
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
1341
+
1342
+ def test_rank04(self):
1343
+ array = numpy.array([3, 2, 5, 1, 4])
1344
+ expected = [3, 3, 2, 4, 4]
1345
+ output = ndimage.rank_filter(array, 1, size=3)
1346
+ assert_array_almost_equal(expected, output)
1347
+ output = ndimage.percentile_filter(array, 50, size=3)
1348
+ assert_array_almost_equal(expected, output)
1349
+ output = ndimage.median_filter(array, size=3)
1350
+ assert_array_almost_equal(expected, output)
1351
+
1352
+ def test_rank05(self):
1353
+ array = numpy.array([3, 2, 5, 1, 4])
1354
+ expected = [3, 3, 2, 4, 4]
1355
+ output = ndimage.rank_filter(array, -2, size=3)
1356
+ assert_array_almost_equal(expected, output)
1357
+
1358
+ def test_rank06(self):
1359
+ array = numpy.array([[3, 2, 5, 1, 4],
1360
+ [5, 8, 3, 7, 1],
1361
+ [5, 6, 9, 3, 5]])
1362
+ expected = [[2, 2, 1, 1, 1],
1363
+ [3, 3, 2, 1, 1],
1364
+ [5, 5, 3, 3, 1]]
1365
+ output = ndimage.rank_filter(array, 1, size=[2, 3])
1366
+ assert_array_almost_equal(expected, output)
1367
+ output = ndimage.percentile_filter(array, 17, size=(2, 3))
1368
+ assert_array_almost_equal(expected, output)
1369
+
1370
+ def test_rank06_overlap(self):
1371
+ array = numpy.array([[3, 2, 5, 1, 4],
1372
+ [5, 8, 3, 7, 1],
1373
+ [5, 6, 9, 3, 5]])
1374
+ array_copy = array.copy()
1375
+ expected = [[2, 2, 1, 1, 1],
1376
+ [3, 3, 2, 1, 1],
1377
+ [5, 5, 3, 3, 1]]
1378
+ ndimage.rank_filter(array, 1, size=[2, 3], output=array)
1379
+ assert_array_almost_equal(expected, array)
1380
+
1381
+ ndimage.percentile_filter(array_copy, 17, size=(2, 3),
1382
+ output=array_copy)
1383
+ assert_array_almost_equal(expected, array_copy)
1384
+
1385
+ def test_rank07(self):
1386
+ array = numpy.array([[3, 2, 5, 1, 4],
1387
+ [5, 8, 3, 7, 1],
1388
+ [5, 6, 9, 3, 5]])
1389
+ expected = [[3, 5, 5, 5, 4],
1390
+ [5, 5, 7, 5, 4],
1391
+ [6, 8, 8, 7, 5]]
1392
+ output = ndimage.rank_filter(array, -2, size=[2, 3])
1393
+ assert_array_almost_equal(expected, output)
1394
+
1395
+ def test_rank08(self):
1396
+ array = numpy.array([[3, 2, 5, 1, 4],
1397
+ [5, 8, 3, 7, 1],
1398
+ [5, 6, 9, 3, 5]])
1399
+ expected = [[3, 3, 2, 4, 4],
1400
+ [5, 5, 5, 4, 4],
1401
+ [5, 6, 7, 5, 5]]
1402
+ output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
1403
+ assert_array_almost_equal(expected, output)
1404
+ output = ndimage.rank_filter(array, 3, size=(2, 3))
1405
+ assert_array_almost_equal(expected, output)
1406
+ output = ndimage.median_filter(array, size=(2, 3))
1407
+ assert_array_almost_equal(expected, output)
1408
+
1409
+ # non-separable: does not allow mode sequence
1410
+ with assert_raises(RuntimeError):
1411
+ ndimage.percentile_filter(array, 50.0, size=(2, 3),
1412
+ mode=['reflect', 'constant'])
1413
+ with assert_raises(RuntimeError):
1414
+ ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
1415
+ with assert_raises(RuntimeError):
1416
+ ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
1417
+
1418
+ @pytest.mark.parametrize('dtype', types)
1419
+ def test_rank09(self, dtype):
1420
+ expected = [[3, 3, 2, 4, 4],
1421
+ [3, 5, 2, 5, 1],
1422
+ [5, 5, 8, 3, 5]]
1423
+ footprint = [[1, 0, 1], [0, 1, 0]]
1424
+ array = numpy.array([[3, 2, 5, 1, 4],
1425
+ [5, 8, 3, 7, 1],
1426
+ [5, 6, 9, 3, 5]], dtype)
1427
+ output = ndimage.rank_filter(array, 1, footprint=footprint)
1428
+ assert_array_almost_equal(expected, output)
1429
+ output = ndimage.percentile_filter(array, 35, footprint=footprint)
1430
+ assert_array_almost_equal(expected, output)
1431
+
1432
+ def test_rank10(self):
1433
+ array = numpy.array([[3, 2, 5, 1, 4],
1434
+ [7, 6, 9, 3, 5],
1435
+ [5, 8, 3, 7, 1]])
1436
+ expected = [[2, 2, 1, 1, 1],
1437
+ [2, 3, 1, 3, 1],
1438
+ [5, 5, 3, 3, 1]]
1439
+ footprint = [[1, 0, 1], [1, 1, 0]]
1440
+ output = ndimage.rank_filter(array, 0, footprint=footprint)
1441
+ assert_array_almost_equal(expected, output)
1442
+ output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
1443
+ assert_array_almost_equal(expected, output)
1444
+
1445
+ def test_rank11(self):
1446
+ array = numpy.array([[3, 2, 5, 1, 4],
1447
+ [7, 6, 9, 3, 5],
1448
+ [5, 8, 3, 7, 1]])
1449
+ expected = [[3, 5, 5, 5, 4],
1450
+ [7, 7, 9, 9, 5],
1451
+ [7, 9, 8, 9, 7]]
1452
+ footprint = [[1, 0, 1], [1, 1, 0]]
1453
+ output = ndimage.rank_filter(array, -1, footprint=footprint)
1454
+ assert_array_almost_equal(expected, output)
1455
+ output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
1456
+ assert_array_almost_equal(expected, output)
1457
+
1458
+ @pytest.mark.parametrize('dtype', types)
1459
+ def test_rank12(self, dtype):
1460
+ expected = [[3, 3, 2, 4, 4],
1461
+ [3, 5, 2, 5, 1],
1462
+ [5, 5, 8, 3, 5]]
1463
+ footprint = [[1, 0, 1], [0, 1, 0]]
1464
+ array = numpy.array([[3, 2, 5, 1, 4],
1465
+ [5, 8, 3, 7, 1],
1466
+ [5, 6, 9, 3, 5]], dtype)
1467
+ output = ndimage.rank_filter(array, 1, footprint=footprint)
1468
+ assert_array_almost_equal(expected, output)
1469
+ output = ndimage.percentile_filter(array, 50.0,
1470
+ footprint=footprint)
1471
+ assert_array_almost_equal(expected, output)
1472
+ output = ndimage.median_filter(array, footprint=footprint)
1473
+ assert_array_almost_equal(expected, output)
1474
+
1475
+ @pytest.mark.parametrize('dtype', types)
1476
+ def test_rank13(self, dtype):
1477
+ expected = [[5, 2, 5, 1, 1],
1478
+ [5, 8, 3, 5, 5],
1479
+ [6, 6, 5, 5, 5]]
1480
+ footprint = [[1, 0, 1], [0, 1, 0]]
1481
+ array = numpy.array([[3, 2, 5, 1, 4],
1482
+ [5, 8, 3, 7, 1],
1483
+ [5, 6, 9, 3, 5]], dtype)
1484
+ output = ndimage.rank_filter(array, 1, footprint=footprint,
1485
+ origin=-1)
1486
+ assert_array_almost_equal(expected, output)
1487
+
1488
+ @pytest.mark.parametrize('dtype', types)
1489
+ def test_rank14(self, dtype):
1490
+ expected = [[3, 5, 2, 5, 1],
1491
+ [5, 5, 8, 3, 5],
1492
+ [5, 6, 6, 5, 5]]
1493
+ footprint = [[1, 0, 1], [0, 1, 0]]
1494
+ array = numpy.array([[3, 2, 5, 1, 4],
1495
+ [5, 8, 3, 7, 1],
1496
+ [5, 6, 9, 3, 5]], dtype)
1497
+ output = ndimage.rank_filter(array, 1, footprint=footprint,
1498
+ origin=[-1, 0])
1499
+ assert_array_almost_equal(expected, output)
1500
+
1501
+ @pytest.mark.parametrize('dtype', types)
1502
+ def test_rank15(self, dtype):
1503
+ expected = [[2, 3, 1, 4, 1],
1504
+ [5, 3, 7, 1, 1],
1505
+ [5, 5, 3, 3, 3]]
1506
+ footprint = [[1, 0, 1], [0, 1, 0]]
1507
+ array = numpy.array([[3, 2, 5, 1, 4],
1508
+ [5, 8, 3, 7, 1],
1509
+ [5, 6, 9, 3, 5]], dtype)
1510
+ output = ndimage.rank_filter(array, 0, footprint=footprint,
1511
+ origin=[-1, 0])
1512
+ assert_array_almost_equal(expected, output)
1513
+
1514
+ @pytest.mark.parametrize('dtype', types)
1515
+ def test_generic_filter1d01(self, dtype):
1516
+ weights = numpy.array([1.1, 2.2, 3.3])
1517
+
1518
+ def _filter_func(input, output, fltr, total):
1519
+ fltr = fltr / total
1520
+ for ii in range(input.shape[0] - 2):
1521
+ output[ii] = input[ii] * fltr[0]
1522
+ output[ii] += input[ii + 1] * fltr[1]
1523
+ output[ii] += input[ii + 2] * fltr[2]
1524
+ a = numpy.arange(12, dtype=dtype)
1525
+ a.shape = (3, 4)
1526
+ r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
1527
+ r2 = ndimage.generic_filter1d(
1528
+ a, _filter_func, 3, axis=0, origin=-1,
1529
+ extra_arguments=(weights,),
1530
+ extra_keywords={'total': weights.sum()})
1531
+ assert_array_almost_equal(r1, r2)
1532
+
1533
+ @pytest.mark.parametrize('dtype', types)
1534
+ def test_generic_filter01(self, dtype):
1535
+ filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
1536
+ footprint = numpy.array([[1, 0], [0, 1]])
1537
+ cf = numpy.array([1., 4.])
1538
+
1539
+ def _filter_func(buffer, weights, total=1.0):
1540
+ weights = cf / total
1541
+ return (buffer * weights).sum()
1542
+
1543
+ a = numpy.arange(12, dtype=dtype)
1544
+ a.shape = (3, 4)
1545
+ r1 = ndimage.correlate(a, filter_ * footprint)
1546
+ if dtype in float_types:
1547
+ r1 /= 5
1548
+ else:
1549
+ r1 //= 5
1550
+ r2 = ndimage.generic_filter(
1551
+ a, _filter_func, footprint=footprint, extra_arguments=(cf,),
1552
+ extra_keywords={'total': cf.sum()})
1553
+ assert_array_almost_equal(r1, r2)
1554
+
1555
+ # generic_filter doesn't allow mode sequence
1556
+ with assert_raises(RuntimeError):
1557
+ r2 = ndimage.generic_filter(
1558
+ a, _filter_func, mode=['reflect', 'reflect'],
1559
+ footprint=footprint, extra_arguments=(cf,),
1560
+ extra_keywords={'total': cf.sum()})
1561
+
1562
+ @pytest.mark.parametrize(
1563
+ 'mode, expected_value',
1564
+ [('nearest', [1, 1, 2]),
1565
+ ('wrap', [3, 1, 2]),
1566
+ ('reflect', [1, 1, 2]),
1567
+ ('mirror', [2, 1, 2]),
1568
+ ('constant', [0, 1, 2])]
1569
+ )
1570
+ def test_extend01(self, mode, expected_value):
1571
+ array = numpy.array([1, 2, 3])
1572
+ weights = numpy.array([1, 0])
1573
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
1574
+ assert_array_equal(output, expected_value)
1575
+
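# Editor's note: illustrative aside, not part of the scipy test file.
# The expected values in the test_extend* cases follow from how each boundary
# mode pads an input a b c d (same convention as the scipy.ndimage docs):
#     'nearest'  : a a a a | a b c d | d d d d
#     'wrap'     : a b c d | a b c d | a b c d
#     'reflect'  : d c b a | a b c d | d c b a
#     'mirror'   :   d c b | a b c d | c b a
#     'constant' : k k k k | a b c d | k k k k   (k == cval)
# e.g. for the [1, 0] weights above, out[i] is the left neighbour of a[i],
# so 'wrap' picks up the last element first: [3, 1, 2].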
1576
+ @pytest.mark.parametrize(
1577
+ 'mode, expected_value',
1578
+ [('nearest', [1, 1, 1]),
1579
+ ('wrap', [3, 1, 2]),
1580
+ ('reflect', [3, 3, 2]),
1581
+ ('mirror', [1, 2, 3]),
1582
+ ('constant', [0, 0, 0])]
1583
+ )
1584
+ def test_extend02(self, mode, expected_value):
1585
+ array = numpy.array([1, 2, 3])
1586
+ weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
1587
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
1588
+ assert_array_equal(output, expected_value)
1589
+
1590
+ @pytest.mark.parametrize(
1591
+ 'mode, expected_value',
1592
+ [('nearest', [2, 3, 3]),
1593
+ ('wrap', [2, 3, 1]),
1594
+ ('reflect', [2, 3, 3]),
1595
+ ('mirror', [2, 3, 2]),
1596
+ ('constant', [2, 3, 0])]
1597
+ )
1598
+ def test_extend03(self, mode, expected_value):
1599
+ array = numpy.array([1, 2, 3])
1600
+ weights = numpy.array([0, 0, 1])
1601
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
1602
+ assert_array_equal(output, expected_value)
1603
+
1604
+ @pytest.mark.parametrize(
1605
+ 'mode, expected_value',
1606
+ [('nearest', [3, 3, 3]),
1607
+ ('wrap', [2, 3, 1]),
1608
+ ('reflect', [2, 1, 1]),
1609
+ ('mirror', [1, 2, 3]),
1610
+ ('constant', [0, 0, 0])]
1611
+ )
1612
+ def test_extend04(self, mode, expected_value):
1613
+ array = numpy.array([1, 2, 3])
1614
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
1615
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
1616
+ assert_array_equal(output, expected_value)
1617
+
1618
+ @pytest.mark.parametrize(
1619
+ 'mode, expected_value',
1620
+ [('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
1621
+ ('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
1622
+ ('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
1623
+ ('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
1624
+ ('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
1625
+ )
1626
+ def test_extend05(self, mode, expected_value):
1627
+ array = numpy.array([[1, 2, 3],
1628
+ [4, 5, 6],
1629
+ [7, 8, 9]])
1630
+ weights = numpy.array([[1, 0], [0, 0]])
1631
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1632
+ assert_array_equal(output, expected_value)
1633
+
1634
+ @pytest.mark.parametrize(
1635
+ 'mode, expected_value',
1636
+ [('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
1637
+ ('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
1638
+ ('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
1639
+ ('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
1640
+ ('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
1641
+ )
1642
+ def test_extend06(self, mode, expected_value):
1643
+ array = numpy.array([[1, 2, 3],
1644
+ [4, 5, 6],
1645
+ [7, 8, 9]])
1646
+ weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
1647
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1648
+ assert_array_equal(output, expected_value)
1649
+
1650
+ @pytest.mark.parametrize(
1651
+ 'mode, expected_value',
1652
+ [('nearest', [3, 3, 3]),
1653
+ ('wrap', [2, 3, 1]),
1654
+ ('reflect', [2, 1, 1]),
1655
+ ('mirror', [1, 2, 3]),
1656
+ ('constant', [0, 0, 0])]
1657
+ )
1658
+ def test_extend07(self, mode, expected_value):
1659
+ array = numpy.array([1, 2, 3])
1660
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
1661
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1662
+ assert_array_equal(output, expected_value)
1663
+
1664
+ @pytest.mark.parametrize(
1665
+ 'mode, expected_value',
1666
+ [('nearest', [[3], [3], [3]]),
1667
+ ('wrap', [[2], [3], [1]]),
1668
+ ('reflect', [[2], [1], [1]]),
1669
+ ('mirror', [[1], [2], [3]]),
1670
+ ('constant', [[0], [0], [0]])]
1671
+ )
1672
+ def test_extend08(self, mode, expected_value):
1673
+ array = numpy.array([[1], [2], [3]])
1674
+ weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
1675
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1676
+ assert_array_equal(output, expected_value)
1677
+
1678
+ @pytest.mark.parametrize(
1679
+ 'mode, expected_value',
1680
+ [('nearest', [3, 3, 3]),
1681
+ ('wrap', [2, 3, 1]),
1682
+ ('reflect', [2, 1, 1]),
1683
+ ('mirror', [1, 2, 3]),
1684
+ ('constant', [0, 0, 0])]
1685
+ )
1686
+ def test_extend09(self, mode, expected_value):
1687
+ array = numpy.array([1, 2, 3])
1688
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
1689
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1690
+ assert_array_equal(output, expected_value)
1691
+
1692
+ @pytest.mark.parametrize(
1693
+ 'mode, expected_value',
1694
+ [('nearest', [[3], [3], [3]]),
1695
+ ('wrap', [[2], [3], [1]]),
1696
+ ('reflect', [[2], [1], [1]]),
1697
+ ('mirror', [[1], [2], [3]]),
1698
+ ('constant', [[0], [0], [0]])]
1699
+ )
1700
+ def test_extend10(self, mode, expected_value):
1701
+ array = numpy.array([[1], [2], [3]])
1702
+ weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
1703
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
1704
+ assert_array_equal(output, expected_value)
1705
+
1706
+
1707
+ def test_ticket_701():
1708
+ # Test generic filter sizes
1709
+ arr = numpy.arange(4).reshape((2, 2))
1710
+ def func(x):
1711
+ return numpy.min(x)
1712
+ res = ndimage.generic_filter(arr, func, size=(1, 1))
1713
+ # The following raises an error unless ticket 701 is fixed
1714
+ res2 = ndimage.generic_filter(arr, func, size=1)
1715
+ assert_equal(res, res2)
1716
+
1717
+
1718
+ def test_gh_5430():
1719
+ # At least one of these raises an error unless gh-5430 is
1720
+ # fixed. In py2k an int is implemented using a C long, so
1721
+ # which one fails depends on your system. In py3k there is only
1722
+ # one arbitrary precision integer type, so both should fail.
1723
+ sigma = numpy.int32(1)
1724
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1725
+ assert_equal(out, [sigma])
1726
+ sigma = numpy.int64(1)
1727
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1728
+ assert_equal(out, [sigma])
1729
+ # This worked before; make sure it still works
1730
+ sigma = 1
1731
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
1732
+ assert_equal(out, [sigma])
1733
+ # This worked before; make sure it still works
1734
+ sigma = [1, 1]
1735
+ out = ndimage._ni_support._normalize_sequence(sigma, 2)
1736
+ assert_equal(out, sigma)
1737
+ # Also include the OP's original example to make sure we fixed the issue
1738
+ x = numpy.random.normal(size=(256, 256))
1739
+ perlin = numpy.zeros_like(x)
1740
+ for i in 2**numpy.arange(6):
1741
+ perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i**2
1742
+ # This also fixes gh-4106, showing that the OP's example now runs.
1743
+ x = numpy.int64(21)
1744
+ ndimage._ni_support._normalize_sequence(x, 0)
1745
+
1746
+
1747
+ def test_gaussian_kernel1d():
1748
+ radius = 10
1749
+ sigma = 2
1750
+ sigma2 = sigma * sigma
1751
+ x = numpy.arange(-radius, radius + 1, dtype=numpy.double)
1752
+ phi_x = numpy.exp(-0.5 * x * x / sigma2)
1753
+ phi_x /= phi_x.sum()
1754
+ assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
1755
+ assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
1756
+ assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
1757
+ _gaussian_kernel1d(sigma, 2, radius))
1758
+ assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
1759
+ _gaussian_kernel1d(sigma, 3, radius))
1760
+
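# Editor's note: illustrative aside, not part of the scipy test file.
# The closed forms checked above are the analytic derivatives of the
# (normalised) Gaussian phi(x) ~ exp(-x**2 / (2 * sigma**2)):
#     phi'(x)   = -(x / sigma**2) * phi(x)
#     phi''(x)  = ((x**2 / sigma**2 - 1) / sigma**2) * phi(x)
#     phi'''(x) = ((3 - x**2 / sigma**2) * x / sigma**4) * phi(x)
# which is what _gaussian_kernel1d(sigma, order, radius) is expected to
# return for order = 1, 2 and 3.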
1761
+
1762
+ def test_orders_gauss():
1763
+ # Check order inputs to Gaussians
1764
+ arr = numpy.zeros((1,))
1765
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=0))
1766
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=3))
1767
+ assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
1768
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=0))
1769
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=3))
1770
+ assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
1771
+
1772
+
1773
+ def test_valid_origins():
1774
+ """Regression test for #1311."""
1775
+ def func(x):
1776
+ return numpy.mean(x)
1777
+ data = numpy.array([1, 2, 3, 4, 5], dtype=numpy.float64)
1778
+ assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
1779
+ origin=2)
1780
+ assert_raises(ValueError, ndimage.generic_filter1d, data, func,
1781
+ filter_size=3, origin=2)
1782
+ assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
1783
+ origin=2)
1784
+
1785
+ for filter in [ndimage.uniform_filter, ndimage.minimum_filter,
1786
+ ndimage.maximum_filter, ndimage.maximum_filter1d,
1787
+ ndimage.median_filter, ndimage.minimum_filter1d]:
1788
+ # This should work, since for size == 3, the valid range for origin is
1789
+ # -1 to 1.
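+ # (Roughly, the shifted footprint must stay inside the kernel extent,
+ # i.e. -(size // 2) <= origin <= (size - 1) // 2.)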
1790
+ list(filter(data, 3, origin=-1))
1791
+ list(filter(data, 3, origin=1))
1792
+ # Just check this raises an error instead of silently accepting or
1793
+ # segfaulting.
1794
+ assert_raises(ValueError, filter, data, 3, origin=2)
1795
+
1796
+
1797
+ def test_bad_convolve_and_correlate_origins():
1798
+ """Regression test for gh-822."""
1799
+ # Before gh-822 was fixed, these would generate seg. faults or
1800
+ # other crashes on many systems.
1801
+ assert_raises(ValueError, ndimage.correlate1d,
1802
+ [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
1803
+ assert_raises(ValueError, ndimage.correlate,
1804
+ [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
1805
+ assert_raises(ValueError, ndimage.correlate,
1806
+ numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, 1])
1807
+
1808
+ assert_raises(ValueError, ndimage.convolve1d,
1809
+ numpy.arange(10), numpy.ones(3), origin=-2)
1810
+ assert_raises(ValueError, ndimage.convolve,
1811
+ numpy.arange(10), numpy.ones(3), origin=[-2])
1812
+ assert_raises(ValueError, ndimage.convolve,
1813
+ numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, -2])
1814
+
1815
+
1816
+ def test_multiple_modes():
1817
+ # Test that the filters with multiple mode capabilities for different
1818
+ # dimensions give the same result as applying a single mode.
1819
+ arr = numpy.array([[1., 0., 0.],
1820
+ [1., 1., 0.],
1821
+ [0., 0., 0.]])
1822
+
1823
+ mode1 = 'reflect'
1824
+ mode2 = ['reflect', 'reflect']
1825
+
1826
+ assert_equal(ndimage.gaussian_filter(arr, 1, mode=mode1),
1827
+ ndimage.gaussian_filter(arr, 1, mode=mode2))
1828
+ assert_equal(ndimage.prewitt(arr, mode=mode1),
1829
+ ndimage.prewitt(arr, mode=mode2))
1830
+ assert_equal(ndimage.sobel(arr, mode=mode1),
1831
+ ndimage.sobel(arr, mode=mode2))
1832
+ assert_equal(ndimage.laplace(arr, mode=mode1),
1833
+ ndimage.laplace(arr, mode=mode2))
1834
+ assert_equal(ndimage.gaussian_laplace(arr, 1, mode=mode1),
1835
+ ndimage.gaussian_laplace(arr, 1, mode=mode2))
1836
+ assert_equal(ndimage.maximum_filter(arr, size=5, mode=mode1),
1837
+ ndimage.maximum_filter(arr, size=5, mode=mode2))
1838
+ assert_equal(ndimage.minimum_filter(arr, size=5, mode=mode1),
1839
+ ndimage.minimum_filter(arr, size=5, mode=mode2))
1840
+ assert_equal(ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode1),
1841
+ ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode2))
1842
+ assert_equal(ndimage.uniform_filter(arr, 5, mode=mode1),
1843
+ ndimage.uniform_filter(arr, 5, mode=mode2))
1844
+
1845
+
1846
+ def test_multiple_modes_sequentially():
1847
+ # Test that the filters with multiple mode capabilities for different
1848
+ # dimensions give the same result as applying the filters with
1849
+ # different modes sequentially
1850
+ arr = numpy.array([[1., 0., 0.],
1851
+ [1., 1., 0.],
1852
+ [0., 0., 0.]])
1853
+
1854
+ modes = ['reflect', 'wrap']
1855
+
1856
+ expected = ndimage.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
1857
+ expected = ndimage.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
1858
+ assert_equal(expected,
1859
+ ndimage.gaussian_filter(arr, 1, mode=modes))
1860
+
1861
+ expected = ndimage.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
1862
+ expected = ndimage.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
1863
+ assert_equal(expected,
1864
+ ndimage.uniform_filter(arr, 5, mode=modes))
1865
+
1866
+ expected = ndimage.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
1867
+ expected = ndimage.maximum_filter1d(expected, size=5, axis=1,
1868
+ mode=modes[1])
1869
+ assert_equal(expected,
1870
+ ndimage.maximum_filter(arr, size=5, mode=modes))
1871
+
1872
+ expected = ndimage.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
1873
+ expected = ndimage.minimum_filter1d(expected, size=5, axis=1,
1874
+ mode=modes[1])
1875
+ assert_equal(expected,
1876
+ ndimage.minimum_filter(arr, size=5, mode=modes))
1877
+
1878
+
1879
+ def test_multiple_modes_prewitt():
1880
+ # Test prewitt filter for multiple extrapolation modes
1881
+ arr = numpy.array([[1., 0., 0.],
1882
+ [1., 1., 0.],
1883
+ [0., 0., 0.]])
1884
+
1885
+ expected = numpy.array([[1., -3., 2.],
1886
+ [1., -2., 1.],
1887
+ [1., -1., 0.]])
1888
+
1889
+ modes = ['reflect', 'wrap']
1890
+
1891
+ assert_equal(expected,
1892
+ ndimage.prewitt(arr, mode=modes))
1893
+
1894
+
1895
+ def test_multiple_modes_sobel():
1896
+ # Test sobel filter for multiple extrapolation modes
1897
+ arr = numpy.array([[1., 0., 0.],
1898
+ [1., 1., 0.],
1899
+ [0., 0., 0.]])
1900
+
1901
+ expected = numpy.array([[1., -4., 3.],
1902
+ [2., -3., 1.],
1903
+ [1., -1., 0.]])
1904
+
1905
+ modes = ['reflect', 'wrap']
1906
+
1907
+ assert_equal(expected,
1908
+ ndimage.sobel(arr, mode=modes))
1909
+
1910
+
1911
+ def test_multiple_modes_laplace():
1912
+ # Test laplace filter for multiple extrapolation modes
1913
+ arr = numpy.array([[1., 0., 0.],
1914
+ [1., 1., 0.],
1915
+ [0., 0., 0.]])
1916
+
1917
+ expected = numpy.array([[-2., 2., 1.],
1918
+ [-2., -3., 2.],
1919
+ [1., 1., 0.]])
1920
+
1921
+ modes = ['reflect', 'wrap']
1922
+
1923
+ assert_equal(expected,
1924
+ ndimage.laplace(arr, mode=modes))
1925
+
1926
+
1927
+ def test_multiple_modes_gaussian_laplace():
1928
+ # Test gaussian_laplace filter for multiple extrapolation modes
1929
+ arr = numpy.array([[1., 0., 0.],
1930
+ [1., 1., 0.],
1931
+ [0., 0., 0.]])
1932
+
1933
+ expected = numpy.array([[-0.28438687, 0.01559809, 0.19773499],
1934
+ [-0.36630503, -0.20069774, 0.07483620],
1935
+ [0.15849176, 0.18495566, 0.21934094]])
1936
+
1937
+ modes = ['reflect', 'wrap']
1938
+
1939
+ assert_almost_equal(expected,
1940
+ ndimage.gaussian_laplace(arr, 1, mode=modes))
1941
+
1942
+
1943
+ def test_multiple_modes_gaussian_gradient_magnitude():
1944
+ # Test gaussian_gradient_magnitude filter for multiple
1945
+ # extrapolation modes
1946
+ arr = numpy.array([[1., 0., 0.],
1947
+ [1., 1., 0.],
1948
+ [0., 0., 0.]])
1949
+
1950
+ expected = numpy.array([[0.04928965, 0.09745625, 0.06405368],
1951
+ [0.23056905, 0.14025305, 0.04550846],
1952
+ [0.19894369, 0.14950060, 0.06796850]])
1953
+
1954
+ modes = ['reflect', 'wrap']
1955
+
1956
+ calculated = ndimage.gaussian_gradient_magnitude(arr, 1, mode=modes)
1957
+
1958
+ assert_almost_equal(expected, calculated)
1959
+
1960
+
1961
+ def test_multiple_modes_uniform():
1962
+ # Test uniform filter for multiple extrapolation modes
1963
+ arr = numpy.array([[1., 0., 0.],
1964
+ [1., 1., 0.],
1965
+ [0., 0., 0.]])
1966
+
1967
+ expected = numpy.array([[0.32, 0.40, 0.48],
1968
+ [0.20, 0.28, 0.32],
1969
+ [0.28, 0.32, 0.40]])
1970
+
1971
+ modes = ['reflect', 'wrap']
1972
+
1973
+ assert_almost_equal(expected,
1974
+ ndimage.uniform_filter(arr, 5, mode=modes))
1975
+
1976
+
1977
+ def test_gaussian_truncate():
1978
+ # Test that Gaussian filters can be truncated at different widths.
1979
+ # These tests only check that the result has the expected number
1980
+ # of nonzero elements.
1981
+ arr = numpy.zeros((100, 100), float)
1982
+ arr[50, 50] = 1
1983
+ num_nonzeros_2 = (ndimage.gaussian_filter(arr, 5, truncate=2) > 0).sum()
1984
+ assert_equal(num_nonzeros_2, 21**2)
1985
+ num_nonzeros_5 = (ndimage.gaussian_filter(arr, 5, truncate=5) > 0).sum()
1986
+ assert_equal(num_nonzeros_5, 51**2)
1987
+
1988
+ # Test truncate when sigma is a sequence.
1989
+ f = ndimage.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
1990
+ fpos = f > 0
1991
+ n0 = fpos.any(axis=0).sum()
1992
+ # n0 should be 2*int(2.5*3.5 + 0.5) + 1
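+ # i.e. 2*int(9.25) + 1 = 2*9 + 1 = 19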
1993
+ assert_equal(n0, 19)
1994
+ n1 = fpos.any(axis=1).sum()
1995
+ # n1 should be 2*int(0.5*3.5 + 0.5) + 1
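+ # i.e. 2*int(2.25) + 1 = 2*2 + 1 = 5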
1996
+ assert_equal(n1, 5)
1997
+
1998
+ # Test gaussian_filter1d.
1999
+ x = numpy.zeros(51)
2000
+ x[25] = 1
2001
+ f = ndimage.gaussian_filter1d(x, sigma=2, truncate=3.5)
2002
+ n = (f > 0).sum()
2003
+ assert_equal(n, 15)
2004
+
2005
+ # Test gaussian_laplace
2006
+ y = ndimage.gaussian_laplace(x, sigma=2, truncate=3.5)
2007
+ nonzero_indices = numpy.nonzero(y != 0)[0]
2008
+ n = numpy.ptp(nonzero_indices) + 1
2009
+ assert_equal(n, 15)
2010
+
2011
+ # Test gaussian_gradient_magnitude
2012
+ y = ndimage.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
2013
+ nonzero_indices = numpy.nonzero(y != 0)[0]
2014
+ n = numpy.ptp(nonzero_indices) + 1
2015
+ assert_equal(n, 15)
2016
+
2017
+
2018
+ def test_gaussian_radius():
2019
+ # Test that Gaussian filters with radius argument produce the same
2020
+ # results as the filters with corresponding truncate argument.
2021
+ # radius = int(truncate * sigma + 0.5)
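+ # e.g. the first pair below: sigma=2, truncate=1.5 gives radius = int(3.5) = 3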
2022
+ # Test gaussian_filter1d
2023
+ x = numpy.zeros(7)
2024
+ x[3] = 1
2025
+ f1 = ndimage.gaussian_filter1d(x, sigma=2, truncate=1.5)
2026
+ f2 = ndimage.gaussian_filter1d(x, sigma=2, radius=3)
2027
+ assert_equal(f1, f2)
2028
+
2029
+ # Test gaussian_filter when sigma is a number.
2030
+ a = numpy.zeros((9, 9))
2031
+ a[4, 4] = 1
2032
+ f1 = ndimage.gaussian_filter(a, sigma=0.5, truncate=3.5)
2033
+ f2 = ndimage.gaussian_filter(a, sigma=0.5, radius=2)
2034
+ assert_equal(f1, f2)
2035
+
2036
+ # Test gaussian_filter when sigma is a sequence.
2037
+ a = numpy.zeros((50, 50))
2038
+ a[25, 25] = 1
2039
+ f1 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], truncate=3.5)
2040
+ f2 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], radius=[2, 9])
2041
+ assert_equal(f1, f2)
2042
+
2043
+
2044
+ def test_gaussian_radius_invalid():
2045
+ # radius must be a nonnegative integer
2046
+ with assert_raises(ValueError):
2047
+ ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=-1)
2048
+ with assert_raises(ValueError):
2049
+ ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=1.1)
2050
+
2051
+
2052
+ class TestThreading:
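+ # Run the same filter serially and from several threads into separate
+ # output arrays; identical results are taken as evidence that the
+ # underlying C filter code is thread-safe.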
2053
+ def check_func_thread(self, n, fun, args, out):
2054
+ from threading import Thread
2055
+ thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]})
2056
+ for x in range(n)]
2057
+ [t.start() for t in thrds]
2058
+ [t.join() for t in thrds]
2059
+
2060
+ def check_func_serial(self, n, fun, args, out):
2061
+ for i in range(n):
2062
+ fun(*args, output=out[i])
2063
+
2064
+ def test_correlate1d(self):
2065
+ d = numpy.random.randn(5000)
2066
+ os = numpy.empty((4, d.size))
2067
+ ot = numpy.empty_like(os)
2068
+ k = numpy.arange(5)
2069
+ self.check_func_serial(4, ndimage.correlate1d, (d, k), os)
2070
+ self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
2071
+ assert_array_equal(os, ot)
2072
+
2073
+ def test_correlate(self):
2074
+ d = numpy.random.randn(500, 500)
2075
+ k = numpy.random.randn(10, 10)
2076
+ os = numpy.empty([4] + list(d.shape))
2077
+ ot = numpy.empty_like(os)
2078
+ self.check_func_serial(4, ndimage.correlate, (d, k), os)
2079
+ self.check_func_thread(4, ndimage.correlate, (d, k), ot)
2080
+ assert_array_equal(os, ot)
2081
+
2082
+ def test_median_filter(self):
2083
+ d = numpy.random.randn(500, 500)
2084
+ os = numpy.empty([4] + list(d.shape))
2085
+ ot = numpy.empty_like(os)
2086
+ self.check_func_serial(4, ndimage.median_filter, (d, 3), os)
2087
+ self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
2088
+ assert_array_equal(os, ot)
2089
+
2090
+ def test_uniform_filter1d(self):
2091
+ d = numpy.random.randn(5000)
2092
+ os = numpy.empty((4, d.size))
2093
+ ot = numpy.empty_like(os)
2094
+ self.check_func_serial(4, ndimage.uniform_filter1d, (d, 5), os)
2095
+ self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
2096
+ assert_array_equal(os, ot)
2097
+
2098
+ def test_minmax_filter(self):
2099
+ d = numpy.random.randn(500, 500)
2100
+ os = numpy.empty([4] + list(d.shape))
2101
+ ot = numpy.empty_like(os)
2102
+ self.check_func_serial(4, ndimage.maximum_filter, (d, 3), os)
2103
+ self.check_func_thread(4, ndimage.maximum_filter, (d, 3), ot)
2104
+ assert_array_equal(os, ot)
2105
+ self.check_func_serial(4, ndimage.minimum_filter, (d, 3), os)
2106
+ self.check_func_thread(4, ndimage.minimum_filter, (d, 3), ot)
2107
+ assert_array_equal(os, ot)
2108
+
2109
+
2110
+ def test_minmaximum_filter1d():
2111
+ # Regression test for gh-3898
2112
+ in_ = numpy.arange(10)
2113
+ out = ndimage.minimum_filter1d(in_, 1)
2114
+ assert_equal(in_, out)
2115
+ out = ndimage.maximum_filter1d(in_, 1)
2116
+ assert_equal(in_, out)
2117
+ # Test reflect
2118
+ out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
2119
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
2120
+ out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
2121
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
2122
+ # Test constant
2123
+ out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
2124
+ assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
2125
+ out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
2126
+ assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
2127
+ # Test nearest
2128
+ out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
2129
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
2130
+ out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
2131
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
2132
+ # Test wrap
2133
+ out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
2134
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
2135
+ out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
2136
+ assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
2137
+
2138
+
2139
+ def test_uniform_filter1d_roundoff_errors():
2140
+ # gh-6930
2141
+ in_ = numpy.repeat([0, 1, 0], [9, 9, 9])
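+ # Only the 10 - filter_size windows that lie entirely inside the run of
+ # nine ones average to exactly 1; with integer output, any roundoff below
+ # 1.0 would truncate to 0 and change the sum checked below.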
2142
+ for filter_size in range(3, 10):
2143
+ out = ndimage.uniform_filter1d(in_, filter_size)
2144
+ assert_equal(out.sum(), 10 - filter_size)
2145
+
2146
+
2147
+ def test_footprint_all_zeros():
2148
+ # regression test for gh-6876: footprint of all zeros segfaults
2149
+ arr = numpy.random.randint(0, 100, (100, 100))
2150
+ kernel = numpy.zeros((3, 3), bool)
2151
+ with assert_raises(ValueError):
2152
+ ndimage.maximum_filter(arr, footprint=kernel)
2153
+
2154
+
2155
+ def test_gaussian_filter():
2156
+ # Test gaussian filter with numpy.float16
2157
+ # gh-8207
2158
+ data = numpy.array([1], dtype=numpy.float16)
2159
+ sigma = 1.0
2160
+ with assert_raises(RuntimeError):
2161
+ ndimage.gaussian_filter(data, sigma)
2162
+
2163
+
2164
+ def test_rank_filter_noninteger_rank():
2165
+ # regression test for issue 9388: ValueError for
2166
+ # non-integer rank when performing rank_filter
2167
+ arr = numpy.random.random((10, 20, 30))
2168
+ assert_raises(TypeError, ndimage.rank_filter, arr, 0.5,
2169
+ footprint=numpy.ones((1, 1, 10), dtype=bool))
2170
+
2171
+
2172
+ def test_size_footprint_both_set():
2173
+ # test for input validation, expect user warning when
2174
+ # size and footprint are both set
2175
+ with suppress_warnings() as sup:
2176
+ sup.filter(UserWarning,
2177
+ "ignoring size because footprint is set")
2178
+ arr = numpy.random.random((10, 20, 30))
2179
+ ndimage.rank_filter(arr, 5, size=2, footprint=numpy.ones((1, 1, 10),
2180
+ dtype=bool))
2181
+
2182
+
2183
+ def test_byte_order_median():
2184
+ """Regression test for #413: median_filter does not handle bytes orders."""
2185
+ a = numpy.arange(9, dtype='<f4').reshape(3, 3)
2186
+ ref = ndimage.median_filter(a, (3, 3))
2187
+ b = numpy.arange(9, dtype='>f4').reshape(3, 3)
2188
+ t = ndimage.median_filter(b, (3, 3))
2189
+ assert_array_almost_equal(ref, t)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_fourier.py ADDED
@@ -0,0 +1,151 @@
1
+ import numpy
2
+ from numpy import fft
3
+ from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
4
+ assert_equal)
5
+
6
+ import pytest
7
+
8
+ from scipy import ndimage
9
+
10
+
11
+ class TestNdimageFourier:
12
+
13
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
14
+ @pytest.mark.parametrize('dtype, dec',
15
+ [(numpy.float32, 6), (numpy.float64, 14)])
16
+ def test_fourier_gaussian_real01(self, shape, dtype, dec):
17
+ a = numpy.zeros(shape, dtype)
18
+ a[0, 0] = 1.0
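+ # Filtering a unit impulse in the frequency domain and transforming back
+ # recovers the filter's impulse response, which should sum to 1 for these
+ # normalized filters.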
19
+ a = fft.rfft(a, shape[0], 0)
20
+ a = fft.fft(a, shape[1], 1)
21
+ a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
22
+ a = fft.ifft(a, shape[1], 1)
23
+ a = fft.irfft(a, shape[0], 0)
24
+ assert_almost_equal(ndimage.sum(a), 1, decimal=dec)
25
+
26
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
27
+ @pytest.mark.parametrize('dtype, dec',
28
+ [(numpy.complex64, 6), (numpy.complex128, 14)])
29
+ def test_fourier_gaussian_complex01(self, shape, dtype, dec):
30
+ a = numpy.zeros(shape, dtype)
31
+ a[0, 0] = 1.0
32
+ a = fft.fft(a, shape[0], 0)
33
+ a = fft.fft(a, shape[1], 1)
34
+ a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)
35
+ a = fft.ifft(a, shape[1], 1)
36
+ a = fft.ifft(a, shape[0], 0)
37
+ assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
38
+
39
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
40
+ @pytest.mark.parametrize('dtype, dec',
41
+ [(numpy.float32, 6), (numpy.float64, 14)])
42
+ def test_fourier_uniform_real01(self, shape, dtype, dec):
43
+ a = numpy.zeros(shape, dtype)
44
+ a[0, 0] = 1.0
45
+ a = fft.rfft(a, shape[0], 0)
46
+ a = fft.fft(a, shape[1], 1)
47
+ a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
48
+ a = fft.ifft(a, shape[1], 1)
49
+ a = fft.irfft(a, shape[0], 0)
50
+ assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
51
+
52
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
53
+ @pytest.mark.parametrize('dtype, dec',
54
+ [(numpy.complex64, 6), (numpy.complex128, 14)])
55
+ def test_fourier_uniform_complex01(self, shape, dtype, dec):
56
+ a = numpy.zeros(shape, dtype)
57
+ a[0, 0] = 1.0
58
+ a = fft.fft(a, shape[0], 0)
59
+ a = fft.fft(a, shape[1], 1)
60
+ a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
61
+ a = fft.ifft(a, shape[1], 1)
62
+ a = fft.ifft(a, shape[0], 0)
63
+ assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
64
+
65
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
66
+ @pytest.mark.parametrize('dtype, dec',
67
+ [(numpy.float32, 4), (numpy.float64, 11)])
68
+ def test_fourier_shift_real01(self, shape, dtype, dec):
69
+ expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
70
+ expected.shape = shape
71
+ a = fft.rfft(expected, shape[0], 0)
72
+ a = fft.fft(a, shape[1], 1)
73
+ a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
74
+ a = fft.ifft(a, shape[1], 1)
75
+ a = fft.irfft(a, shape[0], 0)
76
+ assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],
77
+ decimal=dec)
78
+ assert_array_almost_equal(a.imag, numpy.zeros(shape),
79
+ decimal=dec)
80
+
81
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
82
+ @pytest.mark.parametrize('dtype, dec',
83
+ [(numpy.complex64, 4), (numpy.complex128, 11)])
84
+ def test_fourier_shift_complex01(self, shape, dtype, dec):
85
+ expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
86
+ expected.shape = shape
87
+ a = fft.fft(expected, shape[0], 0)
88
+ a = fft.fft(a, shape[1], 1)
89
+ a = ndimage.fourier_shift(a, [1, 1], -1, 0)
90
+ a = fft.ifft(a, shape[1], 1)
91
+ a = fft.ifft(a, shape[0], 0)
92
+ assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],
93
+ decimal=dec)
94
+ assert_array_almost_equal(a.imag, numpy.zeros(shape),
95
+ decimal=dec)
96
+
97
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
98
+ @pytest.mark.parametrize('dtype, dec',
99
+ [(numpy.float32, 5), (numpy.float64, 14)])
100
+ def test_fourier_ellipsoid_real01(self, shape, dtype, dec):
101
+ a = numpy.zeros(shape, dtype)
102
+ a[0, 0] = 1.0
103
+ a = fft.rfft(a, shape[0], 0)
104
+ a = fft.fft(a, shape[1], 1)
105
+ a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
106
+ shape[0], 0)
107
+ a = fft.ifft(a, shape[1], 1)
108
+ a = fft.irfft(a, shape[0], 0)
109
+ assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
110
+
111
+ @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
112
+ @pytest.mark.parametrize('dtype, dec',
113
+ [(numpy.complex64, 5), (numpy.complex128, 14)])
114
+ def test_fourier_ellipsoid_complex01(self, shape, dtype, dec):
115
+ a = numpy.zeros(shape, dtype)
116
+ a[0, 0] = 1.0
117
+ a = fft.fft(a, shape[0], 0)
118
+ a = fft.fft(a, shape[1], 1)
119
+ a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
120
+ a = fft.ifft(a, shape[1], 1)
121
+ a = fft.ifft(a, shape[0], 0)
122
+ assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
123
+
124
+ def test_fourier_ellipsoid_unimplemented_ndim(self):
125
+ # arrays with ndim > 3 raise NotImplementedError
126
+ x = numpy.ones((4, 6, 8, 10), dtype=numpy.complex128)
127
+ with pytest.raises(NotImplementedError):
128
+ ndimage.fourier_ellipsoid(x, 3)
129
+
130
+ def test_fourier_ellipsoid_1d_complex(self):
131
+ # expected result of 1d ellipsoid is the same as for fourier_uniform
132
+ for shape in [(32, ), (31, )]:
133
+ for type_, dec in zip([numpy.complex64, numpy.complex128],
134
+ [5, 14]):
135
+ x = numpy.ones(shape, dtype=type_)
136
+ a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
137
+ b = ndimage.fourier_uniform(x, 5, -1, 0)
138
+ assert_array_almost_equal(a, b, decimal=dec)
139
+
140
+ @pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
141
+ @pytest.mark.parametrize('dtype',
142
+ [numpy.float32, numpy.float64,
143
+ numpy.complex64, numpy.complex128])
144
+ @pytest.mark.parametrize('test_func',
145
+ [ndimage.fourier_ellipsoid,
146
+ ndimage.fourier_gaussian,
147
+ ndimage.fourier_uniform])
148
+ def test_fourier_zero_length_dims(self, shape, dtype, test_func):
149
+ a = numpy.ones(shape, dtype)
150
+ b = test_func(a, 3)
151
+ assert_equal(a, b)
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py ADDED
@@ -0,0 +1,1327 @@
1
+ import sys
2
+
3
+ import numpy
4
+ from numpy.testing import (assert_, assert_equal, assert_array_equal,
5
+ assert_array_almost_equal, assert_allclose,
6
+ suppress_warnings)
7
+ import pytest
8
+ from pytest import raises as assert_raises
9
+ import scipy.ndimage as ndimage
10
+
11
+ from . import types
12
+
13
+ eps = 1e-12
14
+
15
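+ # Map ndimage boundary-mode names to the numpy.pad modes used to build
+ # padded reference arrays in the tests below.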
+ ndimage_to_numpy_mode = {
16
+ 'mirror': 'reflect',
17
+ 'reflect': 'symmetric',
18
+ 'grid-mirror': 'symmetric',
19
+ 'grid-wrap': 'wrap',
20
+ 'nearest': 'edge',
21
+ 'grid-constant': 'constant',
22
+ }
23
+
24
+
25
+ class TestNdimageInterpolation:
26
+
27
+ @pytest.mark.parametrize(
28
+ 'mode, expected_value',
29
+ [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
30
+ ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
31
+ ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
32
+ ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
33
+ ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
34
+ ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
35
+ ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
36
+ )
37
+ def test_boundaries(self, mode, expected_value):
38
+ def shift(x):
39
+ return (x[0] + 0.5,)
40
+
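+ # geometric_transform's mapping converts output coordinates to input
+ # coordinates, so with order=1 the first three samples are the midpoints
+ # 1.5, 2.5, 3.5; the remaining samples exercise the boundary mode.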
41
+ data = numpy.array([1, 2, 3, 4.])
42
+ assert_array_equal(
43
+ expected_value,
44
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
45
+ output_shape=(7,), order=1))
46
+
47
+ @pytest.mark.parametrize(
48
+ 'mode, expected_value',
49
+ [('nearest', [1, 1, 2, 3]),
50
+ ('wrap', [3, 1, 2, 3]),
51
+ ('grid-wrap', [4, 1, 2, 3]),
52
+ ('mirror', [2, 1, 2, 3]),
53
+ ('reflect', [1, 1, 2, 3]),
54
+ ('constant', [-1, 1, 2, 3]),
55
+ ('grid-constant', [-1, 1, 2, 3])]
56
+ )
57
+ def test_boundaries2(self, mode, expected_value):
58
+ def shift(x):
59
+ return (x[0] - 0.9,)
60
+
61
+ data = numpy.array([1, 2, 3, 4])
62
+ assert_array_equal(
63
+ expected_value,
64
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
65
+ output_shape=(4,)))
66
+
67
+ @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
68
+ 'grid-wrap', 'grid-constant',
69
+ 'nearest'])
70
+ @pytest.mark.parametrize('order', range(6))
71
+ def test_boundary_spline_accuracy(self, mode, order):
72
+ """Tests based on examples from gh-2640"""
73
+ data = numpy.arange(-6, 7, dtype=float)
74
+ x = numpy.linspace(-8, 15, num=1000)
75
+ y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
76
+
77
+ # compute expected value using explicit padding via numpy.pad
78
+ npad = 32
79
+ pad_mode = ndimage_to_numpy_mode.get(mode)
80
+ padded = numpy.pad(data, npad, mode=pad_mode)
81
+ expected = ndimage.map_coordinates(padded, [npad + x], order=order,
82
+ mode=mode)
83
+
84
+ atol = 1e-5 if mode == 'grid-constant' else 1e-12
85
+ assert_allclose(y, expected, rtol=1e-7, atol=atol)
86
+
87
+ @pytest.mark.parametrize('order', range(2, 6))
88
+ @pytest.mark.parametrize('dtype', types)
89
+ def test_spline01(self, dtype, order):
90
+ data = numpy.ones([], dtype)
91
+ out = ndimage.spline_filter(data, order=order)
92
+ assert_array_almost_equal(out, 1)
93
+
94
+ @pytest.mark.parametrize('order', range(2, 6))
95
+ @pytest.mark.parametrize('dtype', types)
96
+ def test_spline02(self, dtype, order):
97
+ data = numpy.array([1], dtype)
98
+ out = ndimage.spline_filter(data, order=order)
99
+ assert_array_almost_equal(out, [1])
100
+
101
+ @pytest.mark.parametrize('order', range(2, 6))
102
+ @pytest.mark.parametrize('dtype', types)
103
+ def test_spline03(self, dtype, order):
104
+ data = numpy.ones([], dtype)
105
+ out = ndimage.spline_filter(data, order, output=dtype)
106
+ assert_array_almost_equal(out, 1)
107
+
108
+ @pytest.mark.parametrize('order', range(2, 6))
109
+ @pytest.mark.parametrize('dtype', types)
110
+ def test_spline04(self, dtype, order):
111
+ data = numpy.ones([4], dtype)
112
+ out = ndimage.spline_filter(data, order)
113
+ assert_array_almost_equal(out, [1, 1, 1, 1])
114
+
115
+ @pytest.mark.parametrize('order', range(2, 6))
116
+ @pytest.mark.parametrize('dtype', types)
117
+ def test_spline05(self, dtype, order):
118
+ data = numpy.ones([4, 4], dtype)
119
+ out = ndimage.spline_filter(data, order=order)
120
+ assert_array_almost_equal(out, [[1, 1, 1, 1],
121
+ [1, 1, 1, 1],
122
+ [1, 1, 1, 1],
123
+ [1, 1, 1, 1]])
124
+
125
+ @pytest.mark.parametrize('order', range(0, 6))
126
+ def test_geometric_transform01(self, order):
127
+ data = numpy.array([1])
128
+
129
+ def mapping(x):
130
+ return x
131
+
132
+ out = ndimage.geometric_transform(data, mapping, data.shape,
133
+ order=order)
134
+ assert_array_almost_equal(out, [1])
135
+
136
+ @pytest.mark.parametrize('order', range(0, 6))
137
+ def test_geometric_transform02(self, order):
138
+ data = numpy.ones([4])
139
+
140
+ def mapping(x):
141
+ return x
142
+
143
+ out = ndimage.geometric_transform(data, mapping, data.shape,
144
+ order=order)
145
+ assert_array_almost_equal(out, [1, 1, 1, 1])
146
+
147
+ @pytest.mark.parametrize('order', range(0, 6))
148
+ def test_geometric_transform03(self, order):
149
+ data = numpy.ones([4])
150
+
151
+ def mapping(x):
152
+ return (x[0] - 1,)
153
+
154
+ out = ndimage.geometric_transform(data, mapping, data.shape,
155
+ order=order)
156
+ assert_array_almost_equal(out, [0, 1, 1, 1])
157
+
158
+ @pytest.mark.parametrize('order', range(0, 6))
159
+ def test_geometric_transform04(self, order):
160
+ data = numpy.array([4, 1, 3, 2])
161
+
162
+ def mapping(x):
163
+ return (x[0] - 1,)
164
+
165
+ out = ndimage.geometric_transform(data, mapping, data.shape,
166
+ order=order)
167
+ assert_array_almost_equal(out, [0, 4, 1, 3])
168
+
169
+ @pytest.mark.parametrize('order', range(0, 6))
170
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
171
+ def test_geometric_transform05(self, order, dtype):
172
+ data = numpy.array([[1, 1, 1, 1],
173
+ [1, 1, 1, 1],
174
+ [1, 1, 1, 1]], dtype=dtype)
175
+ expected = numpy.array([[0, 1, 1, 1],
176
+ [0, 1, 1, 1],
177
+ [0, 1, 1, 1]], dtype=dtype)
178
+ if data.dtype.kind == 'c':
179
+ data -= 1j * data
180
+ expected -= 1j * expected
181
+
182
+ def mapping(x):
183
+ return (x[0], x[1] - 1)
184
+
185
+ out = ndimage.geometric_transform(data, mapping, data.shape,
186
+ order=order)
187
+ assert_array_almost_equal(out, expected)
188
+
189
+ @pytest.mark.parametrize('order', range(0, 6))
190
+ def test_geometric_transform06(self, order):
191
+ data = numpy.array([[4, 1, 3, 2],
192
+ [7, 6, 8, 5],
193
+ [3, 5, 3, 6]])
194
+
195
+ def mapping(x):
196
+ return (x[0], x[1] - 1)
197
+
198
+ out = ndimage.geometric_transform(data, mapping, data.shape,
199
+ order=order)
200
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
201
+ [0, 7, 6, 8],
202
+ [0, 3, 5, 3]])
203
+
204
+ @pytest.mark.parametrize('order', range(0, 6))
205
+ def test_geometric_transform07(self, order):
206
+ data = numpy.array([[4, 1, 3, 2],
207
+ [7, 6, 8, 5],
208
+ [3, 5, 3, 6]])
209
+
210
+ def mapping(x):
211
+ return (x[0] - 1, x[1])
212
+
213
+ out = ndimage.geometric_transform(data, mapping, data.shape,
214
+ order=order)
215
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
216
+ [4, 1, 3, 2],
217
+ [7, 6, 8, 5]])
218
+
219
+ @pytest.mark.parametrize('order', range(0, 6))
220
+ def test_geometric_transform08(self, order):
221
+ data = numpy.array([[4, 1, 3, 2],
222
+ [7, 6, 8, 5],
223
+ [3, 5, 3, 6]])
224
+
225
+ def mapping(x):
226
+ return (x[0] - 1, x[1] - 1)
227
+
228
+ out = ndimage.geometric_transform(data, mapping, data.shape,
229
+ order=order)
230
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
231
+ [0, 4, 1, 3],
232
+ [0, 7, 6, 8]])
233
+
234
+ @pytest.mark.parametrize('order', range(0, 6))
235
+ def test_geometric_transform10(self, order):
236
+ data = numpy.array([[4, 1, 3, 2],
237
+ [7, 6, 8, 5],
238
+ [3, 5, 3, 6]])
239
+
240
+ def mapping(x):
241
+ return (x[0] - 1, x[1] - 1)
242
+
243
+ if (order > 1):
244
+ filtered = ndimage.spline_filter(data, order=order)
245
+ else:
246
+ filtered = data
247
+ out = ndimage.geometric_transform(filtered, mapping, data.shape,
248
+ order=order, prefilter=False)
249
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
250
+ [0, 4, 1, 3],
251
+ [0, 7, 6, 8]])
252
+
253
+ @pytest.mark.parametrize('order', range(0, 6))
254
+ def test_geometric_transform13(self, order):
255
+ data = numpy.ones([2], numpy.float64)
256
+
257
+ def mapping(x):
258
+ return (x[0] // 2,)
259
+
260
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
261
+ assert_array_almost_equal(out, [1, 1, 1, 1])
262
+
263
+ @pytest.mark.parametrize('order', range(0, 6))
264
+ def test_geometric_transform14(self, order):
265
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
266
+
267
+ def mapping(x):
268
+ return (2 * x[0],)
269
+
270
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
271
+ assert_array_almost_equal(out, [1, 2, 3, 4])
272
+
273
+ @pytest.mark.parametrize('order', range(0, 6))
274
+ def test_geometric_transform15(self, order):
275
+ data = [1, 2, 3, 4]
276
+
277
+ def mapping(x):
278
+ return (x[0] / 2,)
279
+
280
+ out = ndimage.geometric_transform(data, mapping, [8], order=order)
281
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
282
+
283
+ @pytest.mark.parametrize('order', range(0, 6))
284
+ def test_geometric_transform16(self, order):
285
+ data = [[1, 2, 3, 4],
286
+ [5, 6, 7, 8],
287
+ [9.0, 10, 11, 12]]
288
+
289
+ def mapping(x):
290
+ return (x[0], x[1] * 2)
291
+
292
+ out = ndimage.geometric_transform(data, mapping, (3, 2),
293
+ order=order)
294
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
295
+
296
+ @pytest.mark.parametrize('order', range(0, 6))
297
+ def test_geometric_transform17(self, order):
298
+ data = [[1, 2, 3, 4],
299
+ [5, 6, 7, 8],
300
+ [9, 10, 11, 12]]
301
+
302
+ def mapping(x):
303
+ return (x[0] * 2, x[1])
304
+
305
+ out = ndimage.geometric_transform(data, mapping, (1, 4),
306
+ order=order)
307
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
308
+
309
+ @pytest.mark.parametrize('order', range(0, 6))
310
+ def test_geometric_transform18(self, order):
311
+ data = [[1, 2, 3, 4],
312
+ [5, 6, 7, 8],
313
+ [9, 10, 11, 12]]
314
+
315
+ def mapping(x):
316
+ return (x[0] * 2, x[1] * 2)
317
+
318
+ out = ndimage.geometric_transform(data, mapping, (1, 2),
319
+ order=order)
320
+ assert_array_almost_equal(out, [[1, 3]])
321
+
322
+ @pytest.mark.parametrize('order', range(0, 6))
323
+ def test_geometric_transform19(self, order):
324
+ data = [[1, 2, 3, 4],
325
+ [5, 6, 7, 8],
326
+ [9, 10, 11, 12]]
327
+
328
+ def mapping(x):
329
+ return (x[0], x[1] / 2)
330
+
331
+ out = ndimage.geometric_transform(data, mapping, (3, 8),
332
+ order=order)
333
+ assert_array_almost_equal(out[..., ::2], data)
334
+
335
+ @pytest.mark.parametrize('order', range(0, 6))
336
+ def test_geometric_transform20(self, order):
337
+ data = [[1, 2, 3, 4],
338
+ [5, 6, 7, 8],
339
+ [9, 10, 11, 12]]
340
+
341
+ def mapping(x):
342
+ return (x[0] / 2, x[1])
343
+
344
+ out = ndimage.geometric_transform(data, mapping, (6, 4),
345
+ order=order)
346
+ assert_array_almost_equal(out[::2, ...], data)
347
+
348
+ @pytest.mark.parametrize('order', range(0, 6))
349
+ def test_geometric_transform21(self, order):
350
+ data = [[1, 2, 3, 4],
351
+ [5, 6, 7, 8],
352
+ [9, 10, 11, 12]]
353
+
354
+ def mapping(x):
355
+ return (x[0] / 2, x[1] / 2)
356
+
357
+ out = ndimage.geometric_transform(data, mapping, (6, 8),
358
+ order=order)
359
+ assert_array_almost_equal(out[::2, ::2], data)
360
+
361
+ @pytest.mark.parametrize('order', range(0, 6))
362
+ def test_geometric_transform22(self, order):
363
+ data = numpy.array([[1, 2, 3, 4],
364
+ [5, 6, 7, 8],
365
+ [9, 10, 11, 12]], numpy.float64)
366
+
367
+ def mapping1(x):
368
+ return (x[0] / 2, x[1] / 2)
369
+
370
+ def mapping2(x):
371
+ return (x[0] * 2, x[1] * 2)
372
+
373
+ out = ndimage.geometric_transform(data, mapping1,
374
+ (6, 8), order=order)
375
+ out = ndimage.geometric_transform(out, mapping2,
376
+ (3, 4), order=order)
377
+ assert_array_almost_equal(out, data)
378
+
379
+ @pytest.mark.parametrize('order', range(0, 6))
380
+ def test_geometric_transform23(self, order):
381
+ data = [[1, 2, 3, 4],
382
+ [5, 6, 7, 8],
383
+ [9, 10, 11, 12]]
384
+
385
+ def mapping(x):
386
+ return (1, x[0] * 2)
387
+
388
+ out = ndimage.geometric_transform(data, mapping, (2,), order=order)
389
+ out = out.astype(numpy.int32)
390
+ assert_array_almost_equal(out, [5, 7])
391
+
392
+ @pytest.mark.parametrize('order', range(0, 6))
393
+ def test_geometric_transform24(self, order):
394
+ data = [[1, 2, 3, 4],
395
+ [5, 6, 7, 8],
396
+ [9, 10, 11, 12]]
397
+
398
+ def mapping(x, a, b):
399
+ return (a, x[0] * b)
400
+
401
+ out = ndimage.geometric_transform(
402
+ data, mapping, (2,), order=order, extra_arguments=(1,),
403
+ extra_keywords={'b': 2})
404
+ assert_array_almost_equal(out, [5, 7])
405
+
406
+ def test_geometric_transform_grid_constant_order1(self):
407
+ # verify interpolation outside the original bounds
408
+ x = numpy.array([[1, 2, 3],
409
+ [4, 5, 6]], dtype=float)
410
+
411
+ def mapping(x):
412
+ return (x[0] - 0.5), (x[1] - 0.5)
413
+
414
+ expected_result = numpy.array([[0.25, 0.75, 1.25],
415
+ [1.25, 3.00, 4.00]])
416
+ assert_array_almost_equal(
417
+ ndimage.geometric_transform(x, mapping, mode='grid-constant',
418
+ order=1),
419
+ expected_result,
420
+ )
421
+
422
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
423
+ 'mirror', 'reflect'])
424
+ @pytest.mark.parametrize('order', range(6))
425
+ def test_geometric_transform_vs_padded(self, order, mode):
426
+ x = numpy.arange(144, dtype=float).reshape(12, 12)
427
+
428
+ def mapping(x):
429
+ return (x[0] - 0.4), (x[1] + 2.3)
430
+
431
+ # Manually pad and then extract center after the transform to get the
432
+ # expected result.
433
+ npad = 24
434
+ pad_mode = ndimage_to_numpy_mode.get(mode)
435
+ xp = numpy.pad(x, npad, mode=pad_mode)
436
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
437
+ expected_result = ndimage.geometric_transform(
438
+ xp, mapping, mode=mode, order=order)[center_slice]
439
+
440
+ assert_allclose(
441
+ ndimage.geometric_transform(x, mapping, mode=mode,
442
+ order=order),
443
+ expected_result,
444
+ rtol=1e-7,
445
+ )
446
+
447
+ def test_geometric_transform_endianness_with_output_parameter(self):
448
+ # geometric transform given output ndarray or dtype with
449
+ # non-native endianness. see issue #4127
450
+ data = numpy.array([1])
451
+
452
+ def mapping(x):
453
+ return x
454
+
455
+ for out in [data.dtype, data.dtype.newbyteorder(),
456
+ numpy.empty_like(data),
457
+ numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
458
+ returned = ndimage.geometric_transform(data, mapping, data.shape,
459
+ output=out)
460
+ result = out if returned is None else returned
461
+ assert_array_almost_equal(result, [1])
462
+
463
+ def test_geometric_transform_with_string_output(self):
464
+ data = numpy.array([1])
465
+
466
+ def mapping(x):
467
+ return x
468
+
469
+ out = ndimage.geometric_transform(data, mapping, output='f')
470
+ assert_(out.dtype is numpy.dtype('f'))
471
+ assert_array_almost_equal(out, [1])
472
+
473
+ @pytest.mark.parametrize('order', range(0, 6))
474
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
475
+ def test_map_coordinates01(self, order, dtype):
476
+ data = numpy.array([[4, 1, 3, 2],
477
+ [7, 6, 8, 5],
478
+ [3, 5, 3, 6]])
479
+ expected = numpy.array([[0, 0, 0, 0],
480
+ [0, 4, 1, 3],
481
+ [0, 7, 6, 8]])
482
+ if data.dtype.kind == 'c':
483
+ data = data - 1j * data
484
+ expected = expected - 1j * expected
485
+
486
+ idx = numpy.indices(data.shape)
487
+ idx -= 1
488
+
489
+ out = ndimage.map_coordinates(data, idx, order=order)
490
+ assert_array_almost_equal(out, expected)
491
+
492
+ @pytest.mark.parametrize('order', range(0, 6))
493
+ def test_map_coordinates02(self, order):
494
+ data = numpy.array([[4, 1, 3, 2],
495
+ [7, 6, 8, 5],
496
+ [3, 5, 3, 6]])
497
+ idx = numpy.indices(data.shape, numpy.float64)
498
+ idx -= 0.5
499
+
500
+ out1 = ndimage.shift(data, 0.5, order=order)
501
+ out2 = ndimage.map_coordinates(data, idx, order=order)
502
+ assert_array_almost_equal(out1, out2)
503
+
504
+ def test_map_coordinates03(self):
505
+ data = numpy.array([[4, 1, 3, 2],
506
+ [7, 6, 8, 5],
507
+ [3, 5, 3, 6]], order='F')
508
+ idx = numpy.indices(data.shape) - 1
509
+ out = ndimage.map_coordinates(data, idx)
510
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
511
+ [0, 4, 1, 3],
512
+ [0, 7, 6, 8]])
513
+ assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
514
+ idx = numpy.indices(data[::2].shape) - 1
515
+ out = ndimage.map_coordinates(data[::2], idx)
516
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
517
+ [0, 4, 1, 3]])
518
+ assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
519
+ idx = numpy.indices(data[:, ::2].shape) - 1
520
+ out = ndimage.map_coordinates(data[:, ::2], idx)
521
+ assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
522
+ assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
523
+
524
+ def test_map_coordinates_endianness_with_output_parameter(self):
525
+ # output parameter given as array or dtype with either endianness
526
+ # see issue #4127
527
+ data = numpy.array([[1, 2], [7, 6]])
528
+ expected = numpy.array([[0, 0], [0, 1]])
529
+ idx = numpy.indices(data.shape)
530
+ idx -= 1
531
+ for out in [
532
+ data.dtype,
533
+ data.dtype.newbyteorder(),
534
+ numpy.empty_like(expected),
535
+ numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
536
+ ]:
537
+ returned = ndimage.map_coordinates(data, idx, output=out)
538
+ result = out if returned is None else returned
539
+ assert_array_almost_equal(result, expected)
540
+
541
+ def test_map_coordinates_with_string_output(self):
542
+ data = numpy.array([[1]])
543
+ idx = numpy.indices(data.shape)
544
+ out = ndimage.map_coordinates(data, idx, output='f')
545
+ assert_(out.dtype is numpy.dtype('f'))
546
+ assert_array_almost_equal(out, [[1]])
547
+
548
+ @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
549
+ reason='do not run on 32 bit or windows '
550
+ '(no sparse memory)')
551
+ def test_map_coordinates_large_data(self):
552
+ # check that map_coordinates does not crash on large data
553
+ try:
554
+ n = 30000
555
+ a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
556
+ # fill the part we might read
557
+ a[n - 3:, n - 3:] = 0
558
+ ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
559
+ except MemoryError as e:
560
+ raise pytest.skip('Not enough memory available') from e
561
+
562
+ @pytest.mark.parametrize('order', range(0, 6))
563
+ def test_affine_transform01(self, order):
564
+ data = numpy.array([1])
565
+ out = ndimage.affine_transform(data, [[1]], order=order)
566
+ assert_array_almost_equal(out, [1])
567
+
568
+ @pytest.mark.parametrize('order', range(0, 6))
569
+ def test_affine_transform02(self, order):
570
+ data = numpy.ones([4])
571
+ out = ndimage.affine_transform(data, [[1]], order=order)
572
+ assert_array_almost_equal(out, [1, 1, 1, 1])
573
+
574
+ @pytest.mark.parametrize('order', range(0, 6))
575
+ def test_affine_transform03(self, order):
576
+ data = numpy.ones([4])
577
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
578
+ assert_array_almost_equal(out, [0, 1, 1, 1])
579
+
580
+ @pytest.mark.parametrize('order', range(0, 6))
581
+ def test_affine_transform04(self, order):
582
+ data = numpy.array([4, 1, 3, 2])
583
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
584
+ assert_array_almost_equal(out, [0, 4, 1, 3])
585
+
586
+ @pytest.mark.parametrize('order', range(0, 6))
587
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
588
+ def test_affine_transform05(self, order, dtype):
589
+ data = numpy.array([[1, 1, 1, 1],
590
+ [1, 1, 1, 1],
591
+ [1, 1, 1, 1]], dtype=dtype)
592
+ expected = numpy.array([[0, 1, 1, 1],
593
+ [0, 1, 1, 1],
594
+ [0, 1, 1, 1]], dtype=dtype)
595
+ if data.dtype.kind == 'c':
596
+ data -= 1j * data
597
+ expected -= 1j * expected
598
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
599
+ [0, -1], order=order)
600
+ assert_array_almost_equal(out, expected)
601
+
602
+ @pytest.mark.parametrize('order', range(0, 6))
603
+ def test_affine_transform06(self, order):
604
+ data = numpy.array([[4, 1, 3, 2],
605
+ [7, 6, 8, 5],
606
+ [3, 5, 3, 6]])
607
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
608
+ [0, -1], order=order)
609
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
610
+ [0, 7, 6, 8],
611
+ [0, 3, 5, 3]])
612
+
613
+ @pytest.mark.parametrize('order', range(0, 6))
614
+ def test_affine_transform07(self, order):
615
+ data = numpy.array([[4, 1, 3, 2],
616
+ [7, 6, 8, 5],
617
+ [3, 5, 3, 6]])
618
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
619
+ [-1, 0], order=order)
620
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
621
+ [4, 1, 3, 2],
622
+ [7, 6, 8, 5]])
623
+
624
+ @pytest.mark.parametrize('order', range(0, 6))
625
+ def test_affine_transform08(self, order):
626
+ data = numpy.array([[4, 1, 3, 2],
627
+ [7, 6, 8, 5],
628
+ [3, 5, 3, 6]])
629
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
630
+ [-1, -1], order=order)
631
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
632
+ [0, 4, 1, 3],
633
+ [0, 7, 6, 8]])
634
+
635
+ @pytest.mark.parametrize('order', range(0, 6))
636
+ def test_affine_transform09(self, order):
637
+ data = numpy.array([[4, 1, 3, 2],
638
+ [7, 6, 8, 5],
639
+ [3, 5, 3, 6]])
640
+ if (order > 1):
641
+ filtered = ndimage.spline_filter(data, order=order)
642
+ else:
643
+ filtered = data
644
+ out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
645
+ [-1, -1], order=order,
646
+ prefilter=False)
647
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
648
+ [0, 4, 1, 3],
649
+ [0, 7, 6, 8]])
650
+
651
+ @pytest.mark.parametrize('order', range(0, 6))
652
+ def test_affine_transform10(self, order):
653
+ data = numpy.ones([2], numpy.float64)
654
+ out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
655
+ order=order)
656
+ assert_array_almost_equal(out, [1, 1, 1, 0])
657
+
658
+ @pytest.mark.parametrize('order', range(0, 6))
659
+ def test_affine_transform11(self, order):
660
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
661
+ out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
662
+ assert_array_almost_equal(out, [1, 2, 3, 4])
663
+
664
+ @pytest.mark.parametrize('order', range(0, 6))
665
+ def test_affine_transform12(self, order):
666
+ data = [1, 2, 3, 4]
667
+ out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
668
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
669
+
670
+ @pytest.mark.parametrize('order', range(0, 6))
671
+ def test_affine_transform13(self, order):
672
+ data = [[1, 2, 3, 4],
673
+ [5, 6, 7, 8],
674
+ [9.0, 10, 11, 12]]
675
+ out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
676
+ order=order)
677
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
678
+
679
+ @pytest.mark.parametrize('order', range(0, 6))
680
+ def test_affine_transform14(self, order):
681
+ data = [[1, 2, 3, 4],
682
+ [5, 6, 7, 8],
683
+ [9, 10, 11, 12]]
684
+ out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
685
+ order=order)
686
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
687
+
688
+ @pytest.mark.parametrize('order', range(0, 6))
689
+ def test_affine_transform15(self, order):
690
+ data = [[1, 2, 3, 4],
691
+ [5, 6, 7, 8],
692
+ [9, 10, 11, 12]]
693
+ out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
694
+ order=order)
695
+ assert_array_almost_equal(out, [[1, 3]])
696
+
697
+ @pytest.mark.parametrize('order', range(0, 6))
698
+ def test_affine_transform16(self, order):
699
+ data = [[1, 2, 3, 4],
700
+ [5, 6, 7, 8],
701
+ [9, 10, 11, 12]]
702
+ out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
703
+ (3, 8), order=order)
704
+ assert_array_almost_equal(out[..., ::2], data)
705
+
706
+ @pytest.mark.parametrize('order', range(0, 6))
707
+ def test_affine_transform17(self, order):
708
+ data = [[1, 2, 3, 4],
709
+ [5, 6, 7, 8],
710
+ [9, 10, 11, 12]]
711
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
712
+ (6, 4), order=order)
713
+ assert_array_almost_equal(out[::2, ...], data)
714
+
715
+ @pytest.mark.parametrize('order', range(0, 6))
716
+ def test_affine_transform18(self, order):
717
+ data = [[1, 2, 3, 4],
718
+ [5, 6, 7, 8],
719
+ [9, 10, 11, 12]]
720
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
721
+ (6, 8), order=order)
722
+ assert_array_almost_equal(out[::2, ::2], data)
723
+
724
+ @pytest.mark.parametrize('order', range(0, 6))
725
+ def test_affine_transform19(self, order):
726
+ data = numpy.array([[1, 2, 3, 4],
727
+ [5, 6, 7, 8],
728
+ [9, 10, 11, 12]], numpy.float64)
729
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
730
+ (6, 8), order=order)
731
+ out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
732
+ (3, 4), order=order)
733
+ assert_array_almost_equal(out, data)
734
+
735
+ @pytest.mark.parametrize('order', range(0, 6))
736
+ def test_affine_transform20(self, order):
737
+ data = [[1, 2, 3, 4],
738
+ [5, 6, 7, 8],
739
+ [9, 10, 11, 12]]
740
+ out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
741
+ order=order)
742
+ assert_array_almost_equal(out, [1, 3])
743
+
744
+ @pytest.mark.parametrize('order', range(0, 6))
745
+ def test_affine_transform21(self, order):
746
+ data = [[1, 2, 3, 4],
747
+ [5, 6, 7, 8],
748
+ [9, 10, 11, 12]]
749
+ out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
750
+ order=order)
751
+ assert_array_almost_equal(out, [1, 9])
752
+
753
+ @pytest.mark.parametrize('order', range(0, 6))
754
+ def test_affine_transform22(self, order):
755
+ # shift and offset interaction; see issue #1547
756
+ data = numpy.array([4, 1, 3, 2])
757
+ out = ndimage.affine_transform(data, [[2]], [-1], (3,),
758
+ order=order)
759
+ assert_array_almost_equal(out, [0, 1, 2])
760
+
761
+ @pytest.mark.parametrize('order', range(0, 6))
762
+ def test_affine_transform23(self, order):
763
+ # shift and offset interaction; see issue #1547
764
+ data = numpy.array([4, 1, 3, 2])
765
+ out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
766
+ order=order)
767
+ assert_array_almost_equal(out[::2], [0, 4, 1, 3])
768
+
769
+ @pytest.mark.parametrize('order', range(0, 6))
770
+ def test_affine_transform24(self, order):
771
+ # consistency between diagonal and non-diagonal case; see issue #1547
772
+ data = numpy.array([4, 1, 3, 2])
773
+ with suppress_warnings() as sup:
774
+ sup.filter(UserWarning,
775
+ 'The behavior of affine_transform with a 1-D array .* '
776
+ 'has changed')
777
+ out1 = ndimage.affine_transform(data, [2], -1, order=order)
778
+ out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
779
+ assert_array_almost_equal(out1, out2)
780
+
781
+ @pytest.mark.parametrize('order', range(0, 6))
782
+ def test_affine_transform25(self, order):
783
+ # consistency between diagonal and non-diagonal case; see issue #1547
784
+ data = numpy.array([4, 1, 3, 2])
785
+ with suppress_warnings() as sup:
786
+ sup.filter(UserWarning,
787
+ 'The behavior of affine_transform with a 1-D array .* '
788
+ 'has changed')
789
+ out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
790
+ out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
791
+ assert_array_almost_equal(out1, out2)
792
+
793
+ @pytest.mark.parametrize('order', range(0, 6))
794
+ def test_affine_transform26(self, order):
795
+ # test homogeneous coordinates
796
+ data = numpy.array([[4, 1, 3, 2],
797
+ [7, 6, 8, 5],
798
+ [3, 5, 3, 6]])
799
+ if (order > 1):
800
+ filtered = ndimage.spline_filter(data, order=order)
801
+ else:
802
+ filtered = data
803
+ tform_original = numpy.eye(2)
804
+ offset_original = -numpy.ones((2, 1))
805
+ tform_h1 = numpy.hstack((tform_original, offset_original))
806
+ tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
807
+ out1 = ndimage.affine_transform(filtered, tform_original,
808
+ offset_original.ravel(),
809
+ order=order, prefilter=False)
810
+ out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
811
+ prefilter=False)
812
+ out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
813
+ prefilter=False)
814
+ for out in [out1, out2, out3]:
815
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
816
+ [0, 4, 1, 3],
817
+ [0, 7, 6, 8]])
818
+
819
+ def test_affine_transform27(self):
820
+ # test valid homogeneous transformation matrix
821
+ data = numpy.array([[4, 1, 3, 2],
822
+ [7, 6, 8, 5],
823
+ [3, 5, 3, 6]])
824
+ tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
825
+ tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
826
+ assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
827
+
828
+ def test_affine_transform_1d_endianness_with_output_parameter(self):
829
+ # 1d affine transform given output ndarray or dtype with
830
+ # either endianness. see issue #7388
831
+ data = numpy.ones((2, 2))
832
+ for out in [numpy.empty_like(data),
833
+ numpy.empty_like(data).astype(data.dtype.newbyteorder()),
834
+ data.dtype, data.dtype.newbyteorder()]:
835
+ with suppress_warnings() as sup:
836
+ sup.filter(UserWarning,
837
+ 'The behavior of affine_transform with a 1-D array '
838
+ '.* has changed')
839
+ returned = ndimage.affine_transform(data, [1, 1], output=out)
840
+ result = out if returned is None else returned
841
+ assert_array_almost_equal(result, [[1, 1], [1, 1]])
842
+
843
+ def test_affine_transform_multi_d_endianness_with_output_parameter(self):
844
+ # affine transform given output ndarray or dtype with either endianness
845
+ # see issue #4127
846
+ data = numpy.array([1])
847
+ for out in [data.dtype, data.dtype.newbyteorder(),
848
+ numpy.empty_like(data),
849
+ numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
850
+ returned = ndimage.affine_transform(data, [[1]], output=out)
851
+ result = out if returned is None else returned
852
+ assert_array_almost_equal(result, [1])
853
+
854
+ def test_affine_transform_output_shape(self):
855
+ # don't require output_shape when an output array of a different size is given
856
+ data = numpy.arange(8, dtype=numpy.float64)
857
+ out = numpy.ones((16,))
858
+
859
+ ndimage.affine_transform(data, [[1]], output=out)
860
+ assert_array_almost_equal(out[:8], data)
861
+
862
+ # mismatched output shape raises an error
863
+ with pytest.raises(RuntimeError):
864
+ ndimage.affine_transform(
865
+ data, [[1]], output=out, output_shape=(12,))
866
+
867
+ def test_affine_transform_with_string_output(self):
868
+ data = numpy.array([1])
869
+ out = ndimage.affine_transform(data, [[1]], output='f')
870
+ assert_(out.dtype is numpy.dtype('f'))
871
+ assert_array_almost_equal(out, [1])
872
+
873
+ @pytest.mark.parametrize('shift',
874
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
875
+ @pytest.mark.parametrize('order', range(0, 6))
876
+ def test_affine_transform_shift_via_grid_wrap(self, shift, order):
877
+ # For mode 'grid-wrap', integer shifts should match numpy.roll
878
+ x = numpy.array([[0, 1],
879
+ [2, 3]])
880
+ affine = numpy.zeros((2, 3))
881
+ affine[:2, :2] = numpy.eye(2)
882
+ affine[:, 2] = shift
883
+ assert_array_almost_equal(
884
+ ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
885
+ numpy.roll(x, shift, axis=(0, 1)),
886
+ )
887
+
888
+ @pytest.mark.parametrize('order', range(0, 6))
889
+ def test_affine_transform_shift_reflect(self, order):
890
+ # shift by x.shape results in reflection
891
+ x = numpy.array([[0, 1, 2],
892
+ [3, 4, 5]])
893
+ affine = numpy.zeros((2, 3))
894
+ affine[:2, :2] = numpy.eye(2)
895
+ affine[:, 2] = x.shape
896
+ assert_array_almost_equal(
897
+ ndimage.affine_transform(x, affine, mode='reflect', order=order),
898
+ x[::-1, ::-1],
899
+ )
900
+
901
+ @pytest.mark.parametrize('order', range(0, 6))
902
+ def test_shift01(self, order):
903
+ data = numpy.array([1])
904
+ out = ndimage.shift(data, [1], order=order)
905
+ assert_array_almost_equal(out, [0])
906
+
907
+ @pytest.mark.parametrize('order', range(0, 6))
908
+ def test_shift02(self, order):
909
+ data = numpy.ones([4])
910
+ out = ndimage.shift(data, [1], order=order)
911
+ assert_array_almost_equal(out, [0, 1, 1, 1])
912
+
913
+ @pytest.mark.parametrize('order', range(0, 6))
914
+ def test_shift03(self, order):
915
+ data = numpy.ones([4])
916
+ out = ndimage.shift(data, -1, order=order)
917
+ assert_array_almost_equal(out, [1, 1, 1, 0])
918
+
919
+ @pytest.mark.parametrize('order', range(0, 6))
920
+ def test_shift04(self, order):
921
+ data = numpy.array([4, 1, 3, 2])
922
+ out = ndimage.shift(data, 1, order=order)
923
+ assert_array_almost_equal(out, [0, 4, 1, 3])
924
+
925
+ @pytest.mark.parametrize('order', range(0, 6))
926
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
927
+ def test_shift05(self, order, dtype):
928
+ data = numpy.array([[1, 1, 1, 1],
929
+ [1, 1, 1, 1],
930
+ [1, 1, 1, 1]], dtype=dtype)
931
+ expected = numpy.array([[0, 1, 1, 1],
932
+ [0, 1, 1, 1],
933
+ [0, 1, 1, 1]], dtype=dtype)
934
+ if data.dtype.kind == 'c':
935
+ data -= 1j * data
936
+ expected -= 1j * expected
937
+ out = ndimage.shift(data, [0, 1], order=order)
938
+ assert_array_almost_equal(out, expected)
939
+
940
+ @pytest.mark.parametrize('order', range(0, 6))
941
+ @pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
942
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
943
+ def test_shift_with_nonzero_cval(self, order, mode, dtype):
944
+ data = numpy.array([[1, 1, 1, 1],
945
+ [1, 1, 1, 1],
946
+ [1, 1, 1, 1]], dtype=dtype)
947
+
948
+ expected = numpy.array([[0, 1, 1, 1],
949
+ [0, 1, 1, 1],
950
+ [0, 1, 1, 1]], dtype=dtype)
951
+
952
+ if data.dtype.kind == 'c':
953
+ data -= 1j * data
954
+ expected -= 1j * expected
955
+ cval = 5.0
956
+ expected[:, 0] = cval # specific to shift of [0, 1] used below
957
+ out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
958
+ assert_array_almost_equal(out, expected)
959
+
960
+ @pytest.mark.parametrize('order', range(0, 6))
961
+ def test_shift06(self, order):
962
+ data = numpy.array([[4, 1, 3, 2],
963
+ [7, 6, 8, 5],
964
+ [3, 5, 3, 6]])
965
+ out = ndimage.shift(data, [0, 1], order=order)
966
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
967
+ [0, 7, 6, 8],
968
+ [0, 3, 5, 3]])
969
+
970
+ @pytest.mark.parametrize('order', range(0, 6))
971
+ def test_shift07(self, order):
972
+ data = numpy.array([[4, 1, 3, 2],
973
+ [7, 6, 8, 5],
974
+ [3, 5, 3, 6]])
975
+ out = ndimage.shift(data, [1, 0], order=order)
976
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
977
+ [4, 1, 3, 2],
978
+ [7, 6, 8, 5]])
979
+
980
+ @pytest.mark.parametrize('order', range(0, 6))
981
+ def test_shift08(self, order):
982
+ data = numpy.array([[4, 1, 3, 2],
983
+ [7, 6, 8, 5],
984
+ [3, 5, 3, 6]])
985
+ out = ndimage.shift(data, [1, 1], order=order)
986
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
987
+ [0, 4, 1, 3],
988
+ [0, 7, 6, 8]])
989
+
990
+ @pytest.mark.parametrize('order', range(0, 6))
991
+ def test_shift09(self, order):
992
+ data = numpy.array([[4, 1, 3, 2],
993
+ [7, 6, 8, 5],
994
+ [3, 5, 3, 6]])
995
+ if (order > 1):
996
+ filtered = ndimage.spline_filter(data, order=order)
997
+ else:
998
+ filtered = data
999
+ out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
1000
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
1001
+ [0, 4, 1, 3],
1002
+ [0, 7, 6, 8]])
1003
+
1004
+ @pytest.mark.parametrize('shift',
1005
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
1006
+ @pytest.mark.parametrize('order', range(0, 6))
1007
+ def test_shift_grid_wrap(self, shift, order):
1008
+ # For mode 'grid-wrap', integer shifts should match numpy.roll
1009
+ x = numpy.array([[0, 1],
1010
+ [2, 3]])
1011
+ assert_array_almost_equal(
1012
+ ndimage.shift(x, shift, mode='grid-wrap', order=order),
1013
+ numpy.roll(x, shift, axis=(0, 1)),
1014
+ )
1015
+
1016
+ @pytest.mark.parametrize('shift',
1017
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
1018
+ @pytest.mark.parametrize('order', range(0, 6))
1019
+ def test_shift_grid_constant1(self, shift, order):
1020
+ # For integer shifts, 'constant' and 'grid-constant' should be equal
1021
+ x = numpy.arange(20).reshape((5, 4))
1022
+ assert_array_almost_equal(
1023
+ ndimage.shift(x, shift, mode='grid-constant', order=order),
1024
+ ndimage.shift(x, shift, mode='constant', order=order),
1025
+ )
1026
+
1027
+ def test_shift_grid_constant_order1(self):
1028
+ x = numpy.array([[1, 2, 3],
1029
+ [4, 5, 6]], dtype=float)
1030
+ expected_result = numpy.array([[0.25, 0.75, 1.25],
1031
+ [1.25, 3.00, 4.00]])
1032
+ assert_array_almost_equal(
1033
+ ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
1034
+ expected_result,
1035
+ )
1036
+
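The expected values above can be reproduced by hand, which may help when reading this test: with a shift of (0.5, 0.5), output[i, j] samples the input at (i - 0.5, j - 0.5), 'grid-constant' treats everything outside the array as cval=0, and order-1 interpolation averages the four surrounding samples with equal weight. A minimal sketch of that arithmetic (my own illustration, not part of the test suite):

import numpy as np

x = np.array([[1, 2, 3],
              [4, 5, 6]], dtype=float)
padded = np.pad(x, 1)  # zeros outside the grid, as 'grid-constant' with cval=0 assumes
# each output sample is the mean of the four padded samples surrounding (i - 0.5, j - 0.5)
manual = 0.25 * (padded[0:2, 0:3] + padded[0:2, 1:4] +
                 padded[1:3, 0:3] + padded[1:3, 1:4])
print(manual)  # [[0.25 0.75 1.25]
               #  [1.25 3.   4.  ]]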
1037
+ @pytest.mark.parametrize('order', range(0, 6))
1038
+ def test_shift_reflect(self, order):
1039
+ # shift by x.shape results in reflection
1040
+ x = numpy.array([[0, 1, 2],
1041
+ [3, 4, 5]])
1042
+ assert_array_almost_equal(
1043
+ ndimage.shift(x, x.shape, mode='reflect', order=order),
1044
+ x[::-1, ::-1],
1045
+ )
1046
+
1047
+ @pytest.mark.parametrize('order', range(0, 6))
1048
+ @pytest.mark.parametrize('prefilter', [False, True])
1049
+ def test_shift_nearest_boundary(self, order, prefilter):
1050
+ # verify that shifting at least order // 2 beyond the end of the array
1051
+ # gives a value equal to the edge value.
1052
+ x = numpy.arange(16)
1053
+ kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
1054
+ assert_array_almost_equal(
1055
+ ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
1056
+ )
1057
+ assert_array_almost_equal(
1058
+ ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
1059
+ )
1060
+
1061
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
1062
+ 'mirror', 'reflect'])
1063
+ @pytest.mark.parametrize('order', range(6))
1064
+ def test_shift_vs_padded(self, order, mode):
1065
+ x = numpy.arange(144, dtype=float).reshape(12, 12)
1066
+ shift = (0.4, -2.3)
1067
+
1068
+ # manually pad and then extract center to get expected result
1069
+ npad = 32
1070
+ pad_mode = ndimage_to_numpy_mode.get(mode)
1071
+ xp = numpy.pad(x, npad, mode=pad_mode)
1072
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
1073
+ expected_result = ndimage.shift(
1074
+ xp, shift, mode=mode, order=order)[center_slice]
1075
+
1076
+ assert_allclose(
1077
+ ndimage.shift(x, shift, mode=mode, order=order),
1078
+ expected_result,
1079
+ rtol=1e-7,
1080
+ )
1081
+
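ndimage_to_numpy_mode is defined earlier in this test module and is not shown in this hunk; my understanding (an assumption based on the documented boundary modes, not taken from this diff) is that it maps each ndimage mode to the numpy.pad mode with the same extension behaviour, roughly:

# hypothetical reconstruction of the mapping behind pad_mode above
ndimage_to_numpy_mode = {
    'mirror': 'reflect',        # d c b | a b c d | c b a
    'reflect': 'symmetric',     # d c b a | a b c d | d c b a
    'grid-mirror': 'symmetric',
    'grid-wrap': 'wrap',
    'grid-constant': 'constant',
    'nearest': 'edge',
}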
1082
+ @pytest.mark.parametrize('order', range(0, 6))
1083
+ def test_zoom1(self, order):
1084
+ for z in [2, [2, 2]]:
1085
+ arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
1086
+ arr = ndimage.zoom(arr, z, order=order)
1087
+ assert_equal(arr.shape, (10, 10))
1088
+ assert_(numpy.all(arr[-1, :] != 0))
1089
+ assert_(numpy.all(arr[-1, :] >= (20 - eps)))
1090
+ assert_(numpy.all(arr[0, :] <= (5 + eps)))
1091
+ assert_(numpy.all(arr >= (0 - eps)))
1092
+ assert_(numpy.all(arr <= (24 + eps)))
1093
+
1094
+ def test_zoom2(self):
1095
+ arr = numpy.arange(12).reshape((3, 4))
1096
+ out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
1097
+ assert_array_equal(out, arr)
1098
+
1099
+ def test_zoom3(self):
1100
+ arr = numpy.array([[1, 2]])
1101
+ out1 = ndimage.zoom(arr, (2, 1))
1102
+ out2 = ndimage.zoom(arr, (1, 2))
1103
+
1104
+ assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
1105
+ assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
1106
+
1107
+ @pytest.mark.parametrize('order', range(0, 6))
1108
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
1109
+ def test_zoom_affine01(self, order, dtype):
1110
+ data = numpy.asarray([[1, 2, 3, 4],
1111
+ [5, 6, 7, 8],
1112
+ [9, 10, 11, 12]], dtype=dtype)
1113
+ if data.dtype.kind == 'c':
1114
+ data -= 1j * data
1115
+ with suppress_warnings() as sup:
1116
+ sup.filter(UserWarning,
1117
+ 'The behavior of affine_transform with a 1-D array .* '
1118
+ 'has changed')
1119
+ out = ndimage.affine_transform(data, [0.5, 0.5], 0,
1120
+ (6, 8), order=order)
1121
+ assert_array_almost_equal(out[::2, ::2], data)
1122
+
1123
+ def test_zoom_infinity(self):
1124
+ # Ticket #1419 regression test
1125
+ dim = 8
1126
+ ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
1127
+
1128
+ def test_zoom_zoomfactor_one(self):
1129
+ # Ticket #1122 regression test
1130
+ arr = numpy.zeros((1, 5, 5))
1131
+ zoom = (1.0, 2.0, 2.0)
1132
+
1133
+ out = ndimage.zoom(arr, zoom, cval=7)
1134
+ ref = numpy.zeros((1, 10, 10))
1135
+ assert_array_almost_equal(out, ref)
1136
+
1137
+ def test_zoom_output_shape_roundoff(self):
1138
+ arr = numpy.zeros((3, 11, 25))
1139
+ zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
1140
+ out = ndimage.zoom(arr, zoom)
1141
+ assert_array_equal(out.shape, (4, 15, 29))
1142
+
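The asserted shape follows from rounding the product of each input length with its zoom factor; a quick check of that rule as I read it (an illustration, not an API guarantee):

import numpy as np

shape = np.array([3, 11, 25])
zoom = np.array([4.0 / 3, 15.0 / 11, 29.0 / 25])
print(np.round(shape * zoom).astype(int))  # [ 4 15 29]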
1143
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
1144
+ @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
1145
+ 'mirror', 'grid-wrap', 'grid-mirror',
1146
+ 'grid-constant'])
1147
+ def test_zoom_by_int_order0(self, zoom, mode):
1148
+ # order 0 zoom should be the same as replication via numpy.kron
1149
+ # Note: This is not True for general x shapes when grid_mode is False,
1150
+ # but works here for all modes because the size ratio happens to
1151
+ # always be an integer when x.shape = (2, 2).
1152
+ x = numpy.array([[0, 1],
1153
+ [2, 3]], dtype=float)
1154
+ # x = numpy.arange(16, dtype=float).reshape(4, 4)
1155
+ assert_array_almost_equal(
1156
+ ndimage.zoom(x, zoom, order=0, mode=mode),
1157
+ numpy.kron(x, numpy.ones(zoom))
1158
+ )
1159
+
1160
+ @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
1161
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
1162
+ @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
1163
+ 'grid-wrap', 'grid-constant'])
1164
+ def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
1165
+ # When grid_mode is True, order 0 zoom should be the same as
1166
+ # replication via numpy.kron. The only exceptions to this are the
1167
+ # non-grid modes 'constant' and 'wrap'.
1168
+ x = numpy.arange(numpy.prod(shape), dtype=float).reshape(shape)
1169
+ assert_array_almost_equal(
1170
+ ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
1171
+ numpy.kron(x, numpy.ones(zoom))
1172
+ )
1173
+
1174
+ @pytest.mark.parametrize('mode', ['constant', 'wrap'])
1175
+ def test_zoom_grid_mode_warnings(self, mode):
1176
+ # Warn on use of non-grid modes when grid_mode is True
1177
+ x = numpy.arange(9, dtype=float).reshape((3, 3))
1178
+ with pytest.warns(UserWarning,
1179
+ match="It is recommended to use mode"):
1180
+ ndimage.zoom(x, 2, mode=mode, grid_mode=True)
1181
+
1182
+ @pytest.mark.parametrize('order', range(0, 6))
1183
+ def test_rotate01(self, order):
1184
+ data = numpy.array([[0, 0, 0, 0],
1185
+ [0, 1, 1, 0],
1186
+ [0, 0, 0, 0]], dtype=numpy.float64)
1187
+ out = ndimage.rotate(data, 0, order=order)
1188
+ assert_array_almost_equal(out, data)
1189
+
1190
+ @pytest.mark.parametrize('order', range(0, 6))
1191
+ def test_rotate02(self, order):
1192
+ data = numpy.array([[0, 0, 0, 0],
1193
+ [0, 1, 0, 0],
1194
+ [0, 0, 0, 0]], dtype=numpy.float64)
1195
+ expected = numpy.array([[0, 0, 0],
1196
+ [0, 0, 0],
1197
+ [0, 1, 0],
1198
+ [0, 0, 0]], dtype=numpy.float64)
1199
+ out = ndimage.rotate(data, 90, order=order)
1200
+ assert_array_almost_equal(out, expected)
1201
+
1202
+ @pytest.mark.parametrize('order', range(0, 6))
1203
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
1204
+ def test_rotate03(self, order, dtype):
1205
+ data = numpy.array([[0, 0, 0, 0, 0],
1206
+ [0, 1, 1, 0, 0],
1207
+ [0, 0, 0, 0, 0]], dtype=dtype)
1208
+ expected = numpy.array([[0, 0, 0],
1209
+ [0, 0, 0],
1210
+ [0, 1, 0],
1211
+ [0, 1, 0],
1212
+ [0, 0, 0]], dtype=dtype)
1213
+ if data.dtype.kind == 'c':
1214
+ data -= 1j * data
1215
+ expected -= 1j * expected
1216
+ out = ndimage.rotate(data, 90, order=order)
1217
+ assert_array_almost_equal(out, expected)
1218
+
1219
+ @pytest.mark.parametrize('order', range(0, 6))
1220
+ def test_rotate04(self, order):
1221
+ data = numpy.array([[0, 0, 0, 0, 0],
1222
+ [0, 1, 1, 0, 0],
1223
+ [0, 0, 0, 0, 0]], dtype=numpy.float64)
1224
+ expected = numpy.array([[0, 0, 0, 0, 0],
1225
+ [0, 0, 1, 0, 0],
1226
+ [0, 0, 1, 0, 0]], dtype=numpy.float64)
1227
+ out = ndimage.rotate(data, 90, reshape=False, order=order)
1228
+ assert_array_almost_equal(out, expected)
1229
+
1230
+ @pytest.mark.parametrize('order', range(0, 6))
1231
+ def test_rotate05(self, order):
1232
+ data = numpy.empty((4, 3, 3))
1233
+ for i in range(3):
1234
+ data[:, :, i] = numpy.array([[0, 0, 0],
1235
+ [0, 1, 0],
1236
+ [0, 1, 0],
1237
+ [0, 0, 0]], dtype=numpy.float64)
1238
+ expected = numpy.array([[0, 0, 0, 0],
1239
+ [0, 1, 1, 0],
1240
+ [0, 0, 0, 0]], dtype=numpy.float64)
1241
+ out = ndimage.rotate(data, 90, order=order)
1242
+ for i in range(3):
1243
+ assert_array_almost_equal(out[:, :, i], expected)
1244
+
1245
+ @pytest.mark.parametrize('order', range(0, 6))
1246
+ def test_rotate06(self, order):
1247
+ data = numpy.empty((3, 4, 3))
1248
+ for i in range(3):
1249
+ data[:, :, i] = numpy.array([[0, 0, 0, 0],
1250
+ [0, 1, 1, 0],
1251
+ [0, 0, 0, 0]], dtype=numpy.float64)
1252
+ expected = numpy.array([[0, 0, 0],
1253
+ [0, 1, 0],
1254
+ [0, 1, 0],
1255
+ [0, 0, 0]], dtype=numpy.float64)
1256
+ out = ndimage.rotate(data, 90, order=order)
1257
+ for i in range(3):
1258
+ assert_array_almost_equal(out[:, :, i], expected)
1259
+
1260
+ @pytest.mark.parametrize('order', range(0, 6))
1261
+ def test_rotate07(self, order):
1262
+ data = numpy.array([[[0, 0, 0, 0, 0],
1263
+ [0, 1, 1, 0, 0],
1264
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
1265
+ data = data.transpose()
1266
+ expected = numpy.array([[[0, 0, 0],
1267
+ [0, 1, 0],
1268
+ [0, 1, 0],
1269
+ [0, 0, 0],
1270
+ [0, 0, 0]]] * 2, dtype=numpy.float64)
1271
+ expected = expected.transpose([2, 1, 0])
1272
+ out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
1273
+ assert_array_almost_equal(out, expected)
1274
+
1275
+ @pytest.mark.parametrize('order', range(0, 6))
1276
+ def test_rotate08(self, order):
1277
+ data = numpy.array([[[0, 0, 0, 0, 0],
1278
+ [0, 1, 1, 0, 0],
1279
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
1280
+ data = data.transpose()
1281
+ expected = numpy.array([[[0, 0, 1, 0, 0],
1282
+ [0, 0, 1, 0, 0],
1283
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
1284
+ expected = expected.transpose()
1285
+ out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
1286
+ assert_array_almost_equal(out, expected)
1287
+
1288
+ def test_rotate09(self):
1289
+ data = numpy.array([[0, 0, 0, 0, 0],
1290
+ [0, 1, 1, 0, 0],
1291
+ [0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
1292
+ with assert_raises(ValueError):
1293
+ ndimage.rotate(data, 90, axes=(0, data.ndim))
1294
+
1295
+ def test_rotate10(self):
1296
+ data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
1297
+
1298
+ # The output of ndimage.rotate before refactoring
1299
+ expected = numpy.array([[[0.0, 0.0, 0.0],
1300
+ [0.0, 0.0, 0.0],
1301
+ [6.54914793, 7.54914793, 8.54914793],
1302
+ [10.84520162, 11.84520162, 12.84520162],
1303
+ [0.0, 0.0, 0.0]],
1304
+ [[6.19286575, 7.19286575, 8.19286575],
1305
+ [13.4730712, 14.4730712, 15.4730712],
1306
+ [21.0, 22.0, 23.0],
1307
+ [28.5269288, 29.5269288, 30.5269288],
1308
+ [35.80713425, 36.80713425, 37.80713425]],
1309
+ [[0.0, 0.0, 0.0],
1310
+ [31.15479838, 32.15479838, 33.15479838],
1311
+ [35.45085207, 36.45085207, 37.45085207],
1312
+ [0.0, 0.0, 0.0],
1313
+ [0.0, 0.0, 0.0]]])
1314
+
1315
+ out = ndimage.rotate(data, angle=12, reshape=False)
1316
+ assert_array_almost_equal(out, expected)
1317
+
1318
+ def test_rotate_exact_180(self):
1319
+ a = numpy.tile(numpy.arange(5), (5, 1))
1320
+ b = ndimage.rotate(ndimage.rotate(a, 180), -180)
1321
+ assert_equal(a, b)
1322
+
1323
+
1324
+ def test_zoom_output_shape():
1325
+ """Ticket #643"""
1326
+ x = numpy.arange(12).reshape((3, 4))
1327
+ ndimage.zoom(x, 2, output=numpy.zeros((6, 8)))
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py ADDED
@@ -0,0 +1,1409 @@
1
+ import os.path
2
+
3
+ import numpy as np
4
+ from numpy.testing import (
5
+ assert_,
6
+ assert_allclose,
7
+ assert_almost_equal,
8
+ assert_array_almost_equal,
9
+ assert_array_equal,
10
+ assert_equal,
11
+ suppress_warnings,
12
+ )
13
+ from pytest import raises as assert_raises
14
+
15
+ import scipy.ndimage as ndimage
16
+
17
+
18
+ from . import types
19
+
20
+
21
+ class Test_measurements_stats:
22
+ """ndimage._measurements._stats() is a utility used by other functions."""
23
+
24
+ def test_a(self):
25
+ x = [0, 1, 2, 6]
26
+ labels = [0, 0, 1, 1]
27
+ index = [0, 1]
28
+ for shp in [(4,), (2, 2)]:
29
+ x = np.array(x).reshape(shp)
30
+ labels = np.array(labels).reshape(shp)
31
+ counts, sums = ndimage._measurements._stats(
32
+ x, labels=labels, index=index)
33
+ assert_array_equal(counts, [2, 2])
34
+ assert_array_equal(sums, [1.0, 8.0])
35
+
36
+ def test_b(self):
37
+ # Same data as test_a, but different labels. The label 9 exceeds the
38
+ # length of 'labels', so this test will follow a different code path.
39
+ x = [0, 1, 2, 6]
40
+ labels = [0, 0, 9, 9]
41
+ index = [0, 9]
42
+ for shp in [(4,), (2, 2)]:
43
+ x = np.array(x).reshape(shp)
44
+ labels = np.array(labels).reshape(shp)
45
+ counts, sums = ndimage._measurements._stats(
46
+ x, labels=labels, index=index)
47
+ assert_array_equal(counts, [2, 2])
48
+ assert_array_equal(sums, [1.0, 8.0])
49
+
50
+ def test_a_centered(self):
51
+ x = [0, 1, 2, 6]
52
+ labels = [0, 0, 1, 1]
53
+ index = [0, 1]
54
+ for shp in [(4,), (2, 2)]:
55
+ x = np.array(x).reshape(shp)
56
+ labels = np.array(labels).reshape(shp)
57
+ counts, sums, centers = ndimage._measurements._stats(
58
+ x, labels=labels, index=index, centered=True)
59
+ assert_array_equal(counts, [2, 2])
60
+ assert_array_equal(sums, [1.0, 8.0])
61
+ assert_array_equal(centers, [0.5, 8.0])
62
+
63
+ def test_b_centered(self):
64
+ x = [0, 1, 2, 6]
65
+ labels = [0, 0, 9, 9]
66
+ index = [0, 9]
67
+ for shp in [(4,), (2, 2)]:
68
+ x = np.array(x).reshape(shp)
69
+ labels = np.array(labels).reshape(shp)
70
+ counts, sums, centers = ndimage._measurements._stats(
71
+ x, labels=labels, index=index, centered=True)
72
+ assert_array_equal(counts, [2, 2])
73
+ assert_array_equal(sums, [1.0, 8.0])
74
+ assert_array_equal(centers, [0.5, 8.0])
75
+
76
+ def test_nonint_labels(self):
77
+ x = [0, 1, 2, 6]
78
+ labels = [0.0, 0.0, 9.0, 9.0]
79
+ index = [0.0, 9.0]
80
+ for shp in [(4,), (2, 2)]:
81
+ x = np.array(x).reshape(shp)
82
+ labels = np.array(labels).reshape(shp)
83
+ counts, sums, centers = ndimage._measurements._stats(
84
+ x, labels=labels, index=index, centered=True)
85
+ assert_array_equal(counts, [2, 2])
86
+ assert_array_equal(sums, [1.0, 8.0])
87
+ assert_array_equal(centers, [0.5, 8.0])
88
+
89
+
90
+ class Test_measurements_select:
91
+ """ndimage._measurements._select() is a utility used by other functions."""
92
+
93
+ def test_basic(self):
94
+ x = [0, 1, 6, 2]
95
+ cases = [
96
+ ([0, 0, 1, 1], [0, 1]), # "Small" integer labels
97
+ ([0, 0, 9, 9], [0, 9]), # A label larger than len(labels)
98
+ ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels
99
+ ]
100
+ for labels, index in cases:
101
+ result = ndimage._measurements._select(
102
+ x, labels=labels, index=index)
103
+ assert_(len(result) == 0)
104
+ result = ndimage._measurements._select(
105
+ x, labels=labels, index=index, find_max=True)
106
+ assert_(len(result) == 1)
107
+ assert_array_equal(result[0], [1, 6])
108
+ result = ndimage._measurements._select(
109
+ x, labels=labels, index=index, find_min=True)
110
+ assert_(len(result) == 1)
111
+ assert_array_equal(result[0], [0, 2])
112
+ result = ndimage._measurements._select(
113
+ x, labels=labels, index=index, find_min=True,
114
+ find_min_positions=True)
115
+ assert_(len(result) == 2)
116
+ assert_array_equal(result[0], [0, 2])
117
+ assert_array_equal(result[1], [0, 3])
118
+ assert_equal(result[1].dtype.kind, 'i')
119
+ result = ndimage._measurements._select(
120
+ x, labels=labels, index=index, find_max=True,
121
+ find_max_positions=True)
122
+ assert_(len(result) == 2)
123
+ assert_array_equal(result[0], [1, 6])
124
+ assert_array_equal(result[1], [1, 2])
125
+ assert_equal(result[1].dtype.kind, 'i')
126
+
127
+
128
+ def test_label01():
129
+ data = np.ones([])
130
+ out, n = ndimage.label(data)
131
+ assert_array_almost_equal(out, 1)
132
+ assert_equal(n, 1)
133
+
134
+
135
+ def test_label02():
136
+ data = np.zeros([])
137
+ out, n = ndimage.label(data)
138
+ assert_array_almost_equal(out, 0)
139
+ assert_equal(n, 0)
140
+
141
+
142
+ def test_label03():
143
+ data = np.ones([1])
144
+ out, n = ndimage.label(data)
145
+ assert_array_almost_equal(out, [1])
146
+ assert_equal(n, 1)
147
+
148
+
149
+ def test_label04():
150
+ data = np.zeros([1])
151
+ out, n = ndimage.label(data)
152
+ assert_array_almost_equal(out, [0])
153
+ assert_equal(n, 0)
154
+
155
+
156
+ def test_label05():
157
+ data = np.ones([5])
158
+ out, n = ndimage.label(data)
159
+ assert_array_almost_equal(out, [1, 1, 1, 1, 1])
160
+ assert_equal(n, 1)
161
+
162
+
163
+ def test_label06():
164
+ data = np.array([1, 0, 1, 1, 0, 1])
165
+ out, n = ndimage.label(data)
166
+ assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
167
+ assert_equal(n, 3)
168
+
169
+
170
+ def test_label07():
171
+ data = np.array([[0, 0, 0, 0, 0, 0],
172
+ [0, 0, 0, 0, 0, 0],
173
+ [0, 0, 0, 0, 0, 0],
174
+ [0, 0, 0, 0, 0, 0],
175
+ [0, 0, 0, 0, 0, 0],
176
+ [0, 0, 0, 0, 0, 0]])
177
+ out, n = ndimage.label(data)
178
+ assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
179
+ [0, 0, 0, 0, 0, 0],
180
+ [0, 0, 0, 0, 0, 0],
181
+ [0, 0, 0, 0, 0, 0],
182
+ [0, 0, 0, 0, 0, 0],
183
+ [0, 0, 0, 0, 0, 0]])
184
+ assert_equal(n, 0)
185
+
186
+
187
+ def test_label08():
188
+ data = np.array([[1, 0, 0, 0, 0, 0],
189
+ [0, 0, 1, 1, 0, 0],
190
+ [0, 0, 1, 1, 1, 0],
191
+ [1, 1, 0, 0, 0, 0],
192
+ [1, 1, 0, 0, 0, 0],
193
+ [0, 0, 0, 1, 1, 0]])
194
+ out, n = ndimage.label(data)
195
+ assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
196
+ [0, 0, 2, 2, 0, 0],
197
+ [0, 0, 2, 2, 2, 0],
198
+ [3, 3, 0, 0, 0, 0],
199
+ [3, 3, 0, 0, 0, 0],
200
+ [0, 0, 0, 4, 4, 0]])
201
+ assert_equal(n, 4)
202
+
203
+
204
+ def test_label09():
205
+ data = np.array([[1, 0, 0, 0, 0, 0],
206
+ [0, 0, 1, 1, 0, 0],
207
+ [0, 0, 1, 1, 1, 0],
208
+ [1, 1, 0, 0, 0, 0],
209
+ [1, 1, 0, 0, 0, 0],
210
+ [0, 0, 0, 1, 1, 0]])
211
+ struct = ndimage.generate_binary_structure(2, 2)
212
+ out, n = ndimage.label(data, struct)
213
+ assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
214
+ [0, 0, 2, 2, 0, 0],
215
+ [0, 0, 2, 2, 2, 0],
216
+ [2, 2, 0, 0, 0, 0],
217
+ [2, 2, 0, 0, 0, 0],
218
+ [0, 0, 0, 3, 3, 0]])
219
+ assert_equal(n, 3)
220
+
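The drop from four labels in test_label08 to three here is entirely due to the structuring element: generate_binary_structure(2, 2) includes the diagonals, so the blob at rows 1-2 (cols 2-4) and the blob at rows 3-4 (cols 0-1) touch at a corner and share a label. The two standard 2-D elements, for reference (a side note, not part of the tests):

from scipy import ndimage
print(ndimage.generate_binary_structure(2, 1))  # cross: no diagonal steps (label's default)
print(ndimage.generate_binary_structure(2, 2))  # full 3x3 block: diagonals included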
221
+
222
+ def test_label10():
223
+ data = np.array([[0, 0, 0, 0, 0, 0],
224
+ [0, 1, 1, 0, 1, 0],
225
+ [0, 1, 1, 1, 1, 0],
226
+ [0, 0, 0, 0, 0, 0]])
227
+ struct = ndimage.generate_binary_structure(2, 2)
228
+ out, n = ndimage.label(data, struct)
229
+ assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
230
+ [0, 1, 1, 0, 1, 0],
231
+ [0, 1, 1, 1, 1, 0],
232
+ [0, 0, 0, 0, 0, 0]])
233
+ assert_equal(n, 1)
234
+
235
+
236
+ def test_label11():
237
+ for type in types:
238
+ data = np.array([[1, 0, 0, 0, 0, 0],
239
+ [0, 0, 1, 1, 0, 0],
240
+ [0, 0, 1, 1, 1, 0],
241
+ [1, 1, 0, 0, 0, 0],
242
+ [1, 1, 0, 0, 0, 0],
243
+ [0, 0, 0, 1, 1, 0]], type)
244
+ out, n = ndimage.label(data)
245
+ expected = [[1, 0, 0, 0, 0, 0],
246
+ [0, 0, 2, 2, 0, 0],
247
+ [0, 0, 2, 2, 2, 0],
248
+ [3, 3, 0, 0, 0, 0],
249
+ [3, 3, 0, 0, 0, 0],
250
+ [0, 0, 0, 4, 4, 0]]
251
+ assert_array_almost_equal(out, expected)
252
+ assert_equal(n, 4)
253
+
254
+
255
+ def test_label11_inplace():
256
+ for type in types:
257
+ data = np.array([[1, 0, 0, 0, 0, 0],
258
+ [0, 0, 1, 1, 0, 0],
259
+ [0, 0, 1, 1, 1, 0],
260
+ [1, 1, 0, 0, 0, 0],
261
+ [1, 1, 0, 0, 0, 0],
262
+ [0, 0, 0, 1, 1, 0]], type)
263
+ n = ndimage.label(data, output=data)
264
+ expected = [[1, 0, 0, 0, 0, 0],
265
+ [0, 0, 2, 2, 0, 0],
266
+ [0, 0, 2, 2, 2, 0],
267
+ [3, 3, 0, 0, 0, 0],
268
+ [3, 3, 0, 0, 0, 0],
269
+ [0, 0, 0, 4, 4, 0]]
270
+ assert_array_almost_equal(data, expected)
271
+ assert_equal(n, 4)
272
+
273
+
274
+ def test_label12():
275
+ for type in types:
276
+ data = np.array([[0, 0, 0, 0, 1, 1],
277
+ [0, 0, 0, 0, 0, 1],
278
+ [0, 0, 1, 0, 1, 1],
279
+ [0, 0, 1, 1, 1, 1],
280
+ [0, 0, 0, 1, 1, 0]], type)
281
+ out, n = ndimage.label(data)
282
+ expected = [[0, 0, 0, 0, 1, 1],
283
+ [0, 0, 0, 0, 0, 1],
284
+ [0, 0, 1, 0, 1, 1],
285
+ [0, 0, 1, 1, 1, 1],
286
+ [0, 0, 0, 1, 1, 0]]
287
+ assert_array_almost_equal(out, expected)
288
+ assert_equal(n, 1)
289
+
290
+
291
+ def test_label13():
292
+ for type in types:
293
+ data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
294
+ [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
295
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
296
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
297
+ type)
298
+ out, n = ndimage.label(data)
299
+ expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
300
+ [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
301
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
302
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
303
+ assert_array_almost_equal(out, expected)
304
+ assert_equal(n, 1)
305
+
306
+
307
+ def test_label_output_typed():
308
+ data = np.ones([5])
309
+ for t in types:
310
+ output = np.zeros([5], dtype=t)
311
+ n = ndimage.label(data, output=output)
312
+ assert_array_almost_equal(output, 1)
313
+ assert_equal(n, 1)
314
+
315
+
316
+ def test_label_output_dtype():
317
+ data = np.ones([5])
318
+ for t in types:
319
+ output, n = ndimage.label(data, output=t)
320
+ assert_array_almost_equal(output, 1)
321
+ assert output.dtype == t
322
+
323
+
324
+ def test_label_output_wrong_size():
325
+ data = np.ones([5])
326
+ for t in types:
327
+ output = np.zeros([10], t)
328
+ assert_raises((RuntimeError, ValueError),
329
+ ndimage.label, data, output=output)
330
+
331
+
332
+ def test_label_structuring_elements():
333
+ data = np.loadtxt(os.path.join(os.path.dirname(
334
+ __file__), "data", "label_inputs.txt"))
335
+ strels = np.loadtxt(os.path.join(
336
+ os.path.dirname(__file__), "data", "label_strels.txt"))
337
+ results = np.loadtxt(os.path.join(
338
+ os.path.dirname(__file__), "data", "label_results.txt"))
339
+ data = data.reshape((-1, 7, 7))
340
+ strels = strels.reshape((-1, 3, 3))
341
+ results = results.reshape((-1, 7, 7))
342
+ r = 0
343
+ for i in range(data.shape[0]):
344
+ d = data[i, :, :]
345
+ for j in range(strels.shape[0]):
346
+ s = strels[j, :, :]
347
+ assert_equal(ndimage.label(d, s)[0], results[r, :, :])
348
+ r += 1
349
+
350
+
351
+ def test_ticket_742():
352
+ def SE(img, thresh=.7, size=4):
353
+ mask = img > thresh
354
+ rank = len(mask.shape)
355
+ la, co = ndimage.label(mask,
356
+ ndimage.generate_binary_structure(rank, rank))
357
+ _ = ndimage.find_objects(la)
358
+
359
+ if np.dtype(np.intp) != np.dtype('i'):
360
+ shape = (3, 1240, 1240)
361
+ a = np.random.rand(np.prod(shape)).reshape(shape)
362
+ # shouldn't crash
363
+ SE(a)
364
+
365
+
366
+ def test_gh_issue_3025():
367
+ """GitHub issue #3025 - improper merging of labels"""
368
+ d = np.zeros((60, 320))
369
+ d[:, :257] = 1
370
+ d[:, 260:] = 1
371
+ d[36, 257] = 1
372
+ d[35, 258] = 1
373
+ d[35, 259] = 1
374
+ assert ndimage.label(d, np.ones((3, 3)))[1] == 1
375
+
376
+
377
+ def test_label_default_dtype():
378
+ test_array = np.random.rand(10, 10)
379
+ label, no_features = ndimage.label(test_array > 0.5)
380
+ assert_(label.dtype in (np.int32, np.int64))
381
+ # Shouldn't raise an exception
382
+ ndimage.find_objects(label)
383
+
384
+
385
+ def test_find_objects01():
386
+ data = np.ones([], dtype=int)
387
+ out = ndimage.find_objects(data)
388
+ assert_(out == [()])
389
+
390
+
391
+ def test_find_objects02():
392
+ data = np.zeros([], dtype=int)
393
+ out = ndimage.find_objects(data)
394
+ assert_(out == [])
395
+
396
+
397
+ def test_find_objects03():
398
+ data = np.ones([1], dtype=int)
399
+ out = ndimage.find_objects(data)
400
+ assert_equal(out, [(slice(0, 1, None),)])
401
+
402
+
403
+ def test_find_objects04():
404
+ data = np.zeros([1], dtype=int)
405
+ out = ndimage.find_objects(data)
406
+ assert_equal(out, [])
407
+
408
+
409
+ def test_find_objects05():
410
+ data = np.ones([5], dtype=int)
411
+ out = ndimage.find_objects(data)
412
+ assert_equal(out, [(slice(0, 5, None),)])
413
+
414
+
415
+ def test_find_objects06():
416
+ data = np.array([1, 0, 2, 2, 0, 3])
417
+ out = ndimage.find_objects(data)
418
+ assert_equal(out, [(slice(0, 1, None),),
419
+ (slice(2, 4, None),),
420
+ (slice(5, 6, None),)])
421
+
422
+
423
+ def test_find_objects07():
424
+ data = np.array([[0, 0, 0, 0, 0, 0],
425
+ [0, 0, 0, 0, 0, 0],
426
+ [0, 0, 0, 0, 0, 0],
427
+ [0, 0, 0, 0, 0, 0],
428
+ [0, 0, 0, 0, 0, 0],
429
+ [0, 0, 0, 0, 0, 0]])
430
+ out = ndimage.find_objects(data)
431
+ assert_equal(out, [])
432
+
433
+
434
+ def test_find_objects08():
435
+ data = np.array([[1, 0, 0, 0, 0, 0],
436
+ [0, 0, 2, 2, 0, 0],
437
+ [0, 0, 2, 2, 2, 0],
438
+ [3, 3, 0, 0, 0, 0],
439
+ [3, 3, 0, 0, 0, 0],
440
+ [0, 0, 0, 4, 4, 0]])
441
+ out = ndimage.find_objects(data)
442
+ assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
443
+ (slice(1, 3, None), slice(2, 5, None)),
444
+ (slice(3, 5, None), slice(0, 2, None)),
445
+ (slice(5, 6, None), slice(3, 5, None))])
446
+
447
+
448
+ def test_find_objects09():
449
+ data = np.array([[1, 0, 0, 0, 0, 0],
450
+ [0, 0, 2, 2, 0, 0],
451
+ [0, 0, 2, 2, 2, 0],
452
+ [0, 0, 0, 0, 0, 0],
453
+ [0, 0, 0, 0, 0, 0],
454
+ [0, 0, 0, 4, 4, 0]])
455
+ out = ndimage.find_objects(data)
456
+ assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
457
+ (slice(1, 3, None), slice(2, 5, None)),
458
+ None,
459
+ (slice(5, 6, None), slice(3, 5, None))])
460
+
461
+
462
+ def test_value_indices01():
463
+ "Test dictionary keys and entries"
464
+ data = np.array([[1, 0, 0, 0, 0, 0],
465
+ [0, 0, 2, 2, 0, 0],
466
+ [0, 0, 2, 2, 2, 0],
467
+ [0, 0, 0, 0, 0, 0],
468
+ [0, 0, 0, 0, 0, 0],
469
+ [0, 0, 0, 4, 4, 0]])
470
+ vi = ndimage.value_indices(data, ignore_value=0)
471
+ true_keys = [1, 2, 4]
472
+ assert_equal(list(vi.keys()), true_keys)
473
+
474
+ truevi = {}
475
+ for k in true_keys:
476
+ truevi[k] = np.where(data == k)
477
+
478
+ vi = ndimage.value_indices(data, ignore_value=0)
479
+ assert_equal(vi, truevi)
480
+
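For readers unfamiliar with value_indices: the dictionary built by hand in this test is essentially what the function returns, so a rough pure-NumPy reference looks like the sketch below (my own illustration; the real implementation avoids the repeated full-array scans):

import numpy as np

def value_indices_reference(arr, ignore_value=None):
    # hypothetical helper: one np.where per unique value, skipping ignore_value
    out = {}
    for v in np.unique(arr):
        if ignore_value is not None and v == ignore_value:
            continue
        out[v] = np.where(arr == v)
    return out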
481
+
482
+ def test_value_indices02():
483
+ "Test input checking"
484
+ data = np.zeros((5, 4), dtype=np.float32)
485
+ msg = "Parameter 'arr' must be an integer array"
486
+ with assert_raises(ValueError, match=msg):
487
+ ndimage.value_indices(data)
488
+
489
+
490
+ def test_value_indices03():
491
+ "Test different input array shapes, from 1-D to 4-D"
492
+ for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
493
+ a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
494
+ trueKeys = np.unique(a)
495
+ vi = ndimage.value_indices(a)
496
+ assert_equal(list(vi.keys()), list(trueKeys))
497
+ for k in trueKeys:
498
+ trueNdx = np.where(a == k)
499
+ assert_equal(vi[k], trueNdx)
500
+
501
+
502
+ def test_sum01():
503
+ for type in types:
504
+ input = np.array([], type)
505
+ output = ndimage.sum(input)
506
+ assert_equal(output, 0.0)
507
+
508
+
509
+ def test_sum02():
510
+ for type in types:
511
+ input = np.zeros([0, 4], type)
512
+ output = ndimage.sum(input)
513
+ assert_equal(output, 0.0)
514
+
515
+
516
+ def test_sum03():
517
+ for type in types:
518
+ input = np.ones([], type)
519
+ output = ndimage.sum(input)
520
+ assert_almost_equal(output, 1.0)
521
+
522
+
523
+ def test_sum04():
524
+ for type in types:
525
+ input = np.array([1, 2], type)
526
+ output = ndimage.sum(input)
527
+ assert_almost_equal(output, 3.0)
528
+
529
+
530
+ def test_sum05():
531
+ for type in types:
532
+ input = np.array([[1, 2], [3, 4]], type)
533
+ output = ndimage.sum(input)
534
+ assert_almost_equal(output, 10.0)
535
+
536
+
537
+ def test_sum06():
538
+ labels = np.array([], bool)
539
+ for type in types:
540
+ input = np.array([], type)
541
+ output = ndimage.sum(input, labels=labels)
542
+ assert_equal(output, 0.0)
543
+
544
+
545
+ def test_sum07():
546
+ labels = np.ones([0, 4], bool)
547
+ for type in types:
548
+ input = np.zeros([0, 4], type)
549
+ output = ndimage.sum(input, labels=labels)
550
+ assert_equal(output, 0.0)
551
+
552
+
553
+ def test_sum08():
554
+ labels = np.array([1, 0], bool)
555
+ for type in types:
556
+ input = np.array([1, 2], type)
557
+ output = ndimage.sum(input, labels=labels)
558
+ assert_equal(output, 1.0)
559
+
560
+
561
+ def test_sum09():
562
+ labels = np.array([1, 0], bool)
563
+ for type in types:
564
+ input = np.array([[1, 2], [3, 4]], type)
565
+ output = ndimage.sum(input, labels=labels)
566
+ assert_almost_equal(output, 4.0)
567
+
568
+
569
+ def test_sum10():
570
+ labels = np.array([1, 0], bool)
571
+ input = np.array([[1, 2], [3, 4]], bool)
572
+ output = ndimage.sum(input, labels=labels)
573
+ assert_almost_equal(output, 2.0)
574
+
575
+
576
+ def test_sum11():
577
+ labels = np.array([1, 2], np.int8)
578
+ for type in types:
579
+ input = np.array([[1, 2], [3, 4]], type)
580
+ output = ndimage.sum(input, labels=labels,
581
+ index=2)
582
+ assert_almost_equal(output, 6.0)
583
+
584
+
585
+ def test_sum12():
586
+ labels = np.array([[1, 2], [2, 4]], np.int8)
587
+ for type in types:
588
+ input = np.array([[1, 2], [3, 4]], type)
589
+ output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
590
+ assert_array_almost_equal(output, [4.0, 0.0, 5.0])
591
+
592
+
593
+ def test_sum_labels():
594
+ labels = np.array([[1, 2], [2, 4]], np.int8)
595
+ for type in types:
596
+ input = np.array([[1, 2], [3, 4]], type)
597
+ output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
598
+ output_labels = ndimage.sum_labels(
599
+ input, labels=labels, index=[4, 8, 2])
600
+
601
+ assert (output_sum == output_labels).all()
602
+ assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
603
+
604
+
605
+ def test_mean01():
606
+ labels = np.array([1, 0], bool)
607
+ for type in types:
608
+ input = np.array([[1, 2], [3, 4]], type)
609
+ output = ndimage.mean(input, labels=labels)
610
+ assert_almost_equal(output, 2.0)
611
+
612
+
613
+ def test_mean02():
614
+ labels = np.array([1, 0], bool)
615
+ input = np.array([[1, 2], [3, 4]], bool)
616
+ output = ndimage.mean(input, labels=labels)
617
+ assert_almost_equal(output, 1.0)
618
+
619
+
620
+ def test_mean03():
621
+ labels = np.array([1, 2])
622
+ for type in types:
623
+ input = np.array([[1, 2], [3, 4]], type)
624
+ output = ndimage.mean(input, labels=labels,
625
+ index=2)
626
+ assert_almost_equal(output, 3.0)
627
+
628
+
629
+ def test_mean04():
630
+ labels = np.array([[1, 2], [2, 4]], np.int8)
631
+ with np.errstate(all='ignore'):
632
+ for type in types:
633
+ input = np.array([[1, 2], [3, 4]], type)
634
+ output = ndimage.mean(input, labels=labels,
635
+ index=[4, 8, 2])
636
+ assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
637
+ assert_(np.isnan(output[1]))
638
+
639
+
640
+ def test_minimum01():
641
+ labels = np.array([1, 0], bool)
642
+ for type in types:
643
+ input = np.array([[1, 2], [3, 4]], type)
644
+ output = ndimage.minimum(input, labels=labels)
645
+ assert_almost_equal(output, 1.0)
646
+
647
+
648
+ def test_minimum02():
649
+ labels = np.array([1, 0], bool)
650
+ input = np.array([[2, 2], [2, 4]], bool)
651
+ output = ndimage.minimum(input, labels=labels)
652
+ assert_almost_equal(output, 1.0)
653
+
654
+
655
+ def test_minimum03():
656
+ labels = np.array([1, 2])
657
+ for type in types:
658
+ input = np.array([[1, 2], [3, 4]], type)
659
+ output = ndimage.minimum(input, labels=labels,
660
+ index=2)
661
+ assert_almost_equal(output, 2.0)
662
+
663
+
664
+ def test_minimum04():
665
+ labels = np.array([[1, 2], [2, 3]])
666
+ for type in types:
667
+ input = np.array([[1, 2], [3, 4]], type)
668
+ output = ndimage.minimum(input, labels=labels,
669
+ index=[2, 3, 8])
670
+ assert_array_almost_equal(output, [2.0, 4.0, 0.0])
671
+
672
+
673
+ def test_maximum01():
674
+ labels = np.array([1, 0], bool)
675
+ for type in types:
676
+ input = np.array([[1, 2], [3, 4]], type)
677
+ output = ndimage.maximum(input, labels=labels)
678
+ assert_almost_equal(output, 3.0)
679
+
680
+
681
+ def test_maximum02():
682
+ labels = np.array([1, 0], bool)
683
+ input = np.array([[2, 2], [2, 4]], bool)
684
+ output = ndimage.maximum(input, labels=labels)
685
+ assert_almost_equal(output, 1.0)
686
+
687
+
688
+ def test_maximum03():
689
+ labels = np.array([1, 2])
690
+ for type in types:
691
+ input = np.array([[1, 2], [3, 4]], type)
692
+ output = ndimage.maximum(input, labels=labels,
693
+ index=2)
694
+ assert_almost_equal(output, 4.0)
695
+
696
+
697
+ def test_maximum04():
698
+ labels = np.array([[1, 2], [2, 3]])
699
+ for type in types:
700
+ input = np.array([[1, 2], [3, 4]], type)
701
+ output = ndimage.maximum(input, labels=labels,
702
+ index=[2, 3, 8])
703
+ assert_array_almost_equal(output, [3.0, 4.0, 0.0])
704
+
705
+
706
+ def test_maximum05():
707
+ # Regression test for ticket #501 (Trac)
708
+ x = np.array([-3, -2, -1])
709
+ assert_equal(ndimage.maximum(x), -1)
710
+
711
+
712
+ def test_median01():
713
+ a = np.array([[1, 2, 0, 1],
714
+ [5, 3, 0, 4],
715
+ [0, 0, 0, 7],
716
+ [9, 3, 0, 0]])
717
+ labels = np.array([[1, 1, 0, 2],
718
+ [1, 1, 0, 2],
719
+ [0, 0, 0, 2],
720
+ [3, 3, 0, 0]])
721
+ output = ndimage.median(a, labels=labels, index=[1, 2, 3])
722
+ assert_array_almost_equal(output, [2.5, 4.0, 6.0])
723
+
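The expected medians can be checked by collecting the values under each label and taking an ordinary median (worked out by hand below; not part of the test suite):

import numpy as np
print(np.median([1, 2, 5, 3]))  # values under label 1 -> 2.5
print(np.median([1, 4, 7]))     # values under label 2 -> 4.0
print(np.median([9, 3]))        # values under label 3 -> 6.0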
724
+
725
+ def test_median02():
726
+ a = np.array([[1, 2, 0, 1],
727
+ [5, 3, 0, 4],
728
+ [0, 0, 0, 7],
729
+ [9, 3, 0, 0]])
730
+ output = ndimage.median(a)
731
+ assert_almost_equal(output, 1.0)
732
+
733
+
734
+ def test_median03():
735
+ a = np.array([[1, 2, 0, 1],
736
+ [5, 3, 0, 4],
737
+ [0, 0, 0, 7],
738
+ [9, 3, 0, 0]])
739
+ labels = np.array([[1, 1, 0, 2],
740
+ [1, 1, 0, 2],
741
+ [0, 0, 0, 2],
742
+ [3, 3, 0, 0]])
743
+ output = ndimage.median(a, labels=labels)
744
+ assert_almost_equal(output, 3.0)
745
+
746
+
747
+ def test_median_gh12836_bool():
748
+ # test boolean addition fix on example from gh-12836
749
+ a = np.asarray([1, 1], dtype=bool)
750
+ output = ndimage.median(a, labels=np.ones((2,)), index=[1])
751
+ assert_array_almost_equal(output, [1.0])
752
+
753
+
754
+ def test_median_no_int_overflow():
755
+ # test integer overflow fix on example from gh-12836
756
+ a = np.asarray([65, 70], dtype=np.int8)
757
+ output = ndimage.median(a, labels=np.ones((2,)), index=[1])
758
+ assert_array_almost_equal(output, [67.5])
759
+
760
+
761
+ def test_variance01():
762
+ with np.errstate(all='ignore'):
763
+ for type in types:
764
+ input = np.array([], type)
765
+ with suppress_warnings() as sup:
766
+ sup.filter(RuntimeWarning, "Mean of empty slice")
767
+ output = ndimage.variance(input)
768
+ assert_(np.isnan(output))
769
+
770
+
771
+ def test_variance02():
772
+ for type in types:
773
+ input = np.array([1], type)
774
+ output = ndimage.variance(input)
775
+ assert_almost_equal(output, 0.0)
776
+
777
+
778
+ def test_variance03():
779
+ for type in types:
780
+ input = np.array([1, 3], type)
781
+ output = ndimage.variance(input)
782
+ assert_almost_equal(output, 1.0)
783
+
784
+
785
+ def test_variance04():
786
+ input = np.array([1, 0], bool)
787
+ output = ndimage.variance(input)
788
+ assert_almost_equal(output, 0.25)
789
+
790
+
791
+ def test_variance05():
792
+ labels = [2, 2, 3]
793
+ for type in types:
794
+ input = np.array([1, 3, 8], type)
795
+ output = ndimage.variance(input, labels, 2)
796
+ assert_almost_equal(output, 1.0)
797
+
798
+
799
+ def test_variance06():
800
+ labels = [2, 2, 3, 3, 4]
801
+ with np.errstate(all='ignore'):
802
+ for type in types:
803
+ input = np.array([1, 3, 8, 10, 8], type)
804
+ output = ndimage.variance(input, labels, [2, 3, 4])
805
+ assert_array_almost_equal(output, [1.0, 1.0, 0.0])
806
+
807
+
808
+ def test_standard_deviation01():
809
+ with np.errstate(all='ignore'):
810
+ for type in types:
811
+ input = np.array([], type)
812
+ with suppress_warnings() as sup:
813
+ sup.filter(RuntimeWarning, "Mean of empty slice")
814
+ output = ndimage.standard_deviation(input)
815
+ assert_(np.isnan(output))
816
+
817
+
818
+ def test_standard_deviation02():
819
+ for type in types:
820
+ input = np.array([1], type)
821
+ output = ndimage.standard_deviation(input)
822
+ assert_almost_equal(output, 0.0)
823
+
824
+
825
+ def test_standard_deviation03():
826
+ for type in types:
827
+ input = np.array([1, 3], type)
828
+ output = ndimage.standard_deviation(input)
829
+ assert_almost_equal(output, np.sqrt(1.0))
830
+
831
+
832
+ def test_standard_deviation04():
833
+ input = np.array([1, 0], bool)
834
+ output = ndimage.standard_deviation(input)
835
+ assert_almost_equal(output, 0.5)
836
+
837
+
838
+ def test_standard_deviation05():
839
+ labels = [2, 2, 3]
840
+ for type in types:
841
+ input = np.array([1, 3, 8], type)
842
+ output = ndimage.standard_deviation(input, labels, 2)
843
+ assert_almost_equal(output, 1.0)
844
+
845
+
846
+ def test_standard_deviation06():
847
+ labels = [2, 2, 3, 3, 4]
848
+ with np.errstate(all='ignore'):
849
+ for type in types:
850
+ input = np.array([1, 3, 8, 10, 8], type)
851
+ output = ndimage.standard_deviation(input, labels, [2, 3, 4])
852
+ assert_array_almost_equal(output, [1.0, 1.0, 0.0])
853
+
854
+
855
+ def test_standard_deviation07():
856
+ labels = [1]
857
+ with np.errstate(all='ignore'):
858
+ for type in types:
859
+ input = np.array([-0.00619519], type)
860
+ output = ndimage.standard_deviation(input, labels, [1])
861
+ assert_array_almost_equal(output, [0])
862
+
863
+
864
+ def test_minimum_position01():
865
+ labels = np.array([1, 0], bool)
866
+ for type in types:
867
+ input = np.array([[1, 2], [3, 4]], type)
868
+ output = ndimage.minimum_position(input, labels=labels)
869
+ assert_equal(output, (0, 0))
870
+
871
+
872
+ def test_minimum_position02():
873
+ for type in types:
874
+ input = np.array([[5, 4, 2, 5],
875
+ [3, 7, 0, 2],
876
+ [1, 5, 1, 1]], type)
877
+ output = ndimage.minimum_position(input)
878
+ assert_equal(output, (1, 2))
879
+
880
+
881
+ def test_minimum_position03():
882
+ input = np.array([[5, 4, 2, 5],
883
+ [3, 7, 0, 2],
884
+ [1, 5, 1, 1]], bool)
885
+ output = ndimage.minimum_position(input)
886
+ assert_equal(output, (1, 2))
887
+
888
+
889
+ def test_minimum_position04():
890
+ input = np.array([[5, 4, 2, 5],
891
+ [3, 7, 1, 2],
892
+ [1, 5, 1, 1]], bool)
893
+ output = ndimage.minimum_position(input)
894
+ assert_equal(output, (0, 0))
895
+
896
+
897
+ def test_minimum_position05():
898
+ labels = [1, 2, 0, 4]
899
+ for type in types:
900
+ input = np.array([[5, 4, 2, 5],
901
+ [3, 7, 0, 2],
902
+ [1, 5, 2, 3]], type)
903
+ output = ndimage.minimum_position(input, labels)
904
+ assert_equal(output, (2, 0))
905
+
906
+
907
+ def test_minimum_position06():
908
+ labels = [1, 2, 3, 4]
909
+ for type in types:
910
+ input = np.array([[5, 4, 2, 5],
911
+ [3, 7, 0, 2],
912
+ [1, 5, 1, 1]], type)
913
+ output = ndimage.minimum_position(input, labels, 2)
914
+ assert_equal(output, (0, 1))
915
+
916
+
917
+ def test_minimum_position07():
918
+ labels = [1, 2, 3, 4]
919
+ for type in types:
920
+ input = np.array([[5, 4, 2, 5],
921
+ [3, 7, 0, 2],
922
+ [1, 5, 1, 1]], type)
923
+ output = ndimage.minimum_position(input, labels,
924
+ [2, 3])
925
+ assert_equal(output[0], (0, 1))
926
+ assert_equal(output[1], (1, 2))
927
+
928
+
929
+ def test_maximum_position01():
930
+ labels = np.array([1, 0], bool)
931
+ for type in types:
932
+ input = np.array([[1, 2], [3, 4]], type)
933
+ output = ndimage.maximum_position(input,
934
+ labels=labels)
935
+ assert_equal(output, (1, 0))
936
+
937
+
938
+ def test_maximum_position02():
939
+ for type in types:
940
+ input = np.array([[5, 4, 2, 5],
941
+ [3, 7, 8, 2],
942
+ [1, 5, 1, 1]], type)
943
+ output = ndimage.maximum_position(input)
944
+ assert_equal(output, (1, 2))
945
+
946
+
947
+ def test_maximum_position03():
948
+ input = np.array([[5, 4, 2, 5],
949
+ [3, 7, 8, 2],
950
+ [1, 5, 1, 1]], bool)
951
+ output = ndimage.maximum_position(input)
952
+ assert_equal(output, (0, 0))
953
+
954
+
955
+ def test_maximum_position04():
956
+ labels = [1, 2, 0, 4]
957
+ for type in types:
958
+ input = np.array([[5, 4, 2, 5],
959
+ [3, 7, 8, 2],
960
+ [1, 5, 1, 1]], type)
961
+ output = ndimage.maximum_position(input, labels)
962
+ assert_equal(output, (1, 1))
963
+
964
+
965
+ def test_maximum_position05():
966
+ labels = [1, 2, 0, 4]
967
+ for type in types:
968
+ input = np.array([[5, 4, 2, 5],
969
+ [3, 7, 8, 2],
970
+ [1, 5, 1, 1]], type)
971
+ output = ndimage.maximum_position(input, labels, 1)
972
+ assert_equal(output, (0, 0))
973
+
974
+
975
+ def test_maximum_position06():
976
+ labels = [1, 2, 0, 4]
977
+ for type in types:
978
+ input = np.array([[5, 4, 2, 5],
979
+ [3, 7, 8, 2],
980
+ [1, 5, 1, 1]], type)
981
+ output = ndimage.maximum_position(input, labels,
982
+ [1, 2])
983
+ assert_equal(output[0], (0, 0))
984
+ assert_equal(output[1], (1, 1))
985
+
986
+
987
+ def test_maximum_position07():
988
+ # Test float labels
989
+ labels = np.array([1.0, 2.5, 0.0, 4.5])
990
+ for type in types:
991
+ input = np.array([[5, 4, 2, 5],
992
+ [3, 7, 8, 2],
993
+ [1, 5, 1, 1]], type)
994
+ output = ndimage.maximum_position(input, labels,
995
+ [1.0, 4.5])
996
+ assert_equal(output[0], (0, 0))
997
+ assert_equal(output[1], (0, 3))
998
+
999
+
1000
+ def test_extrema01():
1001
+ labels = np.array([1, 0], bool)
1002
+ for type in types:
1003
+ input = np.array([[1, 2], [3, 4]], type)
1004
+ output1 = ndimage.extrema(input, labels=labels)
1005
+ output2 = ndimage.minimum(input, labels=labels)
1006
+ output3 = ndimage.maximum(input, labels=labels)
1007
+ output4 = ndimage.minimum_position(input,
1008
+ labels=labels)
1009
+ output5 = ndimage.maximum_position(input,
1010
+ labels=labels)
1011
+ assert_equal(output1, (output2, output3, output4, output5))
1012
+
1013
+
1014
+ def test_extrema02():
1015
+ labels = np.array([1, 2])
1016
+ for type in types:
1017
+ input = np.array([[1, 2], [3, 4]], type)
1018
+ output1 = ndimage.extrema(input, labels=labels,
1019
+ index=2)
1020
+ output2 = ndimage.minimum(input, labels=labels,
1021
+ index=2)
1022
+ output3 = ndimage.maximum(input, labels=labels,
1023
+ index=2)
1024
+ output4 = ndimage.minimum_position(input,
1025
+ labels=labels, index=2)
1026
+ output5 = ndimage.maximum_position(input,
1027
+ labels=labels, index=2)
1028
+ assert_equal(output1, (output2, output3, output4, output5))
1029
+
1030
+
1031
+ def test_extrema03():
1032
+ labels = np.array([[1, 2], [2, 3]])
1033
+ for type in types:
1034
+ input = np.array([[1, 2], [3, 4]], type)
1035
+ output1 = ndimage.extrema(input, labels=labels,
1036
+ index=[2, 3, 8])
1037
+ output2 = ndimage.minimum(input, labels=labels,
1038
+ index=[2, 3, 8])
1039
+ output3 = ndimage.maximum(input, labels=labels,
1040
+ index=[2, 3, 8])
1041
+ output4 = ndimage.minimum_position(input,
1042
+ labels=labels, index=[2, 3, 8])
1043
+ output5 = ndimage.maximum_position(input,
1044
+ labels=labels, index=[2, 3, 8])
1045
+ assert_array_almost_equal(output1[0], output2)
1046
+ assert_array_almost_equal(output1[1], output3)
1047
+ assert_array_almost_equal(output1[2], output4)
1048
+ assert_array_almost_equal(output1[3], output5)
1049
+
1050
+
1051
+ def test_extrema04():
1052
+ labels = [1, 2, 0, 4]
1053
+ for type in types:
1054
+ input = np.array([[5, 4, 2, 5],
1055
+ [3, 7, 8, 2],
1056
+ [1, 5, 1, 1]], type)
1057
+ output1 = ndimage.extrema(input, labels, [1, 2])
1058
+ output2 = ndimage.minimum(input, labels, [1, 2])
1059
+ output3 = ndimage.maximum(input, labels, [1, 2])
1060
+ output4 = ndimage.minimum_position(input, labels,
1061
+ [1, 2])
1062
+ output5 = ndimage.maximum_position(input, labels,
1063
+ [1, 2])
1064
+ assert_array_almost_equal(output1[0], output2)
1065
+ assert_array_almost_equal(output1[1], output3)
1066
+ assert_array_almost_equal(output1[2], output4)
1067
+ assert_array_almost_equal(output1[3], output5)
1068
+
1069
+
1070
+ def test_center_of_mass01():
1071
+ expected = [0.0, 0.0]
1072
+ for type in types:
1073
+ input = np.array([[1, 0], [0, 0]], type)
1074
+ output = ndimage.center_of_mass(input)
1075
+ assert_array_almost_equal(output, expected)
1076
+
1077
+
1078
+ def test_center_of_mass02():
1079
+ expected = [1, 0]
1080
+ for type in types:
1081
+ input = np.array([[0, 0], [1, 0]], type)
1082
+ output = ndimage.center_of_mass(input)
1083
+ assert_array_almost_equal(output, expected)
1084
+
1085
+
1086
+ def test_center_of_mass03():
1087
+ expected = [0, 1]
1088
+ for type in types:
1089
+ input = np.array([[0, 1], [0, 0]], type)
1090
+ output = ndimage.center_of_mass(input)
1091
+ assert_array_almost_equal(output, expected)
1092
+
1093
+
1094
+ def test_center_of_mass04():
1095
+ expected = [1, 1]
1096
+ for type in types:
1097
+ input = np.array([[0, 0], [0, 1]], type)
1098
+ output = ndimage.center_of_mass(input)
1099
+ assert_array_almost_equal(output, expected)
1100
+
1101
+
1102
+ def test_center_of_mass05():
1103
+ expected = [0.5, 0.5]
1104
+ for type in types:
1105
+ input = np.array([[1, 1], [1, 1]], type)
1106
+ output = ndimage.center_of_mass(input)
1107
+ assert_array_almost_equal(output, expected)
1108
+
1109
+
1110
+ def test_center_of_mass06():
1111
+ expected = [0.5, 0.5]
1112
+ input = np.array([[1, 2], [3, 1]], bool)
1113
+ output = ndimage.center_of_mass(input)
1114
+ assert_array_almost_equal(output, expected)
1115
+
1116
+
1117
+ def test_center_of_mass07():
1118
+ labels = [1, 0]
1119
+ expected = [0.5, 0.0]
1120
+ input = np.array([[1, 2], [3, 1]], bool)
1121
+ output = ndimage.center_of_mass(input, labels)
1122
+ assert_array_almost_equal(output, expected)
1123
+
1124
+
1125
+ def test_center_of_mass08():
1126
+ labels = [1, 2]
1127
+ expected = [0.5, 1.0]
1128
+ input = np.array([[5, 2], [3, 1]], bool)
1129
+ output = ndimage.center_of_mass(input, labels, 2)
1130
+ assert_array_almost_equal(output, expected)
1131
+
1132
+
1133
+ def test_center_of_mass09():
1134
+ labels = [1, 2]
1135
+ expected = [(0.5, 0.0), (0.5, 1.0)]
1136
+ input = np.array([[1, 2], [1, 1]], bool)
1137
+ output = ndimage.center_of_mass(input, labels, [1, 2])
1138
+ assert_array_almost_equal(output, expected)
1139
+
1140
+
1141
+ def test_histogram01():
1142
+ expected = np.ones(10)
1143
+ input = np.arange(10)
1144
+ output = ndimage.histogram(input, 0, 10, 10)
1145
+ assert_array_almost_equal(output, expected)
1146
+
1147
+
1148
+ def test_histogram02():
1149
+ labels = [1, 1, 1, 1, 2, 2, 2, 2]
1150
+ expected = [0, 2, 0, 1, 1]
1151
+ input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
1152
+ output = ndimage.histogram(input, 0, 4, 5, labels, 1)
1153
+ assert_array_almost_equal(output, expected)
1154
+
1155
+
1156
+ def test_histogram03():
1157
+ labels = [1, 0, 1, 1, 2, 2, 2, 2]
1158
+ expected1 = [0, 1, 0, 1, 1]
1159
+ expected2 = [0, 0, 0, 3, 0]
1160
+ input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
1161
+ output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
1162
+
1163
+ assert_array_almost_equal(output[0], expected1)
1164
+ assert_array_almost_equal(output[1], expected2)
1165
+
1166
+
1167
+ def test_stat_funcs_2d():
1168
+ a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
1169
+ lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
1170
+
1171
+ mean = ndimage.mean(a, labels=lbl, index=[1, 2])
1172
+ assert_array_equal(mean, [7.0, 4.0])
1173
+
1174
+ var = ndimage.variance(a, labels=lbl, index=[1, 2])
1175
+ assert_array_equal(var, [2.5, 1.0])
1176
+
1177
+ std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
1178
+ assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
1179
+
1180
+ med = ndimage.median(a, labels=lbl, index=[1, 2])
1181
+ assert_array_equal(med, [7.0, 4.0])
1182
+
1183
+ min = ndimage.minimum(a, labels=lbl, index=[1, 2])
1184
+ assert_array_equal(min, [5, 3])
1185
+
1186
+ max = ndimage.maximum(a, labels=lbl, index=[1, 2])
1187
+ assert_array_equal(max, [9, 5])
1188
+
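One detail worth noting when reading the expected values above: ndimage.variance and ndimage.standard_deviation behave like the population (ddof=0) statistics, which is what makes the label-1 variance 2.5 rather than the sample value. A quick check (my own illustration):

import numpy as np
vals = np.array([5, 6, 8, 9])  # the values under label 1
print(vals.var())              # 2.5     (ddof=0, matches the test)
print(vals.var(ddof=1))        # 3.33... (sample variance, would not match)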
1189
+
1190
+ class TestWatershedIft:
1191
+
1192
+ def test_watershed_ift01(self):
1193
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
1194
+ [0, 1, 1, 1, 1, 1, 0],
1195
+ [0, 1, 0, 0, 0, 1, 0],
1196
+ [0, 1, 0, 0, 0, 1, 0],
1197
+ [0, 1, 0, 0, 0, 1, 0],
1198
+ [0, 1, 1, 1, 1, 1, 0],
1199
+ [0, 0, 0, 0, 0, 0, 0],
1200
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1201
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
1202
+ [0, 0, 0, 0, 0, 0, 0],
1203
+ [0, 0, 0, 0, 0, 0, 0],
1204
+ [0, 0, 0, 1, 0, 0, 0],
1205
+ [0, 0, 0, 0, 0, 0, 0],
1206
+ [0, 0, 0, 0, 0, 0, 0],
1207
+ [0, 0, 0, 0, 0, 0, 0],
1208
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
1209
+ out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
1210
+ [1, 1, 1],
1211
+ [1, 1, 1]])
1212
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
1213
+ [-1, 1, 1, 1, 1, 1, -1],
1214
+ [-1, 1, 1, 1, 1, 1, -1],
1215
+ [-1, 1, 1, 1, 1, 1, -1],
1216
+ [-1, 1, 1, 1, 1, 1, -1],
1217
+ [-1, 1, 1, 1, 1, 1, -1],
1218
+ [-1, -1, -1, -1, -1, -1, -1],
1219
+ [-1, -1, -1, -1, -1, -1, -1]]
1220
+ assert_array_almost_equal(out, expected)
1221
+
1222
+ def test_watershed_ift02(self):
1223
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
1224
+ [0, 1, 1, 1, 1, 1, 0],
1225
+ [0, 1, 0, 0, 0, 1, 0],
1226
+ [0, 1, 0, 0, 0, 1, 0],
1227
+ [0, 1, 0, 0, 0, 1, 0],
1228
+ [0, 1, 1, 1, 1, 1, 0],
1229
+ [0, 0, 0, 0, 0, 0, 0],
1230
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1231
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
1232
+ [0, 0, 0, 0, 0, 0, 0],
1233
+ [0, 0, 0, 0, 0, 0, 0],
1234
+ [0, 0, 0, 1, 0, 0, 0],
1235
+ [0, 0, 0, 0, 0, 0, 0],
1236
+ [0, 0, 0, 0, 0, 0, 0],
1237
+ [0, 0, 0, 0, 0, 0, 0],
1238
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
1239
+ out = ndimage.watershed_ift(data, markers)
1240
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
1241
+ [-1, -1, 1, 1, 1, -1, -1],
1242
+ [-1, 1, 1, 1, 1, 1, -1],
1243
+ [-1, 1, 1, 1, 1, 1, -1],
1244
+ [-1, 1, 1, 1, 1, 1, -1],
1245
+ [-1, -1, 1, 1, 1, -1, -1],
1246
+ [-1, -1, -1, -1, -1, -1, -1],
1247
+ [-1, -1, -1, -1, -1, -1, -1]]
1248
+ assert_array_almost_equal(out, expected)
1249
+
1250
+ def test_watershed_ift03(self):
1251
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
1252
+ [0, 1, 1, 1, 1, 1, 0],
1253
+ [0, 1, 0, 1, 0, 1, 0],
1254
+ [0, 1, 0, 1, 0, 1, 0],
1255
+ [0, 1, 0, 1, 0, 1, 0],
1256
+ [0, 1, 1, 1, 1, 1, 0],
1257
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1258
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
1259
+ [0, 0, 0, 0, 0, 0, 0],
1260
+ [0, 0, 0, 0, 0, 0, 0],
1261
+ [0, 0, 2, 0, 3, 0, 0],
1262
+ [0, 0, 0, 0, 0, 0, 0],
1263
+ [0, 0, 0, 0, 0, 0, 0],
1264
+ [0, 0, 0, 0, 0, 0, -1]], np.int8)
1265
+ out = ndimage.watershed_ift(data, markers)
1266
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
1267
+ [-1, -1, 2, -1, 3, -1, -1],
1268
+ [-1, 2, 2, 3, 3, 3, -1],
1269
+ [-1, 2, 2, 3, 3, 3, -1],
1270
+ [-1, 2, 2, 3, 3, 3, -1],
1271
+ [-1, -1, 2, -1, 3, -1, -1],
1272
+ [-1, -1, -1, -1, -1, -1, -1]]
1273
+ assert_array_almost_equal(out, expected)
1274
+
1275
+ def test_watershed_ift04(self):
1276
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
1277
+ [0, 1, 1, 1, 1, 1, 0],
1278
+ [0, 1, 0, 1, 0, 1, 0],
1279
+ [0, 1, 0, 1, 0, 1, 0],
1280
+ [0, 1, 0, 1, 0, 1, 0],
1281
+ [0, 1, 1, 1, 1, 1, 0],
1282
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1283
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
1284
+ [0, 0, 0, 0, 0, 0, 0],
1285
+ [0, 0, 0, 0, 0, 0, 0],
1286
+ [0, 0, 2, 0, 3, 0, 0],
1287
+ [0, 0, 0, 0, 0, 0, 0],
1288
+ [0, 0, 0, 0, 0, 0, 0],
1289
+ [0, 0, 0, 0, 0, 0, -1]],
1290
+ np.int8)
1291
+ out = ndimage.watershed_ift(data, markers,
1292
+ structure=[[1, 1, 1],
1293
+ [1, 1, 1],
1294
+ [1, 1, 1]])
1295
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
1296
+ [-1, 2, 2, 3, 3, 3, -1],
1297
+ [-1, 2, 2, 3, 3, 3, -1],
1298
+ [-1, 2, 2, 3, 3, 3, -1],
1299
+ [-1, 2, 2, 3, 3, 3, -1],
1300
+ [-1, 2, 2, 3, 3, 3, -1],
1301
+ [-1, -1, -1, -1, -1, -1, -1]]
1302
+ assert_array_almost_equal(out, expected)
1303
+
1304
+ def test_watershed_ift05(self):
1305
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
1306
+ [0, 1, 1, 1, 1, 1, 0],
1307
+ [0, 1, 0, 1, 0, 1, 0],
1308
+ [0, 1, 0, 1, 0, 1, 0],
1309
+ [0, 1, 0, 1, 0, 1, 0],
1310
+ [0, 1, 1, 1, 1, 1, 0],
1311
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1312
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
1313
+ [0, 0, 0, 0, 0, 0, 0],
1314
+ [0, 0, 0, 0, 0, 0, 0],
1315
+ [0, 0, 3, 0, 2, 0, 0],
1316
+ [0, 0, 0, 0, 0, 0, 0],
1317
+ [0, 0, 0, 0, 0, 0, 0],
1318
+ [0, 0, 0, 0, 0, 0, -1]],
1319
+ np.int8)
1320
+ out = ndimage.watershed_ift(data, markers,
1321
+ structure=[[1, 1, 1],
1322
+ [1, 1, 1],
1323
+ [1, 1, 1]])
1324
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
1325
+ [-1, 3, 3, 2, 2, 2, -1],
1326
+ [-1, 3, 3, 2, 2, 2, -1],
1327
+ [-1, 3, 3, 2, 2, 2, -1],
1328
+ [-1, 3, 3, 2, 2, 2, -1],
1329
+ [-1, 3, 3, 2, 2, 2, -1],
1330
+ [-1, -1, -1, -1, -1, -1, -1]]
1331
+ assert_array_almost_equal(out, expected)
1332
+
1333
+ def test_watershed_ift06(self):
1334
+ data = np.array([[0, 1, 0, 0, 0, 1, 0],
1335
+ [0, 1, 0, 0, 0, 1, 0],
1336
+ [0, 1, 0, 0, 0, 1, 0],
1337
+ [0, 1, 1, 1, 1, 1, 0],
1338
+ [0, 0, 0, 0, 0, 0, 0],
1339
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1340
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
1341
+ [0, 0, 0, 1, 0, 0, 0],
1342
+ [0, 0, 0, 0, 0, 0, 0],
1343
+ [0, 0, 0, 0, 0, 0, 0],
1344
+ [0, 0, 0, 0, 0, 0, 0],
1345
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
1346
+ out = ndimage.watershed_ift(data, markers,
1347
+ structure=[[1, 1, 1],
1348
+ [1, 1, 1],
1349
+ [1, 1, 1]])
1350
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
1351
+ [-1, 1, 1, 1, 1, 1, -1],
1352
+ [-1, 1, 1, 1, 1, 1, -1],
1353
+ [-1, 1, 1, 1, 1, 1, -1],
1354
+ [-1, -1, -1, -1, -1, -1, -1],
1355
+ [-1, -1, -1, -1, -1, -1, -1]]
1356
+ assert_array_almost_equal(out, expected)
1357
+
1358
+ def test_watershed_ift07(self):
1359
+ shape = (7, 6)
1360
+ data = np.zeros(shape, dtype=np.uint8)
1361
+ data = data.transpose()
1362
+ data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
1363
+ [0, 1, 0, 0, 0, 1, 0],
1364
+ [0, 1, 0, 0, 0, 1, 0],
1365
+ [0, 1, 1, 1, 1, 1, 0],
1366
+ [0, 0, 0, 0, 0, 0, 0],
1367
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
1368
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
1369
+ [0, 0, 0, 1, 0, 0, 0],
1370
+ [0, 0, 0, 0, 0, 0, 0],
1371
+ [0, 0, 0, 0, 0, 0, 0],
1372
+ [0, 0, 0, 0, 0, 0, 0],
1373
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
1374
+ out = np.zeros(shape, dtype=np.int16)
1375
+ out = out.transpose()
1376
+ ndimage.watershed_ift(data, markers,
1377
+ structure=[[1, 1, 1],
1378
+ [1, 1, 1],
1379
+ [1, 1, 1]],
1380
+ output=out)
1381
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
1382
+ [-1, 1, 1, 1, 1, 1, -1],
1383
+ [-1, 1, 1, 1, 1, 1, -1],
1384
+ [-1, 1, 1, 1, 1, 1, -1],
1385
+ [-1, -1, -1, -1, -1, -1, -1],
1386
+ [-1, -1, -1, -1, -1, -1, -1]]
1387
+ assert_array_almost_equal(out, expected)
1388
+
1389
+ def test_watershed_ift08(self):
1390
+ # Test cost larger than uint8. See gh-10069.
1391
+ data = np.array([[256, 0],
1392
+ [0, 0]], np.uint16)
1393
+ markers = np.array([[1, 0],
1394
+ [0, 0]], np.int8)
1395
+ out = ndimage.watershed_ift(data, markers)
1396
+ expected = [[1, 1],
1397
+ [1, 1]]
1398
+ assert_array_almost_equal(out, expected)
1399
+
1400
+ def test_watershed_ift09(self):
1401
+ # Test large cost. See gh-19575
1402
+ data = np.array([[np.iinfo(np.uint16).max, 0],
1403
+ [0, 0]], np.uint16)
1404
+ markers = np.array([[1, 0],
1405
+ [0, 0]], np.int8)
1406
+ out = ndimage.watershed_ift(data, markers)
1407
+ expected = [[1, 1],
1408
+ [1, 1]]
1409
+ assert_allclose(out, expected)
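
Every case in TestWatershedIft above exercises the same calling convention: an unsigned-integer cost image (uint8 or uint16, matching the dtypes used in the tests), a signed-integer marker array whose positive entries seed the regions and whose negative entries seed the background, and an optional 3x3 structuring element that switches the flood from 4- to 8-connectivity. The following standalone sketch shows that pattern outside the test harness; the array values and variable names here are illustrative, not part of the committed test file.

import numpy as np
from scipy import ndimage

# Cost image: the flood advances through low values first.
cost = np.array([[0, 0, 0, 0, 0],
                 [0, 1, 1, 1, 0],
                 [0, 1, 0, 1, 0],
                 [0, 1, 1, 1, 0],
                 [0, 0, 0, 0, 0]], dtype=np.uint8)

# Markers: positive labels seed regions, negative labels seed the
# background, zero means unlabelled; a signed integer dtype is required.
markers = np.zeros_like(cost, dtype=np.int8)
markers[2, 2] = 1    # seed inside the ring of ones
markers[0, 0] = -1   # background seed in a corner

# A 3x3 block of ones makes the flood 8-connected, as in
# test_watershed_ift01; omitting `structure` gives 4-connectivity.
segmentation = ndimage.watershed_ift(cost, markers,
                                     structure=np.ones((3, 3), dtype=int))
print(segmentation)
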
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py ADDED
@@ -0,0 +1,77 @@
+ import pytest
+
+ import numpy as np
+ from .._ni_support import _get_output
+
+
+ @pytest.mark.parametrize(
+     'dtype',
+     [
+         # String specifiers
+         'f4', 'float32', 'complex64', 'complex128',
+         # Type and dtype specifiers
+         np.float32, float, np.dtype('f4'),
+         # Derive from input
+         None,
+     ],
+ )
+ def test_get_output_basic(dtype):
+     shape = (2, 3)
+
+     input_ = np.zeros(shape, 'float32')
+
+     # For None, derive dtype from input
+     expected_dtype = 'float32' if dtype is None else dtype
+
+     # Output is dtype-specifier, retrieve shape from input
+     result = _get_output(dtype, input_)
+     assert result.shape == shape
+     assert result.dtype == np.dtype(expected_dtype)
+
+     # Output is dtype specifier, with explicit shape, overriding input
+     result = _get_output(dtype, input_, shape=(3, 2))
+     assert result.shape == (3, 2)
+     assert result.dtype == np.dtype(expected_dtype)
+
+     # Output is pre-allocated array, return directly
+     output = np.zeros(shape, dtype)
+     result = _get_output(output, input_)
+     assert result is output
+
+
+ def test_get_output_complex():
+     shape = (2, 3)
+
+     input_ = np.zeros(shape)
+
+     # None, promote input type to complex
+     result = _get_output(None, input_, complex_output=True)
+     assert result.shape == shape
+     assert result.dtype == np.dtype('complex128')
+
+     # Explicit type, promote type to complex
+     with pytest.warns(UserWarning, match='promoting specified output dtype to complex'):
+         result = _get_output(float, input_, complex_output=True)
+     assert result.shape == shape
+     assert result.dtype == np.dtype('complex128')
+
+     # String specifier, simply verify complex output
+     result = _get_output('complex64', input_, complex_output=True)
+     assert result.shape == shape
+     assert result.dtype == np.dtype('complex64')
+
+
+ def test_get_output_error_cases():
+     input_ = np.zeros((2, 3), 'float32')
+
+     # Two separate paths can raise the same error
+     with pytest.raises(RuntimeError, match='output must have complex dtype'):
+         _get_output('float32', input_, complex_output=True)
+     with pytest.raises(RuntimeError, match='output must have complex dtype'):
+         _get_output(np.zeros((2, 3)), input_, complex_output=True)
+
+     with pytest.raises(RuntimeError, match='output must have numeric dtype'):
+         _get_output('void', input_)
+
+     with pytest.raises(RuntimeError, match='shape not correct'):
+         _get_output(np.zeros((3, 2)), input_)
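
_get_output is a private helper, so the tests above are the main record of its contract: None or a dtype-like specifier allocates a fresh array shaped like the input, a pre-allocated array is validated and returned unchanged, and complex_output either promotes or rejects a real dtype depending on how the output was specified. The sketch below restates that contract in plain NumPy; it is an illustration of the behaviour the tests pin down, not the SciPy implementation, and get_output_sketch is a hypothetical name.

import warnings
import numpy as np

def get_output_sketch(output, input_, shape=None, complex_output=False):
    """Simplified illustration of the contract exercised by the tests."""
    if shape is None:
        shape = input_.shape
    if output is None:
        # Derive the dtype from the input, promoting to complex on request.
        dtype = input_.dtype
        if complex_output and dtype.kind != 'c':
            dtype = np.promote_types(dtype, np.complex64)
        return np.zeros(shape, dtype=dtype)
    if isinstance(output, np.ndarray):
        # Pre-allocated arrays are validated and returned as-is.
        if output.shape != shape:
            raise RuntimeError("output shape not correct")
        if complex_output and output.dtype.kind != 'c':
            raise RuntimeError("output must have complex dtype")
        return output
    if isinstance(output, (type, np.dtype)):
        # Type/dtype specifiers are promoted to complex with a warning.
        dtype = np.dtype(output)
        if complex_output and dtype.kind != 'c':
            warnings.warn("promoting specified output dtype to complex")
            dtype = np.promote_types(dtype, np.complex64)
        return np.zeros(shape, dtype=dtype)
    # Anything else is treated as a string dtype specifier.
    dtype = np.dtype(output)
    if dtype.kind not in 'buifc':
        raise RuntimeError("output must have numeric dtype")
    if complex_output and dtype.kind != 'c':
        raise RuntimeError("output must have complex dtype")
    return np.zeros(shape, dtype=dtype)
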
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py ADDED
@@ -0,0 +1,65 @@
+ """Tests for spline filtering."""
+ import numpy as np
+ import pytest
+
+ from numpy.testing import assert_almost_equal
+
+ from scipy import ndimage
+
+
+ def get_spline_knot_values(order):
+     """Knot values to the right of a B-spline's center."""
+     knot_values = {0: [1],
+                    1: [1],
+                    2: [6, 1],
+                    3: [4, 1],
+                    4: [230, 76, 1],
+                    5: [66, 26, 1]}
+
+     return knot_values[order]
+
+
+ def make_spline_knot_matrix(n, order, mode='mirror'):
+     """Matrix to invert to find the spline coefficients."""
+     knot_values = get_spline_knot_values(order)
+
+     matrix = np.zeros((n, n))
+     for diag, knot_value in enumerate(knot_values):
+         indices = np.arange(diag, n)
+         if diag == 0:
+             matrix[indices, indices] = knot_value
+         else:
+             matrix[indices, indices - diag] = knot_value
+             matrix[indices - diag, indices] = knot_value
+
+     knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
+
+     if mode == 'mirror':
+         start, step = 1, 1
+     elif mode == 'reflect':
+         start, step = 0, 1
+     elif mode == 'grid-wrap':
+         start, step = -1, -1
+     else:
+         raise ValueError(f'unsupported mode {mode}')
+
+     for row in range(len(knot_values) - 1):
+         for idx, knot_value in enumerate(knot_values[row + 1:]):
+             matrix[row, start + step*idx] += knot_value
+             matrix[-row - 1, -start - 1 - step*idx] += knot_value
+
+     return matrix / knot_values_sum
+
+
+ @pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
+ @pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect'])
+ def test_spline_filter_vs_matrix_solution(order, mode):
+     n = 100
+     eye = np.eye(n, dtype=float)
+     spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order,
+                                                    mode=mode)
+     spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order,
+                                                    mode=mode)
+     matrix = make_spline_knot_matrix(n, order, mode=mode)
+     assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
+     assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
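
The parametrized test above encodes the identity the spline prefilter is built on: spline_filter1d returns coefficients c such that multiplying by the banded knot matrix B reproduces the input under the chosen boundary mode. A quick check of that identity for the cubic, mirror-mode case is sketched below; the signal length, seed, and variable names are illustrative and not taken from the test file.

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
n, order = 32, 3
x = rng.standard_normal(n)

# Coefficients produced by the ndimage prefilter.
c = ndimage.spline_filter1d(x, order=order, mode='mirror')

# Apply the cubic B-spline knot values [1, 4, 1] / 6 to the coefficients,
# mirroring the end points (c[-1] -> c[1], c[n] -> c[n-2]) to match the
# 'mirror' boundary handling.
padded = np.concatenate(([c[1]], c, [c[-2]]))
kernel = np.array([1.0, 4.0, 1.0]) / 6.0
x_back = np.convolve(padded, kernel, mode='valid')

print(np.allclose(x, x_back))   # expected: True, i.e. B @ c recovers x
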
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc ADDED
Binary file (62.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc ADDED
Binary file (5.78 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc ADDED
Binary file (16.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc ADDED
Binary file (23.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc ADDED
Binary file (25.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc ADDED
Binary file (44.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc ADDED
Binary file (15 kB). View file