diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b4f7365c430c7d791757748c62f203d9bcee4e2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8f652d2d6e3359ce84515aa67813c7946d2c234
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe4d2fbbdd95f8d796c23c04a146e4d6c4e9485b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/interpolate/__pycache__/fitpack.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f619dded6615a284392c4273559f226a1c8c72c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__init__.py
@@ -0,0 +1,169 @@
+"""
+=========================================================
+Multidimensional image processing (:mod:`scipy.ndimage`)
+=========================================================
+
+.. currentmodule:: scipy.ndimage
+
+This package contains various functions for multidimensional image
+processing.
+
+
+Filters
+=======
+
+.. autosummary::
+ :toctree: generated/
+
+ convolve - Multidimensional convolution
+ convolve1d - 1-D convolution along the given axis
+ correlate - Multidimensional correlation
+ correlate1d - 1-D correlation along the given axis
+ gaussian_filter
+ gaussian_filter1d
+ gaussian_gradient_magnitude
+ gaussian_laplace
+ generic_filter - Multidimensional filter using a given function
+ generic_filter1d - 1-D generic filter along the given axis
+ generic_gradient_magnitude
+ generic_laplace
+ laplace - N-D Laplace filter based on approximate second derivatives
+ maximum_filter
+ maximum_filter1d
+ median_filter - Calculates a multidimensional median filter
+ minimum_filter
+ minimum_filter1d
+ percentile_filter - Calculates a multidimensional percentile filter
+ prewitt
+ rank_filter - Calculates a multidimensional rank filter
+ sobel
+ uniform_filter - Multidimensional uniform filter
+ uniform_filter1d - 1-D uniform filter along the given axis
+
+Fourier filters
+===============
+
+.. autosummary::
+ :toctree: generated/
+
+ fourier_ellipsoid
+ fourier_gaussian
+ fourier_shift
+ fourier_uniform
+
+Interpolation
+=============
+
+.. autosummary::
+ :toctree: generated/
+
+ affine_transform - Apply an affine transformation
+ geometric_transform - Apply an arbitrary geometric transform
+ map_coordinates - Map input array to new coordinates by interpolation
+ rotate - Rotate an array
+ shift - Shift an array
+ spline_filter
+ spline_filter1d
+ zoom - Zoom an array
+
+Measurements
+============
+
+.. autosummary::
+ :toctree: generated/
+
+ center_of_mass - The center of mass of the values of an array at labels
+ extrema - Minima and maxima of an array at labels, with their positions
+ find_objects - Find objects in a labeled array
+ histogram - Histogram of the values of an array, optionally at labels
+ label - Label features in an array
+ labeled_comprehension
+ maximum
+ maximum_position
+ mean - Mean of the values of an array at labels
+ median
+ minimum
+ minimum_position
+ standard_deviation - Standard deviation of an N-D image array
+ sum_labels - Sum of the values of the array
+ value_indices - Find indices of each distinct value in given array
+ variance - Variance of the values of an N-D image array
+ watershed_ift
+
+Morphology
+==========
+
+.. autosummary::
+ :toctree: generated/
+
+ binary_closing
+ binary_dilation
+ binary_erosion
+ binary_fill_holes
+ binary_hit_or_miss
+ binary_opening
+ binary_propagation
+ black_tophat
+ distance_transform_bf
+ distance_transform_cdt
+ distance_transform_edt
+ generate_binary_structure
+ grey_closing
+ grey_dilation
+ grey_erosion
+ grey_opening
+ iterate_structure
+ morphological_gradient
+ morphological_laplace
+ white_tophat
+
+"""
+
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from ._filters import *
+from ._fourier import *
+from ._interpolation import *
+from ._measurements import *
+from ._morphology import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import filters
+from . import fourier
+from . import interpolation
+from . import measurements
+from . import morphology
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0d05e123ba1f7f45c1f37795e7ba5cd0257018b4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..af816c8e2ce5740bd2b524a4bca51b4d0ccdac20
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_filters.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2907614d5acffcd8dcfaf054d84d69b438e7923
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_filters.py
@@ -0,0 +1,1852 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections.abc import Iterable
+import numbers
+import warnings
+import numpy
+import operator
+
+from scipy._lib._util import normalize_axis_index
+from . import _ni_support
+from . import _nd_image
+from . import _ni_docstrings
+
+__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
+ 'prewitt', 'sobel', 'generic_laplace', 'laplace',
+ 'gaussian_laplace', 'generic_gradient_magnitude',
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
+ 'rank_filter', 'median_filter', 'percentile_filter',
+ 'generic_filter1d', 'generic_filter']
+
+
+def _invalid_origin(origin, lenw):
+ return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
+
+
+def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
+ """Complex convolution via a linear combination of real convolutions."""
+ complex_input = input.dtype.kind == 'c'
+ complex_weights = weights.dtype.kind == 'c'
+ if complex_input and complex_weights:
+ # real component of the output
+ func(input.real, weights.real, output=output.real,
+ cval=numpy.real(cval), **kwargs)
+ output.real -= func(input.imag, weights.imag, output=None,
+ cval=numpy.imag(cval), **kwargs)
+ # imaginary component of the output
+ func(input.real, weights.imag, output=output.imag,
+ cval=numpy.real(cval), **kwargs)
+ output.imag += func(input.imag, weights.real, output=None,
+ cval=numpy.imag(cval), **kwargs)
+ elif complex_input:
+ func(input.real, weights, output=output.real, cval=numpy.real(cval),
+ **kwargs)
+ func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
+ **kwargs)
+ else:
+ if numpy.iscomplexobj(cval):
+ raise ValueError("Cannot provide a complex-valued cval when the "
+ "input is real.")
+ func(input, weights.real, output=output.real, cval=cval, **kwargs)
+ func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
+ return output
+
+
+@_ni_docstrings.docfiller
+def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
+ cval=0.0, origin=0):
+ """Calculate a 1-D correlation along the given axis.
+
+ The lines of the array along the given axis are correlated with the
+ given weights.
+
+ Parameters
+ ----------
+ %(input)s
+ weights : array
+ 1-D sequence of numbers.
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+
+ Returns
+ -------
+ result : ndarray
+ Correlation result. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy.ndimage import correlate1d
+ >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
+ array([ 8, 26, 8, 12, 7, 28, 36, 9])
+ """
+ input = numpy.asarray(input)
+ weights = numpy.asarray(weights)
+ complex_input = input.dtype.kind == 'c'
+ complex_weights = weights.dtype.kind == 'c'
+ if complex_input or complex_weights:
+ if complex_weights:
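+            # as in numpy.correlate, conjugate the weights rather than
+            # the input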
+ weights = weights.conj()
+ weights = weights.astype(numpy.complex128, copy=False)
+ kwargs = dict(axis=axis, mode=mode, origin=origin)
+ output = _ni_support._get_output(output, input, complex_output=True)
+ return _complex_via_real_components(correlate1d, input, weights,
+ output, cval, **kwargs)
+
+ output = _ni_support._get_output(output, input)
+ weights = numpy.asarray(weights, dtype=numpy.float64)
+ if weights.ndim != 1 or weights.shape[0] < 1:
+ raise RuntimeError('no filter weights given')
+ if not weights.flags.contiguous:
+ weights = weights.copy()
+ axis = normalize_axis_index(axis, input.ndim)
+ if _invalid_origin(origin, len(weights)):
+ raise ValueError('Invalid origin; origin must satisfy '
+ '-(len(weights) // 2) <= origin <= '
+ '(len(weights)-1) // 2')
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.correlate1d(input, weights, axis, output, mode, cval,
+ origin)
+ return output
+
+
+@_ni_docstrings.docfiller
+def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
+ cval=0.0, origin=0):
+ """Calculate a 1-D convolution along the given axis.
+
+ The lines of the array along the given axis are convolved with the
+ given weights.
+
+ Parameters
+ ----------
+ %(input)s
+ weights : ndarray
+ 1-D sequence of numbers.
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+
+ Returns
+ -------
+ convolve1d : ndarray
+ Convolved array with same shape as input
+
+ Examples
+ --------
+ >>> from scipy.ndimage import convolve1d
+ >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
+ array([14, 24, 4, 13, 12, 36, 27, 0])
+ """
+ weights = weights[::-1]
+ origin = -origin
+ if not len(weights) & 1:
+ origin -= 1
+ weights = numpy.asarray(weights)
+ if weights.dtype.kind == 'c':
+ # pre-conjugate here to counteract the conjugation in correlate1d
+ weights = weights.conj()
+ return correlate1d(input, weights, axis, output, mode, cval, origin)
+
+
+def _gaussian_kernel1d(sigma, order, radius):
+ """
+ Computes a 1-D Gaussian convolution kernel.
+ """
+ if order < 0:
+ raise ValueError('order must be non-negative')
+ exponent_range = numpy.arange(order + 1)
+ sigma2 = sigma * sigma
+ x = numpy.arange(-radius, radius+1)
+ phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
+ phi_x = phi_x / phi_x.sum()
+
+ if order == 0:
+ return phi_x
+ else:
+ # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
+ # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
+ # p'(x) = -1 / sigma ** 2
+ # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
+ # coefficients of q(x)
+ q = numpy.zeros(order + 1)
+ q[0] = 1
+ D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
+ P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
+ Q_deriv = D + P
+ for _ in range(order):
+ q = Q_deriv.dot(q)
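+        # evaluate the polynomial q at every kernel sample position x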
+ q = (x[:, None] ** exponent_range).dot(q)
+ return q * phi_x
+
+
+@_ni_docstrings.docfiller
+def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
+ """1-D Gaussian filter.
+
+ Parameters
+ ----------
+ %(input)s
+ sigma : scalar
+ standard deviation for Gaussian kernel
+ %(axis)s
+ order : int, optional
+ An order of 0 corresponds to convolution with a Gaussian
+ kernel. A positive order corresponds to convolution with
+ that derivative of a Gaussian.
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ truncate : float, optional
+ Truncate the filter at this many standard deviations.
+ Default is 4.0.
+ radius : None or int, optional
+ Radius of the Gaussian kernel. If specified, the size of
+ the kernel will be ``2*radius + 1``, and `truncate` is ignored.
+ Default is None.
+
+ Returns
+ -------
+ gaussian_filter1d : ndarray
+
+ Notes
+ -----
+    The Gaussian kernel will have size ``2*radius + 1``. If `radius` is
+    None, a default ``radius = round(truncate * sigma)`` will be used.
+
+ Examples
+ --------
+ >>> from scipy.ndimage import gaussian_filter1d
+ >>> import numpy as np
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
+ array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
+ >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
+ array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
+ >>> import matplotlib.pyplot as plt
+ >>> rng = np.random.default_rng()
+ >>> x = rng.standard_normal(101).cumsum()
+ >>> y3 = gaussian_filter1d(x, 3)
+ >>> y6 = gaussian_filter1d(x, 6)
+ >>> plt.plot(x, 'k', label='original data')
+ >>> plt.plot(y3, '--', label='filtered, sigma=3')
+ >>> plt.plot(y6, ':', label='filtered, sigma=6')
+ >>> plt.legend()
+ >>> plt.grid()
+ >>> plt.show()
+
+ """
+ sd = float(sigma)
+ # make the radius of the filter equal to truncate standard deviations
+ lw = int(truncate * sd + 0.5)
+ if radius is not None:
+ lw = radius
+ if not isinstance(lw, numbers.Integral) or lw < 0:
+ raise ValueError('Radius must be a nonnegative integer.')
+ # Since we are calling correlate, not convolve, revert the kernel
+ weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
+ return correlate1d(input, weights, axis, output, mode, cval, 0)
+
+
+@_ni_docstrings.docfiller
+def gaussian_filter(input, sigma, order=0, output=None,
+ mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
+ axes=None):
+ """Multidimensional Gaussian filter.
+
+ Parameters
+ ----------
+ %(input)s
+ sigma : scalar or sequence of scalars
+ Standard deviation for Gaussian kernel. The standard
+ deviations of the Gaussian filter are given for each axis as a
+ sequence, or as a single number, in which case it is equal for
+ all axes.
+ order : int or sequence of ints, optional
+ The order of the filter along each axis is given as a sequence
+ of integers, or as a single number. An order of 0 corresponds
+ to convolution with a Gaussian kernel. A positive order
+ corresponds to convolution with that derivative of a Gaussian.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ truncate : float, optional
+ Truncate the filter at this many standard deviations.
+ Default is 4.0.
+ radius : None or int or sequence of ints, optional
+        Radius of the Gaussian kernel. The radius is given for each axis
+ as a sequence, or as a single number, in which case it is equal
+ for all axes. If specified, the size of the kernel along each axis
+ will be ``2*radius + 1``, and `truncate` is ignored.
+ Default is None.
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes. When `axes` is
+ specified, any tuples used for `sigma`, `order`, `mode` and/or `radius`
+ must match the length of `axes`. The ith entry in any of these tuples
+ corresponds to the ith entry in `axes`.
+
+ Returns
+ -------
+ gaussian_filter : ndarray
+ Returned array of same shape as `input`.
+
+ Notes
+ -----
+ The multidimensional filter is implemented as a sequence of
+ 1-D convolution filters. The intermediate arrays are
+ stored in the same data type as the output. Therefore, for output
+ types with a limited precision, the results may be imprecise
+ because intermediate results may be stored with insufficient
+ precision.
+
+ The Gaussian kernel will have size ``2*radius + 1`` along each axis. If
+ `radius` is None, the default ``radius = round(truncate * sigma)`` will be
+ used.
+
+ Examples
+ --------
+ >>> from scipy.ndimage import gaussian_filter
+ >>> import numpy as np
+ >>> a = np.arange(50, step=2).reshape((5,5))
+ >>> a
+ array([[ 0, 2, 4, 6, 8],
+ [10, 12, 14, 16, 18],
+ [20, 22, 24, 26, 28],
+ [30, 32, 34, 36, 38],
+ [40, 42, 44, 46, 48]])
+ >>> gaussian_filter(a, sigma=1)
+ array([[ 4, 6, 8, 9, 11],
+ [10, 12, 14, 15, 17],
+ [20, 22, 24, 25, 27],
+ [29, 31, 33, 34, 36],
+ [35, 37, 39, 40, 42]])
+
+ >>> from scipy import datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = gaussian_filter(ascent, sigma=5)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+ output = _ni_support._get_output(output, input)
+
+ axes = _ni_support._check_axes(axes, input.ndim)
+ num_axes = len(axes)
+ orders = _ni_support._normalize_sequence(order, num_axes)
+ sigmas = _ni_support._normalize_sequence(sigma, num_axes)
+ modes = _ni_support._normalize_sequence(mode, num_axes)
+ radiuses = _ni_support._normalize_sequence(radius, num_axes)
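+    # Filter sequentially along each requested axis, skipping axes whose
+    # sigma is effectively zero; each subsequent 1-D pass reads the output
+    # of the previous one.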
+ axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii])
+ for ii in range(num_axes) if sigmas[ii] > 1e-15]
+ if len(axes) > 0:
+ for axis, sigma, order, mode, radius in axes:
+ gaussian_filter1d(input, sigma, axis, order, output,
+ mode, cval, truncate, radius=radius)
+ input = output
+ else:
+ output[...] = input[...]
+ return output
+
+
+@_ni_docstrings.docfiller
+def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
+ """Calculate a Prewitt filter.
+
+ Parameters
+ ----------
+ %(input)s
+ %(axis)s
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+
+ Returns
+ -------
+ prewitt : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ See Also
+ --------
+ sobel: Sobel filter
+
+ Notes
+ -----
+    This function computes the axis-specific Prewitt gradient.
+ Horizontal edges are emphasised with the horizontal transform (axis=0),
+ vertical edges with the vertical transform (axis=1), and so on for higher
+ dimensions. These can be combined to give the magnitude.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+ >>> ascent = datasets.ascent()
+ >>> prewitt_h = ndimage.prewitt(ascent, axis=0)
+ >>> prewitt_v = ndimage.prewitt(ascent, axis=1)
+ >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2)
+ >>> magnitude *= 255 / np.max(magnitude) # Normalization
+ >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8))
+ >>> plt.gray()
+ >>> axes[0, 0].imshow(ascent)
+ >>> axes[0, 1].imshow(prewitt_h)
+ >>> axes[1, 0].imshow(prewitt_v)
+ >>> axes[1, 1].imshow(magnitude)
+ >>> titles = ["original", "horizontal", "vertical", "magnitude"]
+ >>> for i, ax in enumerate(axes.ravel()):
+ ... ax.set_title(titles[i])
+ ... ax.axis("off")
+ >>> plt.show()
+
+ """
+ input = numpy.asarray(input)
+ axis = normalize_axis_index(axis, input.ndim)
+ output = _ni_support._get_output(output, input)
+ modes = _ni_support._normalize_sequence(mode, input.ndim)
+ correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
+ axes = [ii for ii in range(input.ndim) if ii != axis]
+ for ii in axes:
+ correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
+ return output
+
+
+@_ni_docstrings.docfiller
+def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
+ """Calculate a Sobel filter.
+
+ Parameters
+ ----------
+ %(input)s
+ %(axis)s
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+
+ Returns
+ -------
+ sobel : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ This function computes the axis-specific Sobel gradient.
+ The horizontal edges can be emphasised with the horizontal transform (axis=0),
+    the vertical edges with the vertical transform (axis=1), and so on for
+    higher
+ dimensions. These can be combined to give the magnitude.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+ >>> ascent = datasets.ascent().astype('int32')
+ >>> sobel_h = ndimage.sobel(ascent, 0) # horizontal gradient
+ >>> sobel_v = ndimage.sobel(ascent, 1) # vertical gradient
+ >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2)
+ >>> magnitude *= 255.0 / np.max(magnitude) # normalization
+ >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8))
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> axs[0, 0].imshow(ascent)
+ >>> axs[0, 1].imshow(sobel_h)
+ >>> axs[1, 0].imshow(sobel_v)
+ >>> axs[1, 1].imshow(magnitude)
+ >>> titles = ["original", "horizontal", "vertical", "magnitude"]
+ >>> for i, ax in enumerate(axs.ravel()):
+ ... ax.set_title(titles[i])
+ ... ax.axis("off")
+ >>> plt.show()
+
+ """
+ input = numpy.asarray(input)
+ axis = normalize_axis_index(axis, input.ndim)
+ output = _ni_support._get_output(output, input)
+ modes = _ni_support._normalize_sequence(mode, input.ndim)
+ correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
+ axes = [ii for ii in range(input.ndim) if ii != axis]
+ for ii in axes:
+ correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
+ return output
+
+
+@_ni_docstrings.docfiller
+def generic_laplace(input, derivative2, output=None, mode="reflect",
+ cval=0.0,
+ extra_arguments=(),
+ extra_keywords=None):
+ """
+ N-D Laplace filter using a provided second derivative function.
+
+ Parameters
+ ----------
+ %(input)s
+ derivative2 : callable
+ Callable with the following signature::
+
+ derivative2(input, axis, output, mode, cval,
+ *extra_arguments, **extra_keywords)
+
+ See `extra_arguments`, `extra_keywords` below.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ %(extra_keywords)s
+ %(extra_arguments)s
+
+ Returns
+ -------
+ generic_laplace : ndarray
+ Filtered array. Has the same shape as `input`.
+
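+    Examples
+    --------
+    A minimal sketch: the helper ``d2`` below is illustrative, supplying the
+    same ``[1, -2, 1]`` second-derivative stencil that `laplace` uses, so
+    the two results should agree:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> def d2(input, axis, output, mode, cval):
+    ...     return ndimage.correlate1d(input, [1, -2, 1], axis, output,
+    ...                                mode, cval, 0)
+    >>> a = np.arange(25, dtype=float).reshape(5, 5)
+    >>> np.allclose(ndimage.generic_laplace(a, d2), ndimage.laplace(a))
+    True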
+ """
+ if extra_keywords is None:
+ extra_keywords = {}
+ input = numpy.asarray(input)
+ output = _ni_support._get_output(output, input)
+ axes = list(range(input.ndim))
+ if len(axes) > 0:
+ modes = _ni_support._normalize_sequence(mode, len(axes))
+ derivative2(input, axes[0], output, modes[0], cval,
+ *extra_arguments, **extra_keywords)
+ for ii in range(1, len(axes)):
+ tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
+ *extra_arguments, **extra_keywords)
+ output += tmp
+ else:
+ output[...] = input[...]
+ return output
+
+
+@_ni_docstrings.docfiller
+def laplace(input, output=None, mode="reflect", cval=0.0):
+ """N-D Laplace filter based on approximate second derivatives.
+
+ Parameters
+ ----------
+ %(input)s
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+
+ Returns
+ -------
+ laplace : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.laplace(ascent)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ def derivative2(input, axis, output, mode, cval):
+ return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
+ return generic_laplace(input, derivative2, output, mode, cval)
+
+
+@_ni_docstrings.docfiller
+def gaussian_laplace(input, sigma, output=None, mode="reflect",
+ cval=0.0, **kwargs):
+ """Multidimensional Laplace filter using Gaussian second derivatives.
+
+ Parameters
+ ----------
+ %(input)s
+ sigma : scalar or sequence of scalars
+ The standard deviations of the Gaussian filter are given for
+ each axis as a sequence, or as a single number, in which case
+ it is equal for all axes.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ Extra keyword arguments will be passed to gaussian_filter().
+
+ Returns
+ -------
+ gaussian_laplace : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> ascent = datasets.ascent()
+
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+
+ >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
+ >>> ax1.imshow(result)
+
+ >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+
+ def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
+ order = [0] * input.ndim
+ order[axis] = 2
+ return gaussian_filter(input, sigma, order, output, mode, cval,
+ **kwargs)
+
+ return generic_laplace(input, derivative2, output, mode, cval,
+ extra_arguments=(sigma,),
+ extra_keywords=kwargs)
+
+
+@_ni_docstrings.docfiller
+def generic_gradient_magnitude(input, derivative, output=None,
+ mode="reflect", cval=0.0,
+ extra_arguments=(), extra_keywords=None):
+ """Gradient magnitude using a provided gradient function.
+
+ Parameters
+ ----------
+ %(input)s
+ derivative : callable
+ Callable with the following signature::
+
+ derivative(input, axis, output, mode, cval,
+ *extra_arguments, **extra_keywords)
+
+ See `extra_arguments`, `extra_keywords` below.
+ `derivative` can assume that `input` and `output` are ndarrays.
+ Note that the output from `derivative` is modified inplace;
+ be careful to copy important inputs before returning them.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ %(extra_keywords)s
+ %(extra_arguments)s
+
+ Returns
+ -------
+    generic_gradient_magnitude : ndarray
+ Filtered array. Has the same shape as `input`.
+
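+    Examples
+    --------
+    A minimal sketch: `sobel` happens to match the expected `derivative`
+    signature, so it can be passed directly, giving the Sobel gradient
+    magnitude:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.zeros((5, 5))
+    >>> a[2, 2] = 1.0
+    >>> mag = ndimage.generic_gradient_magnitude(a, ndimage.sobel)
+    >>> np.allclose(mag, np.sqrt(ndimage.sobel(a, 0)**2
+    ...                          + ndimage.sobel(a, 1)**2))
+    True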
+ """
+ if extra_keywords is None:
+ extra_keywords = {}
+ input = numpy.asarray(input)
+ output = _ni_support._get_output(output, input)
+ axes = list(range(input.ndim))
+ if len(axes) > 0:
+ modes = _ni_support._normalize_sequence(mode, len(axes))
+ derivative(input, axes[0], output, modes[0], cval,
+ *extra_arguments, **extra_keywords)
+ numpy.multiply(output, output, output)
+ for ii in range(1, len(axes)):
+ tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
+ *extra_arguments, **extra_keywords)
+ numpy.multiply(tmp, tmp, tmp)
+ output += tmp
+ # This allows the sqrt to work with a different default casting
+ numpy.sqrt(output, output, casting='unsafe')
+ else:
+ output[...] = input[...]
+ return output
+
+
+@_ni_docstrings.docfiller
+def gaussian_gradient_magnitude(input, sigma, output=None,
+ mode="reflect", cval=0.0, **kwargs):
+ """Multidimensional gradient magnitude using Gaussian derivatives.
+
+ Parameters
+ ----------
+ %(input)s
+ sigma : scalar or sequence of scalars
+ The standard deviations of the Gaussian filter are given for
+ each axis as a sequence, or as a single number, in which case
+ it is equal for all axes.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ Extra keyword arguments will be passed to gaussian_filter().
+
+ Returns
+ -------
+ gaussian_gradient_magnitude : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+
+ def derivative(input, axis, output, mode, cval, sigma, **kwargs):
+ order = [0] * input.ndim
+ order[axis] = 1
+ return gaussian_filter(input, sigma, order, output, mode,
+ cval, **kwargs)
+
+ return generic_gradient_magnitude(input, derivative, output, mode,
+ cval, extra_arguments=(sigma,),
+ extra_keywords=kwargs)
+
+
+def _correlate_or_convolve(input, weights, output, mode, cval, origin,
+ convolution):
+ input = numpy.asarray(input)
+ weights = numpy.asarray(weights)
+ complex_input = input.dtype.kind == 'c'
+ complex_weights = weights.dtype.kind == 'c'
+ if complex_input or complex_weights:
+ if complex_weights and not convolution:
+ # As for numpy.correlate, conjugate weights rather than input.
+ weights = weights.conj()
+ kwargs = dict(
+ mode=mode, origin=origin, convolution=convolution
+ )
+ output = _ni_support._get_output(output, input, complex_output=True)
+
+ return _complex_via_real_components(_correlate_or_convolve, input,
+ weights, output, cval, **kwargs)
+
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
+ weights = numpy.asarray(weights, dtype=numpy.float64)
+ wshape = [ii for ii in weights.shape if ii > 0]
+ if len(wshape) != input.ndim:
+ raise RuntimeError('filter weights array has incorrect shape.')
+ if convolution:
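+        # flip the kernel along every axis and negate the origins; axes of
+        # even length shift the kernel center by one sample after the flip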
+ weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
+ for ii in range(len(origins)):
+ origins[ii] = -origins[ii]
+ if not weights.shape[ii] & 1:
+ origins[ii] -= 1
+ for origin, lenw in zip(origins, wshape):
+ if _invalid_origin(origin, lenw):
+ raise ValueError('Invalid origin; origin must satisfy '
+ '-(weights.shape[k] // 2) <= origin[k] <= '
+ '(weights.shape[k]-1) // 2')
+
+ if not weights.flags.contiguous:
+ weights = weights.copy()
+ output = _ni_support._get_output(output, input)
+ temp_needed = numpy.may_share_memory(input, output)
+ if temp_needed:
+ # input and output arrays cannot share memory
+ temp = output
+ output = _ni_support._get_output(output.dtype, input)
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
+ raise RuntimeError("A sequence of modes is not supported")
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.correlate(input, weights, output, mode, cval, origins)
+ if temp_needed:
+ temp[...] = output
+ output = temp
+ return output
+
+
+@_ni_docstrings.docfiller
+def correlate(input, weights, output=None, mode='reflect', cval=0.0,
+ origin=0):
+ """
+ Multidimensional correlation.
+
+ The array is correlated with the given kernel.
+
+ Parameters
+ ----------
+ %(input)s
+ weights : ndarray
+ array of weights, same number of dimensions as input
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin_multiple)s
+
+ Returns
+ -------
+ result : ndarray
+ The result of correlation of `input` with `weights`.
+
+ See Also
+ --------
+ convolve : Convolve an image with a kernel.
+
+ Examples
+ --------
+    Correlation is the process of moving a filter mask, often referred to
+    as a kernel, over the image and computing the sum of products at each
+    location.
+
+ >>> from scipy.ndimage import correlate
+ >>> import numpy as np
+ >>> input_img = np.arange(25).reshape(5,5)
+ >>> print(input_img)
+ [[ 0 1 2 3 4]
+ [ 5 6 7 8 9]
+ [10 11 12 13 14]
+ [15 16 17 18 19]
+ [20 21 22 23 24]]
+
+    Define a kernel (weights) for correlation. In this example, it sums the
+    center element and its four immediate neighbors (up, down, left, and
+    right).
+
+ >>> weights = [[0, 1, 0],
+ ... [1, 1, 1],
+ ... [0, 1, 0]]
+
+    We can calculate a correlation result.
+ For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
+
+ >>> correlate(input_img, weights)
+ array([[ 6, 10, 15, 20, 24],
+ [ 26, 30, 35, 40, 44],
+ [ 51, 55, 60, 65, 69],
+ [ 76, 80, 85, 90, 94],
+ [ 96, 100, 105, 110, 114]])
+
+ """
+ return _correlate_or_convolve(input, weights, output, mode, cval,
+ origin, False)
+
+
+@_ni_docstrings.docfiller
+def convolve(input, weights, output=None, mode='reflect', cval=0.0,
+ origin=0):
+ """
+ Multidimensional convolution.
+
+ The array is convolved with the given kernel.
+
+ Parameters
+ ----------
+ %(input)s
+ weights : array_like
+ Array of weights, same number of dimensions as input
+ %(output)s
+ %(mode_reflect)s
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0
+ origin : int, optional
+ Controls the origin of the input signal, which is where the
+ filter is centered to produce the first element of the output.
+ Positive values shift the filter to the right, and negative values
+ shift the filter to the left. Default is 0.
+
+ Returns
+ -------
+ result : ndarray
+ The result of convolution of `input` with `weights`.
+
+ See Also
+ --------
+ correlate : Correlate an image with a kernel.
+
+ Notes
+ -----
+ Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
+ W is the `weights` kernel,
+ j is the N-D spatial index over :math:`W`,
+ I is the `input` and k is the coordinate of the center of
+ W, specified by `origin` in the input parameters.
+
+ Examples
+ --------
+ Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
+ because in this case borders (i.e., where the `weights` kernel, centered
+ on any one value, extends beyond an edge of `input`) are treated as zeros.
+
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
+ >>> from scipy import ndimage
+ >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
+ array([[11, 10, 7, 4],
+ [10, 3, 11, 11],
+ [15, 12, 14, 7],
+ [12, 3, 7, 0]])
+
+ Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
+ with 1.0's (and then extracting only the original region of the result).
+
+ >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
+ array([[13, 11, 8, 7],
+ [11, 3, 11, 14],
+ [16, 12, 14, 10],
+ [15, 6, 10, 5]])
+
+ With ``mode='reflect'`` (the default), outer values are reflected at the
+ edge of `input` to fill in missing values.
+
+ >>> b = np.array([[2, 0, 0],
+ ... [1, 0, 0],
+ ... [0, 0, 0]])
+ >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
+ >>> ndimage.convolve(b, k, mode='reflect')
+ array([[5, 0, 0],
+ [3, 0, 0],
+ [1, 0, 0]])
+
+ This includes diagonally at the corners.
+
+ >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
+ >>> ndimage.convolve(b, k)
+ array([[4, 2, 0],
+ [3, 2, 0],
+ [1, 1, 0]])
+
+    With ``mode='nearest'``, the single value nearest to an edge of
+    `input` is repeated as many times as needed to match the overlapping
+ `weights`.
+
+ >>> c = np.array([[2, 0, 1],
+ ... [1, 0, 0],
+ ... [0, 0, 0]])
+ >>> k = np.array([[0, 1, 0],
+ ... [0, 1, 0],
+ ... [0, 1, 0],
+ ... [0, 1, 0],
+ ... [0, 1, 0]])
+ >>> ndimage.convolve(c, k, mode='nearest')
+ array([[7, 0, 3],
+ [5, 0, 2],
+ [3, 0, 1]])
+
+ """
+ return _correlate_or_convolve(input, weights, output, mode, cval,
+ origin, True)
+
+
+@_ni_docstrings.docfiller
+def uniform_filter1d(input, size, axis=-1, output=None,
+ mode="reflect", cval=0.0, origin=0):
+ """Calculate a 1-D uniform filter along the given axis.
+
+ The lines of the array along the given axis are filtered with a
+ uniform filter of given size.
+
+ Parameters
+ ----------
+ %(input)s
+ size : int
+ length of uniform filter
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+
+ Returns
+ -------
+ result : ndarray
+ Filtered array. Has same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy.ndimage import uniform_filter1d
+ >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+ array([4, 3, 4, 1, 4, 6, 6, 3])
+ """
+ input = numpy.asarray(input)
+ axis = normalize_axis_index(axis, input.ndim)
+ if size < 1:
+ raise RuntimeError('incorrect filter size')
+ complex_output = input.dtype.kind == 'c'
+ output = _ni_support._get_output(output, input,
+ complex_output=complex_output)
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+ raise ValueError('invalid origin')
+ mode = _ni_support._extend_mode_to_code(mode)
+ if not complex_output:
+ _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
+ origin)
+ else:
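+        # the filter is linear, so the real and imaginary parts can be
+        # filtered independently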
+ _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
+ numpy.real(cval), origin)
+ _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
+ numpy.imag(cval), origin)
+ return output
+
+
+@_ni_docstrings.docfiller
+def uniform_filter(input, size=3, output=None, mode="reflect",
+ cval=0.0, origin=0, *, axes=None):
+ """Multidimensional uniform filter.
+
+ Parameters
+ ----------
+ %(input)s
+ size : int or sequence of ints, optional
+ The sizes of the uniform filter are given for each axis as a
+ sequence, or as a single number, in which case the size is
+ equal for all axes.
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes. When `axes` is
+ specified, any tuples used for `size`, `origin`, and/or `mode`
+ must match the length of `axes`. The ith entry in any of these tuples
+ corresponds to the ith entry in `axes`.
+
+ Returns
+ -------
+ uniform_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ The multidimensional filter is implemented as a sequence of
+ 1-D uniform filters. The intermediate arrays are stored
+ in the same data type as the output. Therefore, for output types
+ with a limited precision, the results may be imprecise because
+ intermediate results may be stored with insufficient precision.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.uniform_filter(ascent, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+ output = _ni_support._get_output(output, input,
+ complex_output=input.dtype.kind == 'c')
+ axes = _ni_support._check_axes(axes, input.ndim)
+ num_axes = len(axes)
+ sizes = _ni_support._normalize_sequence(size, num_axes)
+ origins = _ni_support._normalize_sequence(origin, num_axes)
+ modes = _ni_support._normalize_sequence(mode, num_axes)
+ axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
+ for ii in range(num_axes) if sizes[ii] > 1]
+ if len(axes) > 0:
+ for axis, size, origin, mode in axes:
+ uniform_filter1d(input, int(size), axis, output, mode,
+ cval, origin)
+ input = output
+ else:
+ output[...] = input[...]
+ return output
+
+
+@_ni_docstrings.docfiller
+def minimum_filter1d(input, size, axis=-1, output=None,
+ mode="reflect", cval=0.0, origin=0):
+ """Calculate a 1-D minimum filter along the given axis.
+
+ The lines of the array along the given axis are filtered with a
+ minimum filter of given size.
+
+ Parameters
+ ----------
+ %(input)s
+ size : int
+        Length along which to calculate the 1-D minimum.
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+
+ Returns
+ -------
+    result : ndarray
+ Filtered image. Has the same shape as `input`.
+
+ Notes
+ -----
+ This function implements the MINLIST algorithm [1]_, as described by
+ Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
+ the `input` length, regardless of filter size.
+
+ References
+ ----------
+ .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
+ .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
+
+
+ Examples
+ --------
+ >>> from scipy.ndimage import minimum_filter1d
+ >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+ array([2, 0, 0, 0, 1, 1, 0, 0])
+ """
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ axis = normalize_axis_index(axis, input.ndim)
+ if size < 1:
+ raise RuntimeError('incorrect filter size')
+ output = _ni_support._get_output(output, input)
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+ raise ValueError('invalid origin')
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
+ origin, 1)
+ return output
+
+
+@_ni_docstrings.docfiller
+def maximum_filter1d(input, size, axis=-1, output=None,
+ mode="reflect", cval=0.0, origin=0):
+ """Calculate a 1-D maximum filter along the given axis.
+
+ The lines of the array along the given axis are filtered with a
+ maximum filter of given size.
+
+ Parameters
+ ----------
+ %(input)s
+ size : int
+ Length along which to calculate the 1-D maximum.
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+
+ Returns
+ -------
+    maximum1d : ndarray
+        Maximum-filtered array with same shape as input.
+
+ Notes
+ -----
+ This function implements the MAXLIST algorithm [1]_, as described by
+ Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
+ the `input` length, regardless of filter size.
+
+ References
+ ----------
+ .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
+ .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
+
+ Examples
+ --------
+ >>> from scipy.ndimage import maximum_filter1d
+ >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+ array([8, 8, 8, 4, 9, 9, 9, 9])
+ """
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ axis = normalize_axis_index(axis, input.ndim)
+ if size < 1:
+ raise RuntimeError('incorrect filter size')
+ output = _ni_support._get_output(output, input)
+ if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+ raise ValueError('invalid origin')
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
+ origin, 0)
+ return output
+
+
+def _min_or_max_filter(input, size, footprint, structure, output, mode,
+ cval, origin, minimum, axes=None):
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=3)
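+    # Three cases: a plain `size` yields a separable box filter; a boolean
+    # `footprint` is separable only when it is all True (then it is just a
+    # box of shape footprint.shape); a grey-morphology `structure` always
+    # takes the non-separable path.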
+ if structure is None:
+ if footprint is None:
+ if size is None:
+ raise RuntimeError("no footprint provided")
+ separable = True
+ else:
+ footprint = numpy.asarray(footprint, dtype=bool)
+ if not footprint.any():
+ raise ValueError("All-zero footprint is not supported.")
+ if footprint.all():
+ size = footprint.shape
+ footprint = None
+ separable = True
+ else:
+ separable = False
+ else:
+ structure = numpy.asarray(structure, dtype=numpy.float64)
+ separable = False
+ if footprint is None:
+ footprint = numpy.ones(structure.shape, bool)
+ else:
+ footprint = numpy.asarray(footprint, dtype=bool)
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ output = _ni_support._get_output(output, input)
+ temp_needed = numpy.may_share_memory(input, output)
+ if temp_needed:
+ # input and output arrays cannot share memory
+ temp = output
+ output = _ni_support._get_output(output.dtype, input)
+ axes = _ni_support._check_axes(axes, input.ndim)
+ num_axes = len(axes)
+ if separable:
+ origins = _ni_support._normalize_sequence(origin, num_axes)
+ sizes = _ni_support._normalize_sequence(size, num_axes)
+ modes = _ni_support._normalize_sequence(mode, num_axes)
+ axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
+ for ii in range(len(axes)) if sizes[ii] > 1]
+ if minimum:
+ filter_ = minimum_filter1d
+ else:
+ filter_ = maximum_filter1d
+ if len(axes) > 0:
+ for axis, size, origin, mode in axes:
+ filter_(input, int(size), axis, output, mode, cval, origin)
+ input = output
+ else:
+ output[...] = input[...]
+ else:
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
+ if num_axes < input.ndim:
+ if footprint.ndim != num_axes:
+ raise RuntimeError("footprint array has incorrect shape")
+ footprint = numpy.expand_dims(
+ footprint,
+ tuple(ax for ax in range(input.ndim) if ax not in axes)
+ )
+ fshape = [ii for ii in footprint.shape if ii > 0]
+ if len(fshape) != input.ndim:
+ raise RuntimeError('footprint array has incorrect shape.')
+ for origin, lenf in zip(origins, fshape):
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+ raise ValueError('invalid origin')
+ if not footprint.flags.contiguous:
+ footprint = footprint.copy()
+ if structure is not None:
+ if len(structure.shape) != input.ndim:
+ raise RuntimeError('structure array has incorrect shape')
+ if num_axes != structure.ndim:
+ structure = numpy.expand_dims(
+ structure,
+ tuple(ax for ax in range(structure.ndim) if ax not in axes)
+ )
+ if not structure.flags.contiguous:
+ structure = structure.copy()
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
+ raise RuntimeError(
+ "A sequence of modes is not supported for non-separable "
+ "footprints")
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.min_or_max_filter(input, footprint, structure, output,
+ mode, cval, origins, minimum)
+ if temp_needed:
+ temp[...] = output
+ output = temp
+ return output
+
+
+@_ni_docstrings.docfiller
+def minimum_filter(input, size=None, footprint=None, output=None,
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
+ """Calculate a multidimensional minimum filter.
+
+ Parameters
+ ----------
+ %(input)s
+ %(size_foot)s
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes. When `axes` is
+ specified, any tuples used for `size`, `origin`, and/or `mode`
+ must match the length of `axes`. The ith entry in any of these tuples
+ corresponds to the ith entry in `axes`.
+
+ Returns
+ -------
+ minimum_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ A sequence of modes (one per axis) is only supported when the footprint is
+ separable. Otherwise, a single mode string must be provided.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.minimum_filter(ascent, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
+ cval, origin, 1, axes)
+
+
+@_ni_docstrings.docfiller
+def maximum_filter(input, size=None, footprint=None, output=None,
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
+ """Calculate a multidimensional maximum filter.
+
+ Parameters
+ ----------
+ %(input)s
+ %(size_foot)s
+ %(output)s
+ %(mode_multiple)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes. When `axes` is
+ specified, any tuples used for `size`, `origin`, and/or `mode`
+ must match the length of `axes`. The ith entry in any of these tuples
+ corresponds to the ith entry in `axes`.
+
+ Returns
+ -------
+ maximum_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ A sequence of modes (one per axis) is only supported when the footprint is
+ separable. Otherwise, a single mode string must be provided.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.maximum_filter(ascent, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ return _min_or_max_filter(input, size, footprint, None, output, mode,
+ cval, origin, 0, axes)
+
+
+@_ni_docstrings.docfiller
+def _rank_filter(input, rank, size=None, footprint=None, output=None,
+ mode="reflect", cval=0.0, origin=0, operation='rank',
+ axes=None):
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=3)
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ axes = _ni_support._check_axes(axes, input.ndim)
+ num_axes = len(axes)
+ origins = _ni_support._normalize_sequence(origin, num_axes)
+ if footprint is None:
+ if size is None:
+ raise RuntimeError("no footprint or filter size provided")
+ sizes = _ni_support._normalize_sequence(size, num_axes)
+ footprint = numpy.ones(sizes, dtype=bool)
+ else:
+ footprint = numpy.asarray(footprint, dtype=bool)
+ if num_axes < input.ndim:
+ # set origin = 0 for any axes not being filtered
+ origins_temp = [0,] * input.ndim
+ for o, ax in zip(origins, axes):
+ origins_temp[ax] = o
+ origins = origins_temp
+
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
+ # set mode = 'constant' for any axes not being filtered
+ modes = _ni_support._normalize_sequence(mode, num_axes)
+ modes_temp = ['constant'] * input.ndim
+ for m, ax in zip(modes, axes):
+ modes_temp[ax] = m
+ mode = modes_temp
+
+ # insert singleton dimension along any non-filtered axes
+ if footprint.ndim != num_axes:
+ raise RuntimeError("footprint array has incorrect shape")
+ footprint = numpy.expand_dims(
+ footprint,
+ tuple(ax for ax in range(input.ndim) if ax not in axes)
+ )
+ fshape = [ii for ii in footprint.shape if ii > 0]
+ if len(fshape) != input.ndim:
+ raise RuntimeError('footprint array has incorrect shape.')
+ for origin, lenf in zip(origins, fshape):
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+ raise ValueError('invalid origin')
+ if not footprint.flags.contiguous:
+ footprint = footprint.copy()
+ filter_size = numpy.where(footprint, 1, 0).sum()
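+    # map the requested operation onto a rank index within the sorted
+    # values covered by the footprint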
+ if operation == 'median':
+ rank = filter_size // 2
+ elif operation == 'percentile':
+ percentile = rank
+ if percentile < 0.0:
+ percentile += 100.0
+ if percentile < 0 or percentile > 100:
+ raise RuntimeError('invalid percentile')
+ if percentile == 100.0:
+ rank = filter_size - 1
+ else:
+ rank = int(float(filter_size) * percentile / 100.0)
+ if rank < 0:
+ rank += filter_size
+ if rank < 0 or rank >= filter_size:
+ raise RuntimeError('rank not within filter footprint size')
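+    # rank 0 is a minimum filter and rank filter_size - 1 a maximum filter;
+    # delegate those extremes to the faster specialized implementations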
+ if rank == 0:
+ return minimum_filter(input, None, footprint, output, mode, cval,
+ origins, axes=None)
+ elif rank == filter_size - 1:
+ return maximum_filter(input, None, footprint, output, mode, cval,
+ origins, axes=None)
+ else:
+ output = _ni_support._get_output(output, input)
+ temp_needed = numpy.may_share_memory(input, output)
+ if temp_needed:
+ # input and output arrays cannot share memory
+ temp = output
+ output = _ni_support._get_output(output.dtype, input)
+ if not isinstance(mode, str) and isinstance(mode, Iterable):
+ raise RuntimeError(
+ "A sequence of modes is not supported by non-separable rank "
+ "filters")
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
+ origins)
+ if temp_needed:
+ temp[...] = output
+ output = temp
+ return output
+
+
+@_ni_docstrings.docfiller
+def rank_filter(input, rank, size=None, footprint=None, output=None,
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
+ """Calculate a multidimensional rank filter.
+
+ Parameters
+ ----------
+ %(input)s
+ rank : int
+        The rank parameter may be less than zero, e.g., rank = -1
+        indicates the largest element.
+ %(size_foot)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes.
+
+ Returns
+ -------
+ rank_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
+ """
+ rank = operator.index(rank)
+ return _rank_filter(input, rank, size, footprint, output, mode, cval,
+ origin, 'rank', axes=axes)
+
+
+@_ni_docstrings.docfiller
+def median_filter(input, size=None, footprint=None, output=None,
+ mode="reflect", cval=0.0, origin=0, *, axes=None):
+ """
+ Calculate a multidimensional median filter.
+
+ Parameters
+ ----------
+ %(input)s
+ %(size_foot)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes.
+
+ Returns
+ -------
+ median_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ See Also
+ --------
+ scipy.signal.medfilt2d
+
+ Notes
+ -----
+ For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
+ the specialised function `scipy.signal.medfilt2d` may be faster. It is
+ however limited to constant mode with ``cval=0``.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.median_filter(ascent, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
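+
+ As a minimal numeric cross-check of the Notes above (a sketch assuming
+ ``mode='constant'`` with ``cval=0`` to match the zero padding used by
+ `scipy.signal.medfilt2d`):
+
+ >>> import numpy as np
+ >>> from scipy.signal import medfilt2d
+ >>> a = np.arange(25.).reshape(5, 5)
+ >>> bool((ndimage.median_filter(a, size=3, mode='constant', cval=0)
+ ...       == medfilt2d(a, kernel_size=3)).all())
+ True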
+ """
+ return _rank_filter(input, 0, size, footprint, output, mode, cval,
+ origin, 'median', axes=axes)
+
+
+@_ni_docstrings.docfiller
+def percentile_filter(input, percentile, size=None, footprint=None,
+ output=None, mode="reflect", cval=0.0, origin=0, *,
+ axes=None):
+ """Calculate a multidimensional percentile filter.
+
+ Parameters
+ ----------
+ %(input)s
+ percentile : scalar
+ The percentile parameter may be less than zero, e.g.,
+ percentile = -20 is equivalent to percentile = 80.
+ %(size_foot)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin_multiple)s
+ axes : tuple of int or None, optional
+ If None, `input` is filtered along all axes. Otherwise,
+ `input` is filtered along the specified axes.
+
+ Returns
+ -------
+ percentile_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure()
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result)
+ >>> plt.show()
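+
+ Internally the percentile is converted to a rank within the filter
+ footprint, so ``percentile=50`` with an odd window size reproduces
+ `median_filter` (a minimal sketch):
+
+ >>> import numpy as np
+ >>> x = np.array([3, 1, 4, 1, 5, 9, 2, 6])
+ >>> bool((ndimage.percentile_filter(x, 50, size=3)
+ ...       == ndimage.median_filter(x, size=3)).all())
+ True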
+ """
+ return _rank_filter(input, percentile, size, footprint, output, mode,
+ cval, origin, 'percentile', axes=axes)
+
+
+@_ni_docstrings.docfiller
+def generic_filter1d(input, function, filter_size, axis=-1,
+ output=None, mode="reflect", cval=0.0, origin=0,
+ extra_arguments=(), extra_keywords=None):
+ """Calculate a 1-D filter along the given axis.
+
+ `generic_filter1d` iterates over the lines of the array, calling the
+ given function at each line. The arguments passed to the function are
+ the input line and the output line. The input and output lines are 1-D
+ double arrays. The input line is extended appropriately according
+ to the filter size and origin. The output line must be modified
+ in-place with the result.
+
+ Parameters
+ ----------
+ %(input)s
+ function : {callable, scipy.LowLevelCallable}
+ Function to apply along given axis.
+ filter_size : scalar
+ Length of the filter.
+ %(axis)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin)s
+ %(extra_arguments)s
+ %(extra_keywords)s
+
+ Returns
+ -------
+ generic_filter1d : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ This function also accepts low-level callback functions with one of
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+ .. code:: c
+
+ int function(double *input_line, npy_intp input_length,
+ double *output_line, npy_intp output_length,
+ void *user_data)
+ int function(double *input_line, intptr_t input_length,
+ double *output_line, intptr_t output_length,
+ void *user_data)
+
+ The calling function iterates over the lines of the input and output
+ arrays, calling the callback function at each line. The current line
+ is extended according to the border conditions set by the calling
+ function, and the result is copied into the array that is passed
+ through ``input_line``. The length of the input line (after extension)
+ is passed through ``input_length``. The callback function should apply
+ the filter and store the result in the array passed through
+ ``output_line``. The length of the output line is passed through
+ ``output_length``. ``user_data`` is the data pointer provided
+ to `scipy.LowLevelCallable` as-is.
+
+ The callback function must return an integer error status: one on
+ success and zero if something went wrong. If an error occurs, you should
+ normally set the Python error status with an informative message
+ before returning, otherwise a default error message is set by the
+ calling function.
+
+ In addition, some other low-level function pointer specifications
+ are accepted, but these are for backward compatibility only and should
+ not be used in new code.
+
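+ Examples
+ --------
+ A plain Python callable works as well as a low-level callback. A small
+ illustrative sketch (``window_mean`` is a hypothetical helper, not part
+ of the library): it writes a 3-sample moving average into
+ ``output_line`` in-place and is checked against `uniform_filter1d`:
+
+ >>> import numpy as np
+ >>> from scipy import ndimage
+ >>> def window_mean(input_line, output_line):
+ ...     # hypothetical callable; the window length 3 must match the
+ ...     # filter_size passed below. input_line is already extended by
+ ...     # filter_size - 1 samples.
+ ...     for i in range(output_line.size):
+ ...         output_line[i] = input_line[i:i + 3].mean()
+ ...
+ >>> x = np.arange(8.0)
+ >>> bool(np.allclose(ndimage.generic_filter1d(x, window_mean, 3),
+ ...                  ndimage.uniform_filter1d(x, 3)))
+ True
+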
+ """
+ if extra_keywords is None:
+ extra_keywords = {}
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ output = _ni_support._get_output(output, input)
+ if filter_size < 1:
+ raise RuntimeError('invalid filter size')
+ axis = normalize_axis_index(axis, input.ndim)
+ if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
+ filter_size):
+ raise ValueError('invalid origin')
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.generic_filter1d(input, function, filter_size, axis, output,
+ mode, cval, origin, extra_arguments,
+ extra_keywords)
+ return output
+
+
+@_ni_docstrings.docfiller
+def generic_filter(input, function, size=None, footprint=None,
+ output=None, mode="reflect", cval=0.0, origin=0,
+ extra_arguments=(), extra_keywords=None):
+ """Calculate a multidimensional filter using the given function.
+
+ At each element the provided function is called. The input values
+ within the filter footprint at that element are passed to the function
+ as a 1-D array of double values.
+
+ Parameters
+ ----------
+ %(input)s
+ function : {callable, scipy.LowLevelCallable}
+ Function to apply at each element.
+ %(size_foot)s
+ %(output)s
+ %(mode_reflect)s
+ %(cval)s
+ %(origin_multiple)s
+ %(extra_arguments)s
+ %(extra_keywords)s
+
+ Returns
+ -------
+ generic_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ Notes
+ -----
+ This function also accepts low-level callback functions with one of
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+ .. code:: c
+
+ int callback(double *buffer, npy_intp filter_size,
+ double *return_value, void *user_data)
+ int callback(double *buffer, intptr_t filter_size,
+ double *return_value, void *user_data)
+
+ The calling function iterates over the elements of the input and
+ output arrays, calling the callback function at each element. The
+ elements within the footprint of the filter at the current element are
+ passed through the ``buffer`` parameter, and the number of elements
+ within the footprint through ``filter_size``. The calculated value is
+ returned in ``return_value``. ``user_data`` is the data pointer provided
+ to `scipy.LowLevelCallable` as-is.
+
+ The callback function must return an integer error status: one on
+ success and zero if something went wrong. If an error occurs, you should
+ normally set the Python error status with an informative message
+ before returning, otherwise a default error message is set by the
+ calling function.
+
+ In addition, some other low-level function pointer specifications
+ are accepted, but these are for backward compatibility only and should
+ not be used in new code.
+
+ Examples
+ --------
+ Import the necessary modules and load the example image used for
+ filtering.
+
+ >>> import numpy as np
+ >>> from scipy import datasets
+ >>> from scipy.ndimage import generic_filter
+ >>> import matplotlib.pyplot as plt
+ >>> ascent = datasets.ascent()
+
+ Compute a maximum filter with kernel size 10 by passing a simple NumPy
+ aggregation function as argument to `function`.
+
+ >>> maximum_filter_result = generic_filter(ascent, np.amax, [10, 10])
+
+ While a maximum filter could also be obtained directly using
+ `maximum_filter`, `generic_filter` allows a generic Python function or
+ `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
+ range between the maximum and minimum values as an example, for a kernel
+ size of 5.
+
+ >>> def custom_filter(image):
+ ... return np.amax(image) - np.amin(image)
+ >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])
+
+ Plot the original and filtered images.
+
+ >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> top, middle, bottom = axes
+ >>> for ax in axes:
+ ... ax.set_axis_off() # remove coordinate system
+ >>> top.imshow(ascent)
+ >>> top.set_title("Original image")
+ >>> middle.imshow(maximum_filter_result)
+ >>> middle.set_title("Maximum filter, Kernel: 10x10")
+ >>> bottom.imshow(custom_filter_result)
+ >>> bottom.set_title("Custom filter, Kernel: 5x5")
+ >>> fig.tight_layout()
+
+ """
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=2)
+ if extra_keywords is None:
+ extra_keywords = {}
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ origins = _ni_support._normalize_sequence(origin, input.ndim)
+ if footprint is None:
+ if size is None:
+ raise RuntimeError("no footprint or filter size provided")
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
+ footprint = numpy.ones(sizes, dtype=bool)
+ else:
+ footprint = numpy.asarray(footprint, dtype=bool)
+ fshape = [ii for ii in footprint.shape if ii > 0]
+ if len(fshape) != input.ndim:
+ raise RuntimeError('filter footprint array has incorrect shape.')
+ for origin, lenf in zip(origins, fshape):
+ if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+ raise ValueError('invalid origin')
+ if not footprint.flags.contiguous:
+ footprint = footprint.copy()
+ output = _ni_support._get_output(output, input)
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.generic_filter(input, function, footprint, output, mode,
+ cval, origins, extra_arguments, extra_keywords)
+ return output
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_fourier.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_fourier.py
new file mode 100644
index 0000000000000000000000000000000000000000..8966dd6d9a94341f3b68561d8f6d7f8e73e074e8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_fourier.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy
+from scipy._lib._util import normalize_axis_index
+from . import _ni_support
+from . import _nd_image
+
+__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
+ 'fourier_shift']
+
+
+def _get_output_fourier(output, input):
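+ # Note: the dtype policy below preserves the input's precision when no
+ # output is given: complex inputs stay complex, float32 stays single
+ # precision, and everything else is promoted to float64.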
+ if output is None:
+ if input.dtype.type in [numpy.complex64, numpy.complex128,
+ numpy.float32]:
+ output = numpy.zeros(input.shape, dtype=input.dtype)
+ else:
+ output = numpy.zeros(input.shape, dtype=numpy.float64)
+ elif type(output) is type:
+ if output not in [numpy.complex64, numpy.complex128,
+ numpy.float32, numpy.float64]:
+ raise RuntimeError("output type not supported")
+ output = numpy.zeros(input.shape, dtype=output)
+ elif output.shape != input.shape:
+ raise RuntimeError("output shape not correct")
+ return output
+
+
+def _get_output_fourier_complex(output, input):
+ if output is None:
+ if input.dtype.type in [numpy.complex64, numpy.complex128]:
+ output = numpy.zeros(input.shape, dtype=input.dtype)
+ else:
+ output = numpy.zeros(input.shape, dtype=numpy.complex128)
+ elif type(output) is type:
+ if output not in [numpy.complex64, numpy.complex128]:
+ raise RuntimeError("output type not supported")
+ output = numpy.zeros(input.shape, dtype=output)
+ elif output.shape != input.shape:
+ raise RuntimeError("output shape not correct")
+ return output
+
+
+def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
+ """
+ Multidimensional Gaussian Fourier filter.
+
+ The array is multiplied with the Fourier transform of a Gaussian
+ kernel.
+
+ Parameters
+ ----------
+ input : array_like
+ The input array.
+ sigma : float or sequence
+ The sigma of the Gaussian kernel. If a float, `sigma` is the same for
+ all axes. If a sequence, `sigma` has to contain one value for each
+ axis.
+ n : int, optional
+ If `n` is negative (default), then the input is assumed to be the
+ result of a complex FFT.
+ If `n` is larger than or equal to zero, the input is assumed to be the
+ result of a real FFT, and `n` gives the length of the array before
+ transformation along the real transform direction.
+ axis : int, optional
+ The axis of the real transform.
+ output : ndarray, optional
+ If given, the result of filtering the input is placed in this array.
+
+ Returns
+ -------
+ fourier_gaussian : ndarray
+ The filtered input.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import numpy.fft
+ >>> import matplotlib.pyplot as plt
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ascent = datasets.ascent()
+ >>> input_ = numpy.fft.fft2(ascent)
+ >>> result = ndimage.fourier_gaussian(input_, sigma=4)
+ >>> result = numpy.fft.ifft2(result)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+ output = _get_output_fourier(output, input)
+ axis = normalize_axis_index(axis, input.ndim)
+ sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
+ sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
+ if not sigmas.flags.contiguous:
+ sigmas = sigmas.copy()
+
+ _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
+ return output
+
+
+def fourier_uniform(input, size, n=-1, axis=-1, output=None):
+ """
+ Multidimensional uniform Fourier filter.
+
+ The array is multiplied with the Fourier transform of a box of given
+ size.
+
+ Parameters
+ ----------
+ input : array_like
+ The input array.
+ size : float or sequence
+ The size of the box used for filtering.
+ If a float, `size` is the same for all axes. If a sequence, `size` has
+ to contain one value for each axis.
+ n : int, optional
+ If `n` is negative (default), then the input is assumed to be the
+ result of a complex FFT.
+ If `n` is larger than or equal to zero, the input is assumed to be the
+ result of a real FFT, and `n` gives the length of the array before
+ transformation along the real transform direction.
+ axis : int, optional
+ The axis of the real transform.
+ output : ndarray, optional
+ If given, the result of filtering the input is placed in this array.
+
+ Returns
+ -------
+ fourier_uniform : ndarray
+ The filtered input.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import numpy.fft
+ >>> import matplotlib.pyplot as plt
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ascent = datasets.ascent()
+ >>> input_ = numpy.fft.fft2(ascent)
+ >>> result = ndimage.fourier_uniform(input_, size=20)
+ >>> result = numpy.fft.ifft2(result)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+ output = _get_output_fourier(output, input)
+ axis = normalize_axis_index(axis, input.ndim)
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
+ sizes = numpy.asarray(sizes, dtype=numpy.float64)
+ if not sizes.flags.contiguous:
+ sizes = sizes.copy()
+ _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
+ return output
+
+
+def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
+ """
+ Multidimensional ellipsoid Fourier filter.
+
+ The array is multiplied with the Fourier transform of an ellipsoid of
+ the given size.
+
+ Parameters
+ ----------
+ input : array_like
+ The input array.
+ size : float or sequence
+ The size of the ellipsoid used for filtering.
+ If a float, `size` is the same for all axes. If a sequence, `size` has
+ to contain one value for each axis.
+ n : int, optional
+ If `n` is negative (default), then the input is assumed to be the
+ result of a complex FFT.
+ If `n` is larger than or equal to zero, the input is assumed to be the
+ result of a real FFT, and `n` gives the length of the array before
+ transformation along the real transform direction.
+ axis : int, optional
+ The axis of the real transform.
+ output : ndarray, optional
+ If given, the result of filtering the input is placed in this array.
+
+ Returns
+ -------
+ fourier_ellipsoid : ndarray
+ The filtered input.
+
+ Notes
+ -----
+ This function is implemented for arrays of rank 1, 2, or 3.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import numpy.fft
+ >>> import matplotlib.pyplot as plt
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ascent = datasets.ascent()
+ >>> input_ = numpy.fft.fft2(ascent)
+ >>> result = ndimage.fourier_ellipsoid(input_, size=20)
+ >>> result = numpy.fft.ifft2(result)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
+ >>> plt.show()
+ """
+ input = numpy.asarray(input)
+ if input.ndim > 3:
+ raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")
+ output = _get_output_fourier(output, input)
+ if output.size == 0:
+ # The C code has a bug that can result in a segfault with arrays
+ # that have size 0 (gh-17270), so check here.
+ return output
+ axis = normalize_axis_index(axis, input.ndim)
+ sizes = _ni_support._normalize_sequence(size, input.ndim)
+ sizes = numpy.asarray(sizes, dtype=numpy.float64)
+ if not sizes.flags.contiguous:
+ sizes = sizes.copy()
+ _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
+ return output
+
+
+def fourier_shift(input, shift, n=-1, axis=-1, output=None):
+ """
+ Multidimensional Fourier shift filter.
+
+ The array is multiplied with the Fourier transform of a shift operation.
+
+ Parameters
+ ----------
+ input : array_like
+ The input array.
+ shift : float or sequence
+ The shift along the axes.
+ If a float, `shift` is the same for all axes. If a sequence, `shift`
+ has to contain one value for each axis.
+ n : int, optional
+ If `n` is negative (default), then the input is assumed to be the
+ result of a complex FFT.
+ If `n` is larger than or equal to zero, the input is assumed to be the
+ result of a real FFT, and `n` gives the length of the array before
+ transformation along the real transform direction.
+ axis : int, optional
+ The axis of the real transform.
+ output : ndarray, optional
+ If given, the result of shifting the input is placed in this array.
+
+ Returns
+ -------
+ fourier_shift : ndarray
+ The shifted input.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy.fft
+ >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> ascent = datasets.ascent()
+ >>> input_ = numpy.fft.fft2(ascent)
+ >>> result = ndimage.fourier_shift(input_, shift=200)
+ >>> result = numpy.fft.ifft2(result)
+ >>> ax1.imshow(ascent)
+ >>> ax2.imshow(result.real) # the imaginary part is an artifact
+ >>> plt.show()
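+
+ For input produced by a real FFT, pass the original length of the real
+ transform axis via ``n``. A minimal sketch on a synthetic 8x8 impulse
+ (an integer shift amounts to a circular roll):
+
+ >>> a = numpy.zeros((8, 8))
+ >>> a[4, 4] = 1.0
+ >>> spec = numpy.fft.rfft2(a)
+ >>> out = ndimage.fourier_shift(spec, shift=(1, 1), n=8)
+ >>> back = numpy.fft.irfft2(out, a.shape)
+ >>> bool(numpy.allclose(back, numpy.roll(a, (1, 1), axis=(0, 1))))
+ True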
+ """
+ input = numpy.asarray(input)
+ output = _get_output_fourier_complex(output, input)
+ axis = normalize_axis_index(axis, input.ndim)
+ shifts = _ni_support._normalize_sequence(shift, input.ndim)
+ shifts = numpy.asarray(shifts, dtype=numpy.float64)
+ if not shifts.flags.contiguous:
+ shifts = shifts.copy()
+ _nd_image.fourier_shift(input, shifts, n, axis, output)
+ return output
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..b87e32ef60215b8ec7b189bc5e7a579a4095b1f9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py
@@ -0,0 +1,1010 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import itertools
+import warnings
+
+import numpy
+from scipy._lib._util import normalize_axis_index
+
+from scipy import special
+from . import _ni_support
+from . import _nd_image
+from ._ni_docstrings import docfiller
+
+
+__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
+ 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
+
+
+@docfiller
+def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
+ mode='mirror'):
+ """
+ Calculate a 1-D spline filter along the given axis.
+
+ The lines of the array along the given axis are filtered by a
+ spline filter. The order of the spline must be >= 0 and <= 5;
+ for orders 0 and 1 the input is returned unchanged.
+
+ Parameters
+ ----------
+ %(input)s
+ order : int, optional
+ The order of the spline, default is 3.
+ axis : int, optional
+ The axis along which the spline filter is applied. Default is the last
+ axis.
+ output : ndarray or dtype, optional
+ The array in which to place the output, or the dtype of the returned
+ array. Default is ``numpy.float64``.
+ %(mode_interp_mirror)s
+
+ Returns
+ -------
+ spline_filter1d : ndarray
+ The filtered input.
+
+ See Also
+ --------
+ spline_filter : Multidimensional spline filter.
+
+ Notes
+ -----
+ All of the interpolation functions in `ndimage` do spline interpolation of
+ the input image. If using B-splines of `order > 1`, the input image
+ values have to be converted to B-spline coefficients first, which is
+ done by applying this 1-D filter sequentially along all
+ axes of the input. All functions that require B-spline coefficients
+ will automatically filter their inputs, a behavior controllable with
+ the `prefilter` keyword argument. For functions that accept a `mode`
+ parameter, the result will only be correct if it matches the `mode`
+ used when filtering.
+
+ For complex-valued `input`, this function processes the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ We can filter an image using a 1-D spline along the given axis:
+
+ >>> from scipy.ndimage import spline_filter1d
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> orig_img = np.eye(20) # create an image
+ >>> orig_img[10, :] = 1.0
+ >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
+ >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
+ >>> f, ax = plt.subplots(1, 3, sharex=True)
+ >>> for ind, data in enumerate([[orig_img, "original image"],
+ ... [sp_filter_axis_0, "spline filter (axis=0)"],
+ ... [sp_filter_axis_1, "spline filter (axis=1)"]]):
+ ... ax[ind].imshow(data[0], cmap='gray_r')
+ ... ax[ind].set_title(data[1])
+ >>> plt.tight_layout()
+ >>> plt.show()
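+
+ The prefiltering relationship described in the Notes can be made
+ explicit (a small sketch, assuming matching ``mirror`` modes
+ throughout): interpolating precomputed coefficients with
+ ``prefilter=False`` matches interpolating the raw image directly.
+
+ >>> from scipy import ndimage
+ >>> coeffs = spline_filter1d(spline_filter1d(orig_img, axis=0), axis=1)
+ >>> shifted = ndimage.shift(orig_img, (0.5, 0.5), mode='mirror')
+ >>> prefiltered = ndimage.shift(coeffs, (0.5, 0.5), mode='mirror',
+ ...                             prefilter=False)
+ >>> bool(np.allclose(shifted, prefiltered))
+ True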
+
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input,
+ complex_output=complex_output)
+ if complex_output:
+ spline_filter1d(input.real, order, axis, output.real, mode)
+ spline_filter1d(input.imag, order, axis, output.imag, mode)
+ return output
+ if order in [0, 1]:
+ output[...] = numpy.array(input)
+ else:
+ mode = _ni_support._extend_mode_to_code(mode)
+ axis = normalize_axis_index(axis, input.ndim)
+ _nd_image.spline_filter1d(input, order, axis, output, mode)
+ return output
+
+@docfiller
+def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
+ """
+ Multidimensional spline filter.
+
+ Parameters
+ ----------
+ %(input)s
+ order : int, optional
+ The order of the spline, default is 3.
+ output : ndarray or dtype, optional
+ The array in which to place the output, or the dtype of the returned
+ array. Default is ``numpy.float64``.
+ %(mode_interp_mirror)s
+
+ Returns
+ -------
+ spline_filter : ndarray
+ Filtered array. Has the same shape as `input`.
+
+ See Also
+ --------
+ spline_filter1d : Calculate a 1-D spline filter along the given axis.
+
+ Notes
+ -----
+ The multidimensional filter is implemented as a sequence of
+ 1-D spline filters. The intermediate arrays are stored
+ in the same data type as the output. Therefore, for output types
+ with a limited precision, the results may be imprecise because
+ intermediate results may be stored with insufficient precision.
+
+ For complex-valued `input`, this function processes the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ We can filter an image using multidimensional splines:
+
+ >>> from scipy.ndimage import spline_filter
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> orig_img = np.eye(20) # create an image
+ >>> orig_img[10, :] = 1.0
+ >>> sp_filter = spline_filter(orig_img, order=3)
+ >>> f, ax = plt.subplots(1, 2, sharex=True)
+ >>> for ind, data in enumerate([[orig_img, "original image"],
+ ... [sp_filter, "spline filter"]]):
+ ... ax[ind].imshow(data[0], cmap='gray_r')
+ ... ax[ind].set_title(data[1])
+ >>> plt.tight_layout()
+ >>> plt.show()
+
+ """
+ if order < 2 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input,
+ complex_output=complex_output)
+ if complex_output:
+ spline_filter(input.real, order, output.real, mode)
+ spline_filter(input.imag, order, output.imag, mode)
+ return output
+ if order not in [0, 1] and input.ndim > 0:
+ for axis in range(input.ndim):
+ spline_filter1d(input, order, axis, output=output, mode=mode)
+ input = output
+ else:
+ output[...] = input[...]
+ return output
+
+
+def _prepad_for_spline_filter(input, mode, cval):
+ if mode in ['nearest', 'grid-constant']:
+ npad = 12
+ if mode == 'grid-constant':
+ padded = numpy.pad(input, npad, mode='constant',
+ constant_values=cval)
+ elif mode == 'nearest':
+ padded = numpy.pad(input, npad, mode='edge')
+ else:
+ # other modes have exact boundary conditions implemented so
+ # no prepadding is needed
+ npad = 0
+ padded = input
+ return padded, npad
+
+
+@docfiller
+def geometric_transform(input, mapping, output_shape=None,
+ output=None, order=3,
+ mode='constant', cval=0.0, prefilter=True,
+ extra_arguments=(), extra_keywords={}):
+ """
+ Apply an arbitrary geometric transform.
+
+ The given mapping function is used to find, for each point in the
+ output, the corresponding coordinates in the input. The value of the
+ input at those coordinates is determined by spline interpolation of
+ the requested order.
+
+ Parameters
+ ----------
+ %(input)s
+ mapping : {callable, scipy.LowLevelCallable}
+ A callable object that accepts a tuple of length equal to the output
+ array rank, and returns the corresponding input coordinates as a tuple
+ of length equal to the input array rank.
+ output_shape : tuple of ints, optional
+ Shape tuple.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+ extra_arguments : tuple, optional
+ Extra arguments passed to `mapping`.
+ extra_keywords : dict, optional
+ Extra keywords passed to `mapping`.
+
+ Returns
+ -------
+ output : ndarray
+ The filtered input.
+
+ See Also
+ --------
+ map_coordinates, affine_transform, spline_filter1d
+
+
+ Notes
+ -----
+ This function also accepts low-level callback functions with one of
+ the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+ .. code:: c
+
+ int mapping(npy_intp *output_coordinates, double *input_coordinates,
+ int output_rank, int input_rank, void *user_data)
+ int mapping(intptr_t *output_coordinates, double *input_coordinates,
+ int output_rank, int input_rank, void *user_data)
+
+ The calling function iterates over the elements of the output array,
+ calling the callback function at each element. The coordinates of the
+ current output element are passed through ``output_coordinates``. The
+ callback function must return the coordinates at which the input must
+ be interpolated in ``input_coordinates``. The ranks of the input and
+ output arrays are given by ``input_rank`` and ``output_rank``,
+ respectively.
+ to `scipy.LowLevelCallable` as-is.
+
+ The callback function must return an integer error status: one on
+ success and zero if something went wrong. If an error occurs, you should
+ normally set the Python error status with an informative message
+ before returning, otherwise a default error message is set by the
+ calling function.
+
+ In addition, some other low-level function pointer specifications
+ are accepted, but these are for backward compatibility only and should
+ not be used in new code.
+
+ For complex-valued `input`, this function transforms the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy.ndimage import geometric_transform
+ >>> a = np.arange(12.).reshape((4, 3))
+ >>> def shift_func(output_coords):
+ ... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
+ ...
+ >>> geometric_transform(a, shift_func)
+ array([[ 0. , 0. , 0. ],
+ [ 0. , 1.362, 2.738],
+ [ 0. , 4.812, 6.187],
+ [ 0. , 8.263, 9.637]])
+
+ >>> b = [1, 2, 3, 4, 5]
+ >>> def shift_func(output_coords):
+ ... return (output_coords[0] - 3,)
+ ...
+ >>> geometric_transform(b, shift_func, mode='constant')
+ array([0, 0, 0, 1, 2])
+ >>> geometric_transform(b, shift_func, mode='nearest')
+ array([1, 1, 1, 1, 2])
+ >>> geometric_transform(b, shift_func, mode='reflect')
+ array([3, 2, 1, 1, 2])
+ >>> geometric_transform(b, shift_func, mode='wrap')
+ array([2, 3, 4, 1, 2])
+
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ if output_shape is None:
+ output_shape = input.shape
+ if input.ndim < 1 or len(output_shape) < 1:
+ raise RuntimeError('input and output rank must be > 0')
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input, shape=output_shape,
+ complex_output=complex_output)
+ if complex_output:
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter,
+ output_shape=output_shape,
+ extra_arguments=extra_arguments,
+ extra_keywords=extra_keywords)
+ geometric_transform(input.real, mapping, output=output.real,
+ cval=numpy.real(cval), **kwargs)
+ geometric_transform(input.imag, mapping, output=output.imag,
+ cval=numpy.imag(cval), **kwargs)
+ return output
+
+ if prefilter and order > 1:
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
+ filtered = spline_filter(padded, order, output=numpy.float64,
+ mode=mode)
+ else:
+ npad = 0
+ filtered = input
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
+ order, mode, cval, npad, extra_arguments,
+ extra_keywords)
+ return output
+
+
+@docfiller
+def map_coordinates(input, coordinates, output=None, order=3,
+ mode='constant', cval=0.0, prefilter=True):
+ """
+ Map the input array to new coordinates by interpolation.
+
+ The array of coordinates is used to find, for each point in the output,
+ the corresponding coordinates in the input. The value of the input at
+ those coordinates is determined by spline interpolation of the
+ requested order.
+
+ The shape of the output is derived from that of the coordinate
+ array by dropping the first axis. The values of the array along
+ the first axis are the coordinates in the input array at which the
+ output value is found.
+
+ Parameters
+ ----------
+ %(input)s
+ coordinates : array_like
+ The coordinates at which `input` is evaluated.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+
+ Returns
+ -------
+ map_coordinates : ndarray
+ The result of transforming the input. The shape of the output is
+ derived from that of `coordinates` by dropping the first axis.
+
+ See Also
+ --------
+ spline_filter, geometric_transform, scipy.interpolate
+
+ Notes
+ -----
+ For complex-valued `input`, this function maps the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.arange(12.).reshape((4, 3))
+ >>> a
+ array([[ 0., 1., 2.],
+ [ 3., 4., 5.],
+ [ 6., 7., 8.],
+ [ 9., 10., 11.]])
+ >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
+ array([ 2., 7.])
+
+ Above, the interpolated value of a[0.5, 0.5] gives output[0], while
+ a[2, 1] is output[1].
+
+ >>> inds = np.array([[0.5, 2], [0.5, 4]])
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
+ array([ 2. , -33.3])
+ >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
+ array([ 2., 8.])
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
+ array([ True, False])
+
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ coordinates = numpy.asarray(coordinates)
+ if numpy.iscomplexobj(coordinates):
+ raise TypeError('Complex type not supported')
+ output_shape = coordinates.shape[1:]
+ if input.ndim < 1 or len(output_shape) < 1:
+ raise RuntimeError('input and output rank must be > 0')
+ if coordinates.shape[0] != input.ndim:
+ raise RuntimeError('invalid shape for coordinate array')
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input, shape=output_shape,
+ complex_output=complex_output)
+ if complex_output:
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+ map_coordinates(input.real, coordinates, output=output.real,
+ cval=numpy.real(cval), **kwargs)
+ map_coordinates(input.imag, coordinates, output=output.imag,
+ cval=numpy.imag(cval), **kwargs)
+ return output
+ if prefilter and order > 1:
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
+ filtered = spline_filter(padded, order, output=numpy.float64,
+ mode=mode)
+ else:
+ npad = 0
+ filtered = input
+ mode = _ni_support._extend_mode_to_code(mode)
+ _nd_image.geometric_transform(filtered, None, coordinates, None, None,
+ output, order, mode, cval, npad, None, None)
+ return output
+
+
+@docfiller
+def affine_transform(input, matrix, offset=0.0, output_shape=None,
+ output=None, order=3,
+ mode='constant', cval=0.0, prefilter=True):
+ """
+ Apply an affine transformation.
+
+ Given an output image pixel index vector ``o``, the pixel value
+ is determined from the input image at position
+ ``np.dot(matrix, o) + offset``.
+
+ This does 'pull' (or 'backward') resampling, transforming the output space
+ to the input to locate data. Affine transformations are often described in
+ the 'push' (or 'forward') direction, transforming input to output. If you
+ have a matrix for the 'push' transformation, use its inverse
+ (:func:`numpy.linalg.inv`) in this function.
+
+ Parameters
+ ----------
+ %(input)s
+ matrix : ndarray
+ The inverse coordinate transformation matrix, mapping output
+ coordinates to input coordinates. If ``ndim`` is the number of
+ dimensions of ``input``, the given matrix must have one of the
+ following shapes:
+
+ - ``(ndim, ndim)``: the linear transformation matrix for each
+ output coordinate.
+ - ``(ndim,)``: assume that the 2-D transformation matrix is
+ diagonal, with the diagonal specified by the given value. A more
+ efficient algorithm is then used that exploits the separability
+ of the problem.
+ - ``(ndim + 1, ndim + 1)``: assume that the transformation is
+ specified using homogeneous coordinates [1]_. In this case, any
+ value passed to ``offset`` is ignored.
+ - ``(ndim, ndim + 1)``: as above, but the bottom row of a
+ homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
+ and may be omitted.
+
+ offset : float or sequence, optional
+ The offset into the array where the transform is applied. If a float,
+ `offset` is the same for each axis. If a sequence, `offset` should
+ contain one value for each axis.
+ output_shape : tuple of ints, optional
+ Shape tuple.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+
+ Returns
+ -------
+ affine_transform : ndarray
+ The transformed input.
+
+ Notes
+ -----
+ The given matrix and offset are used to find for each point in the
+ output the corresponding coordinates in the input by an affine
+ transformation. The value of the input at those coordinates is
+ determined by spline interpolation of the requested order. Points
+ outside the boundaries of the input are filled according to the given
+ mode.
+
+ .. versionchanged:: 0.18.0
+ Previously, the exact interpretation of the affine transformation
+ depended on whether the matrix was supplied as a 1-D or a
+ 2-D array. If a 1-D array was supplied
+ to the matrix parameter, the output pixel value at index ``o``
+ was determined from the input image at position
+ ``matrix * (o + offset)``.
+
+ For complex-valued `input`, this function transforms the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
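+
+ Examples
+ --------
+ A small sketch (hypothetical identity-plus-offset matrices, chosen only
+ for illustration) showing that the homogeneous form carries the linear
+ part and the offset in a single matrix:
+
+ >>> import numpy as np
+ >>> from scipy import ndimage
+ >>> a = np.arange(12.).reshape((4, 3))
+ >>> linear = np.eye(2)
+ >>> offset = [0.5, 0.5]
+ >>> homogeneous = np.eye(3)
+ >>> homogeneous[:2, 2] = offset  # offset lives in the last column
+ >>> out1 = ndimage.affine_transform(a, linear, offset, order=1)
+ >>> out2 = ndimage.affine_transform(a, homogeneous, order=1)
+ >>> bool(np.allclose(out1, out2))
+ True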
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ if output_shape is None:
+ if isinstance(output, numpy.ndarray):
+ output_shape = output.shape
+ else:
+ output_shape = input.shape
+ if input.ndim < 1 or len(output_shape) < 1:
+ raise RuntimeError('input and output rank must be > 0')
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input, shape=output_shape,
+ complex_output=complex_output)
+ if complex_output:
+ kwargs = dict(offset=offset, output_shape=output_shape, order=order,
+ mode=mode, prefilter=prefilter)
+ affine_transform(input.real, matrix, output=output.real,
+ cval=numpy.real(cval), **kwargs)
+ affine_transform(input.imag, matrix, output=output.imag,
+ cval=numpy.imag(cval), **kwargs)
+ return output
+ if prefilter and order > 1:
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
+ filtered = spline_filter(padded, order, output=numpy.float64,
+ mode=mode)
+ else:
+ npad = 0
+ filtered = input
+ mode = _ni_support._extend_mode_to_code(mode)
+ matrix = numpy.asarray(matrix, dtype=numpy.float64)
+ if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
+ raise RuntimeError('no proper affine matrix provided')
+ if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
+ (matrix.shape[0] in [input.ndim, input.ndim + 1])):
+ if matrix.shape[0] == input.ndim + 1:
+ exptd = [0] * input.ndim + [1]
+ if not numpy.all(matrix[input.ndim] == exptd):
+ msg = ('Expected homogeneous transformation matrix with '
+ 'shape {} for image shape {}, but bottom row was '
+ 'not equal to {}'.format(matrix.shape, input.shape, exptd))
+ raise ValueError(msg)
+ # assume input is homogeneous coordinate transformation matrix
+ offset = matrix[:input.ndim, input.ndim]
+ matrix = matrix[:input.ndim, :input.ndim]
+ if matrix.shape[0] != input.ndim:
+ raise RuntimeError('affine matrix has wrong number of rows')
+ if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
+ raise RuntimeError('affine matrix has wrong number of columns')
+ if not matrix.flags.contiguous:
+ matrix = matrix.copy()
+ offset = _ni_support._normalize_sequence(offset, input.ndim)
+ offset = numpy.asarray(offset, dtype=numpy.float64)
+ if offset.ndim != 1 or offset.shape[0] < 1:
+ raise RuntimeError('no proper offset provided')
+ if not offset.flags.contiguous:
+ offset = offset.copy()
+ if matrix.ndim == 1:
+ warnings.warn(
+ "The behavior of affine_transform with a 1-D "
+ "array supplied for the matrix parameter has changed in "
+ "SciPy 0.18.0.",
+ stacklevel=2
+ )
+ _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
+ mode, cval, npad, False)
+ else:
+ _nd_image.geometric_transform(filtered, None, None, matrix, offset,
+ output, order, mode, cval, npad, None,
+ None)
+ return output
+
+
+@docfiller
+def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
+ prefilter=True):
+ """
+ Shift an array.
+
+ The array is shifted using spline interpolation of the requested order.
+ Points outside the boundaries of the input are filled according to the
+ given mode.
+
+ Parameters
+ ----------
+ %(input)s
+ shift : float or sequence
+ The shift along the axes. If a float, `shift` is the same for each
+ axis. If a sequence, `shift` should contain one value for each axis.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+
+ Returns
+ -------
+ shift : ndarray
+ The shifted input.
+
+ See Also
+ --------
+ affine_transform : Affine transformations
+
+ Notes
+ -----
+ For complex-valued `input`, this function shifts the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ Import the necessary modules and load an example image.
+
+ >>> from scipy.ndimage import shift
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import datasets
+ >>> image = datasets.ascent()
+
+ Shift the image vertically by 20 pixels.
+
+ >>> image_shifted_vertically = shift(image, (20, 0))
+
+ Shift the image vertically by -200 pixels and horizontally by 100 pixels.
+
+ >>> image_shifted_both_directions = shift(image, (-200, 100))
+
+ Plot the original and the shifted images.
+
+ >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
+ >>> plt.gray() # show the filtered result in grayscale
+ >>> top, middle, bottom = axes
+ >>> for ax in axes:
+ ... ax.set_axis_off() # remove coordinate system
+ >>> top.imshow(image)
+ >>> top.set_title("Original image")
+ >>> middle.imshow(image_shifted_vertically)
+ >>> middle.set_title("Vertically shifted image")
+ >>> bottom.imshow(image_shifted_both_directions)
+ >>> bottom.set_title("Image shifted in both directions")
+ >>> fig.tight_layout()
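+
+ A subpixel shift interpolates between neighbors. A hand-checkable
+ sketch with linear interpolation (``order=1``) and ``mode='nearest'``:
+
+ >>> import numpy as np
+ >>> bool(np.allclose(shift(np.arange(5.), 0.5, order=1, mode='nearest'),
+ ...                  [0.0, 0.5, 1.5, 2.5, 3.5]))
+ True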
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ if input.ndim < 1:
+ raise RuntimeError('input and output rank must be > 0')
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input,
+ complex_output=complex_output)
+ if complex_output:
+ # import under different name to avoid confusion with shift parameter
+ from scipy.ndimage._interpolation import shift as _shift
+
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+ _shift(input.real, shift, output=output.real, cval=numpy.real(cval),
+ **kwargs)
+ _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
+ **kwargs)
+ return output
+ if prefilter and order > 1:
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
+ filtered = spline_filter(padded, order, output=numpy.float64,
+ mode=mode)
+ else:
+ npad = 0
+ filtered = input
+ mode = _ni_support._extend_mode_to_code(mode)
+ shift = _ni_support._normalize_sequence(shift, input.ndim)
+ shift = [-ii for ii in shift]
+ shift = numpy.asarray(shift, dtype=numpy.float64)
+ if not shift.flags.contiguous:
+ shift = shift.copy()
+ _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
+ npad, False)
+ return output
+
+
+@docfiller
+def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
+ prefilter=True, *, grid_mode=False):
+ """
+ Zoom an array.
+
+ The array is zoomed using spline interpolation of the requested order.
+
+ Parameters
+ ----------
+ %(input)s
+ zoom : float or sequence
+ The zoom factor along the axes. If a float, `zoom` is the same for each
+ axis. If a sequence, `zoom` should contain one value for each axis.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+ grid_mode : bool, optional
+ If False, the distance from the pixel centers is zoomed. Otherwise, the
+ distance including the full pixel extent is used. For example, a 1-D
+ signal of length 5 is considered to have length 4 when `grid_mode` is
+ False, but length 5 when `grid_mode` is True. See the following
+ visual illustration:
+
+ .. code-block:: text
+
+ | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
+ |<-------------------------------------->|
+ vs.
+ |<----------------------------------------------->|
+
+ The starting point of the arrow in the diagram above corresponds to
+ coordinate location 0 in each mode.
+
+ Returns
+ -------
+ zoom : ndarray
+ The zoomed input.
+
+ Notes
+ -----
+ For complex-valued `input`, this function zooms the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+
+ >>> fig = plt.figure()
+ >>> ax1 = fig.add_subplot(121) # left side
+ >>> ax2 = fig.add_subplot(122) # right side
+ >>> ascent = datasets.ascent()
+ >>> result = ndimage.zoom(ascent, 3.0)
+ >>> ax1.imshow(ascent, vmin=0, vmax=255)
+ >>> ax2.imshow(result, vmin=0, vmax=255)
+ >>> plt.show()
+
+ >>> print(ascent.shape)
+ (512, 512)
+
+ >>> print(result.shape)
+ (1536, 1536)
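+
+ With the default ``grid_mode=False`` the first and last pixel centers
+ stay aligned, so linearly zooming a ramp yields a denser ramp with the
+ same endpoints (a minimal sketch, not from the original docstring):
+
+ >>> import numpy as np
+ >>> x = np.arange(4.)
+ >>> bool(np.allclose(ndimage.zoom(x, 2, order=1), np.linspace(0, 3, 8)))
+ True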
+ """
+ if order < 0 or order > 5:
+ raise RuntimeError('spline order not supported')
+ input = numpy.asarray(input)
+ if input.ndim < 1:
+ raise RuntimeError('input and output rank must be > 0')
+ zoom = _ni_support._normalize_sequence(zoom, input.ndim)
+ output_shape = tuple(
+ [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
+ complex_output = numpy.iscomplexobj(input)
+ output = _ni_support._get_output(output, input, shape=output_shape,
+ complex_output=complex_output)
+ if complex_output:
+ # import under different name to avoid confusion with zoom parameter
+ from scipy.ndimage._interpolation import zoom as _zoom
+
+ kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+ _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
+ **kwargs)
+ _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
+ **kwargs)
+ return output
+ if prefilter and order > 1:
+ padded, npad = _prepad_for_spline_filter(input, mode, cval)
+ filtered = spline_filter(padded, order, output=numpy.float64,
+ mode=mode)
+ else:
+ npad = 0
+ filtered = input
+ if grid_mode:
+ # warn about modes that may have surprising behavior
+ suggest_mode = None
+ if mode == 'constant':
+ suggest_mode = 'grid-constant'
+ elif mode == 'wrap':
+ suggest_mode = 'grid-wrap'
+ if suggest_mode is not None:
+ warnings.warn(
+ ("It is recommended to use mode = {} instead of {} when "
+ "grid_mode is True.").format(suggest_mode, mode),
+ stacklevel=2
+ )
+ mode = _ni_support._extend_mode_to_code(mode)
+
+ zoom_div = numpy.array(output_shape)
+ zoom_numerator = numpy.array(input.shape)
+ if not grid_mode:
+ zoom_div -= 1
+ zoom_numerator -= 1
+
+ # Zooming to infinite values is unpredictable, so just choose
+ # zoom factor 1 instead
+ zoom = numpy.divide(zoom_numerator, zoom_div,
+ out=numpy.ones_like(input.shape, dtype=numpy.float64),
+ where=zoom_div != 0)
+ zoom = numpy.ascontiguousarray(zoom)
+ _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
+ grid_mode)
+ return output
+
+
+@docfiller
+def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
+ mode='constant', cval=0.0, prefilter=True):
+ """
+ Rotate an array.
+
+ The array is rotated in the plane defined by the two axes given by the
+ `axes` parameter using spline interpolation of the requested order.
+
+ Parameters
+ ----------
+ %(input)s
+ angle : float
+ The rotation angle in degrees.
+ axes : tuple of 2 ints, optional
+ The two axes that define the plane of rotation. Default is the first
+ two axes.
+ reshape : bool, optional
+ If `reshape` is true, the output shape is adapted so that the input
+ array is contained completely in the output. Default is True.
+ %(output)s
+ order : int, optional
+ The order of the spline interpolation, default is 3.
+ The order has to be in the range 0-5.
+ %(mode_interp_constant)s
+ %(cval)s
+ %(prefilter)s
+
+ Returns
+ -------
+ rotate : ndarray
+ The rotated input.
+
+ Notes
+ -----
+ For complex-valued `input`, this function rotates the real and imaginary
+ components independently.
+
+ .. versionadded:: 1.6.0
+ Complex-valued support added.
+
+ Examples
+ --------
+ >>> from scipy import ndimage, datasets
+ >>> import matplotlib.pyplot as plt
+ >>> fig = plt.figure(figsize=(10, 3))
+ >>> ax1, ax2, ax3 = fig.subplots(1, 3)
+ >>> img = datasets.ascent()
+ >>> img_45 = ndimage.rotate(img, 45, reshape=False)
+ >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
+ >>> ax1.imshow(img, cmap='gray')
+ >>> ax1.set_axis_off()
+ >>> ax2.imshow(img_45, cmap='gray')
+ >>> ax2.set_axis_off()
+ >>> ax3.imshow(full_img_45, cmap='gray')
+ >>> ax3.set_axis_off()
+ >>> fig.set_layout_engine('tight')
+ >>> plt.show()
+ >>> print(img.shape)
+ (512, 512)
+ >>> print(img_45.shape)
+ (512, 512)
+ >>> print(full_img_45.shape)
+ (724, 724)
+
+ """
+ input_arr = numpy.asarray(input)
+ ndim = input_arr.ndim
+
+ if ndim < 2:
+ raise ValueError('input array should be at least 2D')
+
+ axes = list(axes)
+
+ if len(axes) != 2:
+ raise ValueError('axes should contain exactly two values')
+
+ if not all([float(ax).is_integer() for ax in axes]):
+ raise ValueError('axes should contain only integer values')
+
+ if axes[0] < 0:
+ axes[0] += ndim
+ if axes[1] < 0:
+ axes[1] += ndim
+ if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
+ raise ValueError('invalid rotation plane specified')
+
+ axes.sort()
+
+ c, s = special.cosdg(angle), special.sindg(angle)
+
+ rot_matrix = numpy.array([[c, s],
+ [-s, c]])
+
+ img_shape = numpy.asarray(input_arr.shape)
+ in_plane_shape = img_shape[axes]
+ if reshape:
+ # Compute transformed input bounds
+ iy, ix = in_plane_shape
+ out_bounds = rot_matrix @ [[0, 0, iy, iy],
+ [0, ix, 0, ix]]
+ # Compute the shape of the transformed input plane
+ out_plane_shape = (numpy.ptp(out_bounds, axis=1) + 0.5).astype(int)
+ else:
+ out_plane_shape = img_shape[axes]
+
+ out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
+ in_center = (in_plane_shape - 1) / 2
+ offset = in_center - out_center
+
+ output_shape = img_shape
+ output_shape[axes] = out_plane_shape
+ output_shape = tuple(output_shape)
+
+ complex_output = numpy.iscomplexobj(input_arr)
+ output = _ni_support._get_output(output, input_arr, shape=output_shape,
+ complex_output=complex_output)
+
+ if ndim <= 2:
+ affine_transform(input_arr, rot_matrix, offset, output_shape, output,
+ order, mode, cval, prefilter)
+ else:
+ # If ndim > 2, the rotation is applied over all the planes
+ # parallel to axes
+ planes_coord = itertools.product(
+ *[[slice(None)] if ax in axes else range(img_shape[ax])
+ for ax in range(ndim)])
+
+ out_plane_shape = tuple(out_plane_shape)
+
+ for coordinates in planes_coord:
+ ia = input_arr[coordinates]
+ oa = output[coordinates]
+ affine_transform(ia, rot_matrix, offset, out_plane_shape,
+ oa, order, mode, cval, prefilter)
+
+ return output
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_measurements.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb3c8ef19ebc9f56e3bc0b2cc47e9664e64dd60c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_measurements.py
@@ -0,0 +1,1681 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy
+import numpy as np
+from . import _ni_support
+from . import _ni_label
+from . import _nd_image
+from . import _morphology
+
+__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
+ 'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
+ 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
+ 'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
+
+
+def label(input, structure=None, output=None):
+ """
+ Label features in an array.
+
+ Parameters
+ ----------
+ input : array_like
+ An array-like object to be labeled. Any non-zero values in `input` are
+ counted as features and zero values are considered the background.
+ structure : array_like, optional
+ A structuring element that defines feature connections.
+ `structure` must be centrosymmetric
+ (see Notes).
+ If no structuring element is provided,
+ one is automatically generated with a squared connectivity equal to
+ one. That is, for a 2-D `input` array, the default structuring element
+ is::
+
+ [[0,1,0],
+ [1,1,1],
+ [0,1,0]]
+
+ output : (None, data-type, array_like), optional
+ If `output` is a data type, it specifies the type of the resulting
+ labeled feature array.
+ If `output` is an array-like object, then `output` will be updated
+ with the labeled features from this function. This function can
+ operate in-place by passing ``output=input``.
+ Note that the output must be able to store the largest label, or this
+ function will raise an exception.
+
+ Returns
+ -------
+ label : ndarray or int
+ An integer ndarray where each unique feature in `input` has a unique
+ label in the returned array.
+ num_features : int
+ How many objects were found.
+
+ If `output` is None, this function returns a tuple of
+ (`labeled_array`, `num_features`).
+
+ If `output` is a ndarray, then it will be updated with values in
+ `labeled_array` and only `num_features` will be returned by this
+ function.
+
+ See Also
+ --------
+ find_objects : generate a list of slices for the labeled features (or
+ objects); useful for finding features' position or
+ dimensions
+
+ Notes
+ -----
+ A centrosymmetric matrix is a matrix that is symmetric about the center.
+ See [1]_ for more information.
+
+ The `structure` matrix must be centrosymmetric to ensure
+ two-way connections.
+ For instance, if the `structure` matrix is not centrosymmetric
+ and is defined as::
+
+ [[0,1,0],
+ [1,1,0],
+ [0,0,0]]
+
+ and the `input` is::
+
+ [[1,2],
+ [0,3]]
+
+ then the structure matrix would indicate the
+ entry 2 in the input is connected to 1,
+ but 1 is not connected to 2.
+
+ References
+ ----------
+ .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
+ matrices, their basic properties, eigenvalues, and
+ eigenvectors." The American Mathematical Monthly 92.10
+ (1985): 711-717.
+
+ Examples
+ --------
+ Create an image with some features, then label it using the default
+ (cross-shaped) structuring element:
+
+ >>> from scipy.ndimage import label, generate_binary_structure
+ >>> import numpy as np
+ >>> a = np.array([[0,0,1,1,0,0],
+ ... [0,0,0,1,0,0],
+ ... [1,1,0,0,1,0],
+ ... [0,0,0,1,0,0]])
+ >>> labeled_array, num_features = label(a)
+
+ Each of the 4 features are labeled with a different integer:
+
+ >>> num_features
+ 4
+ >>> labeled_array
+ array([[0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [2, 2, 0, 0, 3, 0],
+ [0, 0, 0, 4, 0, 0]])
+
+ Generate a structuring element that will consider features connected even
+ if they touch diagonally:
+
+ >>> s = generate_binary_structure(2,2)
+
+ or,
+
+ >>> s = [[1,1,1],
+ ... [1,1,1],
+ ... [1,1,1]]
+
+ Label the image using the new structuring element:
+
+ >>> labeled_array, num_features = label(a, structure=s)
+
+ Show the 2 labeled features (note that features 1, 3, and 4 from above are
+ now considered a single feature):
+
+ >>> num_features
+ 2
+ >>> labeled_array
+ array([[0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [2, 2, 0, 0, 1, 0],
+ [0, 0, 0, 1, 0, 0]])
+
+ """
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ if structure is None:
+ structure = _morphology.generate_binary_structure(input.ndim, 1)
+ structure = numpy.asarray(structure, dtype=bool)
+ if structure.ndim != input.ndim:
+ raise RuntimeError('structure and input must have equal rank')
+ for ii in structure.shape:
+ if ii != 3:
+ raise ValueError('structure dimensions must be equal to 3')
+
+    # Use 32 bits if that is large enough for this image.
+    # _ni_label.label() needs two entries for background and
+    # foreground tracking.
+ need_64bits = input.size >= (2**31 - 2)
+
+ if isinstance(output, numpy.ndarray):
+ if output.shape != input.shape:
+ raise ValueError("output shape not correct")
+ caller_provided_output = True
+ else:
+ caller_provided_output = False
+ if output is None:
+ output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+ else:
+ output = np.empty(input.shape, output)
+
+    # handle scalars and empty arrays
+ if input.ndim == 0 or input.size == 0:
+ if input.ndim == 0:
+ # scalar
+ maxlabel = 1 if (input != 0) else 0
+ output[...] = maxlabel
+ else:
+            # empty array
+ maxlabel = 0
+ if caller_provided_output:
+ return maxlabel
+ else:
+ return output, maxlabel
+
+ try:
+ max_label = _ni_label._label(input, structure, output)
+ except _ni_label.NeedMoreBits as e:
+ # Make another attempt with enough bits, then try to cast to the
+ # new type.
+ tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+ max_label = _ni_label._label(input, structure, tmp_output)
+ output[...] = tmp_output[...]
+ if not np.all(output == tmp_output):
+ # refuse to return bad results
+ raise RuntimeError(
+ "insufficient bit-depth in requested output type"
+ ) from e
+
+ if caller_provided_output:
+ # result was written in-place
+ return max_label
+ else:
+ return output, max_label
+
+
+def find_objects(input, max_label=0):
+ """
+ Find objects in a labeled array.
+
+ Parameters
+ ----------
+ input : ndarray of ints
+ Array containing objects defined by different labels. Labels with
+ value 0 are ignored.
+ max_label : int, optional
+ Maximum label to be searched for in `input`. If max_label is not
+ given, the positions of all objects are returned.
+
+ Returns
+ -------
+ object_slices : list of tuples
+ A list of tuples, with each tuple containing N slices (with N the
+ dimension of the input array). Slices correspond to the minimal
+ parallelepiped that contains the object. If a number is missing,
+ None is returned instead of a slice. The label ``l`` corresponds to
+ the index ``l-1`` in the returned list.
+
+ See Also
+ --------
+ label, center_of_mass
+
+ Notes
+ -----
+    This function is very useful for isolating a volume of interest inside
+    a 3-D array that cannot be "seen through".
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((6,6), dtype=int)
+ >>> a[2:4, 2:4] = 1
+ >>> a[4, 4] = 1
+ >>> a[:2, :3] = 2
+ >>> a[0, 5] = 3
+ >>> a
+ array([[2, 2, 2, 0, 0, 3],
+ [2, 2, 2, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> ndimage.find_objects(a)
+ [(slice(2, 5, None), slice(2, 5, None)),
+ (slice(0, 2, None), slice(0, 3, None)),
+ (slice(0, 1, None), slice(5, 6, None))]
+ >>> ndimage.find_objects(a, max_label=2)
+ [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
+ >>> ndimage.find_objects(a == 1, max_label=2)
+ [(slice(2, 5, None), slice(2, 5, None)), None]
+
+ >>> loc = ndimage.find_objects(a)[0]
+ >>> a[loc]
+ array([[1, 1, 0],
+ [1, 1, 0],
+ [0, 0, 1]])
+
+ """
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+
+ if max_label < 1:
+ max_label = input.max()
+
+ return _nd_image.find_objects(input, max_label)
+
+
+def value_indices(arr, *, ignore_value=None):
+ """
+ Find indices of each distinct value in given array.
+
+ Parameters
+ ----------
+ arr : ndarray of ints
+ Array containing integer values.
+ ignore_value : int, optional
+ This value will be ignored in searching the `arr` array. If not
+ given, all values found will be included in output. Default
+ is None.
+
+ Returns
+ -------
+ indices : dictionary
+ A Python dictionary of array indices for each distinct value. The
+ dictionary is keyed by the distinct values, the entries are array
+ index tuples covering all occurrences of the value within the
+ array.
+
+ This dictionary can occupy significant memory, usually several times
+ the size of the input array.
+
+ See Also
+ --------
+ label, maximum, median, minimum_position, extrema, sum, mean, variance,
+ standard_deviation, numpy.where, numpy.unique
+
+ Notes
+ -----
+ For a small array with few distinct values, one might use
+ `numpy.unique()` to find all possible values, and ``(arr == val)`` to
+ locate each value within that array. However, for large arrays,
+ with many distinct values, this can become extremely inefficient,
+ as locating each value would require a new search through the entire
+ array. Using this function, there is essentially one search, with
+ the indices saved for all distinct values.
+
+    This is useful when matching a categorical image (e.g. a segmentation
+    or classification) to an associated image of other data, allowing
+    any per-class statistic(s) to then be calculated. It provides a
+    more flexible alternative to functions like ``scipy.ndimage.mean()``
+    and ``scipy.ndimage.variance()``.
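+
+    For example, per-class means over a hypothetical second array ``data``
+    (same shape as `arr`; both names here are illustrative) could then be
+    computed in a single pass::
+
+        means = {val: data[ndx].mean()
+                 for val, ndx in ndimage.value_indices(arr).items()}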
+
+    Some other closely related functionality, with different strengths and
+    weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
+    the `scikit-image <https://scikit-image.org>`_ function
+    ``skimage.measure.regionprops()``.
+
+    Note for IDL users: this provides functionality equivalent to IDL's
+    REVERSE_INDICES option (as per the IDL documentation for the
+    ``HISTOGRAM`` function).
+
+ .. versionadded:: 1.10.0
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from scipy import ndimage
+ >>> a = np.zeros((6, 6), dtype=int)
+ >>> a[2:4, 2:4] = 1
+ >>> a[4, 4] = 1
+ >>> a[:2, :3] = 2
+ >>> a[0, 5] = 3
+ >>> a
+ array([[2, 2, 2, 0, 0, 3],
+ [2, 2, 2, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> val_indices = ndimage.value_indices(a)
+
+ The dictionary `val_indices` will have an entry for each distinct
+ value in the input array.
+
+ >>> val_indices.keys()
+ dict_keys([0, 1, 2, 3])
+
+ The entry for each value is an index tuple, locating the elements
+ with that value.
+
+ >>> ndx1 = val_indices[1]
+ >>> ndx1
+ (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
+
+ This can be used to index into the original array, or any other
+ array with the same shape.
+
+ >>> a[ndx1]
+ array([1, 1, 1, 1, 1])
+
+ If the zeros were to be ignored, then the resulting dictionary
+ would no longer have an entry for zero.
+
+ >>> val_indices = ndimage.value_indices(a, ignore_value=0)
+ >>> val_indices.keys()
+ dict_keys([1, 2, 3])
+
+ """
+ # Cope with ignore_value being None, without too much extra complexity
+ # in the C code. If not None, the value is passed in as a numpy array
+ # with the same dtype as arr.
+ ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype)
+ ignoreIsNone = (ignore_value is None)
+ if not ignoreIsNone:
+ ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
+
+ val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr)
+ return val_indices
+
+
+def labeled_comprehension(input, labels, index, func, out_dtype, default,
+ pass_positions=False):
+ """
+ Roughly equivalent to [func(input[labels == i]) for i in index].
+
+    Sequentially applies an arbitrary function (that works on array_like input)
+    to subsets of an N-D image array specified by `labels` and `index`.
+    Optionally, the function can also receive the linear positions of the
+    selected values as a second argument (see `pass_positions`).
+
+ Parameters
+ ----------
+ input : array_like
+ Data from which to select `labels` to process.
+ labels : array_like or None
+ Labels to objects in `input`.
+ If not None, array must be same shape as `input`.
+ If None, `func` is applied to raveled `input`.
+ index : int, sequence of ints or None
+ Subset of `labels` to which to apply `func`.
+ If a scalar, a single value is returned.
+ If None, `func` is applied to all non-zero values of `labels`.
+ func : callable
+ Python function to apply to `labels` from `input`.
+ out_dtype : dtype
+ Dtype to use for `result`.
+ default : int, float or None
+        Default return value when an element of `index` does not exist
+        in `labels`.
+ pass_positions : bool, optional
+ If True, pass linear indices to `func` as a second argument.
+ Default is False.
+
+ Returns
+ -------
+ result : ndarray
+        Result of applying `func` to each of the regions of `input`
+        labeled by the values in `index`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> lbls = np.arange(1, nlbl+1)
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
+ array([ 2.75, 5.5 , 6. ])
+
+ Falling back to `default`:
+
+ >>> lbls = np.arange(1, nlbl+2)
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
+ array([ 2.75, 5.5 , 6. , -1. ])
+
+ Passing positions:
+
+ >>> def fn(val, pos):
+ ... print("fn says: %s : %s" % (val, pos))
+ ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
+ ...
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
+ fn says: [1 2 5 3] : [0 1 4 5]
+ fn says: [4 7] : [ 7 11]
+ fn says: [9 3] : [12 13]
+ array([ 11., 11., -12., 0.])
+
+ """
+
+ as_scalar = numpy.isscalar(index)
+ input = numpy.asarray(input)
+
+ if pass_positions:
+ positions = numpy.arange(input.size).reshape(input.shape)
+
+ if labels is None:
+ if index is not None:
+ raise ValueError("index without defined labels")
+ if not pass_positions:
+ return func(input.ravel())
+ else:
+ return func(input.ravel(), positions.ravel())
+
+ try:
+ input, labels = numpy.broadcast_arrays(input, labels)
+ except ValueError as e:
+ raise ValueError("input and labels must have the same shape "
+ "(excepting dimensions with width 1)") from e
+
+ if index is None:
+ if not pass_positions:
+ return func(input[labels > 0])
+ else:
+ return func(input[labels > 0], positions[labels > 0])
+
+ index = numpy.atleast_1d(index)
+ if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
+ raise ValueError(f"Cannot convert index values from <{index.dtype}> to "
+ f"<{labels.dtype}> (labels' type) without loss of precision")
+
+ index = index.astype(labels.dtype)
+
+ # optimization: find min/max in index,
+ # and select those parts of labels, input, and positions
+ lo = index.min()
+ hi = index.max()
+ mask = (labels >= lo) & (labels <= hi)
+
+ # this also ravels the arrays
+ labels = labels[mask]
+ input = input[mask]
+ if pass_positions:
+ positions = positions[mask]
+
+ # sort everything by labels
+ label_order = labels.argsort()
+ labels = labels[label_order]
+ input = input[label_order]
+ if pass_positions:
+ positions = positions[label_order]
+
+ index_order = index.argsort()
+ sorted_index = index[index_order]
+
+ def do_map(inputs, output):
+ """labels must be sorted"""
+ nidx = sorted_index.size
+
+ # Find boundaries for each stretch of constant labels
+ # This could be faster, but we already paid N log N to sort labels.
+ lo = numpy.searchsorted(labels, sorted_index, side='left')
+ hi = numpy.searchsorted(labels, sorted_index, side='right')
+
+ for i, l, h in zip(range(nidx), lo, hi):
+ if l == h:
+ continue
+ output[i] = func(*[inp[l:h] for inp in inputs])
+
+ temp = numpy.empty(index.shape, out_dtype)
+ temp[:] = default
+ if not pass_positions:
+ do_map([input], temp)
+ else:
+ do_map([input, positions], temp)
+
+ output = numpy.zeros(index.shape, out_dtype)
+ output[index_order] = temp
+ if as_scalar:
+ output = output[0]
+
+ return output
+
+
+def _safely_castable_to_int(dt):
+ """Test whether the NumPy data type `dt` can be safely cast to an int."""
+ int_size = np.dtype(int).itemsize
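+    # E.g. on a platform where int is 64-bit, int64 and uint32 are safe,
+    # but uint64 is not (its values may not fit in a signed int).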
+ safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
+ (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
+ return safe
+
+
+def _stats(input, labels=None, index=None, centered=False):
+    """Count, sum, and optionally compute the sum of mean-centered squares
+    of input by label
+
+ Parameters
+ ----------
+ input : array_like, N-D
+ The input data to be analyzed.
+ labels : array_like (N-D), optional
+ The labels of the data in `input`. This array must be broadcast
+ compatible with `input`; typically, it is the same shape as `input`.
+ If `labels` is None, all nonzero values in `input` are treated as
+ the single labeled group.
+ index : label or sequence of labels, optional
+ These are the labels of the groups for which the stats are computed.
+ If `index` is None, the stats are computed for the single group where
+ `labels` is greater than 0.
+ centered : bool, optional
+ If True, the centered sum of squares for each labeled group is
+ also returned. Default is False.
+
+ Returns
+ -------
+ counts : int or ndarray of ints
+ The number of elements in each labeled group.
+ sums : scalar or ndarray of scalars
+ The sums of the values in each labeled group.
+ sums_c : scalar or ndarray of scalars, optional
+ The sums of mean-centered squares of the values in each labeled group.
+ This is only returned if `centered` is True.
+
+ """
+ def single_group(vals):
+ if centered:
+ vals_c = vals - vals.mean()
+ return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
+ else:
+ return vals.size, vals.sum()
+
+ if labels is None:
+ return single_group(input)
+
+ # ensure input and labels match sizes
+ input, labels = numpy.broadcast_arrays(input, labels)
+
+ if index is None:
+ return single_group(input[labels > 0])
+
+ if numpy.isscalar(index):
+ return single_group(input[labels == index])
+
+ def _sum_centered(labels):
+ # `labels` is expected to be an ndarray with the same shape as `input`.
+ # It must contain the label indices (which are not necessarily the labels
+ # themselves).
+ means = sums / counts
+ centered_input = input - means[labels]
+ # bincount expects 1-D inputs, so we ravel the arguments.
+ bc = numpy.bincount(labels.ravel(),
+ weights=(centered_input *
+ centered_input.conjugate()).ravel())
+ return bc
+
+ # Remap labels to unique integers if necessary, or if the largest
+ # label is larger than the number of values.
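+    # (E.g. a label of 10**9 in a small image would make bincount allocate
+    # a 10**9-entry array, and float labels cannot be used as bincount
+    # indices at all.)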
+
+ if (not _safely_castable_to_int(labels.dtype) or
+ labels.min() < 0 or labels.max() > labels.size):
+ # Use numpy.unique to generate the label indices. `new_labels` will
+ # be 1-D, but it should be interpreted as the flattened N-D array of
+ # label indices.
+ unique_labels, new_labels = numpy.unique(labels, return_inverse=True)
+ new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D
+ counts = numpy.bincount(new_labels)
+ sums = numpy.bincount(new_labels, weights=input.ravel())
+ if centered:
+ # Compute the sum of the mean-centered squares.
+ # We must reshape new_labels to the N-D shape of `input` before
+            # passing it to _sum_centered.
+ sums_c = _sum_centered(new_labels.reshape(labels.shape))
+ idxs = numpy.searchsorted(unique_labels, index)
+ # make all of idxs valid
+ idxs[idxs >= unique_labels.size] = 0
+ found = (unique_labels[idxs] == index)
+ else:
+ # labels are an integer type allowed by bincount, and there aren't too
+ # many, so call bincount directly.
+ counts = numpy.bincount(labels.ravel())
+ sums = numpy.bincount(labels.ravel(), weights=input.ravel())
+ if centered:
+ sums_c = _sum_centered(labels)
+ # make sure all index values are valid
+ idxs = numpy.asanyarray(index, numpy.int_).copy()
+ found = (idxs >= 0) & (idxs < counts.size)
+ idxs[~found] = 0
+
+ counts = counts[idxs]
+ counts[~found] = 0
+ sums = sums[idxs]
+ sums[~found] = 0
+
+ if not centered:
+ return (counts, sums)
+ else:
+ sums_c = sums_c[idxs]
+ sums_c[~found] = 0
+ return (counts, sums, sums_c)
+
+
+def sum(input, labels=None, index=None):
+ """
+ Calculate the sum of the values of the array.
+
+ Notes
+ -----
+    This is an alias for `ndimage.sum_labels`, kept for backwards
+    compatibility; for new code please prefer `sum_labels`. See the
+    `sum_labels` docstring for more details.
+
+ """
+ return sum_labels(input, labels, index)
+
+
+def sum_labels(input, labels=None, index=None):
+ """
+ Calculate the sum of the values of the array.
+
+ Parameters
+ ----------
+ input : array_like
+ Values of `input` inside the regions defined by `labels`
+ are summed together.
+ labels : array_like of ints, optional
+ Assign labels to the values of the array. Has to have the same shape as
+ `input`.
+ index : array_like, optional
+ A single label number or a sequence of label numbers of
+ the objects to be measured.
+
+ Returns
+ -------
+ sum : ndarray or scalar
+ An array of the sums of values of `input` inside the regions defined
+        by `labels`, with the same shape as `index`. If `index` is None or scalar,
+ a scalar is returned.
+
+ See Also
+ --------
+ mean, median
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> input = [0,1,2,3]
+ >>> labels = [1,1,2,2]
+ >>> ndimage.sum_labels(input, labels, index=[1,2])
+ [1.0, 5.0]
+ >>> ndimage.sum_labels(input, labels, index=1)
+ 1
+ >>> ndimage.sum_labels(input, labels)
+ 6
+
+ """
+ count, sum = _stats(input, labels, index)
+ return sum
+
+
+def mean(input, labels=None, index=None):
+ """
+ Calculate the mean of the values of an array at labels.
+
+ Parameters
+ ----------
+ input : array_like
+ Array on which to compute the mean of elements over distinct
+ regions.
+ labels : array_like, optional
+ Array of labels of same shape, or broadcastable to the same shape as
+ `input`. All elements sharing the same label form one region over
+ which the mean of the elements is computed.
+ index : int or sequence of ints, optional
+ Labels of the objects over which the mean is to be computed.
+ Default is None, in which case the mean for all values where label is
+ greater than 0 is calculated.
+
+ Returns
+ -------
+ out : list
+ Sequence of same length as `index`, with the mean of the different
+ regions labeled by the labels in `index`.
+
+ See Also
+ --------
+ variance, standard_deviation, minimum, maximum, sum, label
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.arange(25).reshape((5,5))
+ >>> labels = np.zeros_like(a)
+ >>> labels[3:5,3:5] = 1
+ >>> index = np.unique(labels)
+ >>> labels
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1],
+ [0, 0, 0, 1, 1]])
+ >>> index
+ array([0, 1])
+ >>> ndimage.mean(a, labels=labels, index=index)
+ [10.285714285714286, 21.0]
+
+ """
+
+ count, sum = _stats(input, labels, index)
+ return sum / numpy.asanyarray(count).astype(numpy.float64)
+
+
+def variance(input, labels=None, index=None):
+ """
+ Calculate the variance of the values of an N-D image array, optionally at
+ specified sub-regions.
+
+ Parameters
+ ----------
+ input : array_like
+        N-D image data to process.
+ labels : array_like, optional
+ Labels defining sub-regions in `input`.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+ `labels` to include in output. If None (default), all values where
+ `labels` is non-zero are used.
+
+ Returns
+ -------
+ variance : float or ndarray
+ Values of variance, for each sub-region if `labels` and `index` are
+ specified.
+
+ See Also
+ --------
+ label, standard_deviation, maximum, minimum, extrema
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.variance(a)
+ 7.609375
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
+ array([ 2.1875, 2.25 , 9. ])
+
+ If no index is given, all non-zero `labels` are processed:
+
+ >>> ndimage.variance(a, lbl)
+ 6.1875
+
+ """
+ count, sum, sum_c_sq = _stats(input, labels, index, centered=True)
+ return sum_c_sq / np.asanyarray(count).astype(float)
+
+
+def standard_deviation(input, labels=None, index=None):
+ """
+ Calculate the standard deviation of the values of an N-D image array,
+ optionally at specified sub-regions.
+
+ Parameters
+ ----------
+ input : array_like
+ N-D image data to process.
+ labels : array_like, optional
+ Labels to identify sub-regions in `input`.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+ `labels` to include in output. If None (default), all values where
+ `labels` is non-zero are used.
+
+ Returns
+ -------
+ standard_deviation : float or ndarray
+ Values of standard deviation, for each sub-region if `labels` and
+ `index` are specified.
+
+ See Also
+ --------
+ label, variance, maximum, minimum, extrema
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.standard_deviation(a)
+ 2.7585095613392387
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
+ array([ 1.479, 1.5 , 3. ])
+
+ If no index is given, non-zero `labels` are processed:
+
+ >>> ndimage.standard_deviation(a, lbl)
+ 2.4874685927665499
+
+ """
+ return numpy.sqrt(variance(input, labels, index))
+
+
+def _select(input, labels=None, index=None, find_min=False, find_max=False,
+ find_min_positions=False, find_max_positions=False,
+ find_median=False):
+ """Returns min, max, or both, plus their positions (if requested), and
+ median."""
+
+ input = numpy.asanyarray(input)
+
+ find_positions = find_min_positions or find_max_positions
+ positions = None
+ if find_positions:
+ positions = numpy.arange(input.size).reshape(input.shape)
+
+ def single_group(vals, positions):
+ result = []
+ if find_min:
+ result += [vals.min()]
+ if find_min_positions:
+ result += [positions[vals == vals.min()][0]]
+ if find_max:
+ result += [vals.max()]
+ if find_max_positions:
+ result += [positions[vals == vals.max()][0]]
+ if find_median:
+ result += [numpy.median(vals)]
+ return result
+
+ if labels is None:
+ return single_group(input, positions)
+
+ # ensure input and labels match sizes
+ input, labels = numpy.broadcast_arrays(input, labels)
+
+ if index is None:
+ mask = (labels > 0)
+ masked_positions = None
+ if find_positions:
+ masked_positions = positions[mask]
+ return single_group(input[mask], masked_positions)
+
+ if numpy.isscalar(index):
+ mask = (labels == index)
+ masked_positions = None
+ if find_positions:
+ masked_positions = positions[mask]
+ return single_group(input[mask], masked_positions)
+
+ # remap labels to unique integers if necessary, or if the largest
+ # label is larger than the number of values.
+ if (not _safely_castable_to_int(labels.dtype) or
+ labels.min() < 0 or labels.max() > labels.size):
+ # remap labels, and indexes
+ unique_labels, labels = numpy.unique(labels, return_inverse=True)
+ idxs = numpy.searchsorted(unique_labels, index)
+
+ # make all of idxs valid
+ idxs[idxs >= unique_labels.size] = 0
+ found = (unique_labels[idxs] == index)
+ else:
+ # labels are an integer type, and there aren't too many
+ idxs = numpy.asanyarray(index, numpy.int_).copy()
+ found = (idxs >= 0) & (idxs <= labels.max())
+
+    idxs[~found] = labels.max() + 1
+
+ if find_median:
+ order = numpy.lexsort((input.ravel(), labels.ravel()))
+ else:
+ order = input.ravel().argsort()
+ input = input.ravel()[order]
+ labels = labels.ravel()[order]
+ if find_positions:
+ positions = positions.ravel()[order]
+
+ result = []
+ if find_min:
+ mins = numpy.zeros(labels.max() + 2, input.dtype)
+ mins[labels[::-1]] = input[::-1]
+ result += [mins[idxs]]
+ if find_min_positions:
+ minpos = numpy.zeros(labels.max() + 2, int)
+ minpos[labels[::-1]] = positions[::-1]
+ result += [minpos[idxs]]
+ if find_max:
+ maxs = numpy.zeros(labels.max() + 2, input.dtype)
+ maxs[labels] = input
+ result += [maxs[idxs]]
+ if find_max_positions:
+ maxpos = numpy.zeros(labels.max() + 2, int)
+ maxpos[labels] = positions
+ result += [maxpos[idxs]]
+ if find_median:
+ locs = numpy.arange(len(labels))
+ lo = numpy.zeros(labels.max() + 2, numpy.int_)
+ lo[labels[::-1]] = locs[::-1]
+ hi = numpy.zeros(labels.max() + 2, numpy.int_)
+ hi[labels] = locs
+ lo = lo[idxs]
+ hi = hi[idxs]
+ # lo is an index to the lowest value in input for each label,
+ # hi is an index to the largest value.
+ # move them to be either the same ((hi - lo) % 2 == 0) or next
+ # to each other ((hi - lo) % 2 == 1), then average.
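+        # (E.g. lo=2 and hi=5 give step=1, so elements 3 and 4 are averaged.)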
+ step = (hi - lo) // 2
+ lo += step
+ hi -= step
+ if (np.issubdtype(input.dtype, np.integer)
+ or np.issubdtype(input.dtype, np.bool_)):
+ # avoid integer overflow or boolean addition (gh-12836)
+ result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
+ else:
+ result += [(input[lo] + input[hi]) / 2.0]
+
+ return result
+
+
+def minimum(input, labels=None, index=None):
+ """
+ Calculate the minimum of the values of an array over labeled regions.
+
+ Parameters
+ ----------
+ input : array_like
+ Array_like of values. For each region specified by `labels`, the
+ minimal values of `input` over the region is computed.
+ labels : array_like, optional
+ An array_like of integers marking different regions over which the
+ minimum value of `input` is to be computed. `labels` must have the
+ same shape as `input`. If `labels` is not specified, the minimum
+ over the whole array is returned.
+ index : array_like, optional
+ A list of region labels that are taken into account for computing the
+ minima. If index is None, the minimum over all elements where `labels`
+ is non-zero is returned.
+
+ Returns
+ -------
+ minimum : float or list of floats
+ List of minima of `input` over the regions determined by `labels` and
+ whose index is in `index`. If `index` or `labels` are not specified, a
+ float is returned: the minimal value of `input` if `labels` is None,
+ and the minimal value of elements where `labels` is greater than zero
+ if `index` is None.
+
+ See Also
+ --------
+ label, maximum, median, minimum_position, extrema, sum, mean, variance,
+ standard_deviation
+
+ Notes
+ -----
+    The function returns a Python list and not a NumPy array; use
+    `np.array` to convert the list to an array.
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> labels, labels_nb = ndimage.label(a)
+ >>> labels
+ array([[1, 1, 0, 0],
+ [1, 1, 0, 2],
+ [0, 0, 0, 2],
+ [3, 3, 0, 0]])
+ >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
+ [1.0, 4.0, 3.0]
+ >>> ndimage.minimum(a)
+ 0.0
+ >>> ndimage.minimum(a, labels=labels)
+ 1.0
+
+ """
+ return _select(input, labels, index, find_min=True)[0]
+
+
+def maximum(input, labels=None, index=None):
+ """
+ Calculate the maximum of the values of an array over labeled regions.
+
+ Parameters
+ ----------
+ input : array_like
+ Array_like of values. For each region specified by `labels`, the
+ maximal values of `input` over the region is computed.
+ labels : array_like, optional
+ An array of integers marking different regions over which the
+ maximum value of `input` is to be computed. `labels` must have the
+ same shape as `input`. If `labels` is not specified, the maximum
+ over the whole array is returned.
+ index : array_like, optional
+ A list of region labels that are taken into account for computing the
+ maxima. If index is None, the maximum over all elements where `labels`
+ is non-zero is returned.
+
+ Returns
+ -------
+ output : float or list of floats
+ List of maxima of `input` over the regions determined by `labels` and
+ whose index is in `index`. If `index` or `labels` are not specified, a
+ float is returned: the maximal value of `input` if `labels` is None,
+ and the maximal value of elements where `labels` is greater than zero
+ if `index` is None.
+
+ See Also
+ --------
+ label, minimum, median, maximum_position, extrema, sum, mean, variance,
+ standard_deviation
+
+ Notes
+ -----
+    The function returns a Python list and not a NumPy array; use
+    `np.array` to convert the list to an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(16).reshape((4,4))
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+ >>> labels = np.zeros_like(a)
+ >>> labels[:2,:2] = 1
+ >>> labels[2:, 1:3] = 2
+ >>> labels
+ array([[1, 1, 0, 0],
+ [1, 1, 0, 0],
+ [0, 2, 2, 0],
+ [0, 2, 2, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.maximum(a)
+ 15.0
+ >>> ndimage.maximum(a, labels=labels, index=[1,2])
+ [5.0, 14.0]
+ >>> ndimage.maximum(a, labels=labels)
+ 14.0
+
+ >>> b = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> labels, labels_nb = ndimage.label(b)
+ >>> labels
+ array([[1, 1, 0, 0],
+ [1, 1, 0, 2],
+ [0, 0, 0, 2],
+ [3, 3, 0, 0]])
+ >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
+ [5.0, 7.0, 9.0]
+
+ """
+ return _select(input, labels, index, find_max=True)[0]
+
+
+def median(input, labels=None, index=None):
+ """
+ Calculate the median of the values of an array over labeled regions.
+
+ Parameters
+ ----------
+ input : array_like
+ Array_like of values. For each region specified by `labels`, the
+ median value of `input` over the region is computed.
+ labels : array_like, optional
+ An array_like of integers marking different regions over which the
+ median value of `input` is to be computed. `labels` must have the
+ same shape as `input`. If `labels` is not specified, the median
+ over the whole array is returned.
+ index : array_like, optional
+ A list of region labels that are taken into account for computing the
+ medians. If index is None, the median over all elements where `labels`
+ is non-zero is returned.
+
+ Returns
+ -------
+ median : float or list of floats
+ List of medians of `input` over the regions determined by `labels` and
+ whose index is in `index`. If `index` or `labels` are not specified, a
+ float is returned: the median value of `input` if `labels` is None,
+ and the median value of elements where `labels` is greater than zero
+ if `index` is None.
+
+ See Also
+ --------
+ label, minimum, maximum, extrema, sum, mean, variance, standard_deviation
+
+ Notes
+ -----
+    The function returns a Python list and not a NumPy array; use
+    `np.array` to convert the list to an array.
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 1],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> labels, labels_nb = ndimage.label(a)
+ >>> labels
+ array([[1, 1, 0, 2],
+ [1, 1, 0, 2],
+ [0, 0, 0, 2],
+ [3, 3, 0, 0]])
+ >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
+ [2.5, 4.0, 6.0]
+ >>> ndimage.median(a)
+ 1.0
+ >>> ndimage.median(a, labels=labels)
+ 3.0
+
+ """
+ return _select(input, labels, index, find_median=True)[0]
+
+
+def minimum_position(input, labels=None, index=None):
+ """
+ Find the positions of the minimums of the values of an array at labels.
+
+ Parameters
+ ----------
+ input : array_like
+ Array_like of values.
+ labels : array_like, optional
+ An array of integers marking different regions over which the
+ position of the minimum value of `input` is to be computed.
+ `labels` must have the same shape as `input`. If `labels` is not
+ specified, the location of the first minimum over the whole
+ array is returned.
+
+ The `labels` argument only works when `index` is specified.
+ index : array_like, optional
+ A list of region labels that are taken into account for finding the
+ location of the minima. If `index` is None, the ``first`` minimum
+ over all elements where `labels` is non-zero is returned.
+
+ The `index` argument only works when `labels` is specified.
+
+ Returns
+ -------
+ output : list of tuples of ints
+ Tuple of ints or list of tuples of ints that specify the location
+ of minima of `input` over the regions determined by `labels` and
+ whose index is in `index`.
+
+ If `index` or `labels` are not specified, a tuple of ints is
+ returned specifying the location of the first minimal value of `input`.
+
+ See Also
+ --------
+ label, minimum, median, maximum_position, extrema, sum, mean, variance,
+ standard_deviation
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10, 20, 30],
+ ... [40, 80, 100],
+ ... [1, 100, 200]])
+ >>> b = np.array([[1, 2, 0, 1],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+
+ >>> from scipy import ndimage
+
+ >>> ndimage.minimum_position(a)
+ (2, 0)
+ >>> ndimage.minimum_position(b)
+ (0, 2)
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> label, pos = ndimage.label(a)
+ >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
+ [(2, 0)]
+
+ >>> label, pos = ndimage.label(b)
+ >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
+ [(0, 0), (0, 3), (3, 1)]
+
+ """
+ dims = numpy.array(numpy.asarray(input).shape)
+ # see numpy.unravel_index to understand this line.
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
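+    # E.g. dims = [4, 3] gives dim_prod = [3, 1], so a flat index f maps to
+    # the coordinates ((f // 3) % 4, (f // 1) % 3).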
+
+ result = _select(input, labels, index, find_min_positions=True)[0]
+
+ if numpy.isscalar(result):
+ return tuple((result // dim_prod) % dims)
+
+ return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
+
+
+def maximum_position(input, labels=None, index=None):
+ """
+ Find the positions of the maximums of the values of an array at labels.
+
+ For each region specified by `labels`, the position of the maximum
+ value of `input` within the region is returned.
+
+ Parameters
+ ----------
+ input : array_like
+ Array_like of values.
+ labels : array_like, optional
+ An array of integers marking different regions over which the
+ position of the maximum value of `input` is to be computed.
+ `labels` must have the same shape as `input`. If `labels` is not
+ specified, the location of the first maximum over the whole
+ array is returned.
+
+ The `labels` argument only works when `index` is specified.
+ index : array_like, optional
+ A list of region labels that are taken into account for finding the
+ location of the maxima. If `index` is None, the first maximum
+ over all elements where `labels` is non-zero is returned.
+
+ The `index` argument only works when `labels` is specified.
+
+ Returns
+ -------
+ output : list of tuples of ints
+ List of tuples of ints that specify the location of maxima of
+ `input` over the regions determined by `labels` and whose index
+ is in `index`.
+
+ If `index` or `labels` are not specified, a tuple of ints is
+ returned specifying the location of the ``first`` maximal value
+ of `input`.
+
+ See Also
+ --------
+ label, minimum, median, maximum_position, extrema, sum, mean, variance,
+ standard_deviation
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> ndimage.maximum_position(a)
+ (3, 0)
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl = np.array([[0, 1, 2, 3],
+ ... [0, 1, 2, 3],
+ ... [0, 1, 2, 3],
+ ... [0, 1, 2, 3]])
+ >>> ndimage.maximum_position(a, lbl, 1)
+ (1, 1)
+
+ If no index is given, non-zero `labels` are processed:
+
+ >>> ndimage.maximum_position(a, lbl)
+ (2, 3)
+
+ If there are no maxima, the position of the first element is returned:
+
+ >>> ndimage.maximum_position(a, lbl, 2)
+ (0, 2)
+
+ """
+ dims = numpy.array(numpy.asarray(input).shape)
+ # see numpy.unravel_index to understand this line.
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
+
+ result = _select(input, labels, index, find_max_positions=True)[0]
+
+ if numpy.isscalar(result):
+ return tuple((result // dim_prod) % dims)
+
+ return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
+
+
+def extrema(input, labels=None, index=None):
+ """
+ Calculate the minimums and maximums of the values of an array
+ at labels, along with their positions.
+
+ Parameters
+ ----------
+ input : ndarray
+ N-D image data to process.
+ labels : ndarray, optional
+ Labels of features in input.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+        Labels to include in output. If None (default), all values where
+        `labels` is non-zero are used.
+
+ Returns
+ -------
+ minimums, maximums : int or ndarray
+ Values of minimums and maximums in each feature.
+ min_positions, max_positions : tuple or list of tuples
+ Each tuple gives the N-D coordinates of the corresponding minimum
+ or maximum.
+
+ See Also
+ --------
+ maximum, minimum, maximum_position, minimum_position, center_of_mass
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 0, 0],
+ ... [5, 3, 0, 4],
+ ... [0, 0, 0, 7],
+ ... [9, 3, 0, 0]])
+ >>> from scipy import ndimage
+ >>> ndimage.extrema(a)
+ (0, 9, (0, 2), (3, 0))
+
+ Features to process can be specified using `labels` and `index`:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
+ (array([1, 4, 3]),
+ array([5, 7, 9]),
+ [(0, 0), (1, 3), (3, 1)],
+ [(1, 0), (2, 3), (3, 0)])
+
+ If no index is given, non-zero `labels` are processed:
+
+ >>> ndimage.extrema(a, lbl)
+ (1, 9, (0, 0), (3, 0))
+
+ """
+ dims = numpy.array(numpy.asarray(input).shape)
+ # see numpy.unravel_index to understand this line.
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
+
+ minimums, min_positions, maximums, max_positions = _select(input, labels,
+ index,
+ find_min=True,
+ find_max=True,
+ find_min_positions=True,
+ find_max_positions=True)
+
+ if numpy.isscalar(minimums):
+ return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
+ tuple((max_positions // dim_prod) % dims))
+
+ min_positions = [
+ tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims
+ ]
+ max_positions = [
+ tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims
+ ]
+
+ return minimums, maximums, min_positions, max_positions
+
+
+def center_of_mass(input, labels=None, index=None):
+ """
+ Calculate the center of mass of the values of an array at labels.
+
+ Parameters
+ ----------
+ input : ndarray
+ Data from which to calculate center-of-mass. The masses can either
+ be positive or negative.
+ labels : ndarray, optional
+ Labels for objects in `input`, as generated by `ndimage.label`.
+ Only used with `index`. Dimensions must be the same as `input`.
+ index : int or sequence of ints, optional
+ Labels for which to calculate centers-of-mass. If not specified,
+ the combined center of mass of all labels greater than zero
+ will be calculated. Only used with `labels`.
+
+ Returns
+ -------
+ center_of_mass : tuple, or list of tuples
+ Coordinates of centers-of-mass.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array(([0,0,0,0],
+ ... [0,1,1,0],
+ ... [0,1,1,0],
+ ... [0,1,1,0]))
+ >>> from scipy import ndimage
+ >>> ndimage.center_of_mass(a)
+ (2.0, 1.5)
+
+ Calculation of multiple objects in an image
+
+ >>> b = np.array(([0,1,1,0],
+ ... [0,1,0,0],
+ ... [0,0,0,0],
+ ... [0,0,1,1],
+ ... [0,0,1,1]))
+ >>> lbl = ndimage.label(b)[0]
+ >>> ndimage.center_of_mass(b, lbl, [1,2])
+ [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)]
+
+ Negative masses are also accepted, which can occur for example when
+ bias is removed from measured data due to random noise.
+
+ >>> c = np.array(([-1,0,0,0],
+ ... [0,-1,-1,0],
+ ... [0,1,-1,0],
+ ... [0,1,1,0]))
+ >>> ndimage.center_of_mass(c)
+ (-4.0, 1.0)
+
+ If there are division by zero issues, the function does not raise an
+ error but rather issues a RuntimeWarning before returning inf and/or NaN.
+
+ >>> d = np.array([-1, 1])
+ >>> ndimage.center_of_mass(d)
+ (inf,)
+ """
+ normalizer = sum(input, labels, index)
+ grids = numpy.ogrid[[slice(0, i) for i in input.shape]]
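+    # grids[d] holds the coordinate along axis d, broadcast over the array,
+    # so each entry of `results` is an intensity-weighted mean coordinate.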
+
+ results = [sum(input * grids[dir].astype(float), labels, index) / normalizer
+ for dir in range(input.ndim)]
+
+ if numpy.isscalar(results[0]):
+ return tuple(results)
+
+ return [tuple(v) for v in numpy.array(results).T]
+
+
+def histogram(input, min, max, bins, labels=None, index=None):
+ """
+ Calculate the histogram of the values of an array, optionally at labels.
+
+ Histogram calculates the frequency of values in an array within bins
+ determined by `min`, `max`, and `bins`. The `labels` and `index`
+ keywords can limit the scope of the histogram to specified sub-regions
+ within the array.
+
+ Parameters
+ ----------
+ input : array_like
+ Data for which to calculate histogram.
+ min, max : int
+ Minimum and maximum values of range of histogram bins.
+ bins : int
+ Number of bins.
+ labels : array_like, optional
+ Labels for objects in `input`.
+ If not None, must be same shape as `input`.
+ index : int or sequence of ints, optional
+ Label or labels for which to calculate histogram. If None, all values
+        where label is greater than zero are used.
+
+ Returns
+ -------
+ hist : ndarray
+ Histogram counts.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ],
+ ... [ 0. , 0.7778, 0. , 0. ],
+ ... [ 0. , 0. , 0. , 0. ],
+ ... [ 0. , 0. , 0.7181, 0.2787],
+ ... [ 0. , 0. , 0.6573, 0.3094]])
+ >>> from scipy import ndimage
+ >>> ndimage.histogram(a, 0, 1, 10)
+ array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0])
+
+ With labels and no indices, non-zero elements are counted:
+
+ >>> lbl, nlbl = ndimage.label(a)
+ >>> ndimage.histogram(a, 0, 1, 10, lbl)
+ array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])
+
+ Indices can be used to count only certain objects:
+
+ >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
+ array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
+
+ """
+ _bins = numpy.linspace(min, max, bins + 1)
+
+ def _hist(vals):
+ return numpy.histogram(vals, _bins)[0]
+
+ return labeled_comprehension(input, labels, index, _hist, object, None,
+ pass_positions=False)
+
+
+def watershed_ift(input, markers, structure=None, output=None):
+ """
+ Apply watershed from markers using image foresting transform algorithm.
+
+ Parameters
+ ----------
+ input : array_like
+ Input.
+ markers : array_like
+ Markers are points within each watershed that form the beginning
+ of the process. Negative markers are considered background markers
+ which are processed after the other markers.
+ structure : structure element, optional
+ A structuring element defining the connectivity of the object can be
+ provided. If None, an element is generated with a squared
+ connectivity equal to one.
+ output : ndarray, optional
+        An output array can optionally be provided; it must have the same
+        shape as `input`.
+
+ Returns
+ -------
+ watershed_ift : ndarray
+ Output. Same shape as `input`.
+
+ References
+ ----------
+ .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
+ foresting transform: theory, algorithms, and applications",
+ Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.
+
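+    Examples
+    --------
+    A minimal usage sketch (not a doctest; outputs omitted), assuming a
+    bright ridge separating two dark basins::
+
+        from scipy import ndimage
+        import numpy as np
+
+        img = np.zeros((7, 7), dtype=np.uint8)
+        img[:, 3] = 255                           # ridge between two basins
+        markers = np.zeros((7, 7), dtype=np.int8)
+        markers[3, 0] = 1                         # seed in the left basin
+        markers[3, 6] = 2                         # seed in the right basin
+        basins = ndimage.watershed_ift(img, markers)
+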
+ """
+ input = numpy.asarray(input)
+ if input.dtype.type not in [numpy.uint8, numpy.uint16]:
+ raise TypeError('only 8 and 16 unsigned inputs are supported')
+
+ if structure is None:
+ structure = _morphology.generate_binary_structure(input.ndim, 1)
+ structure = numpy.asarray(structure, dtype=bool)
+ if structure.ndim != input.ndim:
+ raise RuntimeError('structure and input must have equal rank')
+ for ii in structure.shape:
+ if ii != 3:
+ raise RuntimeError('structure dimensions must be equal to 3')
+
+ if not structure.flags.contiguous:
+ structure = structure.copy()
+ markers = numpy.asarray(markers)
+ if input.shape != markers.shape:
+ raise RuntimeError('input and markers must have equal shape')
+
+ integral_types = [numpy.int8,
+ numpy.int16,
+ numpy.int32,
+ numpy.int64,
+ numpy.intc,
+ numpy.intp]
+
+ if markers.dtype.type not in integral_types:
+ raise RuntimeError('marker should be of integer type')
+
+ if isinstance(output, numpy.ndarray):
+ if output.dtype.type not in integral_types:
+ raise RuntimeError('output should be of integer type')
+ else:
+ output = markers.dtype
+
+ output = _ni_support._get_output(output, input)
+ _nd_image.watershed_ift(input, markers, structure, output)
+ return output
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_morphology.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bd7ec7fbfdead532c7a9d3cb073360907d85b6b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_morphology.py
@@ -0,0 +1,2520 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import warnings
+import operator
+
+import numpy
+from . import _ni_support
+from . import _nd_image
+from . import _filters
+
+__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
+ 'binary_dilation', 'binary_opening', 'binary_closing',
+ 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
+ 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
+ 'morphological_gradient', 'morphological_laplace', 'white_tophat',
+ 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
+ 'distance_transform_edt']
+
+
+def _center_is_true(structure, origin):
+ structure = numpy.array(structure)
+ coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
+ origin)])
+ return bool(structure[coor])
+
+
+def iterate_structure(structure, iterations, origin=None):
+ """
+ Iterate a structure by dilating it with itself.
+
+ Parameters
+ ----------
+ structure : array_like
+ Structuring element (an array of bools, for example), to be dilated with
+ itself.
+ iterations : int
+        Number of dilations performed on the structure with itself.
+    origin : int or tuple of ints, optional
+        If origin is None, only the iterated structure is returned. If
+        not, a tuple of the iterated structure and the modified origin is
+        returned.
+
+ Returns
+ -------
+ iterate_structure : ndarray of bools
+ A new structuring element obtained by dilating `structure`
+ (`iterations` - 1) times with itself.
+
+ See Also
+ --------
+ generate_binary_structure
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> struct = ndimage.generate_binary_structure(2, 1)
+ >>> struct.astype(int)
+ array([[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]])
+ >>> ndimage.iterate_structure(struct, 2).astype(int)
+ array([[0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0]])
+ >>> ndimage.iterate_structure(struct, 3).astype(int)
+ array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]])
+
+ """
+ structure = numpy.asarray(structure)
+ if iterations < 2:
+ return structure.copy()
+ ni = iterations - 1
+ shape = [ii + ni * (ii - 1) for ii in structure.shape]
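+    # One dilation with a side-`ii` element grows that side by (ii - 1),
+    # so `ni` dilations grow it by ni * (ii - 1).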
+ pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
+ slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None)
+ for ii in range(len(shape)))
+ out = numpy.zeros(shape, bool)
+ out[slc] = structure != 0
+ out = binary_dilation(out, structure, iterations=ni)
+ if origin is None:
+ return out
+ else:
+ origin = _ni_support._normalize_sequence(origin, structure.ndim)
+ origin = [iterations * o for o in origin]
+ return out, origin
+
+
+def generate_binary_structure(rank, connectivity):
+ """
+ Generate a binary structure for binary morphological operations.
+
+ Parameters
+ ----------
+ rank : int
+ Number of dimensions of the array to which the structuring element
+ will be applied, as returned by `np.ndim`.
+ connectivity : int
+ `connectivity` determines which elements of the output array belong
+ to the structure, i.e., are considered as neighbors of the central
+ element. Elements up to a squared distance of `connectivity` from
+ the center are considered neighbors. `connectivity` may range from 1
+ (no diagonal elements are neighbors) to `rank` (all elements are
+ neighbors).
+
+ Returns
+ -------
+ output : ndarray of bools
+ Structuring element which may be used for binary morphological
+ operations, with `rank` dimensions and all dimensions equal to 3.
+
+ See Also
+ --------
+ iterate_structure, binary_dilation, binary_erosion
+
+ Notes
+ -----
+ `generate_binary_structure` can only create structuring elements with
+ dimensions equal to 3, i.e., minimal dimensions. For larger structuring
+ elements, that are useful e.g., for eroding large objects, one may either
+ use `iterate_structure`, or create directly custom arrays with
+ numpy functions such as `numpy.ones`.
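+
+    For instance, either of the following builds a larger element (a
+    sketch; note the two results differ, a diamond versus a square)::
+
+        from scipy import ndimage
+        import numpy as np
+
+        big = ndimage.iterate_structure(
+            ndimage.generate_binary_structure(2, 1), 2)  # 5x5 diamond
+        square = np.ones((5, 5), dtype=bool)             # 5x5 square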
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> struct = ndimage.generate_binary_structure(2, 1)
+ >>> struct
+ array([[False, True, False],
+ [ True, True, True],
+ [False, True, False]], dtype=bool)
+ >>> a = np.zeros((5,5))
+ >>> a[2, 2] = 1
+ >>> a
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
+ >>> b
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
+ array([[ 0., 0., 1., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 1., 1., 1., 1., 1.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 1., 0., 0.]])
+ >>> struct = ndimage.generate_binary_structure(2, 2)
+ >>> struct
+ array([[ True, True, True],
+ [ True, True, True],
+ [ True, True, True]], dtype=bool)
+ >>> struct = ndimage.generate_binary_structure(3, 1)
+ >>> struct # no diagonal elements
+ array([[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ [[False, True, False],
+ [ True, True, True],
+ [False, True, False]],
+ [[False, False, False],
+ [False, True, False],
+ [False, False, False]]], dtype=bool)
+
+ """
+ if connectivity < 1:
+ connectivity = 1
+ if rank < 1:
+ return numpy.array(True, dtype=bool)
+ output = numpy.fabs(numpy.indices([3] * rank) - 1)
+ output = numpy.add.reduce(output, 0)
+ return output <= connectivity
+
+
+def _binary_erosion(input, structure, iterations, mask, output,
+ border_value, origin, invert, brute_force):
+ try:
+ iterations = operator.index(iterations)
+ except TypeError as e:
+ raise TypeError('iterations parameter should be an integer') from e
+
+ input = numpy.asarray(input)
+ if numpy.iscomplexobj(input):
+ raise TypeError('Complex type not supported')
+ if structure is None:
+ structure = generate_binary_structure(input.ndim, 1)
+ else:
+ structure = numpy.asarray(structure, dtype=bool)
+ if structure.ndim != input.ndim:
+ raise RuntimeError('structure and input must have same dimensionality')
+ if not structure.flags.contiguous:
+ structure = structure.copy()
+ if numpy.prod(structure.shape, axis=0) < 1:
+ raise RuntimeError('structure must not be empty')
+ if mask is not None:
+ mask = numpy.asarray(mask)
+ if mask.shape != input.shape:
+ raise RuntimeError('mask and input must have equal sizes')
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
+ cit = _center_is_true(structure, origin)
+ if isinstance(output, numpy.ndarray):
+ if numpy.iscomplexobj(output):
+ raise TypeError('Complex output type not supported')
+ else:
+ output = bool
+ output = _ni_support._get_output(output, input)
+ temp_needed = numpy.may_share_memory(input, output)
+ if temp_needed:
+ # input and output arrays cannot share memory
+ temp = output
+ output = _ni_support._get_output(output.dtype, input)
+ if iterations == 1:
+ _nd_image.binary_erosion(input, structure, mask, output,
+ border_value, origin, invert, cit, 0)
+ elif cit and not brute_force:
+ changed, coordinate_list = _nd_image.binary_erosion(
+ input, structure, mask, output,
+ border_value, origin, invert, cit, 1)
+ structure = structure[tuple([slice(None, None, -1)] *
+ structure.ndim)]
+ for ii in range(len(origin)):
+ origin[ii] = -origin[ii]
+ if not structure.shape[ii] & 1:
+ origin[ii] -= 1
+ if mask is not None:
+ mask = numpy.asarray(mask, dtype=numpy.int8)
+ if not structure.flags.contiguous:
+ structure = structure.copy()
+ _nd_image.binary_erosion2(output, structure, mask, iterations - 1,
+ origin, invert, coordinate_list)
+ else:
+ tmp_in = numpy.empty_like(input, dtype=bool)
+ tmp_out = output
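+        # Each pass of the loop below swaps the two buffers; pre-swapping
+        # once for an even iteration count makes the final erosion land in
+        # `output`.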
+ if iterations >= 1 and not iterations & 1:
+ tmp_in, tmp_out = tmp_out, tmp_in
+ changed = _nd_image.binary_erosion(
+ input, structure, mask, tmp_out,
+ border_value, origin, invert, cit, 0)
+ ii = 1
+ while ii < iterations or (iterations < 1 and changed):
+ tmp_in, tmp_out = tmp_out, tmp_in
+ changed = _nd_image.binary_erosion(
+ tmp_in, structure, mask, tmp_out,
+ border_value, origin, invert, cit, 0)
+ ii += 1
+ if temp_needed:
+ temp[...] = output
+ output = temp
+ return output
+
+
+def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
+ border_value=0, origin=0, brute_force=False):
+ """
+ Multidimensional binary erosion with a given structuring element.
+
+ Binary erosion is a mathematical morphology operation used for image
+ processing.
+
+ Parameters
+ ----------
+ input : array_like
+ Binary image to be eroded. Non-zero (True) elements form
+ the subset to be eroded.
+ structure : array_like, optional
+ Structuring element used for the erosion. Non-zero elements are
+ considered True. If no structuring element is provided, an element
+ is generated with a square connectivity equal to one.
+ iterations : int, optional
+ The erosion is repeated `iterations` times (one, by default).
+ If `iterations` is less than 1, the erosion is repeated until the
+ result does not change anymore.
+ mask : array_like, optional
+ If a mask is given, only those elements with a True value at
+ the corresponding mask element are modified at each iteration.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ border_value : int (cast to 0 or 1), optional
+ Value at the border in the output array.
+ origin : int or tuple of ints, optional
+ Placement of the filter, by default 0.
+ brute_force : boolean, optional
+ Memory condition: if False, only the pixels whose value was changed in
+ the last iteration are tracked as candidates to be updated (eroded) in
+ the current iteration; if True all pixels are considered as candidates
+ for erosion, regardless of what happened in the previous iteration.
+ False by default.
+
+ Returns
+ -------
+ binary_erosion : ndarray of bools
+ Erosion of the input by the structuring element.
+
+ See Also
+ --------
+ grey_erosion, binary_dilation, binary_closing, binary_opening,
+ generate_binary_structure
+
+ Notes
+ -----
+ Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
+ structuring element for shrinking the shapes in an image. The binary
+ erosion of an image by a structuring element is the locus of the points
+ where a superimposition of the structuring element centered on the point
+ is entirely contained in the set of non-zero elements of the image.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[1:6, 2:5] = 1
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.binary_erosion(a).astype(a.dtype)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> # Erosion removes objects smaller than the structure
+ >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+
+ """
+ return _binary_erosion(input, structure, iterations, mask,
+ output, border_value, origin, 0, brute_force)
+
+
+def binary_dilation(input, structure=None, iterations=1, mask=None,
+ output=None, border_value=0, origin=0,
+ brute_force=False):
+ """
+ Multidimensional binary dilation with the given structuring element.
+
+ Parameters
+ ----------
+ input : array_like
+ Binary array_like to be dilated. Non-zero (True) elements form
+ the subset to be dilated.
+ structure : array_like, optional
+ Structuring element used for the dilation. Non-zero elements are
+ considered True. If no structuring element is provided an element
+ is generated with a square connectivity equal to one.
+ iterations : int, optional
+ The dilation is repeated `iterations` times (one, by default).
+ If `iterations` is less than 1, the dilation is repeated until the
+ result does not change anymore. Only an integer number of iterations
+ is accepted.
+ mask : array_like, optional
+ If a mask is given, only those elements with a True value at
+ the corresponding mask element are modified at each iteration.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ border_value : int (cast to 0 or 1), optional
+ Value at the border in the output array.
+ origin : int or tuple of ints, optional
+ Placement of the filter, by default 0.
+ brute_force : boolean, optional
+ Memory condition: if False, only the pixels whose value was changed in
+ the last iteration are tracked as candidates to be updated (dilated)
+ in the current iteration; if True all pixels are considered as
+ candidates for dilation, regardless of what happened in the previous
+ iteration. False by default.
+
+ Returns
+ -------
+ binary_dilation : ndarray of bools
+ Dilation of the input by the structuring element.
+
+ See Also
+ --------
+ grey_dilation, binary_erosion, binary_closing, binary_opening,
+ generate_binary_structure
+
+ Notes
+ -----
+ Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
+ structuring element for expanding the shapes in an image. The binary
+ dilation of an image by a structuring element is the locus of the points
+ covered by the structuring element, when its center lies within the
+ non-zero points of the image.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((5, 5))
+ >>> a[2, 2] = 1
+ >>> a
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> ndimage.binary_dilation(a)
+ array([[False, False, False, False, False],
+ [False, False, True, False, False],
+ [False, True, True, True, False],
+ [False, False, True, False, False],
+ [False, False, False, False, False]], dtype=bool)
+ >>> ndimage.binary_dilation(a).astype(a.dtype)
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> # 3x3 structuring element with connectivity 1, used by default
+ >>> struct1 = ndimage.generate_binary_structure(2, 1)
+ >>> struct1
+ array([[False, True, False],
+ [ True, True, True],
+ [False, True, False]], dtype=bool)
+ >>> # 3x3 structuring element with connectivity 2
+ >>> struct2 = ndimage.generate_binary_structure(2, 2)
+ >>> struct2
+ array([[ True, True, True],
+ [ True, True, True],
+ [ True, True, True]], dtype=bool)
+ >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 1., 0., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
+ array([[ 0., 0., 0., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 0., 0., 0.]])
+ >>> ndimage.binary_dilation(a, structure=struct1,\\
+ ... iterations=2).astype(a.dtype)
+ array([[ 0., 0., 1., 0., 0.],
+ [ 0., 1., 1., 1., 0.],
+ [ 1., 1., 1., 1., 1.],
+ [ 0., 1., 1., 1., 0.],
+ [ 0., 0., 1., 0., 0.]])
+
+ """
+ input = numpy.asarray(input)
+ if structure is None:
+ structure = generate_binary_structure(input.ndim, 1)
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
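+ # Dilation is computed through the erosion routine with invert=1;
+ # the structuring element is mirrored and the origin adjusted so
+ # that the two operations remain dual.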
+ structure = numpy.asarray(structure)
+ structure = structure[tuple([slice(None, None, -1)] *
+ structure.ndim)]
+ for ii in range(len(origin)):
+ origin[ii] = -origin[ii]
+ if not structure.shape[ii] & 1:
+ origin[ii] -= 1
+
+ return _binary_erosion(input, structure, iterations, mask,
+ output, border_value, origin, 1, brute_force)
+
+
+def binary_opening(input, structure=None, iterations=1, output=None,
+ origin=0, mask=None, border_value=0, brute_force=False):
+ """
+ Multidimensional binary opening with the given structuring element.
+
+ The *opening* of an input image by a structuring element is the
+ *dilation* of the *erosion* of the image by the structuring element.
+
+ Parameters
+ ----------
+ input : array_like
+ Binary array_like to be opened. Non-zero (True) elements form
+ the subset to be opened.
+ structure : array_like, optional
+ Structuring element used for the opening. Non-zero elements are
+ considered True. If no structuring element is provided an element
+ is generated with a square connectivity equal to one (i.e., only
+ nearest neighbors are connected to the center, diagonally-connected
+ elements are not considered neighbors).
+ iterations : int, optional
+ The erosion step of the opening, then the dilation step, are each
+ repeated `iterations` times (one, by default). If `iterations` is
+ less than 1, each operation is repeated until the result does
+ not change anymore. Only an integer number of iterations is accepted.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ origin : int or tuple of ints, optional
+ Placement of the filter, by default 0.
+ mask : array_like, optional
+ If a mask is given, only those elements with a True value at
+ the corresponding mask element are modified at each iteration.
+
+ .. versionadded:: 1.1.0
+ border_value : int (cast to 0 or 1), optional
+ Value at the border in the output array.
+
+ .. versionadded:: 1.1.0
+ brute_force : boolean, optional
+ Memory condition: if False, only the pixels whose value was changed in
+ the last iteration are tracked as candidates to be updated in the
+ current iteration; if True all pixels are considered as candidates for
+ update, regardless of what happened in the previous iteration.
+ False by default.
+
+ .. versionadded:: 1.1.0
+
+ Returns
+ -------
+ binary_opening : ndarray of bools
+ Opening of the input by the structuring element.
+
+ See Also
+ --------
+ grey_opening, binary_closing, binary_erosion, binary_dilation,
+ generate_binary_structure
+
+ Notes
+ -----
+ *Opening* [1]_ is a mathematical morphology operation [2]_ that
+ consists of an erosion followed by a dilation of the input with the
+ same structuring element. Opening therefore removes objects smaller
+ than the structuring element.
+
+ Together with *closing* (`binary_closing`), opening can be used for
+ noise removal.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((5,5), dtype=int)
+ >>> a[1:4, 1:4] = 1; a[4, 4] = 1
+ >>> a
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 1]])
+ >>> # Opening removes small objects
+ >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+ >>> # Opening can also smooth corners
+ >>> ndimage.binary_opening(a).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]])
+ >>> # Opening is the dilation of the erosion of the input
+ >>> ndimage.binary_erosion(a).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]])
+ >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]])
+
+ """
+ input = numpy.asarray(input)
+ if structure is None:
+ rank = input.ndim
+ structure = generate_binary_structure(rank, 1)
+
+ tmp = binary_erosion(input, structure, iterations, mask, None,
+ border_value, origin, brute_force)
+ return binary_dilation(tmp, structure, iterations, mask, output,
+ border_value, origin, brute_force)
+
+
+def binary_closing(input, structure=None, iterations=1, output=None,
+ origin=0, mask=None, border_value=0, brute_force=False):
+ """
+ Multidimensional binary closing with the given structuring element.
+
+ The *closing* of an input image by a structuring element is the
+ *erosion* of the *dilation* of the image by the structuring element.
+
+ Parameters
+ ----------
+ input : array_like
+ Binary array_like to be closed. Non-zero (True) elements form
+ the subset to be closed.
+ structure : array_like, optional
+ Structuring element used for the closing. Non-zero elements are
+ considered True. If no structuring element is provided an element
+ is generated with a square connectivity equal to one (i.e., only
+ nearest neighbors are connected to the center, diagonally-connected
+ elements are not considered neighbors).
+ iterations : int, optional
+ The dilation step of the closing, then the erosion step, are each
+ repeated `iterations` times (one, by default). If `iterations` is
+ less than 1, each operation is repeated until the result does
+ not change anymore. Only an integer number of iterations is accepted.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ origin : int or tuple of ints, optional
+ Placement of the filter, by default 0.
+ mask : array_like, optional
+ If a mask is given, only those elements with a True value at
+ the corresponding mask element are modified at each iteration.
+
+ .. versionadded:: 1.1.0
+ border_value : int (cast to 0 or 1), optional
+ Value at the border in the output array.
+
+ .. versionadded:: 1.1.0
+ brute_force : boolean, optional
+ Memory condition: if False, only the pixels whose value was changed in
+ the last iteration are tracked as candidates to be updated in the
+ current iteration; if True all pixels are considered as candidates for
+ update, regardless of what happened in the previous iteration.
+ False by default.
+
+ .. versionadded:: 1.1.0
+
+ Returns
+ -------
+ binary_closing : ndarray of bools
+ Closing of the input by the structuring element.
+
+ See Also
+ --------
+ grey_closing, binary_opening, binary_dilation, binary_erosion,
+ generate_binary_structure
+
+ Notes
+ -----
+ *Closing* [1]_ is a mathematical morphology operation [2]_ that
+ consists of a dilation followed by an erosion of the input with the
+ same structuring element. Closing therefore fills holes smaller than
+ the structuring element.
+
+ Together with *opening* (`binary_opening`), closing can be used for
+ noise removal.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((5,5), dtype=int)
+ >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
+ >>> a
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+ >>> # Closing removes small holes
+ >>> ndimage.binary_closing(a).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+ >>> # Closing is the erosion of the dilation of the input
+ >>> ndimage.binary_dilation(a).astype(int)
+ array([[0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 0]])
+ >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> # In addition to removing holes, closing can also
+ >>> # coarsen boundaries with fine hollows.
+ >>> ndimage.binary_closing(a).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+
+ """
+ input = numpy.asarray(input)
+ if structure is None:
+ rank = input.ndim
+ structure = generate_binary_structure(rank, 1)
+
+ tmp = binary_dilation(input, structure, iterations, mask, None,
+ border_value, origin, brute_force)
+ return binary_erosion(tmp, structure, iterations, mask, output,
+ border_value, origin, brute_force)
+
+
+def binary_hit_or_miss(input, structure1=None, structure2=None,
+ output=None, origin1=0, origin2=None):
+ """
+ Multidimensional binary hit-or-miss transform.
+
+ The hit-or-miss transform finds the locations of a given pattern
+ inside the input image.
+
+ Parameters
+ ----------
+ input : array_like (cast to booleans)
+ Binary image where a pattern is to be detected.
+ structure1 : array_like (cast to booleans), optional
+ Part of the structuring element to be fitted to the foreground
+ (non-zero elements) of `input`. If no value is provided, a
+ structure of square connectivity 1 is chosen.
+ structure2 : array_like (cast to booleans), optional
+ Second part of the structuring element that has to miss completely
+ the foreground. If no value is provided, the complementary of
+ `structure1` is taken.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ origin1 : int or tuple of ints, optional
+ Placement of the first part of the structuring element `structure1`,
+ by default 0 for a centered structure.
+ origin2 : int or tuple of ints, optional
+ Placement of the second part of the structuring element `structure2`,
+ by default 0 for a centered structure. If a value is provided for
+ `origin1` and not for `origin2`, then `origin2` is set to `origin1`.
+
+ Returns
+ -------
+ binary_hit_or_miss : ndarray
+ Hit-or-miss transform of `input` with the given structuring
+ element (`structure1`, `structure2`).
+
+ See Also
+ --------
+ binary_erosion
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
+ >>> structure1
+ array([[1, 0, 0],
+ [0, 1, 1],
+ [0, 1, 1]])
+ >>> # Find the matches of structure1 in the array a
+ >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> # Change the origin of the filter
+ >>> # origin1=1 is equivalent to origin1=(1,1) here
+ >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
+ ... origin1=1).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+
+ """
+ input = numpy.asarray(input)
+ if structure1 is None:
+ structure1 = generate_binary_structure(input.ndim, 1)
+ if structure2 is None:
+ structure2 = numpy.logical_not(structure1)
+ origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
+ if origin2 is None:
+ origin2 = origin1
+ else:
+ origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
+
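+ # Hit-or-miss: keep the points where structure1 fits the foreground
+ # (tmp1) and structure2 fits the background. The second erosion runs
+ # with invert=1, so its result is negated before the final AND.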
+ tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
+ 0, False)
+ inplace = isinstance(output, numpy.ndarray)
+ result = _binary_erosion(input, structure2, 1, None, output, 0,
+ origin2, 1, False)
+ if inplace:
+ numpy.logical_not(output, output)
+ numpy.logical_and(tmp1, output, output)
+ else:
+ numpy.logical_not(result, result)
+ return numpy.logical_and(tmp1, result)
+
+
+def binary_propagation(input, structure=None, mask=None,
+ output=None, border_value=0, origin=0):
+ """
+ Multidimensional binary propagation with the given structuring element.
+
+ Parameters
+ ----------
+ input : array_like
+ Binary image to be propagated inside `mask`.
+ structure : array_like, optional
+ Structuring element used in the successive dilations. The output
+ may depend on the structuring element, especially if `mask` has
+ several connected components. If no structuring element is
+ provided, an element is generated with a square connectivity equal
+ to one.
+ mask : array_like, optional
+ Binary mask defining the region into which `input` is allowed to
+ propagate.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ border_value : int (cast to 0 or 1), optional
+ Value at the border in the output array.
+ origin : int or tuple of ints, optional
+ Placement of the filter, by default 0.
+
+ Returns
+ -------
+ binary_propagation : ndarray
+ Binary propagation of `input` inside `mask`.
+
+ Notes
+ -----
+ This function is functionally equivalent to calling `binary_dilation`
+ with `iterations` less than one: the dilation is repeated until the
+ result no longer changes.
+
+ The succession of an erosion and propagation inside the original image
+ can be used instead of an *opening* for deleting small objects while
+ keeping the contours of larger objects untouched.
+
+ References
+ ----------
+ .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
+ .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
+ image processing", 1998
+ ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> input = np.zeros((8, 8), dtype=int)
+ >>> input[2, 2] = 1
+ >>> mask = np.zeros((8, 8), dtype=int)
+ >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
+ >>> input
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]])
+ >>> mask
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 0, 0, 1, 1]])
+ >>> ndimage.binary_propagation(input, mask=mask).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.binary_propagation(input, mask=mask,\\
+ ... structure=np.ones((3,3))).astype(int)
+ array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]])
+
+ >>> # Comparison between opening and erosion+propagation
+ >>> a = np.zeros((6,6), dtype=int)
+ >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
+ >>> a
+ array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 1]])
+ >>> ndimage.binary_opening(a).astype(int)
+ array([[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> b = ndimage.binary_erosion(a)
+ >>> b.astype(int)
+ array([[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+ >>> ndimage.binary_propagation(b, mask=a).astype(int)
+ array([[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
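+ >>> # binary_propagation is equivalent to a binary_dilation repeated
+ >>> # until stability (iterations=-1); a quick check:
+ >>> c = ndimage.binary_dilation(b, mask=a, iterations=-1)
+ >>> np.array_equal(c, ndimage.binary_propagation(b, mask=a))
+ True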
+
+ """
+ return binary_dilation(input, structure, -1, mask, output,
+ border_value, origin)
+
+
+def binary_fill_holes(input, structure=None, output=None, origin=0):
+ """
+ Fill the holes in binary objects.
+
+ Parameters
+ ----------
+ input : array_like
+ N-D binary array with holes to be filled
+ structure : array_like, optional
+ Structuring element used in the computation; large-size elements
+ make computations faster but may miss holes separated from the
+ background by thin regions. The default element (with a square
+ connectivity equal to one) yields the intuitive result where all
+ holes in the input have been filled.
+ output : ndarray, optional
+ Array of the same shape as input, into which the output is placed.
+ By default, a new array is created.
+ origin : int, tuple of ints, optional
+ Position of the structuring element.
+
+ Returns
+ -------
+ out : ndarray
+ Transformation of the initial image `input` where holes have been
+ filled.
+
+ See Also
+ --------
+ binary_dilation, binary_propagation, label
+
+ Notes
+ -----
+ The algorithm used in this function consists in invading the
+ complement of the shapes in `input` from the outer boundary of the image,
+ using binary dilations. Holes are not connected to the boundary and are
+ therefore not invaded. The result is the complementary subset of the
+ invaded region.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((5, 5), dtype=int)
+ >>> a[1:4, 1:4] = 1
+ >>> a[2,2] = 0
+ >>> a
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+ >>> ndimage.binary_fill_holes(a).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+ >>> # Too big structuring element
+ >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
+ array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]])
+
+ """
+ mask = numpy.logical_not(input)
+ tmp = numpy.zeros(mask.shape, bool)
+ inplace = isinstance(output, numpy.ndarray)
+ if inplace:
+ binary_dilation(tmp, structure, -1, mask, output, 1, origin)
+ numpy.logical_not(output, output)
+ else:
+ output = binary_dilation(tmp, structure, -1, mask, None, 1,
+ origin)
+ numpy.logical_not(output, output)
+ return output
+
+
+def grey_erosion(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Calculate a grayscale erosion, using either a structuring element
+ or a footprint corresponding to a flat structuring element.
+
+ Grayscale erosion is a mathematical morphology operation. For the
+ simple case of a full and flat structuring element, it can be viewed
+ as a minimum filter over a sliding window.
+
+ Parameters
+ ----------
+ input : array_like
+ Array over which the grayscale erosion is to be computed.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the grayscale
+ erosion. Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the grayscale erosion. Non-zero values give the set of
+ neighbors of the center over which the minimum is chosen.
+ structure : array of ints, optional
+ Structuring element used for the grayscale erosion. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the erosion may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ output : ndarray
+ Grayscale erosion of `input`.
+
+ See Also
+ --------
+ binary_erosion, grey_dilation, grey_opening, grey_closing
+ generate_binary_structure, minimum_filter
+
+ Notes
+ -----
+ The grayscale erosion of an image input by a structuring element s defined
+ over a domain E is given by:
+
+ (input ⊖ s)(x) = min {input(y) - s(y - x), for (y - x) in E}
+
+ In particular, for structuring elements defined as
+ s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
+ input image inside a sliding window defined by E.
+
+ Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[1:6, 1:6] = 3
+ >>> a[4,4] = 2; a[2,3] = 1
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 3, 3, 3, 3, 3, 0],
+ [0, 3, 3, 1, 3, 3, 0],
+ [0, 3, 3, 3, 3, 3, 0],
+ [0, 3, 3, 3, 2, 3, 0],
+ [0, 3, 3, 3, 3, 3, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.grey_erosion(a, size=(3,3))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 3, 2, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> footprint = ndimage.generate_binary_structure(2, 1)
+ >>> footprint
+ array([[False, True, False],
+ [ True, True, True],
+ [False, True, False]], dtype=bool)
+ >>> # Diagonally-connected elements are not considered neighbors
+ >>> ndimage.grey_erosion(a, footprint=footprint)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 3, 1, 2, 0, 0],
+ [0, 0, 3, 2, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
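+ >>> # For a flat, full structuring element, grayscale erosion acts as
+ >>> # a minimum filter over the same window; a quick check:
+ >>> np.array_equal(ndimage.grey_erosion(a, size=(3,3)),
+ ...                ndimage.minimum_filter(a, size=3))
+ True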
+
+ """
+ if size is None and footprint is None and structure is None:
+ raise ValueError("size, footprint, or structure must be specified")
+
+ return _filters._min_or_max_filter(input, size, footprint, structure,
+ output, mode, cval, origin, 1)
+
+
+def grey_dilation(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Calculate a grayscale dilation, using either a structuring element
+ or a footprint corresponding to a flat structuring element.
+
+ Grayscale dilation is a mathematical morphology operation. For the
+ simple case of a full and flat structuring element, it can be viewed
+ as a maximum filter over a sliding window.
+
+ Parameters
+ ----------
+ input : array_like
+ Array over which the grayscale dilation is to be computed.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the grayscale
+ dilation. Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the grayscale dilation. Non-zero values give the set of
+ neighbors of the center over which the maximum is chosen.
+ structure : array of ints, optional
+ Structuring element used for the grayscale dilation. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the dilation may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ grey_dilation : ndarray
+ Grayscale dilation of `input`.
+
+ See Also
+ --------
+ binary_dilation, grey_erosion, grey_closing, grey_opening
+ generate_binary_structure, maximum_filter
+
+ Notes
+ -----
+ The grayscale dilation of an image input by a structuring element s defined
+ over a domain E is given by:
+
+ (input ⊕ s)(x) = max {input(y) + s(x - y), for (x - y) in E}
+
+ In particular, for structuring elements defined as
+ s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
+ input image inside a sliding window defined by E.
+
+ Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
+ .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[2:5, 2:5] = 1
+ >>> a[4,4] = 2; a[2,3] = 3
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 3, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.grey_dilation(a, size=(3,3))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 3, 3, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 3, 3, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> s = ndimage.generate_binary_structure(2,1)
+ >>> s
+ array([[False, True, False],
+ [ True, True, True],
+ [False, True, False]], dtype=bool)
+ >>> ndimage.grey_dilation(a, footprint=s)
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 3, 1, 0, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 1, 3, 2, 1, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 0, 1, 1, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
+ array([[1, 1, 1, 1, 1, 1, 1],
+ [1, 2, 4, 4, 4, 2, 1],
+ [1, 2, 4, 4, 4, 2, 1],
+ [1, 2, 4, 4, 4, 3, 1],
+ [1, 2, 2, 3, 3, 3, 1],
+ [1, 2, 2, 3, 3, 3, 1],
+ [1, 1, 1, 1, 1, 1, 1]])
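+ >>> # For a flat, full structuring element, grayscale dilation acts as
+ >>> # a maximum filter over the same window; a quick check:
+ >>> np.array_equal(ndimage.grey_dilation(a, size=(3,3)),
+ ...                ndimage.maximum_filter(a, size=3))
+ True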
+
+ """
+ if size is None and footprint is None and structure is None:
+ raise ValueError("size, footprint, or structure must be specified")
+ if structure is not None:
+ structure = numpy.asarray(structure)
+ structure = structure[tuple([slice(None, None, -1)] *
+ structure.ndim)]
+ if footprint is not None:
+ footprint = numpy.asarray(footprint)
+ footprint = footprint[tuple([slice(None, None, -1)] *
+ footprint.ndim)]
+
+ input = numpy.asarray(input)
+ origin = _ni_support._normalize_sequence(origin, input.ndim)
+ for ii in range(len(origin)):
+ origin[ii] = -origin[ii]
+ if footprint is not None:
+ sz = footprint.shape[ii]
+ elif structure is not None:
+ sz = structure.shape[ii]
+ elif numpy.isscalar(size):
+ sz = size
+ else:
+ sz = size[ii]
+ if not sz & 1:
+ origin[ii] -= 1
+
+ return _filters._min_or_max_filter(input, size, footprint, structure,
+ output, mode, cval, origin, 0)
+
+
+def grey_opening(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Multidimensional grayscale opening.
+
+ A grayscale opening consists of a grayscale erosion followed by a
+ grayscale dilation.
+
+ Parameters
+ ----------
+ input : array_like
+ Array over which the grayscale opening is to be computed.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the grayscale
+ opening. Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the grayscale opening.
+ structure : array of ints, optional
+ Structuring element used for the grayscale opening. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the opening may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ grey_opening : ndarray
+ Result of the grayscale opening of `input` with `structure`.
+
+ See Also
+ --------
+ binary_opening, grey_dilation, grey_erosion, grey_closing
+ generate_binary_structure
+
+ Notes
+ -----
+ The action of a grayscale opening with a flat structuring element amounts
+ to smoothing high local maxima, whereas binary opening erases small objects.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.arange(36).reshape((6,6))
+ >>> a[3, 3] = 50
+ >>> a
+ array([[ 0, 1, 2, 3, 4, 5],
+ [ 6, 7, 8, 9, 10, 11],
+ [12, 13, 14, 15, 16, 17],
+ [18, 19, 20, 50, 22, 23],
+ [24, 25, 26, 27, 28, 29],
+ [30, 31, 32, 33, 34, 35]])
+ >>> ndimage.grey_opening(a, size=(3,3))
+ array([[ 0, 1, 2, 3, 4, 4],
+ [ 6, 7, 8, 9, 10, 10],
+ [12, 13, 14, 15, 16, 16],
+ [18, 19, 20, 22, 22, 22],
+ [24, 25, 26, 27, 28, 28],
+ [24, 25, 26, 27, 28, 28]])
+ >>> # Note that the local maximum a[3,3] has disappeared
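+ >>> # The opening is, by definition, the dilation of the erosion;
+ >>> # a quick check of the equivalence:
+ >>> eroded = ndimage.grey_erosion(a, size=(3,3))
+ >>> np.array_equal(ndimage.grey_opening(a, size=(3,3)),
+ ...                ndimage.grey_dilation(eroded, size=(3,3)))
+ True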
+
+ """
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=2)
+ tmp = grey_erosion(input, size, footprint, structure, None, mode,
+ cval, origin)
+ return grey_dilation(tmp, size, footprint, structure, output, mode,
+ cval, origin)
+
+
+def grey_closing(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Multidimensional grayscale closing.
+
+ A grayscale closing consists of a grayscale dilation followed by a
+ grayscale erosion.
+
+ Parameters
+ ----------
+ input : array_like
+ Array over which the grayscale closing is to be computed.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the grayscale
+ closing. Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the grayscale closing.
+ structure : array of ints, optional
+ Structuring element used for the grayscale closing. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the closing may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ grey_closing : ndarray
+ Result of the grayscale closing of `input` with `structure`.
+
+ See Also
+ --------
+ binary_closing, grey_dilation, grey_erosion, grey_opening,
+ generate_binary_structure
+
+ Notes
+ -----
+ The action of a grayscale closing with a flat structuring element amounts
+ to smoothing deep local minima, whereas binary closing fills small holes.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.arange(36).reshape((6,6))
+ >>> a[3,3] = 0
+ >>> a
+ array([[ 0, 1, 2, 3, 4, 5],
+ [ 6, 7, 8, 9, 10, 11],
+ [12, 13, 14, 15, 16, 17],
+ [18, 19, 20, 0, 22, 23],
+ [24, 25, 26, 27, 28, 29],
+ [30, 31, 32, 33, 34, 35]])
+ >>> ndimage.grey_closing(a, size=(3,3))
+ array([[ 7, 7, 8, 9, 10, 11],
+ [ 7, 7, 8, 9, 10, 11],
+ [13, 13, 14, 15, 16, 17],
+ [19, 19, 20, 20, 22, 23],
+ [25, 25, 26, 27, 28, 29],
+ [31, 31, 32, 33, 34, 35]])
+ >>> # Note that the local minimum a[3,3] has disappeared
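+ >>> # The closing is, by definition, the erosion of the dilation;
+ >>> # a quick check of the equivalence:
+ >>> dilated = ndimage.grey_dilation(a, size=(3,3))
+ >>> np.array_equal(ndimage.grey_closing(a, size=(3,3)),
+ ...                ndimage.grey_erosion(dilated, size=(3,3)))
+ True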
+
+ """
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=2)
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
+ cval, origin)
+ return grey_erosion(tmp, size, footprint, structure, output, mode,
+ cval, origin)
+
+
+def morphological_gradient(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Multidimensional morphological gradient.
+
+ The morphological gradient is calculated as the difference between a
+ dilation and an erosion of the input with a given structuring element.
+
+ Parameters
+ ----------
+ input : array_like
+ Array over which to compute the morphological gradient.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the mathematical
+ morphology operations. Optional if `footprint` or `structure` is
+ provided. A larger `size` yields a more blurred gradient.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the morphology operations. Larger footprints
+ give a more blurred morphological gradient.
+ structure : array of ints, optional
+ Structuring element used for the morphology operations.
+ `structure` may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the morphological gradient
+ may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ morphological_gradient : ndarray
+ Morphological gradient of `input`.
+
+ See Also
+ --------
+ grey_dilation, grey_erosion, gaussian_gradient_magnitude
+
+ Notes
+ -----
+ For a flat structuring element, the morphological gradient
+ computed at a given point corresponds to the maximal difference
+ between elements of the input among the elements covered by the
+ structuring element centered on the point.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[2:5, 2:5] = 1
+ >>> ndimage.morphological_gradient(a, size=(3,3))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> # The morphological gradient is computed as the difference
+ >>> # between a dilation and an erosion
+ >>> ndimage.grey_dilation(a, size=(3,3)) -\\
+ ... ndimage.grey_erosion(a, size=(3,3))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> a = np.zeros((7,7), dtype=int)
+ >>> a[2:5, 2:5] = 1
+ >>> a[4,4] = 2; a[2,3] = 3
+ >>> a
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 3, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+ >>> ndimage.morphological_gradient(a, size=(3,3))
+ array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 3, 3, 1, 0],
+ [0, 1, 3, 2, 3, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 1, 1, 2, 2, 2, 0],
+ [0, 0, 0, 0, 0, 0, 0]])
+
+ """
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
+ cval, origin)
+ if isinstance(output, numpy.ndarray):
+ grey_erosion(input, size, footprint, structure, output, mode,
+ cval, origin)
+ return numpy.subtract(tmp, output, output)
+ else:
+ return (tmp - grey_erosion(input, size, footprint, structure,
+ None, mode, cval, origin))
+
+
+def morphological_laplace(input, size=None, footprint=None,
+ structure=None, output=None,
+ mode="reflect", cval=0.0, origin=0):
+ """
+ Multidimensional morphological laplace.
+
+ Parameters
+ ----------
+ input : array_like
+ Input.
+ size : int or sequence of ints, optional
+ See `structure`.
+ footprint : bool or ndarray, optional
+ See `structure`.
+ structure : array of ints, optional
+ Either `size`, `footprint`, or `structure` must be provided.
+ output : ndarray, optional
+ An output array can optionally be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The mode parameter determines how the array borders are handled.
+ For 'constant' mode, values beyond borders are set to be `cval`.
+ Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if mode is 'constant'.
+ Default is 0.0
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ morphological_laplace : ndarray
+ The morphological Laplace of `input`.
+
+ """
+ tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
+ cval, origin)
+ if isinstance(output, numpy.ndarray):
+ grey_erosion(input, size, footprint, structure, output, mode,
+ cval, origin)
+ numpy.add(tmp1, output, output)
+ numpy.subtract(output, input, output)
+ return numpy.subtract(output, input, output)
+ else:
+ tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
+ cval, origin)
+ numpy.add(tmp1, tmp2, tmp2)
+ numpy.subtract(tmp2, input, tmp2)
+ numpy.subtract(tmp2, input, tmp2)
+ return tmp2
+
+
+def white_tophat(input, size=None, footprint=None, structure=None,
+ output=None, mode="reflect", cval=0.0, origin=0):
+ """
+ Multidimensional white tophat filter.
+
+ Parameters
+ ----------
+ input : array_like
+ Input.
+ size : tuple of ints
+ Shape of a flat and full structuring element used for the filter.
+ Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of elements of a flat structuring element
+ used for the white tophat filter.
+ structure : array of ints, optional
+ Structuring element used for the filter. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the filter may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'.
+ Default is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ output : ndarray
+ Result of the filter of `input` with `structure`.
+
+ See Also
+ --------
+ black_tophat
+
+ Examples
+ --------
+ Subtract gray background from a bright peak.
+
+ >>> from scipy.ndimage import generate_binary_structure, white_tophat
+ >>> import numpy as np
+ >>> square = generate_binary_structure(rank=2, connectivity=3)
+ >>> bright_on_gray = np.array([[2, 3, 3, 3, 2],
+ ... [3, 4, 5, 4, 3],
+ ... [3, 5, 9, 5, 3],
+ ... [3, 4, 5, 4, 3],
+ ... [2, 3, 3, 3, 2]])
+ >>> white_tophat(input=bright_on_gray, structure=square)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 5, 1, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]])
+
+ """
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=2)
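+ # White tophat: the input minus its grayscale opening (an erosion
+ # followed by a dilation with the same structuring element).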
+ tmp = grey_erosion(input, size, footprint, structure, None, mode,
+ cval, origin)
+ tmp = grey_dilation(tmp, size, footprint, structure, output, mode,
+ cval, origin)
+ if tmp is None:
+ tmp = output
+
+ if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
+ numpy.bitwise_xor(input, tmp, out=tmp)
+ else:
+ numpy.subtract(input, tmp, out=tmp)
+ return tmp
+
+
+def black_tophat(input, size=None, footprint=None,
+ structure=None, output=None, mode="reflect",
+ cval=0.0, origin=0):
+ """
+ Multidimensional black tophat filter.
+
+ Parameters
+ ----------
+ input : array_like
+ Input.
+ size : tuple of ints, optional
+ Shape of a flat and full structuring element used for the filter.
+ Optional if `footprint` or `structure` is provided.
+ footprint : array of ints, optional
+ Positions of non-infinite elements of a flat structuring element
+ used for the black tophat filter.
+ structure : array of ints, optional
+ Structuring element used for the filter. `structure`
+ may be a non-flat structuring element.
+ output : array, optional
+ An array used for storing the output of the filter may be provided.
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the array borders are
+ handled, where `cval` is the value when mode is equal to
+ 'constant'. Default is 'reflect'.
+ cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.
+ origin : scalar, optional
+ The `origin` parameter controls the placement of the filter.
+ Default is 0.
+
+ Returns
+ -------
+ black_tophat : ndarray
+ Result of the filter of `input` with `structure`.
+
+ See Also
+ --------
+ white_tophat, grey_opening, grey_closing
+
+ Examples
+ --------
+ Change dark peak to bright peak and subtract background.
+
+ >>> from scipy.ndimage import generate_binary_structure, black_tophat
+ >>> import numpy as np
+ >>> square = generate_binary_structure(rank=2, connectivity=3)
+ >>> dark_on_gray = np.array([[7, 6, 6, 6, 7],
+ ... [6, 5, 4, 5, 6],
+ ... [6, 4, 0, 4, 6],
+ ... [6, 5, 4, 5, 6],
+ ... [7, 6, 6, 6, 7]])
+ >>> black_tophat(input=dark_on_gray, structure=square)
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 1, 5, 1, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]])
+
+ """
+ if (size is not None) and (footprint is not None):
+ warnings.warn("ignoring size because footprint is set",
+ UserWarning, stacklevel=2)
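+ # Black tophat: the grayscale closing (a dilation followed by an
+ # erosion with the same structuring element) minus the input.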
+ tmp = grey_dilation(input, size, footprint, structure, None, mode,
+ cval, origin)
+ tmp = grey_erosion(tmp, size, footprint, structure, output, mode,
+ cval, origin)
+ if tmp is None:
+ tmp = output
+
+ if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
+ numpy.bitwise_xor(tmp, input, out=tmp)
+ else:
+ numpy.subtract(tmp, input, out=tmp)
+ return tmp
+
+
+def distance_transform_bf(input, metric="euclidean", sampling=None,
+ return_distances=True, return_indices=False,
+ distances=None, indices=None):
+ """
+ Distance transform computed by a brute-force algorithm.
+
+ This function calculates the distance transform of the `input` by
+ replacing each foreground (non-zero) element with its
+ shortest distance to the background (any zero-valued element).
+
+ In addition to the distance transform, the feature transform can
+ be calculated. In this case the index of the closest background
+ element to each foreground element is returned in a separate array.
+
+ Parameters
+ ----------
+ input : array_like
+ Input
+ metric : {'euclidean', 'taxicab', 'chessboard'}, optional
+ 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
+ The default is 'euclidean'.
+ sampling : float, or sequence of float, optional
+ This parameter is only used when `metric` is 'euclidean'.
+ Spacing of elements along each dimension. If a sequence, must be of
+ length equal to the input rank; if a single number, this is used for
+ all axes. If not specified, a grid spacing of unity is implied.
+ return_distances : bool, optional
+ Whether to calculate the distance transform.
+ Default is True.
+ return_indices : bool, optional
+ Whether to calculate the feature transform.
+ Default is False.
+ distances : ndarray, optional
+ An output array to store the calculated distance transform, instead of
+ returning it.
+ `return_distances` must be True.
+ It must be the same shape as `input`, and of type float64 if `metric`
+ is 'euclidean', uint32 otherwise.
+ indices : int32 ndarray, optional
+ An output array to store the calculated feature transform, instead of
+ returning it.
+ `return_indices` must be True.
+ Its shape must be `(input.ndim,) + input.shape`.
+
+ Returns
+ -------
+ distances : ndarray, optional
+ The calculated distance transform. Returned only when
+ `return_distances` is True and `distances` is not supplied.
+ It will have the same shape as the input array.
+ indices : int32 ndarray, optional
+ The calculated feature transform. It has an input-shaped array for each
+ dimension of the input. See distance_transform_edt documentation for an
+ example.
+ Returned only when `return_indices` is True and `indices` is not
+ supplied.
+
+ See Also
+ --------
+ distance_transform_cdt : Faster distance transform for taxicab and
+ chessboard metrics
+ distance_transform_edt : Faster distance transform for euclidean metric
+
+ Notes
+ -----
+ This function employs a slow brute force algorithm. See also the
+ function `distance_transform_cdt` for more efficient taxicab [1]_ and
+ chessboard algorithms [2]_.
+
+ References
+ ----------
+ .. [1] Taxicab distance. Wikipedia, 2023.
+ https://en.wikipedia.org/wiki/Taxicab_geometry
+ .. [2] Chessboard distance. Wikipedia, 2023.
+ https://en.wikipedia.org/wiki/Chebyshev_distance
+
+ Examples
+ --------
+ Import the necessary modules.
+
+ >>> import numpy as np
+ >>> from scipy.ndimage import distance_transform_bf
+ >>> import matplotlib.pyplot as plt
+ >>> from mpl_toolkits.axes_grid1 import ImageGrid
+
+ First, we create a toy binary image.
+
+ >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
+ ... # fill circular area with 1
+ ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
+ ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
+ ... circle_shape = np.sqrt(circle) < radius
+ ... image[circle_shape] = fillvalue
+ ... return image
+ >>> image = np.zeros((100, 100), dtype=np.uint8)
+ >>> image[35:65, 20:80] = 1
+ >>> image = add_circle(28, 65, 10, image)
+ >>> image = add_circle(37, 30, 10, image)
+ >>> image = add_circle(70, 45, 20, image)
+ >>> image = add_circle(45, 80, 10, image)
+
+ Next, we set up the figure.
+
+ >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure
+ >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3),
+ ... label_mode="1", share_all=True,
+ ... cbar_location="right", cbar_mode="each",
+ ... cbar_size="7%", cbar_pad="2%")
+ >>> for ax in grid:
+ ... ax.axis('off') # remove axes from images
+
+ The top left image is the original binary image.
+
+ >>> binary_image = grid[0].imshow(image, cmap='gray')
+ >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image)
+ >>> cbar_binary_image.set_ticks([0, 1])
+ >>> grid[0].set_title("Binary image: foreground in white")
+
+ The distance transform calculates the distance between foreground pixels
+ and the image background according to a distance metric. Available metrics
+ in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab``
+ and ``chessboard``. The top right image contains the distance transform
+ based on the ``euclidean`` metric.
+
+ >>> distance_transform_euclidean = distance_transform_bf(image)
+ >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean,
+ ... cmap='gray')
+ >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform)
+ >>> colorbar_ticks = [0, 10, 20]
+ >>> cbar_euclidean.set_ticks(colorbar_ticks)
+ >>> grid[1].set_title("Euclidean distance")
+
+ The lower left image contains the distance transform using the ``taxicab``
+ metric.
+
+ >>> distance_transform_taxicab = distance_transform_bf(image,
+ ... metric='taxicab')
+ >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab,
+ ... cmap='gray')
+ >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation)
+ >>> cbar_taxicab.set_ticks(colorbar_ticks)
+ >>> grid[2].set_title("Taxicab distance")
+
+ Finally, the lower right image contains the distance transform using the
+ ``chessboard`` metric.
+
+ >>> distance_transform_cb = distance_transform_bf(image,
+ ... metric='chessboard')
+ >>> chessboard_transformation = grid[3].imshow(distance_transform_cb,
+ ... cmap='gray')
+    >>> cbar_chessboard = grid.cbar_axes[3].colorbar(chessboard_transformation)
+    >>> cbar_chessboard.set_ticks(colorbar_ticks)
+ >>> grid[3].set_title("Chessboard distance")
+ >>> plt.show()
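+
+    When the default ``euclidean`` metric is used, a per-axis grid spacing
+    can be supplied through `sampling`. A minimal sketch (the spacing values
+    are illustrative; no figure is produced):
+
+    >>> distance_aniso = distance_transform_bf(image, sampling=[2, 1])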
+
+ """
+ ft_inplace = isinstance(indices, numpy.ndarray)
+ dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+ dt_inplace, ft_inplace, return_distances, return_indices
+ )
+
+ tmp1 = numpy.asarray(input) != 0
+ struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
+ tmp2 = binary_dilation(tmp1, struct)
+ tmp2 = numpy.logical_xor(tmp1, tmp2)
+ tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
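+    # tmp1 now encodes: 1 for foreground, -1 for background elements that
+    # border the foreground, and 0 for the remaining background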
+ metric = metric.lower()
+ if metric == 'euclidean':
+ metric = 1
+ elif metric in ['taxicab', 'cityblock', 'manhattan']:
+ metric = 2
+ elif metric == 'chessboard':
+ metric = 3
+ else:
+ raise RuntimeError('distance metric not supported')
+ if sampling is not None:
+ sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
+ sampling = numpy.asarray(sampling, dtype=numpy.float64)
+ if not sampling.flags.contiguous:
+ sampling = sampling.copy()
+ if return_indices:
+ ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
+ else:
+ ft = None
+ if return_distances:
+ if distances is None:
+ if metric == 1:
+ dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
+ else:
+ dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
+ else:
+ if distances.shape != tmp1.shape:
+ raise RuntimeError('distances array has wrong shape')
+ if metric == 1:
+ if distances.dtype.type != numpy.float64:
+ raise RuntimeError('distances array must be float64')
+ else:
+ if distances.dtype.type != numpy.uint32:
+ raise RuntimeError('distances array must be uint32')
+ dt = distances
+ else:
+ dt = None
+
+ _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
+ if return_indices:
+ if isinstance(indices, numpy.ndarray):
+ if indices.dtype.type != numpy.int32:
+ raise RuntimeError('indices array must be int32')
+ if indices.shape != (tmp1.ndim,) + tmp1.shape:
+ raise RuntimeError('indices array has wrong shape')
+ tmp2 = indices
+ else:
+ tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
+ ft = numpy.ravel(ft)
+ for ii in range(tmp2.shape[0]):
+ rtmp = numpy.ravel(tmp2[ii, ...])[ft]
+ rtmp.shape = tmp1.shape
+ tmp2[ii, ...] = rtmp
+ ft = tmp2
+
+ # construct and return the result
+ result = []
+ if return_distances and not dt_inplace:
+ result.append(dt)
+ if return_indices and not ft_inplace:
+ result.append(ft)
+
+ if len(result) == 2:
+ return tuple(result)
+ elif len(result) == 1:
+ return result[0]
+ else:
+ return None
+
+
+def distance_transform_cdt(input, metric='chessboard', return_distances=True,
+ return_indices=False, distances=None, indices=None):
+ """
+    Distance transform for chamfer-type transforms.
+
+ This function calculates the distance transform of the `input`, by
+ replacing each foreground (non-zero) element, with its
+ shortest distance to the background (any zero-valued element).
+
+ In addition to the distance transform, the feature transform can
+ be calculated. In this case the index of the closest background
+ element to each foreground element is returned in a separate array.
+
+ Parameters
+ ----------
+ input : array_like
+ Input. Values of 0 are treated as background.
+ metric : {'chessboard', 'taxicab'} or array_like, optional
+ The `metric` determines the type of chamfering that is done. If the
+ `metric` is equal to 'taxicab' a structure is generated using
+ `generate_binary_structure` with a squared distance equal to 1. If
+        the `metric` is equal to 'chessboard', a structure is generated
+        using `generate_binary_structure` with a squared distance equal to
+ the dimensionality of the array. These choices correspond to the
+ common interpretations of the 'taxicab' and the 'chessboard'
+ distance metrics in two dimensions.
+ A custom metric may be provided, in the form of a matrix where
+ each dimension has a length of three.
+ 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
+ The default is 'chessboard'.
+ return_distances : bool, optional
+ Whether to calculate the distance transform.
+ Default is True.
+ return_indices : bool, optional
+ Whether to calculate the feature transform.
+ Default is False.
+ distances : int32 ndarray, optional
+ An output array to store the calculated distance transform, instead of
+ returning it.
+ `return_distances` must be True.
+ It must be the same shape as `input`.
+ indices : int32 ndarray, optional
+ An output array to store the calculated feature transform, instead of
+ returning it.
+        `return_indices` must be True.
+ Its shape must be `(input.ndim,) + input.shape`.
+
+ Returns
+ -------
+ distances : int32 ndarray, optional
+ The calculated distance transform. Returned only when
+ `return_distances` is True, and `distances` is not supplied.
+ It will have the same shape as the input array.
+ indices : int32 ndarray, optional
+ The calculated feature transform. It has an input-shaped array for each
+ dimension of the input. See distance_transform_edt documentation for an
+ example.
+ Returned only when `return_indices` is True, and `indices` is not
+ supplied.
+
+ See Also
+ --------
+ distance_transform_edt : Fast distance transform for euclidean metric
+ distance_transform_bf : Distance transform for different metrics using
+ a slower brute force algorithm
+
+ Examples
+ --------
+ Import the necessary modules.
+
+ >>> import numpy as np
+ >>> from scipy.ndimage import distance_transform_cdt
+ >>> import matplotlib.pyplot as plt
+ >>> from mpl_toolkits.axes_grid1 import ImageGrid
+
+ First, we create a toy binary image.
+
+ >>> def add_circle(center_x, center_y, radius, image, fillvalue=1):
+ ... # fill circular area with 1
+ ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]]
+ ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2
+ ... circle_shape = np.sqrt(circle) < radius
+ ... image[circle_shape] = fillvalue
+ ... return image
+ >>> image = np.zeros((100, 100), dtype=np.uint8)
+ >>> image[35:65, 20:80] = 1
+ >>> image = add_circle(28, 65, 10, image)
+ >>> image = add_circle(37, 30, 10, image)
+ >>> image = add_circle(70, 45, 20, image)
+ >>> image = add_circle(45, 80, 10, image)
+
+ Next, we set up the figure.
+
+ >>> fig = plt.figure(figsize=(5, 15))
+ >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3),
+ ... label_mode="1", share_all=True,
+ ... cbar_location="right", cbar_mode="each",
+ ... cbar_size="7%", cbar_pad="2%")
+ >>> for ax in grid:
+ ... ax.axis('off')
+ >>> top, middle, bottom = grid
+ >>> colorbar_ticks = [0, 10, 20]
+
+ The top image contains the original binary image.
+
+ >>> binary_image = top.imshow(image, cmap='gray')
+ >>> cbar_binary_image = top.cax.colorbar(binary_image)
+ >>> cbar_binary_image.set_ticks([0, 1])
+ >>> top.set_title("Binary image: foreground in white")
+
+ The middle image contains the distance transform using the ``taxicab``
+ metric.
+
+ >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab")
+ >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray')
+ >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform)
+ >>> cbar_taxicab.set_ticks(colorbar_ticks)
+ >>> middle.set_title("Taxicab metric")
+
+ The bottom image contains the distance transform using the ``chessboard``
+ metric.
+
+ >>> distance_chessboard = distance_transform_cdt(image,
+ ... metric="chessboard")
+ >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray')
+ >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform)
+ >>> cbar_chessboard.set_ticks(colorbar_ticks)
+ >>> bottom.set_title("Chessboard metric")
+ >>> plt.tight_layout()
+ >>> plt.show()
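+
+    A custom chamfer metric can also be supplied as an array with length 3
+    along every dimension. As a sketch, the boolean structure below
+    reproduces the 2-D ``taxicab`` neighborhood (illustrative only; no
+    figure is produced):
+
+    >>> custom = np.array([[0, 1, 0],
+    ...                    [1, 1, 1],
+    ...                    [0, 1, 0]], dtype=bool)
+    >>> distance_custom = distance_transform_cdt(image, metric=custom)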
+
+ """
+ ft_inplace = isinstance(indices, numpy.ndarray)
+ dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+ dt_inplace, ft_inplace, return_distances, return_indices
+ )
+ input = numpy.asarray(input)
+ if isinstance(metric, str):
+ if metric in ['taxicab', 'cityblock', 'manhattan']:
+ rank = input.ndim
+ metric = generate_binary_structure(rank, 1)
+ elif metric == 'chessboard':
+ rank = input.ndim
+ metric = generate_binary_structure(rank, rank)
+ else:
+ raise ValueError('invalid metric provided')
+ else:
+ try:
+ metric = numpy.asarray(metric)
+ except Exception as e:
+ raise ValueError('invalid metric provided') from e
+ for s in metric.shape:
+ if s != 3:
+ raise ValueError('metric sizes must be equal to 3')
+
+ if not metric.flags.contiguous:
+ metric = metric.copy()
+ if dt_inplace:
+ if distances.dtype.type != numpy.int32:
+ raise ValueError('distances must be of int32 type')
+ if distances.shape != input.shape:
+ raise ValueError('distances has wrong shape')
+ dt = distances
+ dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
+ else:
+ dt = numpy.where(input, -1, 0).astype(numpy.int32)
+
+ rank = dt.ndim
+ if return_indices:
+ sz = numpy.prod(dt.shape, axis=0)
+ ft = numpy.arange(sz, dtype=numpy.int32)
+ ft.shape = dt.shape
+ else:
+ ft = None
+
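+    # chamfer propagation: one forward pass over the array, then one
+    # backward pass over the reversed view, so distances can flow in from
+    # every direction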
+ _nd_image.distance_transform_op(metric, dt, ft)
+ dt = dt[tuple([slice(None, None, -1)] * rank)]
+ if return_indices:
+ ft = ft[tuple([slice(None, None, -1)] * rank)]
+ _nd_image.distance_transform_op(metric, dt, ft)
+ dt = dt[tuple([slice(None, None, -1)] * rank)]
+ if return_indices:
+ ft = ft[tuple([slice(None, None, -1)] * rank)]
+ ft = numpy.ravel(ft)
+ if ft_inplace:
+ if indices.dtype.type != numpy.int32:
+ raise ValueError('indices array must be int32')
+ if indices.shape != (dt.ndim,) + dt.shape:
+ raise ValueError('indices array has wrong shape')
+ tmp = indices
+ else:
+ tmp = numpy.indices(dt.shape, dtype=numpy.int32)
+ for ii in range(tmp.shape[0]):
+ rtmp = numpy.ravel(tmp[ii, ...])[ft]
+ rtmp.shape = dt.shape
+ tmp[ii, ...] = rtmp
+ ft = tmp
+
+ # construct and return the result
+ result = []
+ if return_distances and not dt_inplace:
+ result.append(dt)
+ if return_indices and not ft_inplace:
+ result.append(ft)
+
+ if len(result) == 2:
+ return tuple(result)
+ elif len(result) == 1:
+ return result[0]
+ else:
+ return None
+
+
+def distance_transform_edt(input, sampling=None, return_distances=True,
+ return_indices=False, distances=None, indices=None):
+ """
+ Exact Euclidean distance transform.
+
+ This function calculates the distance transform of the `input`, by
+ replacing each foreground (non-zero) element, with its
+ shortest distance to the background (any zero-valued element).
+
+ In addition to the distance transform, the feature transform can
+ be calculated. In this case the index of the closest background
+ element to each foreground element is returned in a separate array.
+
+ Parameters
+ ----------
+ input : array_like
+ Input data to transform. Can be any type but will be converted
+ into binary: 1 wherever input equates to True, 0 elsewhere.
+ sampling : float, or sequence of float, optional
+ Spacing of elements along each dimension. If a sequence, must be of
+ length equal to the input rank; if a single number, this is used for
+ all axes. If not specified, a grid spacing of unity is implied.
+ return_distances : bool, optional
+ Whether to calculate the distance transform.
+ Default is True.
+ return_indices : bool, optional
+ Whether to calculate the feature transform.
+ Default is False.
+ distances : float64 ndarray, optional
+ An output array to store the calculated distance transform, instead of
+ returning it.
+ `return_distances` must be True.
+ It must be the same shape as `input`.
+ indices : int32 ndarray, optional
+ An output array to store the calculated feature transform, instead of
+ returning it.
+        `return_indices` must be True.
+ Its shape must be `(input.ndim,) + input.shape`.
+
+ Returns
+ -------
+ distances : float64 ndarray, optional
+ The calculated distance transform. Returned only when
+ `return_distances` is True and `distances` is not supplied.
+ It will have the same shape as the input array.
+ indices : int32 ndarray, optional
+ The calculated feature transform. It has an input-shaped array for each
+ dimension of the input. See example below.
+ Returned only when `return_indices` is True and `indices` is not
+ supplied.
+
+ Notes
+ -----
+ The Euclidean distance transform gives values of the Euclidean
+ distance::
+
+ n
+ y_i = sqrt(sum (x[i]-b[i])**2)
+ i
+
+ where b[i] is the background point (value 0) with the smallest
+ Euclidean distance to input points x[i], and n is the
+ number of dimensions.
+
+ Examples
+ --------
+ >>> from scipy import ndimage
+ >>> import numpy as np
+ >>> a = np.array(([0,1,1,1,1],
+ ... [0,0,1,1,1],
+ ... [0,1,1,1,1],
+ ... [0,1,1,1,0],
+ ... [0,1,1,0,0]))
+ >>> ndimage.distance_transform_edt(a)
+ array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
+ [ 0. , 0. , 1. , 2. , 2. ],
+ [ 0. , 1. , 1.4142, 1.4142, 1. ],
+ [ 0. , 1. , 1.4142, 1. , 0. ],
+ [ 0. , 1. , 1. , 0. , 0. ]])
+
+ With a sampling of 2 units along x, 1 along y:
+
+ >>> ndimage.distance_transform_edt(a, sampling=[2,1])
+ array([[ 0. , 1. , 2. , 2.8284, 3.6056],
+ [ 0. , 0. , 1. , 2. , 3. ],
+ [ 0. , 1. , 2. , 2.2361, 2. ],
+ [ 0. , 1. , 2. , 1. , 0. ],
+ [ 0. , 1. , 1. , 0. , 0. ]])
+
+ Asking for indices as well:
+
+ >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
+ >>> inds
+ array([[[0, 0, 1, 1, 3],
+ [1, 1, 1, 1, 3],
+ [2, 2, 1, 3, 3],
+ [3, 3, 4, 4, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 0, 1, 1, 4],
+ [0, 1, 1, 1, 4],
+ [0, 0, 1, 4, 4],
+ [0, 0, 3, 3, 4],
+ [0, 0, 3, 3, 4]]])
+
+ With arrays provided for inplace outputs:
+
+ >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
+ >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
+ array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
+ [ 0. , 0. , 1. , 2. , 2. ],
+ [ 0. , 1. , 1.4142, 1.4142, 1. ],
+ [ 0. , 1. , 1.4142, 1. , 0. ],
+ [ 0. , 1. , 1. , 0. , 0. ]])
+ >>> indices
+ array([[[0, 0, 1, 1, 3],
+ [1, 1, 1, 1, 3],
+ [2, 2, 1, 3, 3],
+ [3, 3, 4, 4, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 0, 1, 1, 4],
+ [0, 1, 1, 1, 4],
+ [0, 0, 1, 4, 4],
+ [0, 0, 3, 3, 4],
+ [0, 0, 3, 3, 4]]])
+
+ """
+ ft_inplace = isinstance(indices, numpy.ndarray)
+ dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+ dt_inplace, ft_inplace, return_distances, return_indices
+ )
+
+ # calculate the feature transform
+ input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
+ if sampling is not None:
+ sampling = _ni_support._normalize_sequence(sampling, input.ndim)
+ sampling = numpy.asarray(sampling, dtype=numpy.float64)
+ if not sampling.flags.contiguous:
+ sampling = sampling.copy()
+
+ if ft_inplace:
+ ft = indices
+ if ft.shape != (input.ndim,) + input.shape:
+ raise RuntimeError('indices array has wrong shape')
+ if ft.dtype.type != numpy.int32:
+ raise RuntimeError('indices array must be int32')
+ else:
+ ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32)
+
+ _nd_image.euclidean_feature_transform(input, sampling, ft)
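+    # ft now holds, for each element and each axis, the coordinate of the
+    # closest background element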
+ # if requested, calculate the distance transform
+ if return_distances:
+ dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
+ dt = dt.astype(numpy.float64)
+ if sampling is not None:
+ for ii in range(len(sampling)):
+ dt[ii, ...] *= sampling[ii]
+ numpy.multiply(dt, dt, dt)
+ if dt_inplace:
+ dt = numpy.add.reduce(dt, axis=0)
+ if distances.shape != dt.shape:
+ raise RuntimeError('distances array has wrong shape')
+ if distances.dtype.type != numpy.float64:
+ raise RuntimeError('distances array must be float64')
+ numpy.sqrt(dt, distances)
+ else:
+ dt = numpy.add.reduce(dt, axis=0)
+ dt = numpy.sqrt(dt)
+
+ # construct and return the result
+ result = []
+ if return_distances and not dt_inplace:
+ result.append(dt)
+ if return_indices and not ft_inplace:
+ result.append(ft)
+
+ if len(result) == 2:
+ return tuple(result)
+ elif len(result) == 1:
+ return result[0]
+ else:
+ return None
+
+
+def _distance_transform_arg_check(distances_out, indices_out,
+                                  return_distances, return_indices):
+ """Raise a RuntimeError if the arguments are invalid"""
+ error_msgs = []
+ if (not return_distances) and (not return_indices):
+ error_msgs.append(
+ 'at least one of return_distances/return_indices must be True')
+ if distances_out and not return_distances:
+ error_msgs.append(
+ 'return_distances must be True if distances is supplied'
+ )
+ if indices_out and not return_indices:
+ error_msgs.append('return_indices must be True if indices is supplied')
+ if error_msgs:
+ raise RuntimeError(', '.join(error_msgs))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b2e0eab625d2da3b1bab2f221530be543eb41628
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6469f2c75fcee1f74dfbbe049df8ca05b074505
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py
@@ -0,0 +1,208 @@
+"""Docstring components common to several ndimage functions."""
+from scipy._lib import doccer
+
+__all__ = ['docfiller']
+
+
+_input_doc = (
+"""input : array_like
+ The input array.""")
+_axis_doc = (
+"""axis : int, optional
+ The axis of `input` along which to calculate. Default is -1.""")
+_output_doc = (
+"""output : array or dtype, optional
+ The array in which to place the output, or the dtype of the
+ returned array. By default an array of the same dtype as input
+ will be created.""")
+_size_foot_doc = (
+"""size : scalar or tuple, optional
+ See footprint, below. Ignored if footprint is given.
+footprint : array, optional
+ Either `size` or `footprint` must be defined. `size` gives
+ the shape that is taken from the input array, at every element
+ position, to define the input to the filter function.
+ `footprint` is a boolean array that specifies (implicitly) a
+ shape, but also which of the elements within this shape will get
+ passed to the filter function. Thus ``size=(n,m)`` is equivalent
+ to ``footprint=np.ones((n,m))``. We adjust `size` to the number
+ of dimensions of the input array, so that, if the input array is
+ shape (10,10,10), and `size` is 2, then the actual size used is
+ (2,2,2). When `footprint` is given, `size` is ignored.""")
+_mode_reflect_doc = (
+"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+ The `mode` parameter determines how the input array is extended
+ beyond its boundaries. Default is 'reflect'. Behavior for each valid
+ value is as follows:
+
+ 'reflect' (`d c b a | a b c d | d c b a`)
+ The input is extended by reflecting about the edge of the last
+ pixel. This mode is also sometimes referred to as half-sample
+ symmetric.
+
+ 'constant' (`k k k k | a b c d | k k k k`)
+ The input is extended by filling all values beyond the edge with
+ the same constant value, defined by the `cval` parameter.
+
+ 'nearest' (`a a a a | a b c d | d d d d`)
+ The input is extended by replicating the last pixel.
+
+ 'mirror' (`d c b | a b c d | c b a`)
+ The input is extended by reflecting about the center of the last
+ pixel. This mode is also sometimes referred to as whole-sample
+ symmetric.
+
+ 'wrap' (`a b c d | a b c d | a b c d`)
+ The input is extended by wrapping around to the opposite edge.
+
+ For consistency with the interpolation functions, the following mode
+ names can also be used:
+
+ 'grid-mirror'
+ This is a synonym for 'reflect'.
+
+ 'grid-constant'
+ This is a synonym for 'constant'.
+
+ 'grid-wrap'
+ This is a synonym for 'wrap'.""")
+
+_mode_interp_constant_doc = (
+"""mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
+'mirror', 'grid-wrap', 'wrap'}, optional
+ The `mode` parameter determines how the input array is extended
+ beyond its boundaries. Default is 'constant'. Behavior for each valid
+ value is as follows (see additional plots and details on
+    :ref:`boundary modes <ndimage-interpolation-modes>`):
+
+ 'reflect' (`d c b a | a b c d | d c b a`)
+ The input is extended by reflecting about the edge of the last
+ pixel. This mode is also sometimes referred to as half-sample
+ symmetric.
+
+ 'grid-mirror'
+ This is a synonym for 'reflect'.
+
+ 'constant' (`k k k k | a b c d | k k k k`)
+ The input is extended by filling all values beyond the edge with
+ the same constant value, defined by the `cval` parameter. No
+ interpolation is performed beyond the edges of the input.
+
+ 'grid-constant' (`k k k k | a b c d | k k k k`)
+ The input is extended by filling all values beyond the edge with
+ the same constant value, defined by the `cval` parameter. Interpolation
+ occurs for samples outside the input's extent as well.
+
+ 'nearest' (`a a a a | a b c d | d d d d`)
+ The input is extended by replicating the last pixel.
+
+ 'mirror' (`d c b | a b c d | c b a`)
+ The input is extended by reflecting about the center of the last
+ pixel. This mode is also sometimes referred to as whole-sample
+ symmetric.
+
+ 'grid-wrap' (`a b c d | a b c d | a b c d`)
+ The input is extended by wrapping around to the opposite edge.
+
+ 'wrap' (`d b c d | a b c d | b c a b`)
+ The input is extended by wrapping around to the opposite edge, but in a
+ way such that the last point and initial point exactly overlap. In this
+ case it is not well defined which sample will be chosen at the point of
+ overlap.""")
+_mode_interp_mirror_doc = (
+ _mode_interp_constant_doc.replace("Default is 'constant'",
+ "Default is 'mirror'")
+)
+assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
+ 'Default not replaced'
+
+_mode_multiple_doc = (
+"""mode : str or sequence, optional
+ The `mode` parameter determines how the input array is extended
+ when the filter overlaps a border. By passing a sequence of modes
+ with length equal to the number of dimensions of the input array,
+ different modes can be specified along each axis. Default value is
+    'reflect'. The valid values and their behavior are as follows:
+
+ 'reflect' (`d c b a | a b c d | d c b a`)
+ The input is extended by reflecting about the edge of the last
+ pixel. This mode is also sometimes referred to as half-sample
+ symmetric.
+
+ 'constant' (`k k k k | a b c d | k k k k`)
+ The input is extended by filling all values beyond the edge with
+ the same constant value, defined by the `cval` parameter.
+
+ 'nearest' (`a a a a | a b c d | d d d d`)
+ The input is extended by replicating the last pixel.
+
+ 'mirror' (`d c b | a b c d | c b a`)
+ The input is extended by reflecting about the center of the last
+ pixel. This mode is also sometimes referred to as whole-sample
+ symmetric.
+
+ 'wrap' (`a b c d | a b c d | a b c d`)
+ The input is extended by wrapping around to the opposite edge.
+
+ For consistency with the interpolation functions, the following mode
+ names can also be used:
+
+ 'grid-constant'
+ This is a synonym for 'constant'.
+
+ 'grid-mirror'
+ This is a synonym for 'reflect'.
+
+ 'grid-wrap'
+ This is a synonym for 'wrap'.""")
+_cval_doc = (
+"""cval : scalar, optional
+ Value to fill past edges of input if `mode` is 'constant'. Default
+ is 0.0.""")
+_origin_doc = (
+"""origin : int, optional
+ Controls the placement of the filter on the input array's pixels.
+ A value of 0 (the default) centers the filter over the pixel, with
+ positive values shifting the filter to the left, and negative ones
+ to the right.""")
+_origin_multiple_doc = (
+"""origin : int or sequence, optional
+ Controls the placement of the filter on the input array's pixels.
+ A value of 0 (the default) centers the filter over the pixel, with
+ positive values shifting the filter to the left, and negative ones
+ to the right. By passing a sequence of origins with length equal to
+ the number of dimensions of the input array, different shifts can
+ be specified along each axis.""")
+_extra_arguments_doc = (
+"""extra_arguments : sequence, optional
+    Sequence of extra positional arguments to pass to the passed function.""")
+_extra_keywords_doc = (
+"""extra_keywords : dict, optional
+    dict of extra keyword arguments to pass to the passed function.""")
+_prefilter_doc = (
+"""prefilter : bool, optional
+ Determines if the input array is prefiltered with `spline_filter`
+ before interpolation. The default is True, which will create a
+ temporary `float64` array of filtered values if `order > 1`. If
+ setting this to False, the output will be slightly blurred if
+ `order > 1`, unless the input is prefiltered, i.e. it is the result
+ of calling `spline_filter` on the original input.""")
+
+docdict = {
+ 'input': _input_doc,
+ 'axis': _axis_doc,
+ 'output': _output_doc,
+ 'size_foot': _size_foot_doc,
+ 'mode_interp_constant': _mode_interp_constant_doc,
+ 'mode_interp_mirror': _mode_interp_mirror_doc,
+ 'mode_reflect': _mode_reflect_doc,
+ 'mode_multiple': _mode_multiple_doc,
+ 'cval': _cval_doc,
+ 'origin': _origin_doc,
+ 'origin_multiple': _origin_multiple_doc,
+ 'extra_arguments': _extra_arguments_doc,
+ 'extra_keywords': _extra_keywords_doc,
+ 'prefilter': _prefilter_doc
+ }
+
+docfiller = doccer.filldoc(docdict)
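+
+# A minimal usage sketch (the decorated function is hypothetical):
+# `docfiller` expands ``%(key)s`` placeholders in a docstring with the
+# corresponding entries of `docdict`, e.g.
+#
+#     @docfiller
+#     def some_filter(input, axis=-1):
+#         """Apply a filter.
+#
+#         Parameters
+#         ----------
+#         %(input)s
+#         %(axis)s
+#         """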
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1afb18ebc1f600ee1cc7638c4629b5036144eab4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..dadce8cf800597764f7f978f22888e4458b3fe7f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/_ni_support.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections.abc import Iterable
+import operator
+import warnings
+import numpy
+
+
+def _extend_mode_to_code(mode):
+ """Convert an extension mode to the corresponding integer code.
+ """
+ if mode == 'nearest':
+ return 0
+ elif mode == 'wrap':
+ return 1
+ elif mode in ['reflect', 'grid-mirror']:
+ return 2
+ elif mode == 'mirror':
+ return 3
+ elif mode == 'constant':
+ return 4
+ elif mode == 'grid-wrap':
+ return 5
+ elif mode == 'grid-constant':
+ return 6
+ else:
+ raise RuntimeError('boundary mode not supported')
+
+
+def _normalize_sequence(input, rank):
+ """If input is a scalar, create a sequence of length equal to the
+ rank by duplicating the input. If input is a sequence,
+    check if its length matches the given rank.
+ """
+ is_str = isinstance(input, str)
+ if not is_str and isinstance(input, Iterable):
+ normalized = list(input)
+ if len(normalized) != rank:
+ err = "sequence argument must have length equal to input rank"
+ raise RuntimeError(err)
+ else:
+ normalized = [input] * rank
+ return normalized
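+
+# Illustrative behaviour (a sketch, not part of the public API):
+#     _normalize_sequence(3, 2)      -> [3, 3]
+#     _normalize_sequence([3, 4], 2) -> [3, 4]
+#     _normalize_sequence([3], 2)    -> RuntimeError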
+
+
+def _get_output(output, input, shape=None, complex_output=False):
+ if shape is None:
+ shape = input.shape
+ if output is None:
+ if not complex_output:
+ output = numpy.zeros(shape, dtype=input.dtype.name)
+ else:
+ complex_type = numpy.promote_types(input.dtype, numpy.complex64)
+ output = numpy.zeros(shape, dtype=complex_type)
+ elif isinstance(output, (type, numpy.dtype)):
+ # Classes (like `np.float32`) and dtypes are interpreted as dtype
+ if complex_output and numpy.dtype(output).kind != 'c':
+ warnings.warn("promoting specified output dtype to complex", stacklevel=3)
+ output = numpy.promote_types(output, numpy.complex64)
+ output = numpy.zeros(shape, dtype=output)
+ elif isinstance(output, str):
+ output = numpy.dtype(output)
+ if complex_output and output.kind != 'c':
+ raise RuntimeError("output must have complex dtype")
+ elif not issubclass(output.type, numpy.number):
+ raise RuntimeError("output must have numeric dtype")
+ output = numpy.zeros(shape, dtype=output)
+ elif output.shape != shape:
+ raise RuntimeError("output shape not correct")
+ elif complex_output and output.dtype.kind != 'c':
+ raise RuntimeError("output must have complex dtype")
+ return output
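+
+# Illustrative behaviour (a sketch): _get_output(None, arr) allocates a
+# zeroed array matching `arr`; _get_output(numpy.float32, arr) allocates a
+# float32 array; an existing ndarray is validated for shape (and, if
+# requested, complex dtype) and returned as-is.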
+
+
+def _check_axes(axes, ndim):
+ if axes is None:
+ return tuple(range(ndim))
+ elif numpy.isscalar(axes):
+ axes = (operator.index(axes),)
+    elif isinstance(axes, Iterable):
+        # convert once up front, then validate each entry
+        axes = tuple(operator.index(ax) for ax in axes)
+        for ax in axes:
+            if ax < -ndim or ax > ndim - 1:
+                raise ValueError(f"specified axis: {ax} is out of range")
+        axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
+ else:
+ message = "axes must be an integer, iterable of integers, or None"
+ raise ValueError(message)
+ if len(tuple(set(axes))) != len(axes):
+ raise ValueError("axes must be unique")
+ return axes
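+
+# Illustrative behaviour (a sketch): _check_axes(None, 2) returns (0, 1),
+# _check_axes((0, -1), 3) returns (0, 2), and duplicate entries such as
+# _check_axes((0, 0), 2) raise ValueError.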
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/filters.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16d9d279a9585b2454c46ee09cf22143de833a6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/filters.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'correlate1d', 'convolve1d', 'gaussian_filter1d',
+ 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
+ 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
+ 'gaussian_gradient_magnitude', 'correlate', 'convolve',
+ 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
+ 'maximum_filter1d', 'minimum_filter', 'maximum_filter',
+ 'rank_filter', 'median_filter', 'percentile_filter',
+ 'generic_filter1d', 'generic_filter'
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package='ndimage', module='filters',
+ private_modules=['_filters'], all=__all__,
+ attribute=name)
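+
+# Accessing, e.g., ``scipy.ndimage.filters.median_filter`` still resolves
+# through the module ``__getattr__`` hook above, but emits a deprecation
+# warning; the supported spelling is simply:
+#
+#     from scipy.ndimage import median_filter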
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/fourier.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/fourier.py
new file mode 100644
index 0000000000000000000000000000000000000000..73c49bd52d9a446ce0fe25d9e15b8de68fbd46fb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/fourier.py
@@ -0,0 +1,21 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'fourier_gaussian', 'fourier_uniform',
+ 'fourier_ellipsoid', 'fourier_shift'
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package='ndimage', module='fourier',
+ private_modules=['_fourier'], all=__all__,
+ attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/interpolation.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a28816cbfad89faced7acb8a54cd6ecc4fa8ad2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/interpolation.py
@@ -0,0 +1,23 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'spline_filter1d', 'spline_filter',
+ 'geometric_transform', 'map_coordinates',
+ 'affine_transform', 'shift', 'zoom', 'rotate',
+ 'docfiller'
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package='ndimage', module='interpolation',
+ private_modules=['_interpolation'], all=__all__,
+ attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/measurements.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..22f76b01840ffb829205bd1d28a7ad1f9ac5db61
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/measurements.py
@@ -0,0 +1,24 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'label', 'find_objects', 'labeled_comprehension',
+ 'sum', 'mean', 'variance', 'standard_deviation',
+ 'minimum', 'maximum', 'median', 'minimum_position',
+ 'maximum_position', 'extrema', 'center_of_mass',
+ 'histogram', 'watershed_ift', 'sum_labels'
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package='ndimage', module='measurements',
+ private_modules=['_measurements'], all=__all__,
+ attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/morphology.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/morphology.py
new file mode 100644
index 0000000000000000000000000000000000000000..e522e7df3a4b06b7e04ed8c2d0ecaff2a98b951d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/morphology.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [ # noqa: F822
+ 'iterate_structure', 'generate_binary_structure',
+ 'binary_erosion', 'binary_dilation', 'binary_opening',
+ 'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
+ 'binary_fill_holes', 'grey_erosion', 'grey_dilation',
+ 'grey_opening', 'grey_closing', 'morphological_gradient',
+ 'morphological_laplace', 'white_tophat', 'black_tophat',
+ 'distance_transform_bf', 'distance_transform_cdt',
+ 'distance_transform_edt'
+]
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package='ndimage', module='morphology',
+ private_modules=['_morphology'], all=__all__,
+ attribute=name)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fc6581276063ca8d0e90362e1d1eee743d4ed18
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__init__.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+import numpy
+
+# list of numarray data types
+integer_types: list[type] = [
+ numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
+ numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
+
+float_types: list[type] = [numpy.float32, numpy.float64]
+
+complex_types: list[type] = [numpy.complex64, numpy.complex128]
+
+types: list[type] = integer_types + float_types
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7608ea15141646a0442e2712d8d4ea6b88cfb119
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbd8a24d6f57382b8bb3f56d016fae1c5803118b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04b546ae599d7a0326ae296e98c6f23c3bb7d91f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4e552339689a95ff69fdb5b9c4276e1dfe8b56b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0e9f49cd336227e4ab8c9f86f3919f1b8a4877e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a5f364bf4f2831ceb17f44e8c0c35332e53f8c8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c73cd501b1070a7f4fcc26a3fc0636c0c475b3fe
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..074daa1d54234ef4684168b9fc878581dc248a2c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_ni_support.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3c71e4fe241b1cf2e419bd07469bd6dbbef27ac
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed52ed8477056176e1f5aacbf681b12b0153fee6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_c_api.py
@@ -0,0 +1,102 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+from scipy import ndimage
+from scipy.ndimage import _ctest
+from scipy.ndimage import _cytest
+from scipy._lib._ccallback import LowLevelCallable
+
+FILTER1D_FUNCTIONS = [
+ lambda filter_size: _ctest.filter1d(filter_size),
+ lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
+ lambda filter_size: LowLevelCallable(
+ _cytest.filter1d(filter_size, with_signature=True)
+ ),
+ lambda filter_size: LowLevelCallable.from_cython(
+ _cytest, "_filter1d",
+ _cytest.filter1d_capsule(filter_size),
+ ),
+]
+
+FILTER2D_FUNCTIONS = [
+ lambda weights: _ctest.filter2d(weights),
+ lambda weights: _cytest.filter2d(weights, with_signature=False),
+ lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
+ lambda weights: LowLevelCallable.from_cython(_cytest,
+ "_filter2d",
+ _cytest.filter2d_capsule(weights),),
+]
+
+TRANSFORM_FUNCTIONS = [
+ lambda shift: _ctest.transform(shift),
+ lambda shift: _cytest.transform(shift, with_signature=False),
+ lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
+ lambda shift: LowLevelCallable.from_cython(_cytest,
+ "_transform",
+ _cytest.transform_capsule(shift),),
+]
+
+
+def test_generic_filter():
+ def filter2d(footprint_elements, weights):
+ return (weights*footprint_elements).sum()
+
+ def check(j):
+ func = FILTER2D_FUNCTIONS[j]
+
+ im = np.ones((20, 20))
+ im[:10,:10] = 0
+ footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+ footprint_size = np.count_nonzero(footprint)
+ weights = np.ones(footprint_size)/footprint_size
+
+ res = ndimage.generic_filter(im, func(weights),
+ footprint=footprint)
+ std = ndimage.generic_filter(im, filter2d, footprint=footprint,
+ extra_arguments=(weights,))
+ assert_allclose(res, std, err_msg=f"#{j} failed")
+
+ for j, func in enumerate(FILTER2D_FUNCTIONS):
+ check(j)
+
+
+def test_generic_filter1d():
+ def filter1d(input_line, output_line, filter_size):
+ for i in range(output_line.size):
+ output_line[i] = 0
+ for j in range(filter_size):
+ output_line[i] += input_line[i+j]
+ output_line /= filter_size
+
+ def check(j):
+ func = FILTER1D_FUNCTIONS[j]
+
+ im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
+ filter_size = 3
+
+ res = ndimage.generic_filter1d(im, func(filter_size),
+ filter_size)
+ std = ndimage.generic_filter1d(im, filter1d, filter_size,
+ extra_arguments=(filter_size,))
+ assert_allclose(res, std, err_msg=f"#{j} failed")
+
+ for j, func in enumerate(FILTER1D_FUNCTIONS):
+ check(j)
+
+
+def test_geometric_transform():
+ def transform(output_coordinates, shift):
+ return output_coordinates[0] - shift, output_coordinates[1] - shift
+
+ def check(j):
+ func = TRANSFORM_FUNCTIONS[j]
+
+ im = np.arange(12).reshape(4, 3).astype(np.float64)
+ shift = 0.5
+
+ res = ndimage.geometric_transform(im, func(shift))
+ std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
+ assert_allclose(res, std, err_msg=f"#{j} failed")
+
+ for j, func in enumerate(TRANSFORM_FUNCTIONS):
+ check(j)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd9382a16ada38a6d3059d54ad765c2e0f74b7c1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_datatypes.py
@@ -0,0 +1,66 @@
+""" Testing data types for ndimage calls
+"""
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_
+import pytest
+
+from scipy import ndimage
+
+
+def test_map_coordinates_dts():
+ # check that ndimage accepts different data types for interpolation
+ data = np.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ shifted_data = np.array([[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+ idx = np.indices(data.shape)
+ dts = (np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.intp, np.uintp, np.float32, np.float64)
+ for order in range(0, 6):
+ for data_dt in dts:
+ these_data = data.astype(data_dt)
+ for coord_dt in dts:
+ # affine mapping
+ mat = np.eye(2, dtype=coord_dt)
+ off = np.zeros((2,), dtype=coord_dt)
+ out = ndimage.affine_transform(these_data, mat, off)
+ assert_array_almost_equal(these_data, out)
+ # map coordinates
+ coords_m1 = idx.astype(coord_dt) - 1
+ coords_p10 = idx.astype(coord_dt) + 10
+ out = ndimage.map_coordinates(these_data, coords_m1, order=order)
+ assert_array_almost_equal(out, shifted_data)
+ # check constant fill works
+ out = ndimage.map_coordinates(these_data, coords_p10, order=order)
+ assert_array_almost_equal(out, np.zeros((3,4)))
+ # check shift and zoom
+ out = ndimage.shift(these_data, 1)
+ assert_array_almost_equal(out, shifted_data)
+ out = ndimage.zoom(these_data, 1)
+ assert_array_almost_equal(these_data, out)
+
+
+@pytest.mark.xfail(True, reason="Broken on many platforms")
+def test_uint64_max():
+ # Test interpolation respects uint64 max. Reported to fail at least on
+ # win32 (due to the 32 bit visual C compiler using signed int64 when
+ # converting between uint64 to double) and Debian on s390x.
+ # Interpolation is always done in double precision floating point, so
+ # we use the largest uint64 value for which int(float(big)) still fits
+ # in a uint64.
+ # This test was last enabled on macOS only, and there it started failing
+ # on arm64 as well (see gh-19117).
+ big = 2**64 - 1025
+ arr = np.array([big, big, big], dtype=np.uint64)
+ # Tests geometric transform (map_coordinates, affine_transform)
+ inds = np.indices(arr.shape) - 0.1
+ x = ndimage.map_coordinates(arr, inds)
+ assert_(x[1] == int(float(big)))
+ assert_(x[2] == int(float(big)))
+ # Tests zoom / shift
+ x = ndimage.shift(arr, 0.1)
+ assert_(x[1] == int(float(big)))
+ assert_(x[2] == int(float(big)))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..6401a69f8627fa6b95c71f3711dfd064e74264f2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_filters.py
@@ -0,0 +1,2189 @@
+''' Some tests for filters '''
+import functools
+import itertools
+import math
+import numpy
+
+from numpy.testing import (assert_equal, assert_allclose,
+ assert_array_almost_equal,
+ assert_array_equal, assert_almost_equal,
+ suppress_warnings, assert_)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import ndimage
+from scipy.ndimage._filters import _gaussian_kernel1d
+
+from . import types, float_types, complex_types
+
+
+def sumsq(a, b):
+ return math.sqrt(((a - b)**2).sum())
+
+
+def _complex_correlate(array, kernel, real_dtype, convolve=False,
+ mode="reflect", cval=0, ):
+ """Utility to perform a reference complex-valued convolutions.
+
+ When convolve==False, correlation is performed instead
+ """
+ array = numpy.asarray(array)
+ kernel = numpy.asarray(kernel)
+ complex_array = array.dtype.kind == 'c'
+ complex_kernel = kernel.dtype.kind == 'c'
+ if array.ndim == 1:
+ func = ndimage.convolve1d if convolve else ndimage.correlate1d
+ else:
+ func = ndimage.convolve if convolve else ndimage.correlate
+ if not convolve:
+ kernel = kernel.conj()
+ if complex_array and complex_kernel:
+ # use: real(cval) for array.real component
+ # imag(cval) for array.imag component
+ output = (
+ func(array.real, kernel.real, output=real_dtype,
+ mode=mode, cval=numpy.real(cval)) -
+ func(array.imag, kernel.imag, output=real_dtype,
+ mode=mode, cval=numpy.imag(cval)) +
+ 1j * func(array.imag, kernel.real, output=real_dtype,
+ mode=mode, cval=numpy.imag(cval)) +
+ 1j * func(array.real, kernel.imag, output=real_dtype,
+ mode=mode, cval=numpy.real(cval))
+ )
+ elif complex_array:
+ output = (
+ func(array.real, kernel, output=real_dtype, mode=mode,
+ cval=numpy.real(cval)) +
+ 1j * func(array.imag, kernel, output=real_dtype, mode=mode,
+ cval=numpy.imag(cval))
+ )
+ elif complex_kernel:
+ # real array so cval is real too
+ output = (
+ func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
+ 1j * func(array, kernel.imag, output=real_dtype, mode=mode,
+ cval=cval)
+ )
+ return output
+
+
+def _cases_axes_tuple_length_mismatch():
+ # Generate combinations of filter function, valid kwargs, and
+    # keyword-value pairs for which the value will be given a mismatched
+    # (invalid) length
+ filter_func = ndimage.gaussian_filter
+ kwargs = dict(radius=3, mode='constant', sigma=1.0, order=0)
+ for key, val in kwargs.items():
+ yield filter_func, kwargs, key, val
+
+ filter_funcs = [ndimage.uniform_filter, ndimage.minimum_filter,
+ ndimage.maximum_filter]
+ kwargs = dict(size=3, mode='constant', origin=0)
+ for filter_func in filter_funcs:
+ for key, val in kwargs.items():
+ yield filter_func, kwargs, key, val
+
+
+class TestNdimageFilters:
+
+ def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
+ # utility for validating complex-valued correlations
+ real_dtype = numpy.asarray([], dtype=type2).real.dtype
+ expected = _complex_correlate(
+ array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
+ )
+
+ if array.ndim == 1:
+ correlate = functools.partial(ndimage.correlate1d, axis=-1,
+ mode=mode, cval=cval)
+ convolve = functools.partial(ndimage.convolve1d, axis=-1,
+ mode=mode, cval=cval)
+ else:
+ correlate = functools.partial(ndimage.correlate, mode=mode,
+ cval=cval)
+ convolve = functools.partial(ndimage.convolve, mode=mode,
+ cval=cval)
+
+ # test correlate output dtype
+ output = correlate(array, kernel, output=type2)
+ assert_array_almost_equal(expected, output)
+ assert_equal(output.dtype.type, type2)
+
+ # test correlate with pre-allocated output
+ output = numpy.zeros_like(array, dtype=type2)
+ correlate(array, kernel, output=output)
+ assert_array_almost_equal(expected, output)
+
+ # test convolve output dtype
+ output = convolve(array, kernel, output=type2)
+ expected = _complex_correlate(
+ array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
+ )
+ assert_array_almost_equal(expected, output)
+ assert_equal(output.dtype.type, type2)
+
+ # convolve with pre-allocated output
+ convolve(array, kernel, output=output)
+ assert_array_almost_equal(expected, output)
+ assert_equal(output.dtype.type, type2)
+
+ # warns if the output is not a complex dtype
+ with pytest.warns(UserWarning,
+ match="promoting specified output dtype to complex"):
+ correlate(array, kernel, output=real_dtype)
+
+ with pytest.warns(UserWarning,
+ match="promoting specified output dtype to complex"):
+ convolve(array, kernel, output=real_dtype)
+
+ # raises if output array is provided, but is not complex-valued
+ output_real = numpy.zeros_like(array, dtype=real_dtype)
+ with assert_raises(RuntimeError):
+ correlate(array, kernel, output=output_real)
+
+ with assert_raises(RuntimeError):
+ convolve(array, kernel, output=output_real)
+
+ def test_correlate01(self):
+ array = numpy.array([1, 2])
+ weights = numpy.array([2])
+ expected = [2, 4]
+
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ def test_correlate01_overlap(self):
+ array = numpy.arange(256).reshape(16, 16)
+ weights = numpy.array([2])
+ expected = 2 * array
+
+ ndimage.correlate1d(array, weights, output=array)
+ assert_array_almost_equal(array, expected)
+
+ def test_correlate02(self):
+ array = numpy.array([1, 2, 3])
+ kernel = numpy.array([1])
+
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ output = ndimage.correlate1d(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ output = ndimage.convolve1d(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ def test_correlate03(self):
+ array = numpy.array([1])
+ weights = numpy.array([1, 1])
+ expected = [2]
+
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ def test_correlate04(self):
+ array = numpy.array([1, 2])
+ tcor = [2, 3]
+ tcov = [3, 4]
+ weights = numpy.array([1, 1])
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, tcov)
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, tcov)
+
+ def test_correlate05(self):
+ array = numpy.array([1, 2, 3])
+ tcor = [2, 3, 5]
+ tcov = [3, 5, 6]
+ kernel = numpy.array([1, 1])
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal(tcor, output)
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal(tcov, output)
+ output = ndimage.correlate1d(array, kernel)
+ assert_array_almost_equal(tcor, output)
+ output = ndimage.convolve1d(array, kernel)
+ assert_array_almost_equal(tcov, output)
+
+ def test_correlate06(self):
+ array = numpy.array([1, 2, 3])
+ tcor = [9, 14, 17]
+ tcov = [7, 10, 15]
+ weights = numpy.array([1, 2, 3])
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, tcov)
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, tcov)
+
+ def test_correlate07(self):
+ array = numpy.array([1, 2, 3])
+ expected = [5, 8, 11]
+ weights = numpy.array([1, 2, 1])
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, expected)
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, expected)
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, expected)
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, expected)
+
+ def test_correlate08(self):
+ array = numpy.array([1, 2, 3])
+ tcor = [1, 2, 5]
+ tcov = [3, 6, 7]
+ weights = numpy.array([1, 2, -1])
+ output = ndimage.correlate(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve(array, weights)
+ assert_array_almost_equal(output, tcov)
+ output = ndimage.correlate1d(array, weights)
+ assert_array_almost_equal(output, tcor)
+ output = ndimage.convolve1d(array, weights)
+ assert_array_almost_equal(output, tcov)
+
+ def test_correlate09(self):
+ array = []
+ kernel = numpy.array([1, 1])
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal(array, output)
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal(array, output)
+ output = ndimage.correlate1d(array, kernel)
+ assert_array_almost_equal(array, output)
+ output = ndimage.convolve1d(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ def test_correlate10(self):
+ array = [[]]
+ kernel = numpy.array([[1, 1]])
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal(array, output)
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal(array, output)
+
+ def test_correlate11(self):
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]])
+ kernel = numpy.array([[1, 1],
+ [1, 1]])
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
+
+ def test_correlate12(self):
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]])
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ output = ndimage.correlate(array, kernel)
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+ output = ndimage.convolve(array, kernel)
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_kernel', types)
+ def test_correlate13(self, dtype_array, dtype_kernel):
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_array)
+ output = ndimage.correlate(array, kernel, output=dtype_kernel)
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+ assert_equal(output.dtype.type, dtype_kernel)
+
+ output = ndimage.convolve(array, kernel,
+ output=dtype_kernel)
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+ assert_equal(output.dtype.type, dtype_kernel)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate14(self, dtype_array, dtype_output):
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_array)
+ output = numpy.zeros(array.shape, dtype_output)
+ ndimage.correlate(array, kernel, output=output)
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+ assert_equal(output.dtype.type, dtype_output)
+
+ ndimage.convolve(array, kernel, output=output)
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+ assert_equal(output.dtype.type, dtype_output)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ def test_correlate15(self, dtype_array):
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_array)
+ output = ndimage.correlate(array, kernel, output=numpy.float32)
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ output = ndimage.convolve(array, kernel, output=numpy.float32)
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ def test_correlate16(self, dtype_array):
+ kernel = numpy.array([[0.5, 0],
+ [0, 0.5]])
+ array = numpy.array([[1, 2, 3], [4, 5, 6]], dtype_array)
+ output = ndimage.correlate(array, kernel, output=numpy.float32)
+ assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ output = ndimage.convolve(array, kernel, output=numpy.float32)
+ assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ def test_correlate17(self):
+ array = numpy.array([1, 2, 3])
+ tcor = [3, 5, 6]
+ tcov = [2, 3, 5]
+ kernel = numpy.array([1, 1])
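+ # origin=-1 shifts the correlation window one sample toward higher
+ # indices (output[i] = a[i] + a[i+1]); because convolution mirrors the
+ # kernel, the same origin shifts it the other way (a[i-1] + a[i]).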
+ output = ndimage.correlate(array, kernel, origin=-1)
+ assert_array_almost_equal(tcor, output)
+ output = ndimage.convolve(array, kernel, origin=-1)
+ assert_array_almost_equal(tcov, output)
+ output = ndimage.correlate1d(array, kernel, origin=-1)
+ assert_array_almost_equal(tcor, output)
+ output = ndimage.convolve1d(array, kernel, origin=-1)
+ assert_array_almost_equal(tcov, output)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ def test_correlate18(self, dtype_array):
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_array)
+ output = ndimage.correlate(array, kernel,
+ output=numpy.float32,
+ mode='nearest', origin=-1)
+ assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ output = ndimage.convolve(array, kernel,
+ output=numpy.float32,
+ mode='nearest', origin=-1)
+ assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ def test_correlate_mode_sequence(self):
+ kernel = numpy.ones((2, 2))
+ array = numpy.ones((3, 3), float)
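+ # correlate/convolve apply the full n-D kernel in a single pass, so a
+ # per-axis sequence of modes cannot apply and is rejected.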
+ with assert_raises(RuntimeError):
+ ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
+ with assert_raises(RuntimeError):
+ ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
+
+ @pytest.mark.parametrize('dtype_array', types)
+ def test_correlate19(self, dtype_array):
+ kernel = numpy.array([[1, 0],
+ [0, 1]])
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_array)
+ output = ndimage.correlate(array, kernel,
+ output=numpy.float32,
+ mode='nearest', origin=[-1, 0])
+ assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ output = ndimage.convolve(array, kernel,
+ output=numpy.float32,
+ mode='nearest', origin=[-1, 0])
+ assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
+ assert_equal(output.dtype.type, numpy.float32)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate20(self, dtype_array, dtype_output):
+ weights = numpy.array([1, 2, 1])
+ expected = [[5, 10, 15], [7, 14, 21]]
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]], dtype_array)
+ output = numpy.zeros((2, 3), dtype_output)
+ ndimage.correlate1d(array, weights, axis=0, output=output)
+ assert_array_almost_equal(output, expected)
+ ndimage.convolve1d(array, weights, axis=0, output=output)
+ assert_array_almost_equal(output, expected)
+
+ def test_correlate21(self):
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]])
+ expected = [[5, 10, 15], [7, 14, 21]]
+ weights = numpy.array([1, 2, 1])
+ output = ndimage.correlate1d(array, weights, axis=0)
+ assert_array_almost_equal(output, expected)
+ output = ndimage.convolve1d(array, weights, axis=0)
+ assert_array_almost_equal(output, expected)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate22(self, dtype_array, dtype_output):
+ weights = numpy.array([1, 2, 1])
+ expected = [[6, 12, 18], [6, 12, 18]]
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]], dtype_array)
+ output = numpy.zeros((2, 3), dtype_output)
+ ndimage.correlate1d(array, weights, axis=0,
+ mode='wrap', output=output)
+ assert_array_almost_equal(output, expected)
+ ndimage.convolve1d(array, weights, axis=0,
+ mode='wrap', output=output)
+ assert_array_almost_equal(output, expected)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate23(self, dtype_array, dtype_output):
+ weights = numpy.array([1, 2, 1])
+ expected = [[5, 10, 15], [7, 14, 21]]
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]], dtype_array)
+ output = numpy.zeros((2, 3), dtype_output)
+ ndimage.correlate1d(array, weights, axis=0,
+ mode='nearest', output=output)
+ assert_array_almost_equal(output, expected)
+ ndimage.convolve1d(array, weights, axis=0,
+ mode='nearest', output=output)
+ assert_array_almost_equal(output, expected)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate24(self, dtype_array, dtype_output):
+ weights = numpy.array([1, 2, 1])
+ tcor = [[7, 14, 21], [8, 16, 24]]
+ tcov = [[4, 8, 12], [5, 10, 15]]
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]], dtype_array)
+ output = numpy.zeros((2, 3), dtype_output)
+ ndimage.correlate1d(array, weights, axis=0,
+ mode='nearest', output=output, origin=-1)
+ assert_array_almost_equal(output, tcor)
+ ndimage.convolve1d(array, weights, axis=0,
+ mode='nearest', output=output, origin=-1)
+ assert_array_almost_equal(output, tcov)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_correlate25(self, dtype_array, dtype_output):
+ weights = numpy.array([1, 2, 1])
+ tcor = [[4, 8, 12], [5, 10, 15]]
+ tcov = [[7, 14, 21], [8, 16, 24]]
+ array = numpy.array([[1, 2, 3],
+ [2, 4, 6]], dtype_array)
+ output = numpy.zeros((2, 3), dtype_output)
+ ndimage.correlate1d(array, weights, axis=0,
+ mode='nearest', output=output, origin=1)
+ assert_array_almost_equal(output, tcor)
+ ndimage.convolve1d(array, weights, axis=0,
+ mode='nearest', output=output, origin=1)
+ assert_array_almost_equal(output, tcov)
+
+ def test_correlate26(self):
+ # test fix for gh-11661 (mirror extension of a length 1 signal)
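+ # 'mirror' reflects about the edge sample without repeating it, so a
+ # length-1 input extends to a constant signal; summing five unit
+ # weights over that constant gives 5.0.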
+ y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
+ assert_array_equal(y, numpy.array(5.))
+
+ y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
+ assert_array_equal(y, numpy.array(5.))
+
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
+ @pytest.mark.parametrize('dtype_input', types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([[1, 0],
+ [0, 1 + 1j]], dtype_kernel)
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_input)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
+ @pytest.mark.parametrize('dtype_input', types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ @pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
+ def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
+ dtype_output, mode):
+ # test use of non-zero cval with complex inputs
+ # also verifies that mode 'grid-constant' does not segfault
+ kernel = numpy.array([[1, 0],
+ [0, 1 + 1j]], dtype_kernel)
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_input)
+ self._validate_complex(array, kernel, dtype_output, mode=mode,
+ cval=5.0)
+
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
+ @pytest.mark.parametrize('dtype_input', types)
+ def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
+ dtype_kernel):
+ # cannot give complex cval with a real image
+ kernel = numpy.array([[1, 0],
+ [0, 1 + 1j]], dtype_kernel)
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype_input)
+ for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
+ ndimage.correlate1d]:
+ with pytest.raises(ValueError):
+ func(array, kernel, mode='constant', cval=5.0 + 1.0j,
+ output=numpy.complex64)
+
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
+ @pytest.mark.parametrize('dtype_input', types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([1, 1 + 1j], dtype_kernel)
+ array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype_kernel', complex_types)
+ @pytest.mark.parametrize('dtype_input', types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([1, 1 + 1j], dtype_kernel)
+ array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
+ cval=5.0)
+
+ @pytest.mark.parametrize('dtype_kernel', types)
+ @pytest.mark.parametrize('dtype_input', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate_complex_input(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([[1, 0],
+ [0, 1]], dtype_kernel)
+ array = numpy.array([[1, 2j, 3],
+ [1 + 4j, 5, 6j]], dtype_input)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype_kernel', types)
+ @pytest.mark.parametrize('dtype_input', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([1, 0, 1], dtype_kernel)
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype_kernel', types)
+ @pytest.mark.parametrize('dtype_input', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
+ dtype_output):
+ kernel = numpy.array([1, 0, 1], dtype_kernel)
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
+ cval=5 - 3j)
+
+ @pytest.mark.parametrize('dtype', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
+ kernel = numpy.array([[1, 0],
+ [0, 1 + 1j]], dtype)
+ array = numpy.array([[1, 2j, 3],
+ [1 + 4j, 5, 6j]], dtype)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate_complex_input_and_kernel_cval(self, dtype,
+ dtype_output):
+ kernel = numpy.array([[1, 0],
+ [0, 1 + 1j]], dtype)
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype)
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
+ cval=5.0 + 2.0j)
+
+ @pytest.mark.parametrize('dtype', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
+ kernel = numpy.array([1, 1 + 1j], dtype)
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
+ self._validate_complex(array, kernel, dtype_output)
+
+ @pytest.mark.parametrize('dtype', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
+ dtype_output):
+ kernel = numpy.array([1, 1 + 1j], dtype)
+ array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
+ self._validate_complex(array, kernel, dtype_output, mode='constant',
+ cval=5.0 + 2.0j)
+
+ def test_gauss01(self):
+ input = numpy.array([[1, 2, 3],
+ [2, 4, 6]], numpy.float32)
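+ # sigma=0 yields a zero-radius (single-tap) kernel, so the filter
+ # should act as the identity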
+ output = ndimage.gaussian_filter(input, 0)
+ assert_array_almost_equal(output, input)
+
+ def test_gauss02(self):
+ input = numpy.array([[1, 2, 3],
+ [2, 4, 6]], numpy.float32)
+ output = ndimage.gaussian_filter(input, 1.0)
+ assert_equal(input.dtype, output.dtype)
+ assert_equal(input.shape, output.shape)
+
+ def test_gauss03(self):
+ # single precision data
+ input = numpy.arange(100 * 100).astype(numpy.float32)
+ input.shape = (100, 100)
+ output = ndimage.gaussian_filter(input, [1.0, 1.0])
+
+ assert_equal(input.dtype, output.dtype)
+ assert_equal(input.shape, output.shape)
+
+ # input.sum() is 49995000.0. With single precision floats, we can't
+ # expect more than 8 digits of accuracy, so use decimal=0 in this test.
+ assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
+ decimal=0)
+ assert_(sumsq(input, output) > 1.0)
+
+ def test_gauss04(self):
+ input = numpy.arange(100 * 100).astype(numpy.float32)
+ input.shape = (100, 100)
+ otype = numpy.float64
+ output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
+ assert_equal(output.dtype.type, numpy.float64)
+ assert_equal(input.shape, output.shape)
+ assert_(sumsq(input, output) > 1.0)
+
+ def test_gauss05(self):
+ input = numpy.arange(100 * 100).astype(numpy.float32)
+ input.shape = (100, 100)
+ otype = numpy.float64
+ output = ndimage.gaussian_filter(input, [1.0, 1.0],
+ order=1, output=otype)
+ assert_equal(output.dtype.type, numpy.float64)
+ assert_equal(input.shape, output.shape)
+ assert_(sumsq(input, output) > 1.0)
+
+ def test_gauss06(self):
+ input = numpy.arange(100 * 100).astype(numpy.float32)
+ input.shape = (100, 100)
+ otype = numpy.float64
+ output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
+ output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
+ assert_array_almost_equal(output1, output2)
+
+ def test_gauss_memory_overlap(self):
+ input = numpy.arange(100 * 100).astype(numpy.float32)
+ input.shape = (100, 100)
+ output1 = ndimage.gaussian_filter(input, 1.0)
+ ndimage.gaussian_filter(input, 1.0, output=input)
+ assert_array_almost_equal(output1, input)
+
+ @pytest.mark.parametrize(('filter_func', 'extra_args', 'size0', 'size'),
+ [(ndimage.gaussian_filter, (), 0, 1.0),
+ (ndimage.uniform_filter, (), 1, 3),
+ (ndimage.minimum_filter, (), 1, 3),
+ (ndimage.maximum_filter, (), 1, 3),
+ (ndimage.median_filter, (), 1, 3),
+ (ndimage.rank_filter, (1,), 1, 3),
+ (ndimage.percentile_filter, (40,), 1, 3)])
+ @pytest.mark.parametrize(
+ 'axes',
+ tuple(itertools.combinations(range(-3, 3), 1))
+ + tuple(itertools.combinations(range(-3, 3), 2))
+ + ((0, 1, 2),))
+ def test_filter_axes(self, filter_func, extra_args, size0, size, axes):
+ # Note: `size` is called `sigma` in `gaussian_filter`
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
+ axes = numpy.array(axes)
+
+ if len(set(axes % array.ndim)) != len(axes):
+ # parametrized cases with duplicate axes raise an error
+ with pytest.raises(ValueError, match="axes must be unique"):
+ filter_func(array, *extra_args, size, axes=axes)
+ return
+ output = filter_func(array, *extra_args, size, axes=axes)
+
+ # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
+ all_sizes = (size if ax in (axes % array.ndim) else size0
+ for ax in range(array.ndim))
+ expected = filter_func(array, *extra_args, all_sizes)
+ assert_allclose(output, expected)
+
+ kwargs_gauss = dict(radius=[4, 2, 3], order=[0, 1, 2],
+ mode=['reflect', 'nearest', 'constant'])
+ kwargs_other = dict(origin=(-1, 0, 1),
+ mode=['reflect', 'nearest', 'constant'])
+ kwargs_rank = dict(origin=(-1, 0, 1))
+
+ @pytest.mark.parametrize("filter_func, size0, size, kwargs",
+ [(ndimage.gaussian_filter, 0, 1.0, kwargs_gauss),
+ (ndimage.uniform_filter, 1, 3, kwargs_other),
+ (ndimage.maximum_filter, 1, 3, kwargs_other),
+ (ndimage.minimum_filter, 1, 3, kwargs_other),
+ (ndimage.median_filter, 1, 3, kwargs_rank),
+ (ndimage.rank_filter, 1, 3, kwargs_rank),
+ (ndimage.percentile_filter, 1, 3, kwargs_rank)])
+ @pytest.mark.parametrize('axes', itertools.combinations(range(-3, 3), 2))
+ def test_filter_axes_kwargs(self, filter_func, size0, size, kwargs, axes):
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
+
+ kwargs = {key: numpy.array(val) for key, val in kwargs.items()}
+ axes = numpy.array(axes)
+ n_axes = axes.size
+
+ if filter_func == ndimage.rank_filter:
+ args = (2,) # (rank,)
+ elif filter_func == ndimage.percentile_filter:
+ args = (30,) # (percentile,)
+ else:
+ args = ()
+
+ # form kwargs that specify only the axes in `axes`
+ reduced_kwargs = {key: val[axes] for key, val in kwargs.items()}
+ if len(set(axes % array.ndim)) != len(axes):
+ # parametrized cases with duplicate axes raise an error
+ with pytest.raises(ValueError, match="axes must be unique"):
+ filter_func(array, *args, [size]*n_axes, axes=axes,
+ **reduced_kwargs)
+ return
+
+ output = filter_func(array, *args, [size]*n_axes, axes=axes,
+ **reduced_kwargs)
+
+ # result should be equivalent to sigma=0.0/size=1 on unfiltered axes
+ size_3d = numpy.full(array.ndim, fill_value=size0)
+ size_3d[axes] = size
+ if 'origin' in kwargs:
+ # origin must be zero on any axis that is not being filtered
+ origin = numpy.array([0, 0, 0])
+ origin[axes] = reduced_kwargs['origin']
+ kwargs['origin'] = origin
+ expected = filter_func(array, *args, size_3d, **kwargs)
+ assert_allclose(output, expected)
+
+ @pytest.mark.parametrize(
+ 'filter_func, args',
+ [(ndimage.gaussian_filter, (1.0,)), # args = (sigma,)
+ (ndimage.uniform_filter, (3,)), # args = (size,)
+ (ndimage.minimum_filter, (3,)), # args = (size,)
+ (ndimage.maximum_filter, (3,)), # args = (size,)
+ (ndimage.median_filter, (3,)), # args = (size,)
+ (ndimage.rank_filter, (2, 3)), # args = (rank, size)
+ (ndimage.percentile_filter, (30, 3))]) # args = (percentile, size)
+ @pytest.mark.parametrize(
+ 'axes', [(1.5,), (0, 1, 2, 3), (3,), (-4,)]
+ )
+ def test_filter_invalid_axes(self, filter_func, args, axes):
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
+ if any(isinstance(ax, float) for ax in axes):
+ error_class = TypeError
+ match = "cannot be interpreted as an integer"
+ else:
+ error_class = ValueError
+ match = "out of range"
+ with pytest.raises(error_class, match=match):
+ filter_func(array, *args, axes=axes)
+
+ @pytest.mark.parametrize(
+ 'filter_func, kwargs',
+ [(ndimage.minimum_filter, {}),
+ (ndimage.maximum_filter, {}),
+ (ndimage.median_filter, {}),
+ (ndimage.rank_filter, dict(rank=3)),
+ (ndimage.percentile_filter, dict(percentile=30))])
+ @pytest.mark.parametrize(
+ 'axes', [(0, ), (1, 2), (0, 1, 2)]
+ )
+ @pytest.mark.parametrize('separable_footprint', [False, True])
+ def test_filter_invalid_footprint_ndim(self, filter_func, kwargs, axes,
+ separable_footprint):
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
+ # create a footprint with one too many dimensions
+ footprint = numpy.ones((3,) * (len(axes) + 1))
+ if not separable_footprint:
+ footprint[(0,) * footprint.ndim] = 0
+ if (filter_func in [ndimage.minimum_filter, ndimage.maximum_filter]
+ and separable_footprint):
+ match = "sequence argument must have length equal to input rank"
+ else:
+ match = "footprint array has incorrect shape"
+ with pytest.raises(RuntimeError, match=match):
+ filter_func(array, **kwargs, footprint=footprint, axes=axes)
+
+ @pytest.mark.parametrize('n_mismatch', [1, 3])
+ @pytest.mark.parametrize('filter_func, kwargs, key, val',
+ _cases_axes_tuple_length_mismatch())
+ def test_filter_tuple_length_mismatch(self, n_mismatch, filter_func,
+ kwargs, key, val):
+ # Test for the intended RuntimeError when a kwarg has an invalid size
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
+ kwargs = dict(**kwargs, axes=(0, 1))
+ kwargs[key] = (val,) * n_mismatch
+ err_msg = "sequence argument must have length equal to input rank"
+ with pytest.raises(RuntimeError, match=err_msg):
+ filter_func(array, **kwargs)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_prewitt01(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
+ output = ndimage.prewitt(array, 0)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_prewitt02(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
+ output = numpy.zeros(array.shape, dtype)
+ ndimage.prewitt(array, 0, output)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_prewitt03(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
+ t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
+ output = ndimage.prewitt(array, 1)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_prewitt04(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.prewitt(array, -1)
+ output = ndimage.prewitt(array, 1)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_sobel01(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
+ output = ndimage.sobel(array, 0)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_sobel02(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
+ output = numpy.zeros(array.shape, dtype)
+ ndimage.sobel(array, 0, output)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_sobel03(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
+ t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
+ output = ndimage.sobel(array, 1)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_sobel04(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ t = ndimage.sobel(array, -1)
+ output = ndimage.sobel(array, 1)
+ assert_array_almost_equal(t, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_laplace01(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
+ tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
+ output = ndimage.laplace(array)
+ assert_array_almost_equal(tmp1 + tmp2, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_laplace02(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
+ tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
+ output = numpy.zeros(array.shape, dtype)
+ ndimage.laplace(array, output=output)
+ assert_array_almost_equal(tmp1 + tmp2, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_gaussian_laplace01(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
+ output = ndimage.gaussian_laplace(array, 1.0)
+ assert_array_almost_equal(tmp1 + tmp2, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_gaussian_laplace02(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
+ output = numpy.zeros(array.shape, dtype)
+ ndimage.gaussian_laplace(array, 1.0, output)
+ assert_array_almost_equal(tmp1 + tmp2, output)
+
+ @pytest.mark.parametrize('dtype', types + complex_types)
+ def test_generic_laplace01(self, dtype):
+ def derivative2(input, axis, output, mode, cval, a, b):
+ sigma = [a, b / 2.0]
+ input = numpy.asarray(input)
+ order = [0] * input.ndim
+ order[axis] = 2
+ return ndimage.gaussian_filter(input, sigma, order,
+ output, mode, cval)
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = numpy.zeros(array.shape, dtype)
+ tmp = ndimage.generic_laplace(array, derivative2,
+ extra_arguments=(1.0,),
+ extra_keywords={'b': 2.0})
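+ # with a=1.0 and b=2.0 the inner sigma is [1.0, 1.0], so this should
+ # reproduce gaussian_laplace(array, 1.0) computed next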
+ ndimage.gaussian_laplace(array, 1.0, output)
+ assert_array_almost_equal(tmp, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_gaussian_gradient_magnitude01(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
+ output = ndimage.gaussian_gradient_magnitude(array, 1.0)
+ expected = tmp1 * tmp1 + tmp2 * tmp2
+ expected = numpy.sqrt(expected).astype(dtype)
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype',
+ [numpy.int32, numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ def test_gaussian_gradient_magnitude02(self, dtype):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype) * 100
+ tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
+ tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
+ output = numpy.zeros(array.shape, dtype)
+ ndimage.gaussian_gradient_magnitude(array, 1.0, output)
+ expected = tmp1 * tmp1 + tmp2 * tmp2
+ expected = numpy.sqrt(expected).astype(dtype)
+ assert_array_almost_equal(expected, output)
+
+ def test_generic_gradient_magnitude01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], numpy.float64)
+
+ def derivative(input, axis, output, mode, cval, a, b):
+ sigma = [a, b / 2.0]
+ input = numpy.asarray(input)
+ order = [0] * input.ndim
+ order[axis] = 1
+ return ndimage.gaussian_filter(input, sigma, order,
+ output, mode, cval)
+ tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
+ tmp2 = ndimage.generic_gradient_magnitude(
+ array, derivative, extra_arguments=(1.0,),
+ extra_keywords={'b': 2.0})
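+ # with a=1.0 and b=2.0 the per-axis sigma is again [1.0, 1.0], so
+ # tmp2 should match tmp1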
+ assert_array_almost_equal(tmp1, tmp2)
+
+ def test_uniform01(self):
+ array = numpy.array([2, 4, 6])
+ size = 2
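+ # size=2 with origin=-1 averages each sample with its right
+ # neighbour: [(2+4)/2, (4+6)/2, (6+6)/2] == [3, 5, 6] under 'reflect'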
+ output = ndimage.uniform_filter1d(array, size, origin=-1)
+ assert_array_almost_equal([3, 5, 6], output)
+
+ def test_uniform01_complex(self):
+ array = numpy.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=numpy.complex128)
+ size = 2
+ output = ndimage.uniform_filter1d(array, size, origin=-1)
+ assert_array_almost_equal([3, 5, 6], output.real)
+ assert_array_almost_equal([1.5, 2.5, 3], output.imag)
+
+ def test_uniform02(self):
+ array = numpy.array([1, 2, 3])
+ filter_shape = [0]
+ output = ndimage.uniform_filter(array, filter_shape)
+ assert_array_almost_equal(array, output)
+
+ def test_uniform03(self):
+ array = numpy.array([1, 2, 3])
+ filter_shape = [1]
+ output = ndimage.uniform_filter(array, filter_shape)
+ assert_array_almost_equal(array, output)
+
+ def test_uniform04(self):
+ array = numpy.array([2, 4, 6])
+ filter_shape = [2]
+ output = ndimage.uniform_filter(array, filter_shape)
+ assert_array_almost_equal([2, 3, 5], output)
+
+ def test_uniform05(self):
+ array = []
+ filter_shape = [1]
+ output = ndimage.uniform_filter(array, filter_shape)
+ assert_array_almost_equal([], output)
+
+ @pytest.mark.parametrize('dtype_array', types)
+ @pytest.mark.parametrize('dtype_output', types)
+ def test_uniform06(self, dtype_array, dtype_output):
+ filter_shape = [2, 2]
+ array = numpy.array([[4, 8, 12],
+ [16, 20, 24]], dtype_array)
+ output = ndimage.uniform_filter(
+ array, filter_shape, output=dtype_output)
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
+ assert_equal(output.dtype.type, dtype_output)
+
+ @pytest.mark.parametrize('dtype_array', complex_types)
+ @pytest.mark.parametrize('dtype_output', complex_types)
+ def test_uniform06_complex(self, dtype_array, dtype_output):
+ filter_shape = [2, 2]
+ array = numpy.array([[4, 8 + 5j, 12],
+ [16, 20, 24]], dtype_array)
+ output = ndimage.uniform_filter(
+ array, filter_shape, output=dtype_output)
+ assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
+ assert_equal(output.dtype.type, dtype_output)
+
+ def test_minimum_filter01(self):
+ array = numpy.array([1, 2, 3, 4, 5])
+ filter_shape = numpy.array([2])
+ output = ndimage.minimum_filter(array, filter_shape)
+ assert_array_almost_equal([1, 1, 2, 3, 4], output)
+
+ def test_minimum_filter02(self):
+ array = numpy.array([1, 2, 3, 4, 5])
+ filter_shape = numpy.array([3])
+ output = ndimage.minimum_filter(array, filter_shape)
+ assert_array_almost_equal([1, 1, 2, 3, 4], output)
+
+ def test_minimum_filter03(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ filter_shape = numpy.array([2])
+ output = ndimage.minimum_filter(array, filter_shape)
+ assert_array_almost_equal([3, 2, 2, 1, 1], output)
+
+ def test_minimum_filter04(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ filter_shape = numpy.array([3])
+ output = ndimage.minimum_filter(array, filter_shape)
+ assert_array_almost_equal([2, 2, 1, 1, 1], output)
+
+ def test_minimum_filter05(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ filter_shape = numpy.array([2, 3])
+ output = ndimage.minimum_filter(array, filter_shape)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 2, 1, 1, 1],
+ [5, 3, 3, 1, 1]], output)
+
+ def test_minimum_filter05_overlap(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ filter_shape = numpy.array([2, 3])
+ ndimage.minimum_filter(array, filter_shape, output=array)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 2, 1, 1, 1],
+ [5, 3, 3, 1, 1]], array)
+
+ def test_minimum_filter06(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 1, 1], [1, 1, 1]]
+ output = ndimage.minimum_filter(array, footprint=footprint)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 2, 1, 1, 1],
+ [5, 3, 3, 1, 1]], output)
+ # separable footprint should allow mode sequence
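+ # (an all-ones rectangular footprint factors into one 1-D pass per
+ # axis, so a per-axis mode list is meaningful)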
+ output2 = ndimage.minimum_filter(array, footprint=footprint,
+ mode=['reflect', 'reflect'])
+ assert_array_almost_equal(output2, output)
+
+ def test_minimum_filter07(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.minimum_filter(array, footprint=footprint)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1]], output)
+ with assert_raises(RuntimeError):
+ ndimage.minimum_filter(array, footprint=footprint,
+ mode=['reflect', 'constant'])
+
+ def test_minimum_filter08(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
+ assert_array_almost_equal([[3, 1, 3, 1, 1],
+ [5, 3, 3, 1, 1],
+ [3, 3, 1, 1, 1]], output)
+
+ def test_minimum_filter09(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.minimum_filter(array, footprint=footprint,
+ origin=[-1, 0])
+ assert_array_almost_equal([[2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1],
+ [5, 3, 3, 1, 1]], output)
+
+ def test_maximum_filter01(self):
+ array = numpy.array([1, 2, 3, 4, 5])
+ filter_shape = numpy.array([2])
+ output = ndimage.maximum_filter(array, filter_shape)
+ assert_array_almost_equal([1, 2, 3, 4, 5], output)
+
+ def test_maximum_filter02(self):
+ array = numpy.array([1, 2, 3, 4, 5])
+ filter_shape = numpy.array([3])
+ output = ndimage.maximum_filter(array, filter_shape)
+ assert_array_almost_equal([2, 3, 4, 5, 5], output)
+
+ def test_maximum_filter03(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ filter_shape = numpy.array([2])
+ output = ndimage.maximum_filter(array, filter_shape)
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
+
+ def test_maximum_filter04(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ filter_shape = numpy.array([3])
+ output = ndimage.maximum_filter(array, filter_shape)
+ assert_array_almost_equal([3, 5, 5, 5, 4], output)
+
+ def test_maximum_filter05(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ filter_shape = numpy.array([2, 3])
+ output = ndimage.maximum_filter(array, filter_shape)
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
+ [7, 9, 9, 9, 5],
+ [8, 9, 9, 9, 7]], output)
+
+ def test_maximum_filter06(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 1, 1], [1, 1, 1]]
+ output = ndimage.maximum_filter(array, footprint=footprint)
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
+ [7, 9, 9, 9, 5],
+ [8, 9, 9, 9, 7]], output)
+ # separable footprint should allow mode sequence
+ output2 = ndimage.maximum_filter(array, footprint=footprint,
+ mode=['reflect', 'reflect'])
+ assert_array_almost_equal(output2, output)
+
+ def test_maximum_filter07(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.maximum_filter(array, footprint=footprint)
+ assert_array_almost_equal([[3, 5, 5, 5, 4],
+ [7, 7, 9, 9, 5],
+ [7, 9, 8, 9, 7]], output)
+ # non-separable footprint should not allow mode sequence
+ with assert_raises(RuntimeError):
+ ndimage.maximum_filter(array, footprint=footprint,
+ mode=['reflect', 'reflect'])
+
+ def test_maximum_filter08(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
+ assert_array_almost_equal([[7, 9, 9, 5, 5],
+ [9, 8, 9, 7, 5],
+ [8, 8, 7, 7, 7]], output)
+
+ def test_maximum_filter09(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.maximum_filter(array, footprint=footprint,
+ origin=[-1, 0])
+ assert_array_almost_equal([[7, 7, 9, 9, 5],
+ [7, 9, 8, 9, 7],
+ [8, 8, 8, 7, 7]], output)
+
+ @pytest.mark.parametrize(
+ 'axes', tuple(itertools.combinations(range(-3, 3), 2))
+ )
+ @pytest.mark.parametrize(
+ 'filter_func, kwargs',
+ [(ndimage.minimum_filter, {}),
+ (ndimage.maximum_filter, {}),
+ (ndimage.median_filter, {}),
+ (ndimage.rank_filter, dict(rank=3)),
+ (ndimage.percentile_filter, dict(percentile=60))]
+ )
+ def test_minmax_nonseparable_axes(self, filter_func, axes, kwargs):
+ array = numpy.arange(6 * 8 * 12, dtype=numpy.float32).reshape(6, 8, 12)
+ # use 2D triangular footprint because it is non-separable
+ footprint = numpy.tri(5)
+ axes = numpy.array(axes)
+
+ if len(set(axes % array.ndim)) != len(axes):
+ # parametrized cases with duplicate axes raise an error
+ with pytest.raises(ValueError):
+ filter_func(array, footprint=footprint, axes=axes, **kwargs)
+ return
+ output = filter_func(array, footprint=footprint, axes=axes, **kwargs)
+
+ missing_axis = tuple(set(range(3)) - set(axes % array.ndim))[0]
+ footprint_3d = numpy.expand_dims(footprint, missing_axis)
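+ # inserting a singleton axis on the unfiltered dimension makes the
+ # full 3-D call equivalent to the axes= call above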
+ expected = filter_func(array, footprint=footprint_3d, **kwargs)
+ assert_allclose(output, expected)
+
+ def test_rank01(self):
+ array = numpy.array([1, 2, 3, 4, 5])
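+ # for this sorted, increasing input the maximum of every size-2
+ # window is the sample itself, so rank 1, the 100th percentile and
+ # the (upper-middle) median all reproduce the input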
+ output = ndimage.rank_filter(array, 1, size=2)
+ assert_array_almost_equal(array, output)
+ output = ndimage.percentile_filter(array, 100, size=2)
+ assert_array_almost_equal(array, output)
+ output = ndimage.median_filter(array, 2)
+ assert_array_almost_equal(array, output)
+
+ def test_rank02(self):
+ array = numpy.array([1, 2, 3, 4, 5])
+ output = ndimage.rank_filter(array, 1, size=[3])
+ assert_array_almost_equal(array, output)
+ output = ndimage.percentile_filter(array, 50, size=3)
+ assert_array_almost_equal(array, output)
+ output = ndimage.median_filter(array, (3,))
+ assert_array_almost_equal(array, output)
+
+ def test_rank03(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ output = ndimage.rank_filter(array, 1, size=[2])
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
+ output = ndimage.percentile_filter(array, 100, size=2)
+ assert_array_almost_equal([3, 3, 5, 5, 4], output)
+
+ def test_rank04(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ expected = [3, 3, 2, 4, 4]
+ output = ndimage.rank_filter(array, 1, size=3)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 50, size=3)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.median_filter(array, size=3)
+ assert_array_almost_equal(expected, output)
+
+ def test_rank05(self):
+ array = numpy.array([3, 2, 5, 1, 4])
+ expected = [3, 3, 2, 4, 4]
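+ # rank=-2 counts from the end of the sorted window, so for size 3 it
+ # selects index 1 -- the median -- matching test_rank04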
+ output = ndimage.rank_filter(array, -2, size=3)
+ assert_array_almost_equal(expected, output)
+
+ def test_rank06(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]])
+ expected = [[2, 2, 1, 1, 1],
+ [3, 3, 2, 1, 1],
+ [5, 5, 3, 3, 1]]
+ output = ndimage.rank_filter(array, 1, size=[2, 3])
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 17, size=(2, 3))
+ assert_array_almost_equal(expected, output)
+
+ def test_rank06_overlap(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]])
+ array_copy = array.copy()
+ expected = [[2, 2, 1, 1, 1],
+ [3, 3, 2, 1, 1],
+ [5, 5, 3, 3, 1]]
+ ndimage.rank_filter(array, 1, size=[2, 3], output=array)
+ assert_array_almost_equal(expected, array)
+
+ ndimage.percentile_filter(array_copy, 17, size=(2, 3),
+ output=array_copy)
+ assert_array_almost_equal(expected, array_copy)
+
+ def test_rank07(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]])
+ expected = [[3, 5, 5, 5, 4],
+ [5, 5, 7, 5, 4],
+ [6, 8, 8, 7, 5]]
+ output = ndimage.rank_filter(array, -2, size=[2, 3])
+ assert_array_almost_equal(expected, output)
+
+ def test_rank08(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]])
+ expected = [[3, 3, 2, 4, 4],
+ [5, 5, 5, 4, 4],
+ [5, 6, 7, 5, 5]]
+ output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
+ assert_array_almost_equal(expected, output)
+ output = ndimage.rank_filter(array, 3, size=(2, 3))
+ assert_array_almost_equal(expected, output)
+ output = ndimage.median_filter(array, size=(2, 3))
+ assert_array_almost_equal(expected, output)
+
+ # non-separable: does not allow mode sequence
+ with assert_raises(RuntimeError):
+ ndimage.percentile_filter(array, 50.0, size=(2, 3),
+ mode=['reflect', 'constant'])
+ with assert_raises(RuntimeError):
+ ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
+ with assert_raises(RuntimeError):
+ ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_rank09(self, dtype):
+ expected = [[3, 3, 2, 4, 4],
+ [3, 5, 2, 5, 1],
+ [5, 5, 8, 3, 5]]
+ footprint = [[1, 0, 1], [0, 1, 0]]
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = ndimage.rank_filter(array, 1, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 35, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ def test_rank10(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ expected = [[2, 2, 1, 1, 1],
+ [2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1]]
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.rank_filter(array, 0, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ def test_rank11(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ expected = [[3, 5, 5, 5, 4],
+ [7, 7, 9, 9, 5],
+ [7, 9, 8, 9, 7]]
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.rank_filter(array, -1, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_rank12(self, dtype):
+ expected = [[3, 3, 2, 4, 4],
+ [3, 5, 2, 5, 1],
+ [5, 5, 8, 3, 5]]
+ footprint = [[1, 0, 1], [0, 1, 0]]
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = ndimage.rank_filter(array, 1, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.percentile_filter(array, 50.0,
+ footprint=footprint)
+ assert_array_almost_equal(expected, output)
+ output = ndimage.median_filter(array, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_rank13(self, dtype):
+ expected = [[5, 2, 5, 1, 1],
+ [5, 8, 3, 5, 5],
+ [6, 6, 5, 5, 5]]
+ footprint = [[1, 0, 1], [0, 1, 0]]
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = ndimage.rank_filter(array, 1, footprint=footprint,
+ origin=-1)
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_rank14(self, dtype):
+ expected = [[3, 5, 2, 5, 1],
+ [5, 5, 8, 3, 5],
+ [5, 6, 6, 5, 5]]
+ footprint = [[1, 0, 1], [0, 1, 0]]
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = ndimage.rank_filter(array, 1, footprint=footprint,
+ origin=[-1, 0])
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_rank15(self, dtype):
+ expected = [[2, 3, 1, 4, 1],
+ [5, 3, 7, 1, 1],
+ [5, 5, 3, 3, 3]]
+ footprint = [[1, 0, 1], [0, 1, 0]]
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [5, 8, 3, 7, 1],
+ [5, 6, 9, 3, 5]], dtype)
+ output = ndimage.rank_filter(array, 0, footprint=footprint,
+ origin=[-1, 0])
+ assert_array_almost_equal(expected, output)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_generic_filter1d01(self, dtype):
+ weights = numpy.array([1.1, 2.2, 3.3])
+
+ def _filter_func(input, output, fltr, total):
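+ # generic_filter1d hands the callback an input line extended by
+ # filter_size - 1 samples; writing output[ii] from input[ii:ii + 3]
+ # therefore reproduces correlate1d with origin=-1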
+ fltr = fltr / total
+ for ii in range(input.shape[0] - 2):
+ output[ii] = input[ii] * fltr[0]
+ output[ii] += input[ii + 1] * fltr[1]
+ output[ii] += input[ii + 2] * fltr[2]
+ a = numpy.arange(12, dtype=dtype)
+ a.shape = (3, 4)
+ r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
+ r2 = ndimage.generic_filter1d(
+ a, _filter_func, 3, axis=0, origin=-1,
+ extra_arguments=(weights,),
+ extra_keywords={'total': weights.sum()})
+ assert_array_almost_equal(r1, r2)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_generic_filter01(self, dtype):
+ filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
+ footprint = numpy.array([[1, 0], [0, 1]])
+ cf = numpy.array([1., 4.])
+
+ def _filter_func(buffer, weights, total=1.0):
+ weights = weights / total
+ return (buffer * weights).sum()
+
+ a = numpy.arange(12, dtype=dtype)
+ a.shape = (3, 4)
+ r1 = ndimage.correlate(a, filter_ * footprint)
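+ # cf / cf.sum() weights the two footprint taps as [0.2, 0.8], so the
+ # reference is the plain correlation divided by 5 (floor division for
+ # the integer dtypes)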
+ if dtype in float_types:
+ r1 /= 5
+ else:
+ r1 //= 5
+ r2 = ndimage.generic_filter(
+ a, _filter_func, footprint=footprint, extra_arguments=(cf,),
+ extra_keywords={'total': cf.sum()})
+ assert_array_almost_equal(r1, r2)
+
+ # generic_filter doesn't allow mode sequence
+ with assert_raises(RuntimeError):
+ r2 = ndimage.generic_filter(
+ a, _filter_func, mode=['reflect', 'reflect'],
+ footprint=footprint, extra_arguments=(cf,),
+ extra_keywords={'total': cf.sum()})
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1, 1, 2]),
+ ('wrap', [3, 1, 2]),
+ ('reflect', [1, 1, 2]),
+ ('mirror', [2, 1, 2]),
+ ('constant', [0, 1, 2])]
+ )
+ def test_extend01(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([1, 0])
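+ # with weights [1, 0] the kernel origin is index 1, so output[i] is
+ # simply the extended a[i-1]; each mode is probed purely through its
+ # left extension of [1, 2, 3]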
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1, 1, 1]),
+ ('wrap', [3, 1, 2]),
+ ('reflect', [3, 3, 2]),
+ ('mirror', [1, 2, 3]),
+ ('constant', [0, 0, 0])]
+ )
+ def test_extend02(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
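+ # the length-8 kernel has its origin at index 4, so output[i] = a[i-4]
+ # reads deep into the left extension, which also separates 'reflect'
+ # from 'mirror'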
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [2, 3, 3]),
+ ('wrap', [2, 3, 1]),
+ ('reflect', [2, 3, 3]),
+ ('mirror', [2, 3, 2]),
+ ('constant', [2, 3, 0])]
+ )
+ def test_extend03(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([0, 0, 1])
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [3, 3, 3]),
+ ('wrap', [2, 3, 1]),
+ ('reflect', [2, 1, 1]),
+ ('mirror', [1, 2, 3]),
+ ('constant', [0, 0, 0])]
+ )
+ def test_extend04(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+ output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
+ ('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
+ ('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
+ ('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
+ ('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
+ )
+ def test_extend05(self, mode, expected_value):
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ weights = numpy.array([[1, 0], [0, 0]])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
+ ('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
+ ('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
+ ('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
+ ('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
+ )
+ def test_extend06(self, mode, expected_value):
+ array = numpy.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [3, 3, 3]),
+ ('wrap', [2, 3, 1]),
+ ('reflect', [2, 1, 1]),
+ ('mirror', [1, 2, 3]),
+ ('constant', [0, 0, 0])]
+ )
+ def test_extend07(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [[3], [3], [3]]),
+ ('wrap', [[2], [3], [1]]),
+ ('reflect', [[2], [1], [1]]),
+ ('mirror', [[1], [2], [3]]),
+ ('constant', [[0], [0], [0]])]
+ )
+ def test_extend08(self, mode, expected_value):
+ array = numpy.array([[1], [2], [3]])
+ weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [3, 3, 3]),
+ ('wrap', [2, 3, 1]),
+ ('reflect', [2, 1, 1]),
+ ('mirror', [1, 2, 3]),
+ ('constant', [0, 0, 0])]
+ )
+ def test_extend09(self, mode, expected_value):
+ array = numpy.array([1, 2, 3])
+ weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [[3], [3], [3]]),
+ ('wrap', [[2], [3], [1]]),
+ ('reflect', [[2], [1], [1]]),
+ ('mirror', [[1], [2], [3]]),
+ ('constant', [[0], [0], [0]])]
+ )
+ def test_extend10(self, mode, expected_value):
+ array = numpy.array([[1], [2], [3]])
+ weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
+ output = ndimage.correlate(array, weights, mode=mode, cval=0)
+ assert_array_equal(output, expected_value)
+
+
+def test_ticket_701():
+ # Test generic filter sizes
+ arr = numpy.arange(4).reshape((2, 2))
+ def func(x):
+ return numpy.min(x)
+ res = ndimage.generic_filter(arr, func, size=(1, 1))
+ # The following raises an error unless ticket 701 is fixed
+ res2 = ndimage.generic_filter(arr, func, size=1)
+ assert_equal(res, res2)
+
+
+def test_gh_5430():
+ # At least one of these raises an error unless gh-5430 is
+ # fixed. In py2k an int is implemented using a C long, so
+ # which one fails depends on your system. In py3k there is only
+ # one arbitrary precision integer type, so both should fail.
+ sigma = numpy.int32(1)
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
+ assert_equal(out, [sigma])
+ sigma = numpy.int64(1)
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
+ assert_equal(out, [sigma])
+ # This worked before; make sure it still works
+ sigma = 1
+ out = ndimage._ni_support._normalize_sequence(sigma, 1)
+ assert_equal(out, [sigma])
+ # This worked before; make sure it still works
+ sigma = [1, 1]
+ out = ndimage._ni_support._normalize_sequence(sigma, 2)
+ assert_equal(out, sigma)
+ # Also include the OP's original example to make sure we fixed the issue
+ x = numpy.random.normal(size=(256, 256))
+ perlin = numpy.zeros_like(x)
+ for i in 2**numpy.arange(6):
+ perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i**2
+ # This also fixes gh-4106; show that the OP's example now runs.
+ x = numpy.int64(21)
+ ndimage._ni_support._normalize_sequence(x, 0)
+
+
+def test_gaussian_kernel1d():
+ radius = 10
+ sigma = 2
+ sigma2 = sigma * sigma
+ x = numpy.arange(-radius, radius + 1, dtype=numpy.double)
+ phi_x = numpy.exp(-0.5 * x * x / sigma2)
+ phi_x /= phi_x.sum()
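+ # the expected arrays are the normalized Gaussian phi and its
+ # analytic derivatives: phi' = -(x / sigma2) * phi,
+ # phi'' = ((x * x / sigma2 - 1) / sigma2) * phi and
+ # phi''' = ((3 - x * x / sigma2) * x / sigma2**2) * phi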
+ assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
+ assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
+ assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
+ _gaussian_kernel1d(sigma, 2, radius))
+ assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
+ _gaussian_kernel1d(sigma, 3, radius))
+
+
+def test_orders_gauss():
+ # Check order inputs to Gaussians
+ arr = numpy.zeros((1,))
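+ # a Gaussian derivative of any order applied to an all-zero input is
+ # still zero; negative orders must raise ValueError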
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=0))
+ assert_equal(0, ndimage.gaussian_filter(arr, 1, order=3))
+ assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=0))
+ assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=3))
+ assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
+
+
+def test_valid_origins():
+ """Regression test for #1311."""
+ def func(x):
+ return numpy.mean(x)
+ data = numpy.array([1, 2, 3, 4, 5], dtype=numpy.float64)
+ assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
+ origin=2)
+ assert_raises(ValueError, ndimage.generic_filter1d, data, func,
+ filter_size=3, origin=2)
+ assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
+ origin=2)
+
+ for filter in [ndimage.uniform_filter, ndimage.minimum_filter,
+ ndimage.maximum_filter, ndimage.maximum_filter1d,
+ ndimage.median_filter, ndimage.minimum_filter1d]:
+ # This should work, since for size == 3, the valid range for origin is
+ # -1 to 1.
+ list(filter(data, 3, origin=-1))
+ list(filter(data, 3, origin=1))
+ # Just check this raises an error instead of silently accepting or
+ # segfaulting.
+ assert_raises(ValueError, filter, data, 3, origin=2)
+
+
+def test_bad_convolve_and_correlate_origins():
+ """Regression test for gh-822."""
+ # Before gh-822 was fixed, these would generate segmentation faults or
+ # other crashes on many systems.
+ assert_raises(ValueError, ndimage.correlate1d,
+ [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
+ assert_raises(ValueError, ndimage.correlate,
+ [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
+ assert_raises(ValueError, ndimage.correlate,
+ numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, 1])
+
+ assert_raises(ValueError, ndimage.convolve1d,
+ numpy.arange(10), numpy.ones(3), origin=-2)
+ assert_raises(ValueError, ndimage.convolve,
+ numpy.arange(10), numpy.ones(3), origin=[-2])
+ assert_raises(ValueError, ndimage.convolve,
+ numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, -2])
+
+
+def test_multiple_modes():
+ # Test that filters with multiple-mode capabilities for different
+ # dimensions give the same result as applying a single mode.
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ mode1 = 'reflect'
+ mode2 = ['reflect', 'reflect']
+
+ assert_equal(ndimage.gaussian_filter(arr, 1, mode=mode1),
+ ndimage.gaussian_filter(arr, 1, mode=mode2))
+ assert_equal(ndimage.prewitt(arr, mode=mode1),
+ ndimage.prewitt(arr, mode=mode2))
+ assert_equal(ndimage.sobel(arr, mode=mode1),
+ ndimage.sobel(arr, mode=mode2))
+ assert_equal(ndimage.laplace(arr, mode=mode1),
+ ndimage.laplace(arr, mode=mode2))
+ assert_equal(ndimage.gaussian_laplace(arr, 1, mode=mode1),
+ ndimage.gaussian_laplace(arr, 1, mode=mode2))
+ assert_equal(ndimage.maximum_filter(arr, size=5, mode=mode1),
+ ndimage.maximum_filter(arr, size=5, mode=mode2))
+ assert_equal(ndimage.minimum_filter(arr, size=5, mode=mode1),
+ ndimage.minimum_filter(arr, size=5, mode=mode2))
+ assert_equal(ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode1),
+ ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode2))
+ assert_equal(ndimage.uniform_filter(arr, 5, mode=mode1),
+ ndimage.uniform_filter(arr, 5, mode=mode2))
+
+
+def test_multiple_modes_sequentially():
+ # Test that filters that accept a sequence of modes (one per axis)
+ # give the same result as applying the corresponding 1-D filters
+ # with those modes sequentially, one axis at a time.
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ modes = ['reflect', 'wrap']
+
+ expected = ndimage.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
+ expected = ndimage.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
+ assert_equal(expected,
+ ndimage.gaussian_filter(arr, 1, mode=modes))
+
+ expected = ndimage.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
+ expected = ndimage.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
+ assert_equal(expected,
+ ndimage.uniform_filter(arr, 5, mode=modes))
+
+ expected = ndimage.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
+ expected = ndimage.maximum_filter1d(expected, size=5, axis=1,
+ mode=modes[1])
+ assert_equal(expected,
+ ndimage.maximum_filter(arr, size=5, mode=modes))
+
+ expected = ndimage.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
+ expected = ndimage.minimum_filter1d(expected, size=5, axis=1,
+ mode=modes[1])
+ assert_equal(expected,
+ ndimage.minimum_filter(arr, size=5, mode=modes))
+
+
+def test_multiple_modes_prewitt():
+ # Test prewitt filter for multiple extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[1., -3., 2.],
+ [1., -2., 1.],
+ [1., -1., 0.]])
+
+ modes = ['reflect', 'wrap']
+
+ assert_equal(expected,
+ ndimage.prewitt(arr, mode=modes))
+
+
+def test_multiple_modes_sobel():
+ # Test sobel filter for multiple extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[1., -4., 3.],
+ [2., -3., 1.],
+ [1., -1., 0.]])
+
+ modes = ['reflect', 'wrap']
+
+ assert_equal(expected,
+ ndimage.sobel(arr, mode=modes))
+
+
+def test_multiple_modes_laplace():
+ # Test laplace filter for multiple extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[-2., 2., 1.],
+ [-2., -3., 2.],
+ [1., 1., 0.]])
+
+ modes = ['reflect', 'wrap']
+
+ assert_equal(expected,
+ ndimage.laplace(arr, mode=modes))
+
+
+def test_multiple_modes_gaussian_laplace():
+ # Test gaussian_laplace filter for multiple extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[-0.28438687, 0.01559809, 0.19773499],
+ [-0.36630503, -0.20069774, 0.07483620],
+ [0.15849176, 0.18495566, 0.21934094]])
+
+ modes = ['reflect', 'wrap']
+
+ assert_almost_equal(expected,
+ ndimage.gaussian_laplace(arr, 1, mode=modes))
+
+
+def test_multiple_modes_gaussian_gradient_magnitude():
+ # Test gaussian_gradient_magnitude filter for multiple
+ # extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[0.04928965, 0.09745625, 0.06405368],
+ [0.23056905, 0.14025305, 0.04550846],
+ [0.19894369, 0.14950060, 0.06796850]])
+
+ modes = ['reflect', 'wrap']
+
+ calculated = ndimage.gaussian_gradient_magnitude(arr, 1, mode=modes)
+
+ assert_almost_equal(expected, calculated)
+
+
+def test_multiple_modes_uniform():
+ # Test uniform filter for multiple extrapolation modes
+ arr = numpy.array([[1., 0., 0.],
+ [1., 1., 0.],
+ [0., 0., 0.]])
+
+ expected = numpy.array([[0.32, 0.40, 0.48],
+ [0.20, 0.28, 0.32],
+ [0.28, 0.32, 0.40]])
+
+ modes = ['reflect', 'wrap']
+
+ assert_almost_equal(expected,
+ ndimage.uniform_filter(arr, 5, mode=modes))
+
+
+def test_gaussian_truncate():
+ # Test that Gaussian filters can be truncated at different widths.
+ # These tests only check that the result has the expected number
+ # of nonzero elements.
+ arr = numpy.zeros((100, 100), float)
+ arr[50, 50] = 1
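+ # With sigma=5 and truncate=2, radius = int(2 * 5 + 0.5) = 10, so the
+ # filtered impulse has 2*10 + 1 = 21 nonzero samples along each axis.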
+ num_nonzeros_2 = (ndimage.gaussian_filter(arr, 5, truncate=2) > 0).sum()
+ assert_equal(num_nonzeros_2, 21**2)
+ num_nonzeros_5 = (ndimage.gaussian_filter(arr, 5, truncate=5) > 0).sum()
+ assert_equal(num_nonzeros_5, 51**2)
+
+ # Test truncate when sigma is a sequence.
+ f = ndimage.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
+ fpos = f > 0
+ n0 = fpos.any(axis=0).sum()
+ # n0 should be 2*int(2.5*3.5 + 0.5) + 1
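+ # = 2 * int(9.25) + 1 = 2 * 9 + 1 = 19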
+ assert_equal(n0, 19)
+ n1 = fpos.any(axis=1).sum()
+ # n1 should be 2*int(0.5*3.5 + 0.5) + 1
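+ # = 2 * int(2.25) + 1 = 2 * 2 + 1 = 5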
+ assert_equal(n1, 5)
+
+ # Test gaussian_filter1d.
+ x = numpy.zeros(51)
+ x[25] = 1
+ f = ndimage.gaussian_filter1d(x, sigma=2, truncate=3.5)
+ n = (f > 0).sum()
+ assert_equal(n, 15)
+
+ # Test gaussian_laplace
+ y = ndimage.gaussian_laplace(x, sigma=2, truncate=3.5)
+ nonzero_indices = numpy.nonzero(y != 0)[0]
+ n = numpy.ptp(nonzero_indices) + 1
+ assert_equal(n, 15)
+
+ # Test gaussian_gradient_magnitude
+ y = ndimage.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
+ nonzero_indices = numpy.nonzero(y != 0)[0]
+ n = numpy.ptp(nonzero_indices) + 1
+ assert_equal(n, 15)
+
+
+def test_gaussian_radius():
+ # Test that Gaussian filters with radius argument produce the same
+ # results as the filters with corresponding truncate argument.
+ # radius = int(truncate * sigma + 0.5)
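+ # e.g. truncate=1.5 with sigma=2 gives radius = int(1.5 * 2 + 0.5) = 3.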
+ # Test gaussian_filter1d
+ x = numpy.zeros(7)
+ x[3] = 1
+ f1 = ndimage.gaussian_filter1d(x, sigma=2, truncate=1.5)
+ f2 = ndimage.gaussian_filter1d(x, sigma=2, radius=3)
+ assert_equal(f1, f2)
+
+ # Test gaussian_filter when sigma is a number.
+ a = numpy.zeros((9, 9))
+ a[4, 4] = 1
+ f1 = ndimage.gaussian_filter(a, sigma=0.5, truncate=3.5)
+ f2 = ndimage.gaussian_filter(a, sigma=0.5, radius=2)
+ assert_equal(f1, f2)
+
+ # Test gaussian_filter when sigma is a sequence.
+ a = numpy.zeros((50, 50))
+ a[25, 25] = 1
+ f1 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], truncate=3.5)
+ f2 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], radius=[2, 9])
+ assert_equal(f1, f2)
+
+
+def test_gaussian_radius_invalid():
+ # radius must be a nonnegative integer
+ with assert_raises(ValueError):
+ ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=-1)
+ with assert_raises(ValueError):
+ ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=1.1)
+
+
+class TestThreading:
+ def check_func_thread(self, n, fun, args, out):
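+ # Run fun in n concurrent threads, each writing to its own row of out;
+ # the results are compared against a serial run of the same calls.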
+ from threading import Thread
+ thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]})
+ for x in range(n)]
+ [t.start() for t in thrds]
+ [t.join() for t in thrds]
+
+ def check_func_serial(self, n, fun, args, out):
+ for i in range(n):
+ fun(*args, output=out[i])
+
+ def test_correlate1d(self):
+ d = numpy.random.randn(5000)
+ os = numpy.empty((4, d.size))
+ ot = numpy.empty_like(os)
+ k = numpy.arange(5)
+ self.check_func_serial(4, ndimage.correlate1d, (d, k), os)
+ self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
+ assert_array_equal(os, ot)
+
+ def test_correlate(self):
+ d = numpy.random.randn(500, 500)
+ k = numpy.random.randn(10, 10)
+ os = numpy.empty([4] + list(d.shape))
+ ot = numpy.empty_like(os)
+ self.check_func_serial(4, ndimage.correlate, (d, k), os)
+ self.check_func_thread(4, ndimage.correlate, (d, k), ot)
+ assert_array_equal(os, ot)
+
+ def test_median_filter(self):
+ d = numpy.random.randn(500, 500)
+ os = numpy.empty([4] + list(d.shape))
+ ot = numpy.empty_like(os)
+ self.check_func_serial(4, ndimage.median_filter, (d, 3), os)
+ self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
+ assert_array_equal(os, ot)
+
+ def test_uniform_filter1d(self):
+ d = numpy.random.randn(5000)
+ os = numpy.empty((4, d.size))
+ ot = numpy.empty_like(os)
+ self.check_func_serial(4, ndimage.uniform_filter1d, (d, 5), os)
+ self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
+ assert_array_equal(os, ot)
+
+ def test_minmax_filter(self):
+ d = numpy.random.randn(500, 500)
+ os = numpy.empty([4] + list(d.shape))
+ ot = numpy.empty_like(os)
+ self.check_func_serial(4, ndimage.maximum_filter, (d, 3), os)
+ self.check_func_thread(4, ndimage.maximum_filter, (d, 3), ot)
+ assert_array_equal(os, ot)
+ self.check_func_serial(4, ndimage.minimum_filter, (d, 3), os)
+ self.check_func_thread(4, ndimage.minimum_filter, (d, 3), ot)
+ assert_array_equal(os, ot)
+
+
+def test_minmaximum_filter1d():
+ # Regression test for gh-3898.
+ in_ = numpy.arange(10)
+ out = ndimage.minimum_filter1d(in_, 1)
+ assert_equal(in_, out)
+ out = ndimage.maximum_filter1d(in_, 1)
+ assert_equal(in_, out)
+ # Test reflect
+ out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
+ out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
+ # Test constant
+ out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
+ assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
+ out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
+ assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
+ # Test nearest
+ out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
+ out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
+ assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
+ # Test wrap
+ out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
+ assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
+ out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
+ assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
+
+
+def test_uniform_filter1d_roundoff_errors():
+ # gh-6930
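+ # With integer input and output, only windows that lie entirely inside
+ # the block of nine 1s average to exactly 1, so 9 - filter_size + 1
+ # outputs are nonzero; accumulated roundoff previously broke this.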
+ in_ = numpy.repeat([0, 1, 0], [9, 9, 9])
+ for filter_size in range(3, 10):
+ out = ndimage.uniform_filter1d(in_, filter_size)
+ assert_equal(out.sum(), 10 - filter_size)
+
+
+def test_footprint_all_zeros():
+ # regression test for gh-6876: footprint of all zeros segfaults
+ arr = numpy.random.randint(0, 100, (100, 100))
+ kernel = numpy.zeros((3, 3), bool)
+ with assert_raises(ValueError):
+ ndimage.maximum_filter(arr, footprint=kernel)
+
+
+def test_gaussian_filter():
+ # Test gaussian filter with numpy.float16
+ # gh-8207
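+ # float16 is not among the dtypes supported by the underlying filters,
+ # so a RuntimeError (rather than a wrong result or crash) is expected.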
+ data = numpy.array([1], dtype=numpy.float16)
+ sigma = 1.0
+ with assert_raises(RuntimeError):
+ ndimage.gaussian_filter(data, sigma)
+
+
+def test_rank_filter_noninteger_rank():
+ # regression test for gh-9388: rank_filter with a non-integer rank
+ # should raise TypeError
+ arr = numpy.random.random((10, 20, 30))
+ assert_raises(TypeError, ndimage.rank_filter, arr, 0.5,
+ footprint=numpy.ones((1, 1, 10), dtype=bool))
+
+
+def test_size_footprint_both_set():
+ # test input validation: a UserWarning is expected when both
+ # size and footprint are set
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ "ignoring size because footprint is set")
+ arr = numpy.random.random((10, 20, 30))
+ ndimage.rank_filter(arr, 5, size=2, footprint=numpy.ones((1, 1, 10),
+ dtype=bool))
+
+
+def test_byte_order_median():
+ """Regression test for #413: median_filter does not handle bytes orders."""
+ a = numpy.arange(9, dtype=' 3 raise NotImplementedError
+ x = numpy.ones((4, 6, 8, 10), dtype=numpy.complex128)
+ with pytest.raises(NotImplementedError):
+ ndimage.fourier_ellipsoid(x, 3)
+
+ def test_fourier_ellipsoid_1d_complex(self):
+ # expected result of 1d ellipsoid is the same as for fourier_uniform
+ for shape in [(32, ), (31, )]:
+ for type_, dec in zip([numpy.complex64, numpy.complex128],
+ [5, 14]):
+ x = numpy.ones(shape, dtype=type_)
+ a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
+ b = ndimage.fourier_uniform(x, 5, -1, 0)
+ assert_array_almost_equal(a, b, decimal=dec)
+
+ @pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
+ @pytest.mark.parametrize('dtype',
+ [numpy.float32, numpy.float64,
+ numpy.complex64, numpy.complex128])
+ @pytest.mark.parametrize('test_func',
+ [ndimage.fourier_ellipsoid,
+ ndimage.fourier_gaussian,
+ ndimage.fourier_uniform])
+ def test_fourier_zero_length_dims(self, shape, dtype, test_func):
+ a = numpy.ones(shape, dtype)
+ b = test_func(a, 3)
+ assert_equal(a, b)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..beb8681e850bd682b789e97f901048d718627dd0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_interpolation.py
@@ -0,0 +1,1327 @@
+import sys
+
+import numpy
+from numpy.testing import (assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose,
+ suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import scipy.ndimage as ndimage
+
+from . import types
+
+eps = 1e-12
+
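+# Map each ndimage boundary mode to the numpy.pad mode implementing the same
+# extension; used below to build padded reference results. Note the swap:
+# ndimage 'mirror' matches numpy 'reflect', and ndimage 'reflect' matches
+# numpy 'symmetric'.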
+ndimage_to_numpy_mode = {
+ 'mirror': 'reflect',
+ 'reflect': 'symmetric',
+ 'grid-mirror': 'symmetric',
+ 'grid-wrap': 'wrap',
+ 'nearest': 'edge',
+ 'grid-constant': 'constant',
+}
+
+
+class TestNdimageInterpolation:
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
+ ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
+ ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
+ ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
+ ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
+ ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
+ ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
+ )
+ def test_boundaries(self, mode, expected_value):
+ def shift(x):
+ return (x[0] + 0.5,)
+
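+ # The mapping samples the input at x + 0.5, so output indices 3..6 read
+ # past the 4-sample input and expose each boundary mode's extension.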
+ data = numpy.array([1, 2, 3, 4.])
+ assert_array_equal(
+ expected_value,
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+ output_shape=(7,), order=1))
+
+ @pytest.mark.parametrize(
+ 'mode, expected_value',
+ [('nearest', [1, 1, 2, 3]),
+ ('wrap', [3, 1, 2, 3]),
+ ('grid-wrap', [4, 1, 2, 3]),
+ ('mirror', [2, 1, 2, 3]),
+ ('reflect', [1, 1, 2, 3]),
+ ('constant', [-1, 1, 2, 3]),
+ ('grid-constant', [-1, 1, 2, 3])]
+ )
+ def test_boundaries2(self, mode, expected_value):
+ def shift(x):
+ return (x[0] - 0.9,)
+
+ data = numpy.array([1, 2, 3, 4])
+ assert_array_equal(
+ expected_value,
+ ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+ output_shape=(4,)))
+
+ @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
+ 'grid-wrap', 'grid-constant',
+ 'nearest'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_boundary_spline_accuracy(self, mode, order):
+ """Tests based on examples from gh-2640"""
+ data = numpy.arange(-6, 7, dtype=float)
+ x = numpy.linspace(-8, 15, num=1000)
+ y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
+
+ # compute expected value using explicit padding via numpy.pad
+ npad = 32
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ padded = numpy.pad(data, npad, mode=pad_mode)
+ expected = ndimage.map_coordinates(padded, [npad + x], order=order,
+ mode=mode)
+
+ atol = 1e-5 if mode == 'grid-constant' else 1e-12
+ assert_allclose(y, expected, rtol=1e-7, atol=atol)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline01(self, dtype, order):
+ data = numpy.ones([], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline02(self, dtype, order):
+ data = numpy.array([1], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline03(self, dtype, order):
+ data = numpy.ones([], dtype)
+ out = ndimage.spline_filter(data, order, output=dtype)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline04(self, dtype, order):
+ data = numpy.ones([4], dtype)
+ out = ndimage.spline_filter(data, order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(2, 6))
+ @pytest.mark.parametrize('dtype', types)
+ def test_spline05(self, dtype, order):
+ data = numpy.ones([4, 4], dtype)
+ out = ndimage.spline_filter(data, order=order)
+ assert_array_almost_equal(out, [[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform01(self, order):
+ data = numpy.array([1])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform02(self, order):
+ data = numpy.ones([4])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform03(self, order):
+ data = numpy.ones([4])
+
+ def mapping(x):
+ return (x[0] - 1,)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform04(self, order):
+ data = numpy.array([4, 1, 3, 2])
+
+ def mapping(x):
+ return (x[0] - 1,)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_geometric_transform05(self, order, dtype):
+ data = numpy.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = numpy.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+
+ def mapping(x):
+ return (x[0], x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform06(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0], x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform07(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform08(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1] - 1)
+
+ out = ndimage.geometric_transform(data, mapping, data.shape,
+ order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform10(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+
+ def mapping(x):
+ return (x[0] - 1, x[1] - 1)
+
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.geometric_transform(filtered, mapping, data.shape,
+ order=order, prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform13(self, order):
+ data = numpy.ones([2], numpy.float64)
+
+ def mapping(x):
+ return (x[0] // 2,)
+
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform14(self, order):
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
+
+ def mapping(x):
+ return (2 * x[0],)
+
+ out = ndimage.geometric_transform(data, mapping, [4], order=order)
+ assert_array_almost_equal(out, [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform15(self, order):
+ data = [1, 2, 3, 4]
+
+ def mapping(x):
+ return (x[0] / 2,)
+
+ out = ndimage.geometric_transform(data, mapping, [8], order=order)
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform16(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9.0, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0], x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (3, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform17(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] * 2, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, (1, 4),
+ order=order)
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform18(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] * 2, x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (1, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform19(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0], x[1] / 2)
+
+ out = ndimage.geometric_transform(data, mapping, (3, 8),
+ order=order)
+ assert_array_almost_equal(out[..., ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform20(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] / 2, x[1])
+
+ out = ndimage.geometric_transform(data, mapping, (6, 4),
+ order=order)
+ assert_array_almost_equal(out[::2, ...], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform21(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (x[0] / 2, x[1] / 2)
+
+ out = ndimage.geometric_transform(data, mapping, (6, 8),
+ order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform22(self, order):
+ data = numpy.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], numpy.float64)
+
+ def mapping1(x):
+ return (x[0] / 2, x[1] / 2)
+
+ def mapping2(x):
+ return (x[0] * 2, x[1] * 2)
+
+ out = ndimage.geometric_transform(data, mapping1,
+ (6, 8), order=order)
+ out = ndimage.geometric_transform(out, mapping2,
+ (3, 4), order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform23(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x):
+ return (1, x[0] * 2)
+
+ out = ndimage.geometric_transform(data, mapping, (2,), order=order)
+ out = out.astype(numpy.int32)
+ assert_array_almost_equal(out, [5, 7])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_geometric_transform24(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+
+ def mapping(x, a, b):
+ return (a, x[0] * b)
+
+ out = ndimage.geometric_transform(
+ data, mapping, (2,), order=order, extra_arguments=(1,),
+ extra_keywords={'b': 2})
+ assert_array_almost_equal(out, [5, 7])
+
+ def test_geometric_transform_grid_constant_order1(self):
+ # verify interpolation outside the original bounds
+ x = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype=float)
+
+ def mapping(x):
+ return (x[0] - 0.5), (x[1] - 0.5)
+
+ expected_result = numpy.array([[0.25, 0.75, 1.25],
+ [1.25, 3.00, 4.00]])
+ assert_array_almost_equal(
+ ndimage.geometric_transform(x, mapping, mode='grid-constant',
+ order=1),
+ expected_result,
+ )
+
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+ 'mirror', 'reflect'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_geometric_transform_vs_padded(self, order, mode):
+ x = numpy.arange(144, dtype=float).reshape(12, 12)
+
+ def mapping(x):
+ return (x[0] - 0.4), (x[1] + 2.3)
+
+ # Manually pad and then extract center after the transform to get the
+ # expected result.
+ npad = 24
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ xp = numpy.pad(x, npad, mode=pad_mode)
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
+ expected_result = ndimage.geometric_transform(
+ xp, mapping, mode=mode, order=order)[center_slice]
+
+ assert_allclose(
+ ndimage.geometric_transform(x, mapping, mode=mode,
+ order=order),
+ expected_result,
+ rtol=1e-7,
+ )
+
+ def test_geometric_transform_endianness_with_output_parameter(self):
+ # geometric transform given output ndarray or dtype with
+ # non-native endianness. see issue #4127
+ data = numpy.array([1])
+
+ def mapping(x):
+ return x
+
+ for out in [data.dtype, data.dtype.newbyteorder(),
+ numpy.empty_like(data),
+ numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
+ returned = ndimage.geometric_transform(data, mapping, data.shape,
+ output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [1])
+
+ def test_geometric_transform_with_string_output(self):
+ data = numpy.array([1])
+
+ def mapping(x):
+ return x
+
+ out = ndimage.geometric_transform(data, mapping, output='f')
+ assert_(out.dtype is numpy.dtype('f'))
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_map_coordinates01(self, order, dtype):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ expected = numpy.array([[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+ if data.dtype.kind == 'c':
+ data = data - 1j * data
+ expected = expected - 1j * expected
+
+ idx = numpy.indices(data.shape)
+ idx -= 1
+
+ out = ndimage.map_coordinates(data, idx, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_map_coordinates02(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ idx = numpy.indices(data.shape, numpy.float64)
+ idx -= 0.5
+
+ out1 = ndimage.shift(data, 0.5, order=order)
+ out2 = ndimage.map_coordinates(data, idx, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ def test_map_coordinates03(self):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]], order='F')
+ idx = numpy.indices(data.shape) - 1
+ out = ndimage.map_coordinates(data, idx)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+ assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
+ idx = numpy.indices(data[::2].shape) - 1
+ out = ndimage.map_coordinates(data[::2], idx)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3]])
+ assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
+ idx = numpy.indices(data[:, ::2].shape) - 1
+ out = ndimage.map_coordinates(data[:, ::2], idx)
+ assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
+ assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
+
+ def test_map_coordinates_endianness_with_output_parameter(self):
+ # output parameter given as array or dtype with either endianness
+ # see issue #4127
+ data = numpy.array([[1, 2], [7, 6]])
+ expected = numpy.array([[0, 0], [0, 1]])
+ idx = numpy.indices(data.shape)
+ idx -= 1
+ for out in [
+ data.dtype,
+ data.dtype.newbyteorder(),
+ numpy.empty_like(expected),
+ numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
+ ]:
+ returned = ndimage.map_coordinates(data, idx, output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, expected)
+
+ def test_map_coordinates_with_string_output(self):
+ data = numpy.array([[1]])
+ idx = numpy.indices(data.shape)
+ out = ndimage.map_coordinates(data, idx, output='f')
+ assert_(out.dtype is numpy.dtype('f'))
+ assert_array_almost_equal(out, [[1]])
+
+ @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
+ reason='do not run on 32 bit or windows '
+ '(no sparse memory)')
+ def test_map_coordinates_large_data(self):
+ # check crash on large data
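+ # The 30000 x 30000 float32 array is ~3.4 GiB; only the corner that is
+ # read gets touched, so this relies on lazy virtual-memory allocation.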
+ try:
+ n = 30000
+ a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
+ # fill the part we might read
+ a[n - 3:, n - 3:] = 0
+ ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
+ except MemoryError as e:
+ raise pytest.skip('Not enough memory available') from e
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform01(self, order):
+ data = numpy.array([1])
+ out = ndimage.affine_transform(data, [[1]], order=order)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform02(self, order):
+ data = numpy.ones([4])
+ out = ndimage.affine_transform(data, [[1]], order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform03(self, order):
+ data = numpy.ones([4])
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform04(self, order):
+ data = numpy.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[1]], -1, order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_affine_transform05(self, order, dtype):
+ data = numpy.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = numpy.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [0, -1], order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform06(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [0, -1], order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform07(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [-1, 0], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform08(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+ [-1, -1], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform09(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
+ [-1, -1], order=order,
+ prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform10(self, order):
+ data = numpy.ones([2], numpy.float64)
+ out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
+ order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform11(self, order):
+ data = [1, 5, 2, 6, 3, 7, 4, 4]
+ out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
+ assert_array_almost_equal(out, [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform12(self, order):
+ data = [1, 2, 3, 4]
+ out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
+ assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform13(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9.0, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform14(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
+ order=order)
+ assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform15(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
+ order=order)
+ assert_array_almost_equal(out, [[1, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform16(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
+ (3, 8), order=order)
+ assert_array_almost_equal(out[..., ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform17(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
+ (6, 4), order=order)
+ assert_array_almost_equal(out[::2, ...], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform18(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+ (6, 8), order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform19(self, order):
+ data = numpy.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], numpy.float64)
+ out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+ (6, 8), order=order)
+ out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
+ (3, 4), order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform20(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
+ order=order)
+ assert_array_almost_equal(out, [1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform21(self, order):
+ data = [[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]]
+ out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
+ order=order)
+ assert_array_almost_equal(out, [1, 9])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform22(self, order):
+ # shift and offset interaction; see issue #1547
+ data = numpy.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[2]], [-1], (3,),
+ order=order)
+ assert_array_almost_equal(out, [0, 1, 2])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform23(self, order):
+ # shift and offset interaction; see issue #1547
+ data = numpy.array([4, 1, 3, 2])
+ out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
+ order=order)
+ assert_array_almost_equal(out[::2], [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform24(self, order):
+ # consistency between diagonal and non-diagonal case; see issue #1547
+ data = numpy.array([4, 1, 3, 2])
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out1 = ndimage.affine_transform(data, [2], -1, order=order)
+ out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform25(self, order):
+ # consistency between diagonal and non-diagonal case; see issue #1547
+ data = numpy.array([4, 1, 3, 2])
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
+ out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
+ assert_array_almost_equal(out1, out2)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform26(self, order):
+ # test homogeneous coordinates
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ tform_original = numpy.eye(2)
+ offset_original = -numpy.ones((2, 1))
+ tform_h1 = numpy.hstack((tform_original, offset_original))
+ tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
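+ # tform_h1 is the 2x3 augmented matrix [A | b]; tform_h2 adds the
+ # trailing [0, 0, 1] row to form a full 3x3 homogeneous matrix. All
+ # three forms must be accepted and give identical results.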
+ out1 = ndimage.affine_transform(filtered, tform_original,
+ offset_original.ravel(),
+ order=order, prefilter=False)
+ out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
+ prefilter=False)
+ out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
+ prefilter=False)
+ for out in [out1, out2, out3]:
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ def test_affine_transform27(self):
+ # test valid homogeneous transformation matrix
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
+ tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
+ assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
+
+ def test_affine_transform_1d_endianness_with_output_parameter(self):
+ # 1d affine transform given output ndarray or dtype with
+ # either endianness. see issue #7388
+ data = numpy.ones((2, 2))
+ for out in [numpy.empty_like(data),
+ numpy.empty_like(data).astype(data.dtype.newbyteorder()),
+ data.dtype, data.dtype.newbyteorder()]:
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array '
+ '.* has changed')
+ returned = ndimage.affine_transform(data, [1, 1], output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [[1, 1], [1, 1]])
+
+ def test_affine_transform_multi_d_endianness_with_output_parameter(self):
+ # affine transform given output ndarray or dtype with either endianness
+ # see issue #4127
+ data = numpy.array([1])
+ for out in [data.dtype, data.dtype.newbyteorder(),
+ numpy.empty_like(data),
+ numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
+ returned = ndimage.affine_transform(data, [[1]], output=out)
+ result = out if returned is None else returned
+ assert_array_almost_equal(result, [1])
+
+ def test_affine_transform_output_shape(self):
+ # output_shape need not be given when an output array of a
+ # different size is provided
+ data = numpy.arange(8, dtype=numpy.float64)
+ out = numpy.ones((16,))
+
+ ndimage.affine_transform(data, [[1]], output=out)
+ assert_array_almost_equal(out[:8], data)
+
+ # mismatched output shape raises an error
+ with pytest.raises(RuntimeError):
+ ndimage.affine_transform(
+ data, [[1]], output=out, output_shape=(12,))
+
+ def test_affine_transform_with_string_output(self):
+ data = numpy.array([1])
+ out = ndimage.affine_transform(data, [[1]], output='f')
+ assert_(out.dtype is numpy.dtype('f'))
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform_shift_via_grid_wrap(self, shift, order):
+ # For mode 'grid-wrap', integer shifts should match numpy.roll
+ x = numpy.array([[0, 1],
+ [2, 3]])
+ affine = numpy.zeros((2, 3))
+ affine[:2, :2] = numpy.eye(2)
+ affine[:, 2] = shift
+ assert_array_almost_equal(
+ ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
+ numpy.roll(x, shift, axis=(0, 1)),
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_affine_transform_shift_reflect(self, order):
+ # shift by x.shape results in reflection
+ x = numpy.array([[0, 1, 2],
+ [3, 4, 5]])
+ affine = numpy.zeros((2, 3))
+ affine[:2, :2] = numpy.eye(2)
+ affine[:, 2] = x.shape
+ assert_array_almost_equal(
+ ndimage.affine_transform(x, affine, mode='reflect', order=order),
+ x[::-1, ::-1],
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift01(self, order):
+ data = numpy.array([1])
+ out = ndimage.shift(data, [1], order=order)
+ assert_array_almost_equal(out, [0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift02(self, order):
+ data = numpy.ones([4])
+ out = ndimage.shift(data, [1], order=order)
+ assert_array_almost_equal(out, [0, 1, 1, 1])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift03(self, order):
+ data = numpy.ones([4])
+ out = ndimage.shift(data, -1, order=order)
+ assert_array_almost_equal(out, [1, 1, 1, 0])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift04(self, order):
+ data = numpy.array([4, 1, 3, 2])
+ out = ndimage.shift(data, 1, order=order)
+ assert_array_almost_equal(out, [0, 4, 1, 3])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_shift05(self, order, dtype):
+ data = numpy.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+ expected = numpy.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.shift(data, [0, 1], order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_shift_with_nonzero_cval(self, order, mode, dtype):
+ data = numpy.array([[1, 1, 1, 1],
+ [1, 1, 1, 1],
+ [1, 1, 1, 1]], dtype=dtype)
+
+ expected = numpy.array([[0, 1, 1, 1],
+ [0, 1, 1, 1],
+ [0, 1, 1, 1]], dtype=dtype)
+
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ cval = 5.0
+ expected[:, 0] = cval # specific to shift of [0, 1] used below
+ out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift06(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [0, 1], order=order)
+ assert_array_almost_equal(out, [[0, 4, 1, 3],
+ [0, 7, 6, 8],
+ [0, 3, 5, 3]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift07(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [1, 0], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [4, 1, 3, 2],
+ [7, 6, 8, 5]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift08(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ out = ndimage.shift(data, [1, 1], order=order)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift09(self, order):
+ data = numpy.array([[4, 1, 3, 2],
+ [7, 6, 8, 5],
+ [3, 5, 3, 6]])
+ if (order > 1):
+ filtered = ndimage.spline_filter(data, order=order)
+ else:
+ filtered = data
+ out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
+ assert_array_almost_equal(out, [[0, 0, 0, 0],
+ [0, 4, 1, 3],
+ [0, 7, 6, 8]])
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_grid_wrap(self, shift, order):
+ # For mode 'grid-wrap', integer shifts should match numpy.roll
+ x = numpy.array([[0, 1],
+ [2, 3]])
+ assert_array_almost_equal(
+ ndimage.shift(x, shift, mode='grid-wrap', order=order),
+ numpy.roll(x, shift, axis=(0, 1)),
+ )
+
+ @pytest.mark.parametrize('shift',
+ [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_grid_constant1(self, shift, order):
+ # For integer shifts, 'constant' and 'grid-constant' should be equal
+ x = numpy.arange(20).reshape((5, 4))
+ assert_array_almost_equal(
+ ndimage.shift(x, shift, mode='grid-constant', order=order),
+ ndimage.shift(x, shift, mode='constant', order=order),
+ )
+
+ def test_shift_grid_constant_order1(self):
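+ # With order=1 and a (0.5, 0.5) shift, each output is the mean of a
+ # 2x2 input neighborhood with zeros outside the grid, e.g. the corner
+ # value is (0 + 0 + 0 + 1) / 4 = 0.25.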
+ x = numpy.array([[1, 2, 3],
+ [4, 5, 6]], dtype=float)
+ expected_result = numpy.array([[0.25, 0.75, 1.25],
+ [1.25, 3.00, 4.00]])
+ assert_array_almost_equal(
+ ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
+ expected_result,
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_shift_reflect(self, order):
+ # shift by x.shape results in reflection
+ x = numpy.array([[0, 1, 2],
+ [3, 4, 5]])
+ assert_array_almost_equal(
+ ndimage.shift(x, x.shape, mode='reflect', order=order),
+ x[::-1, ::-1],
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('prefilter', [False, True])
+ def test_shift_nearest_boundary(self, order, prefilter):
+ # verify that shifting at least order // 2 beyond the end of the array
+ # gives a value equal to the edge value.
+ x = numpy.arange(16)
+ kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
+ assert_array_almost_equal(
+ ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
+ )
+ assert_array_almost_equal(
+ ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
+ )
+
+ @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+ 'mirror', 'reflect'])
+ @pytest.mark.parametrize('order', range(6))
+ def test_shift_vs_padded(self, order, mode):
+ x = numpy.arange(144, dtype=float).reshape(12, 12)
+ shift = (0.4, -2.3)
+
+ # manually pad and then extract center to get expected result
+ npad = 32
+ pad_mode = ndimage_to_numpy_mode.get(mode)
+ xp = numpy.pad(x, npad, mode=pad_mode)
+ center_slice = tuple([slice(npad, -npad)] * x.ndim)
+ expected_result = ndimage.shift(
+ xp, shift, mode=mode, order=order)[center_slice]
+
+ assert_allclose(
+ ndimage.shift(x, shift, mode=mode, order=order),
+ expected_result,
+ rtol=1e-7,
+ )
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_zoom1(self, order):
+ for z in [2, [2, 2]]:
+ arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
+ arr = ndimage.zoom(arr, z, order=order)
+ assert_equal(arr.shape, (10, 10))
+ assert_(numpy.all(arr[-1, :] != 0))
+ assert_(numpy.all(arr[-1, :] >= (20 - eps)))
+ assert_(numpy.all(arr[0, :] <= (5 + eps)))
+ assert_(numpy.all(arr >= (0 - eps)))
+ assert_(numpy.all(arr <= (24 + eps)))
+
+ def test_zoom2(self):
+ arr = numpy.arange(12).reshape((3, 4))
+ out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
+ assert_array_equal(out, arr)
+
+ def test_zoom3(self):
+ arr = numpy.array([[1, 2]])
+ out1 = ndimage.zoom(arr, (2, 1))
+ out2 = ndimage.zoom(arr, (1, 2))
+
+ assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
+ assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_zoom_affine01(self, order, dtype):
+ data = numpy.asarray([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning,
+ 'The behavior of affine_transform with a 1-D array .* '
+ 'has changed')
+ out = ndimage.affine_transform(data, [0.5, 0.5], 0,
+ (6, 8), order=order)
+ assert_array_almost_equal(out[::2, ::2], data)
+
+ def test_zoom_infinity(self):
+ # Ticket #1419 regression test
+ dim = 8
+ ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
+
+ def test_zoom_zoomfactor_one(self):
+ # Ticket #1122 regression test
+ arr = numpy.zeros((1, 5, 5))
+ zoom = (1.0, 2.0, 2.0)
+
+ out = ndimage.zoom(arr, zoom, cval=7)
+ ref = numpy.zeros((1, 10, 10))
+ assert_array_almost_equal(out, ref)
+
+ def test_zoom_output_shape_roundoff(self):
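+ # Each zoom factor is the exact ratio of target to input size, so the
+ # output shape must come out as (4, 15, 29) despite float roundoff.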
+ arr = numpy.zeros((3, 11, 25))
+ zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
+ out = ndimage.zoom(arr, zoom)
+ assert_array_equal(out.shape, (4, 15, 29))
+
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+ @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
+ 'mirror', 'grid-wrap', 'grid-mirror',
+ 'grid-constant'])
+ def test_zoom_by_int_order0(self, zoom, mode):
+ # order 0 zoom should be the same as replication via numpy.kron
+ # Note: This is not True for general x shapes when grid_mode is False,
+ # but works here for all modes because the size ratio happens to
+ # always be an integer when x.shape = (2, 2).
+ x = numpy.array([[0, 1],
+ [2, 3]], dtype=float)
+ # x = numpy.arange(16, dtype=float).reshape(4, 4)
+ assert_array_almost_equal(
+ ndimage.zoom(x, zoom, order=0, mode=mode),
+ numpy.kron(x, numpy.ones(zoom))
+ )
+
+ @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
+ @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+ @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
+ 'grid-wrap', 'grid-constant'])
+ def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
+ # When grid_mode is True, order 0 zoom should be the same as
+ # replication via numpy.kron. The only exceptions to this are the
+ # non-grid modes 'constant' and 'wrap'.
+ x = numpy.arange(numpy.prod(shape), dtype=float).reshape(shape)
+ assert_array_almost_equal(
+ ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
+ numpy.kron(x, numpy.ones(zoom))
+ )
+
+ @pytest.mark.parametrize('mode', ['constant', 'wrap'])
+ def test_zoom_grid_mode_warnings(self, mode):
+ # Warn on use of non-grid modes when grid_mode is True
+ x = numpy.arange(9, dtype=float).reshape((3, 3))
+ with pytest.warns(UserWarning,
+ match="It is recommended to use mode"):
+ ndimage.zoom(x, 2, mode=mode, grid_mode=True)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate01(self, order):
+ data = numpy.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=numpy.float64)
+ out = ndimage.rotate(data, 0, order=order)
+ assert_array_almost_equal(out, data)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate02(self, order):
+ data = numpy.array([[0, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 0, 0]], dtype=numpy.float64)
+ expected = numpy.array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=numpy.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+ def test_rotate03(self, order, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]], dtype=dtype)
+ expected = numpy.array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=dtype)
+ if data.dtype.kind == 'c':
+ data -= 1j * data
+ expected -= 1j * expected
+ out = ndimage.rotate(data, 90, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate04(self, order):
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]], dtype=numpy.float64)
+ expected = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0]], dtype=numpy.float64)
+ out = ndimage.rotate(data, 90, reshape=False, order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate05(self, order):
+ data = numpy.empty((4, 3, 3))
+ for i in range(3):
+ data[:, :, i] = numpy.array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=numpy.float64)
+ expected = numpy.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=numpy.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ for i in range(3):
+ assert_array_almost_equal(out[:, :, i], expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate06(self, order):
+ data = numpy.empty((3, 4, 3))
+ for i in range(3):
+ data[:, :, i] = numpy.array([[0, 0, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 0, 0]], dtype=numpy.float64)
+ expected = numpy.array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0]], dtype=numpy.float64)
+ out = ndimage.rotate(data, 90, order=order)
+ for i in range(3):
+ assert_array_almost_equal(out[:, :, i], expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate07(self, order):
+ data = numpy.array([[[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+ data = data.transpose()
+ expected = numpy.array([[[0, 0, 0],
+ [0, 1, 0],
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 0, 0]]] * 2, dtype=numpy.float64)
+ expected = expected.transpose([2, 1, 0])
+ out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('order', range(0, 6))
+ def test_rotate08(self, order):
+ data = numpy.array([[[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+ data = data.transpose()
+ expected = numpy.array([[[0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+ expected = expected.transpose()
+ out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
+ assert_array_almost_equal(out, expected)
+
+ def test_rotate09(self):
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
+ with assert_raises(ValueError):
+ ndimage.rotate(data, 90, axes=(0, data.ndim))
+
+ def test_rotate10(self):
+ data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
+
+ # The output of ndimage.rotate before refactoring
+ expected = numpy.array([[[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [6.54914793, 7.54914793, 8.54914793],
+ [10.84520162, 11.84520162, 12.84520162],
+ [0.0, 0.0, 0.0]],
+ [[6.19286575, 7.19286575, 8.19286575],
+ [13.4730712, 14.4730712, 15.4730712],
+ [21.0, 22.0, 23.0],
+ [28.5269288, 29.5269288, 30.5269288],
+ [35.80713425, 36.80713425, 37.80713425]],
+ [[0.0, 0.0, 0.0],
+ [31.15479838, 32.15479838, 33.15479838],
+ [35.45085207, 36.45085207, 37.45085207],
+ [0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]]])
+
+ out = ndimage.rotate(data, angle=12, reshape=False)
+ assert_array_almost_equal(out, expected)
+
+ def test_rotate_exact_180(self):
+ a = numpy.tile(numpy.arange(5), (5, 1))
+ b = ndimage.rotate(ndimage.rotate(a, 180), -180)
+ assert_equal(a, b)
+
+
+def test_zoom_output_shape():
+ """Ticket #643"""
+ x = numpy.arange(12).reshape((3, 4))
+ ndimage.zoom(x, 2, output=numpy.zeros((6, 8)))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..135e9a72c94103cc378d87ac9a78e44342bfb55b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_measurements.py
@@ -0,0 +1,1409 @@
+import os.path
+
+import numpy as np
+from numpy.testing import (
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ suppress_warnings,
+)
+from pytest import raises as assert_raises
+
+import scipy.ndimage as ndimage
+
+
+from . import types
+
+
+class Test_measurements_stats:
+ """ndimage._measurements._stats() is a utility used by other functions."""
+
+ def test_a(self):
+ x = [0, 1, 2, 6]
+ labels = [0, 0, 1, 1]
+ index = [0, 1]
+ for shp in [(4,), (2, 2)]:
+ x = np.array(x).reshape(shp)
+ labels = np.array(labels).reshape(shp)
+ counts, sums = ndimage._measurements._stats(
+ x, labels=labels, index=index)
+ assert_array_equal(counts, [2, 2])
+ assert_array_equal(sums, [1.0, 8.0])
+
+ def test_b(self):
+ # Same data as test_a, but different labels. The label 9 exceeds the
+ # length of 'labels', so this test will follow a different code path.
+ x = [0, 1, 2, 6]
+ labels = [0, 0, 9, 9]
+ index = [0, 9]
+ for shp in [(4,), (2, 2)]:
+ x = np.array(x).reshape(shp)
+ labels = np.array(labels).reshape(shp)
+ counts, sums = ndimage._measurements._stats(
+ x, labels=labels, index=index)
+ assert_array_equal(counts, [2, 2])
+ assert_array_equal(sums, [1.0, 8.0])
+
+ def test_a_centered(self):
+ x = [0, 1, 2, 6]
+ labels = [0, 0, 1, 1]
+ index = [0, 1]
+ for shp in [(4,), (2, 2)]:
+ x = np.array(x).reshape(shp)
+ labels = np.array(labels).reshape(shp)
+ counts, sums, centers = ndimage._measurements._stats(
+ x, labels=labels, index=index, centered=True)
+ assert_array_equal(counts, [2, 2])
+ assert_array_equal(sums, [1.0, 8.0])
+ assert_array_equal(centers, [0.5, 8.0])
+
+ def test_b_centered(self):
+ x = [0, 1, 2, 6]
+ labels = [0, 0, 9, 9]
+ index = [0, 9]
+ for shp in [(4,), (2, 2)]:
+ x = np.array(x).reshape(shp)
+ labels = np.array(labels).reshape(shp)
+ counts, sums, centers = ndimage._measurements._stats(
+ x, labels=labels, index=index, centered=True)
+ assert_array_equal(counts, [2, 2])
+ assert_array_equal(sums, [1.0, 8.0])
+ assert_array_equal(centers, [0.5, 8.0])
+
+ def test_nonint_labels(self):
+ x = [0, 1, 2, 6]
+ labels = [0.0, 0.0, 9.0, 9.0]
+ index = [0.0, 9.0]
+ for shp in [(4,), (2, 2)]:
+ x = np.array(x).reshape(shp)
+ labels = np.array(labels).reshape(shp)
+ counts, sums, centers = ndimage._measurements._stats(
+ x, labels=labels, index=index, centered=True)
+ assert_array_equal(counts, [2, 2])
+ assert_array_equal(sums, [1.0, 8.0])
+ assert_array_equal(centers, [0.5, 8.0])
+
+
+class Test_measurements_select:
+ """ndimage._measurements._select() is a utility used by other functions."""
+
+ def test_basic(self):
+ x = [0, 1, 6, 2]
+ cases = [
+ ([0, 0, 1, 1], [0, 1]), # "Small" integer labels
+ ([0, 0, 9, 9], [0, 9]), # A label larger than len(labels)
+ ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels
+ ]
+ for labels, index in cases:
+ result = ndimage._measurements._select(
+ x, labels=labels, index=index)
+ assert_(len(result) == 0)
+ result = ndimage._measurements._select(
+ x, labels=labels, index=index, find_max=True)
+ assert_(len(result) == 1)
+ assert_array_equal(result[0], [1, 6])
+ result = ndimage._measurements._select(
+ x, labels=labels, index=index, find_min=True)
+ assert_(len(result) == 1)
+ assert_array_equal(result[0], [0, 2])
+ result = ndimage._measurements._select(
+ x, labels=labels, index=index, find_min=True,
+ find_min_positions=True)
+ assert_(len(result) == 2)
+ assert_array_equal(result[0], [0, 2])
+ assert_array_equal(result[1], [0, 3])
+ assert_equal(result[1].dtype.kind, 'i')
+ result = ndimage._measurements._select(
+ x, labels=labels, index=index, find_max=True,
+ find_max_positions=True)
+ assert_(len(result) == 2)
+ assert_array_equal(result[0], [1, 6])
+ assert_array_equal(result[1], [1, 2])
+ assert_equal(result[1].dtype.kind, 'i')
+
+
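+# A minimal usage sketch for the test_label* cases below (illustrative only,
+# not part of the upstream suite): ndimage.label assigns consecutive positive
+# integers to the connected components of the nonzero elements and returns
+# the component count as well, e.g.:
+#
+#     labeled, n = ndimage.label(np.array([1, 0, 1, 1, 0, 1]))
+#     # labeled -> array([1, 0, 2, 2, 0, 3]), n -> 3
+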
+def test_label01():
+ data = np.ones([])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, 1)
+ assert_equal(n, 1)
+
+
+def test_label02():
+ data = np.zeros([])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, 0)
+ assert_equal(n, 0)
+
+
+def test_label03():
+ data = np.ones([1])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [1])
+ assert_equal(n, 1)
+
+
+def test_label04():
+ data = np.zeros([1])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [0])
+ assert_equal(n, 0)
+
+
+def test_label05():
+ data = np.ones([5])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+ assert_equal(n, 1)
+
+
+def test_label06():
+ data = np.array([1, 0, 1, 1, 0, 1])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
+ assert_equal(n, 3)
+
+
+def test_label07():
+ data = np.array([[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+ assert_equal(n, 0)
+
+
+def test_label08():
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0]])
+ out, n = ndimage.label(data)
+ assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [3, 3, 0, 0, 0, 0],
+ [3, 3, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]])
+ assert_equal(n, 4)
+
+
+def test_label09():
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0]])
+ struct = ndimage.generate_binary_structure(2, 2)
+ out, n = ndimage.label(data, struct)
+ assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [2, 2, 0, 0, 0, 0],
+ [2, 2, 0, 0, 0, 0],
+ [0, 0, 0, 3, 3, 0]])
+ assert_equal(n, 3)
+
+
+def test_label10():
+ data = np.array([[0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ struct = ndimage.generate_binary_structure(2, 2)
+ out, n = ndimage.label(data, struct)
+ assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0]])
+ assert_equal(n, 1)
+
+
+def test_label11():
+ for type in types:
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0]], type)
+ out, n = ndimage.label(data)
+ expected = [[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [3, 3, 0, 0, 0, 0],
+ [3, 3, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]]
+ assert_array_almost_equal(out, expected)
+ assert_equal(n, 4)
+
+
+def test_label11_inplace():
+ for type in types:
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0],
+ [1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0]], type)
+ n = ndimage.label(data, output=data)
+ expected = [[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [3, 3, 0, 0, 0, 0],
+ [3, 3, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]]
+ assert_array_almost_equal(data, expected)
+ assert_equal(n, 4)
+
+
+def test_label12():
+ for type in types:
+ data = np.array([[0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 0, 1],
+ [0, 0, 1, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 0]], type)
+ out, n = ndimage.label(data)
+ expected = [[0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 0, 1],
+ [0, 0, 1, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 0]]
+ assert_array_almost_equal(out, expected)
+ assert_equal(n, 1)
+
+
+def test_label13():
+ for type in types:
+ data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
+ [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
+ type)
+ out, n = ndimage.label(data)
+ expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
+ [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ assert_array_almost_equal(out, expected)
+ assert_equal(n, 1)
+
+
+def test_label_output_typed():
+ data = np.ones([5])
+ for t in types:
+ output = np.zeros([5], dtype=t)
+ n = ndimage.label(data, output=output)
+ assert_array_almost_equal(output, 1)
+ assert_equal(n, 1)
+
+
+def test_label_output_dtype():
+ data = np.ones([5])
+ for t in types:
+ output, n = ndimage.label(data, output=t)
+ assert_array_almost_equal(output, 1)
+ assert output.dtype == t
+
+
+def test_label_output_wrong_size():
+ data = np.ones([5])
+ for t in types:
+ output = np.zeros([10], t)
+ assert_raises((RuntimeError, ValueError),
+ ndimage.label, data, output=output)
+
+
+def test_label_structuring_elements():
+ base = os.path.dirname(__file__)
+ data = np.loadtxt(os.path.join(base, "data", "label_inputs.txt"))
+ strels = np.loadtxt(os.path.join(base, "data", "label_strels.txt"))
+ results = np.loadtxt(os.path.join(base, "data", "label_results.txt"))
+ data = data.reshape((-1, 7, 7))
+ strels = strels.reshape((-1, 3, 3))
+ results = results.reshape((-1, 7, 7))
+ r = 0
+ for i in range(data.shape[0]):
+ d = data[i, :, :]
+ for j in range(strels.shape[0]):
+ s = strels[j, :, :]
+ assert_equal(ndimage.label(d, s)[0], results[r, :, :])
+ r += 1
+
+
+def test_ticket_742():
+ def SE(img, thresh=.7, size=4):
+ mask = img > thresh
+ rank = len(mask.shape)
+ la, co = ndimage.label(mask,
+ ndimage.generate_binary_structure(rank, rank))
+ _ = ndimage.find_objects(la)
+
+ if np.dtype(np.intp) != np.dtype('i'):
+ shape = (3, 1240, 1240)
+ a = np.random.rand(np.prod(shape)).reshape(shape)
+ # shouldn't crash
+ SE(a)
+
+
+def test_gh_issue_3025():
+ """Regression test for GitHub issue #3025: improper merging of labels."""
+ d = np.zeros((60, 320))
+ d[:, :257] = 1
+ d[:, 260:] = 1
+ d[36, 257] = 1
+ d[35, 258] = 1
+ d[35, 259] = 1
+ assert ndimage.label(d, np.ones((3, 3)))[1] == 1
+
+
+def test_label_default_dtype():
+ test_array = np.random.rand(10, 10)
+ label, no_features = ndimage.label(test_array > 0.5)
+ assert_(label.dtype in (np.int32, np.int64))
+ # Shouldn't raise an exception
+ ndimage.find_objects(label)
+
+
+def test_find_objects01():
+ data = np.ones([], dtype=int)
+ out = ndimage.find_objects(data)
+ assert_(out == [()])
+
+
+def test_find_objects02():
+ data = np.zeros([], dtype=int)
+ out = ndimage.find_objects(data)
+ assert_(out == [])
+
+
+def test_find_objects03():
+ data = np.ones([1], dtype=int)
+ out = ndimage.find_objects(data)
+ assert_equal(out, [(slice(0, 1, None),)])
+
+
+def test_find_objects04():
+ data = np.zeros([1], dtype=int)
+ out = ndimage.find_objects(data)
+ assert_equal(out, [])
+
+
+def test_find_objects05():
+ data = np.ones([5], dtype=int)
+ out = ndimage.find_objects(data)
+ assert_equal(out, [(slice(0, 5, None),)])
+
+
+def test_find_objects06():
+ data = np.array([1, 0, 2, 2, 0, 3])
+ out = ndimage.find_objects(data)
+ assert_equal(out, [(slice(0, 1, None),),
+ (slice(2, 4, None),),
+ (slice(5, 6, None),)])
+
+
+def test_find_objects07():
+ data = np.array([[0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+ out = ndimage.find_objects(data)
+ assert_equal(out, [])
+
+
+def test_find_objects08():
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [3, 3, 0, 0, 0, 0],
+ [3, 3, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]])
+ out = ndimage.find_objects(data)
+ assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
+ (slice(1, 3, None), slice(2, 5, None)),
+ (slice(3, 5, None), slice(0, 2, None)),
+ (slice(5, 6, None), slice(3, 5, None))])
+
+
+def test_find_objects09():
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]])
+ out = ndimage.find_objects(data)
+ assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
+ (slice(1, 3, None), slice(2, 5, None)),
+ None,
+ (slice(5, 6, None), slice(3, 5, None))])
+
+
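+# Hedged summary of the find_objects cases above: the function returns one
+# tuple of slices per label value from 1 through labels.max(), with None
+# standing in for label values that do not occur, e.g.:
+#
+#     ndimage.find_objects(np.array([1, 0, 3]))
+#     # -> [(slice(0, 1, None),), None, (slice(2, 3, None),)]
+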
+def test_value_indices01():
+ "Test dictionary keys and entries"
+ data = np.array([[1, 0, 0, 0, 0, 0],
+ [0, 0, 2, 2, 0, 0],
+ [0, 0, 2, 2, 2, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 0]])
+ vi = ndimage.value_indices(data, ignore_value=0)
+ true_keys = [1, 2, 4]
+ assert_equal(list(vi.keys()), true_keys)
+
+ truevi = {}
+ for k in true_keys:
+ truevi[k] = np.where(data == k)
+
+ vi = ndimage.value_indices(data, ignore_value=0)
+ assert_equal(vi, truevi)
+
+
+def test_value_indices02():
+ "Test input checking"
+ data = np.zeros((5, 4), dtype=np.float32)
+ msg = "Parameter 'arr' must be an integer array"
+ with assert_raises(ValueError, match=msg):
+ ndimage.value_indices(data)
+
+
+def test_value_indices03():
+ "Test different input array shapes, from 1-D to 4-D"
+ for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
+ a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
+ trueKeys = np.unique(a)
+ vi = ndimage.value_indices(a)
+ assert_equal(list(vi.keys()), list(trueKeys))
+ for k in trueKeys:
+ trueNdx = np.where(a == k)
+ assert_equal(vi[k], trueNdx)
+
+
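+# A short sketch of the shared labels/index convention exercised by the
+# reduction tests below (illustrative, assuming the documented behaviour):
+# each statistic is computed only over positions whose label matches, and a
+# sequence index yields one value per requested label, e.g.:
+#
+#     ndimage.sum([1, 2, 3, 4], labels=[1, 1, 2, 2], index=[1, 2])
+#     # -> array([3., 7.])
+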
+def test_sum01():
+ for type in types:
+ input = np.array([], type)
+ output = ndimage.sum(input)
+ assert_equal(output, 0.0)
+
+
+def test_sum02():
+ for type in types:
+ input = np.zeros([0, 4], type)
+ output = ndimage.sum(input)
+ assert_equal(output, 0.0)
+
+
+def test_sum03():
+ for type in types:
+ input = np.ones([], type)
+ output = ndimage.sum(input)
+ assert_almost_equal(output, 1.0)
+
+
+def test_sum04():
+ for type in types:
+ input = np.array([1, 2], type)
+ output = ndimage.sum(input)
+ assert_almost_equal(output, 3.0)
+
+
+def test_sum05():
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.sum(input)
+ assert_almost_equal(output, 10.0)
+
+
+def test_sum06():
+ labels = np.array([], bool)
+ for type in types:
+ input = np.array([], type)
+ output = ndimage.sum(input, labels=labels)
+ assert_equal(output, 0.0)
+
+
+def test_sum07():
+ labels = np.ones([0, 4], bool)
+ for type in types:
+ input = np.zeros([0, 4], type)
+ output = ndimage.sum(input, labels=labels)
+ assert_equal(output, 0.0)
+
+
+def test_sum08():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([1, 2], type)
+ output = ndimage.sum(input, labels=labels)
+ assert_equal(output, 1.0)
+
+
+def test_sum09():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.sum(input, labels=labels)
+ assert_almost_equal(output, 4.0)
+
+
+def test_sum10():
+ labels = np.array([1, 0], bool)
+ input = np.array([[1, 2], [3, 4]], bool)
+ output = ndimage.sum(input, labels=labels)
+ assert_almost_equal(output, 2.0)
+
+
+def test_sum11():
+ labels = np.array([1, 2], np.int8)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.sum(input, labels=labels,
+ index=2)
+ assert_almost_equal(output, 6.0)
+
+
+def test_sum12():
+ labels = np.array([[1, 2], [2, 4]], np.int8)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
+ assert_array_almost_equal(output, [4.0, 0.0, 5.0])
+
+
+def test_sum_labels():
+ labels = np.array([[1, 2], [2, 4]], np.int8)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
+ output_labels = ndimage.sum_labels(
+ input, labels=labels, index=[4, 8, 2])
+
+ assert (output_sum == output_labels).all()
+ assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
+
+
+def test_mean01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.mean(input, labels=labels)
+ assert_almost_equal(output, 2.0)
+
+
+def test_mean02():
+ labels = np.array([1, 0], bool)
+ input = np.array([[1, 2], [3, 4]], bool)
+ output = ndimage.mean(input, labels=labels)
+ assert_almost_equal(output, 1.0)
+
+
+def test_mean03():
+ labels = np.array([1, 2])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.mean(input, labels=labels,
+ index=2)
+ assert_almost_equal(output, 3.0)
+
+
+def test_mean04():
+ labels = np.array([[1, 2], [2, 4]], np.int8)
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.mean(input, labels=labels,
+ index=[4, 8, 2])
+ assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
+ assert_(np.isnan(output[1]))
+
+
+def test_minimum01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.minimum(input, labels=labels)
+ assert_almost_equal(output, 1.0)
+
+
+def test_minimum02():
+ labels = np.array([1, 0], bool)
+ input = np.array([[2, 2], [2, 4]], bool)
+ output = ndimage.minimum(input, labels=labels)
+ assert_almost_equal(output, 1.0)
+
+
+def test_minimum03():
+ labels = np.array([1, 2])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.minimum(input, labels=labels,
+ index=2)
+ assert_almost_equal(output, 2.0)
+
+
+def test_minimum04():
+ labels = np.array([[1, 2], [2, 3]])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.minimum(input, labels=labels,
+ index=[2, 3, 8])
+ assert_array_almost_equal(output, [2.0, 4.0, 0.0])
+
+
+def test_maximum01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.maximum(input, labels=labels)
+ assert_almost_equal(output, 3.0)
+
+
+def test_maximum02():
+ labels = np.array([1, 0], bool)
+ input = np.array([[2, 2], [2, 4]], bool)
+ output = ndimage.maximum(input, labels=labels)
+ assert_almost_equal(output, 1.0)
+
+
+def test_maximum03():
+ labels = np.array([1, 2])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.maximum(input, labels=labels,
+ index=2)
+ assert_almost_equal(output, 4.0)
+
+
+def test_maximum04():
+ labels = np.array([[1, 2], [2, 3]])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.maximum(input, labels=labels,
+ index=[2, 3, 8])
+ assert_array_almost_equal(output, [3.0, 4.0, 0.0])
+
+
+def test_maximum05():
+ # Regression test for ticket #501 (Trac)
+ x = np.array([-3, -2, -1])
+ assert_equal(ndimage.maximum(x), -1)
+
+
+def test_median01():
+ a = np.array([[1, 2, 0, 1],
+ [5, 3, 0, 4],
+ [0, 0, 0, 7],
+ [9, 3, 0, 0]])
+ labels = np.array([[1, 1, 0, 2],
+ [1, 1, 0, 2],
+ [0, 0, 0, 2],
+ [3, 3, 0, 0]])
+ output = ndimage.median(a, labels=labels, index=[1, 2, 3])
+ assert_array_almost_equal(output, [2.5, 4.0, 6.0])
+
+
+def test_median02():
+ a = np.array([[1, 2, 0, 1],
+ [5, 3, 0, 4],
+ [0, 0, 0, 7],
+ [9, 3, 0, 0]])
+ output = ndimage.median(a)
+ assert_almost_equal(output, 1.0)
+
+
+def test_median03():
+ a = np.array([[1, 2, 0, 1],
+ [5, 3, 0, 4],
+ [0, 0, 0, 7],
+ [9, 3, 0, 0]])
+ labels = np.array([[1, 1, 0, 2],
+ [1, 1, 0, 2],
+ [0, 0, 0, 2],
+ [3, 3, 0, 0]])
+ output = ndimage.median(a, labels=labels)
+ assert_almost_equal(output, 3.0)
+
+
+def test_median_gh12836_bool():
+ # test boolean addition fix on example from gh-12836
+ a = np.asarray([1, 1], dtype=bool)
+ output = ndimage.median(a, labels=np.ones((2,)), index=[1])
+ assert_array_almost_equal(output, [1.0])
+
+
+def test_median_no_int_overflow():
+ # test integer overflow fix on example from gh-12836
+ a = np.asarray([65, 70], dtype=np.int8)
+ output = ndimage.median(a, labels=np.ones((2,)), index=[1])
+ assert_array_almost_equal(output, [67.5])
+
+
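+# Note on the convention the variance/standard_deviation cases below encode:
+# these are population statistics (ddof=0), so for [1, 3] the variance is 1.0
+# rather than the sample value 2.0, in line with:
+#
+#     np.var([1, 3])                       # -> 1.0
+#     ndimage.variance(np.array([1, 3]))   # -> 1.0
+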
+def test_variance01():
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([], type)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "Mean of empty slice")
+ output = ndimage.variance(input)
+ assert_(np.isnan(output))
+
+
+def test_variance02():
+ for type in types:
+ input = np.array([1], type)
+ output = ndimage.variance(input)
+ assert_almost_equal(output, 0.0)
+
+
+def test_variance03():
+ for type in types:
+ input = np.array([1, 3], type)
+ output = ndimage.variance(input)
+ assert_almost_equal(output, 1.0)
+
+
+def test_variance04():
+ input = np.array([1, 0], bool)
+ output = ndimage.variance(input)
+ assert_almost_equal(output, 0.25)
+
+
+def test_variance05():
+ labels = [2, 2, 3]
+ for type in types:
+ input = np.array([1, 3, 8], type)
+ output = ndimage.variance(input, labels, 2)
+ assert_almost_equal(output, 1.0)
+
+
+def test_variance06():
+ labels = [2, 2, 3, 3, 4]
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([1, 3, 8, 10, 8], type)
+ output = ndimage.variance(input, labels, [2, 3, 4])
+ assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
+def test_standard_deviation01():
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([], type)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "Mean of empty slice")
+ output = ndimage.standard_deviation(input)
+ assert_(np.isnan(output))
+
+
+def test_standard_deviation02():
+ for type in types:
+ input = np.array([1], type)
+ output = ndimage.standard_deviation(input)
+ assert_almost_equal(output, 0.0)
+
+
+def test_standard_deviation03():
+ for type in types:
+ input = np.array([1, 3], type)
+ output = ndimage.standard_deviation(input)
+ assert_almost_equal(output, np.sqrt(1.0))
+
+
+def test_standard_deviation04():
+ input = np.array([1, 0], bool)
+ output = ndimage.standard_deviation(input)
+ assert_almost_equal(output, 0.5)
+
+
+def test_standard_deviation05():
+ labels = [2, 2, 3]
+ for type in types:
+ input = np.array([1, 3, 8], type)
+ output = ndimage.standard_deviation(input, labels, 2)
+ assert_almost_equal(output, 1.0)
+
+
+def test_standard_deviation06():
+ labels = [2, 2, 3, 3, 4]
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([1, 3, 8, 10, 8], type)
+ output = ndimage.standard_deviation(input, labels, [2, 3, 4])
+ assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
+def test_standard_deviation07():
+ labels = [1]
+ with np.errstate(all='ignore'):
+ for type in types:
+ input = np.array([-0.00619519], type)
+ output = ndimage.standard_deviation(input, labels, [1])
+ assert_array_almost_equal(output, [0])
+
+
+def test_minimum_position01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.minimum_position(input, labels=labels)
+ assert_equal(output, (0, 0))
+
+
+def test_minimum_position02():
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 0, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.minimum_position(input)
+ assert_equal(output, (1, 2))
+
+
+def test_minimum_position03():
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 0, 2],
+ [1, 5, 1, 1]], bool)
+ output = ndimage.minimum_position(input)
+ assert_equal(output, (1, 2))
+
+
+def test_minimum_position04():
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 1, 2],
+ [1, 5, 1, 1]], bool)
+ output = ndimage.minimum_position(input)
+ assert_equal(output, (0, 0))
+
+
+def test_minimum_position05():
+ labels = [1, 2, 0, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 0, 2],
+ [1, 5, 2, 3]], type)
+ output = ndimage.minimum_position(input, labels)
+ assert_equal(output, (2, 0))
+
+
+def test_minimum_position06():
+ labels = [1, 2, 3, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 0, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.minimum_position(input, labels, 2)
+ assert_equal(output, (0, 1))
+
+
+def test_minimum_position07():
+ labels = [1, 2, 3, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 0, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.minimum_position(input, labels,
+ [2, 3])
+ assert_equal(output[0], (0, 1))
+ assert_equal(output[1], (1, 2))
+
+
+def test_maximum_position01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output = ndimage.maximum_position(input,
+ labels=labels)
+ assert_equal(output, (1, 0))
+
+
+def test_maximum_position02():
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.maximum_position(input)
+ assert_equal(output, (1, 2))
+
+
+def test_maximum_position03():
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], bool)
+ output = ndimage.maximum_position(input)
+ assert_equal(output, (0, 0))
+
+
+def test_maximum_position04():
+ labels = [1, 2, 0, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.maximum_position(input, labels)
+ assert_equal(output, (1, 1))
+
+
+def test_maximum_position05():
+ labels = [1, 2, 0, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.maximum_position(input, labels, 1)
+ assert_equal(output, (0, 0))
+
+
+def test_maximum_position06():
+ labels = [1, 2, 0, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.maximum_position(input, labels,
+ [1, 2])
+ assert_equal(output[0], (0, 0))
+ assert_equal(output[1], (1, 1))
+
+
+def test_maximum_position07():
+ # Test float labels
+ labels = np.array([1.0, 2.5, 0.0, 4.5])
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output = ndimage.maximum_position(input, labels,
+ [1.0, 4.5])
+ assert_equal(output[0], (0, 0))
+ assert_equal(output[1], (0, 3))
+
+
+def test_extrema01():
+ labels = np.array([1, 0], bool)
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output1 = ndimage.extrema(input, labels=labels)
+ output2 = ndimage.minimum(input, labels=labels)
+ output3 = ndimage.maximum(input, labels=labels)
+ output4 = ndimage.minimum_position(input,
+ labels=labels)
+ output5 = ndimage.maximum_position(input,
+ labels=labels)
+ assert_equal(output1, (output2, output3, output4, output5))
+
+
+def test_extrema02():
+ labels = np.array([1, 2])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output1 = ndimage.extrema(input, labels=labels,
+ index=2)
+ output2 = ndimage.minimum(input, labels=labels,
+ index=2)
+ output3 = ndimage.maximum(input, labels=labels,
+ index=2)
+ output4 = ndimage.minimum_position(input,
+ labels=labels, index=2)
+ output5 = ndimage.maximum_position(input,
+ labels=labels, index=2)
+ assert_equal(output1, (output2, output3, output4, output5))
+
+
+def test_extrema03():
+ labels = np.array([[1, 2], [2, 3]])
+ for type in types:
+ input = np.array([[1, 2], [3, 4]], type)
+ output1 = ndimage.extrema(input, labels=labels,
+ index=[2, 3, 8])
+ output2 = ndimage.minimum(input, labels=labels,
+ index=[2, 3, 8])
+ output3 = ndimage.maximum(input, labels=labels,
+ index=[2, 3, 8])
+ output4 = ndimage.minimum_position(input,
+ labels=labels, index=[2, 3, 8])
+ output5 = ndimage.maximum_position(input,
+ labels=labels, index=[2, 3, 8])
+ assert_array_almost_equal(output1[0], output2)
+ assert_array_almost_equal(output1[1], output3)
+ assert_array_almost_equal(output1[2], output4)
+ assert_array_almost_equal(output1[3], output5)
+
+
+def test_extrema04():
+ labels = [1, 2, 0, 4]
+ for type in types:
+ input = np.array([[5, 4, 2, 5],
+ [3, 7, 8, 2],
+ [1, 5, 1, 1]], type)
+ output1 = ndimage.extrema(input, labels, [1, 2])
+ output2 = ndimage.minimum(input, labels, [1, 2])
+ output3 = ndimage.maximum(input, labels, [1, 2])
+ output4 = ndimage.minimum_position(input, labels,
+ [1, 2])
+ output5 = ndimage.maximum_position(input, labels,
+ [1, 2])
+ assert_array_almost_equal(output1[0], output2)
+ assert_array_almost_equal(output1[1], output3)
+ assert_array_almost_equal(output1[2], output4)
+ assert_array_almost_equal(output1[3], output5)
+
+
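+# Minimal center_of_mass sketch (illustrative): the result is the
+# intensity-weighted mean index along each axis, so a single hot pixel
+# reports its own coordinates, e.g.:
+#
+#     ndimage.center_of_mass(np.array([[0, 0], [0, 1]]))  # -> (1.0, 1.0)
+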
+def test_center_of_mass01():
+ expected = [0.0, 0.0]
+ for type in types:
+ input = np.array([[1, 0], [0, 0]], type)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass02():
+ expected = [1, 0]
+ for type in types:
+ input = np.array([[0, 0], [1, 0]], type)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass03():
+ expected = [0, 1]
+ for type in types:
+ input = np.array([[0, 1], [0, 0]], type)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass04():
+ expected = [1, 1]
+ for type in types:
+ input = np.array([[0, 0], [0, 1]], type)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass05():
+ expected = [0.5, 0.5]
+ for type in types:
+ input = np.array([[1, 1], [1, 1]], type)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass06():
+ expected = [0.5, 0.5]
+ input = np.array([[1, 2], [3, 1]], bool)
+ output = ndimage.center_of_mass(input)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass07():
+ labels = [1, 0]
+ expected = [0.5, 0.0]
+ input = np.array([[1, 2], [3, 1]], bool)
+ output = ndimage.center_of_mass(input, labels)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass08():
+ labels = [1, 2]
+ expected = [0.5, 1.0]
+ input = np.array([[5, 2], [3, 1]], bool)
+ output = ndimage.center_of_mass(input, labels, 2)
+ assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass09():
+ labels = [1, 2]
+ expected = [(0.5, 0.0), (0.5, 1.0)]
+ input = np.array([[1, 2], [1, 1]], bool)
+ output = ndimage.center_of_mass(input, labels, [1, 2])
+ assert_array_almost_equal(output, expected)
+
+
+def test_histogram01():
+ expected = np.ones(10)
+ input = np.arange(10)
+ output = ndimage.histogram(input, 0, 10, 10)
+ assert_array_almost_equal(output, expected)
+
+
+def test_histogram02():
+ labels = [1, 1, 1, 1, 2, 2, 2, 2]
+ expected = [0, 2, 0, 1, 1]
+ input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
+ output = ndimage.histogram(input, 0, 4, 5, labels, 1)
+ assert_array_almost_equal(output, expected)
+
+
+def test_histogram03():
+ labels = [1, 0, 1, 1, 2, 2, 2, 2]
+ expected1 = [0, 1, 0, 1, 1]
+ expected2 = [0, 0, 0, 3, 0]
+ input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
+ output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
+
+ assert_array_almost_equal(output[0], expected1)
+ assert_array_almost_equal(output[1], expected2)
+
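+# Quick gloss on the histogram signature used above (hedged, matching the
+# cases rather than asserting upstream docs): histogram(input, min, max,
+# bins) counts values in `bins` equal-width bins spanning min..max,
+# optionally restricted via labels/index, e.g.:
+#
+#     ndimage.histogram(np.arange(10), 0, 10, 10)  # -> ten bins of 1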
+
+def test_stat_funcs_2d():
+ a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
+ lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
+
+ mean = ndimage.mean(a, labels=lbl, index=[1, 2])
+ assert_array_equal(mean, [7.0, 4.0])
+
+ var = ndimage.variance(a, labels=lbl, index=[1, 2])
+ assert_array_equal(var, [2.5, 1.0])
+
+ std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
+ assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
+
+ med = ndimage.median(a, labels=lbl, index=[1, 2])
+ assert_array_equal(med, [7.0, 4.0])
+
+ min = ndimage.minimum(a, labels=lbl, index=[1, 2])
+ assert_array_equal(min, [5, 3])
+
+ max = ndimage.maximum(a, labels=lbl, index=[1, 2])
+ assert_array_equal(max, [9, 5])
+
+
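+# Background for the watershed tests below (a hedged summary, not upstream
+# documentation): watershed_ift floods the uint8/uint16 cost image outward
+# from the nonzero markers, with negative markers conventionally tagging the
+# background; each pixel ends up with the marker that reaches it cheapest.
+# With a flat cost image a single marker claims everything, e.g.:
+#
+#     ndimage.watershed_ift(np.zeros((3, 3), np.uint8),
+#                           np.array([[1, 0, 0],
+#                                     [0, 0, 0],
+#                                     [0, 0, 0]], np.int8))
+#     # -> all ones
+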
+class TestWatershedIft:
+
+ def test_watershed_ift01(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift02(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, 1, 1, 1, -1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, 1, 1, 1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift03(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 0, 3, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, 2, -1, 3, -1, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, -1, 2, -1, 3, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift04(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 0, 3, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]],
+ np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, 2, 2, 3, 3, 3, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift05(self):
+ data = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 3, 0, 2, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, -1]],
+ np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, -1, -1, -1, -1, -1, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, 3, 3, 2, 2, 2, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift06(self):
+ data = np.array([[0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift07(self):
+ shape = (7, 6)
+ data = np.zeros(shape, dtype=np.uint8)
+ data = data.transpose()
+ data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+ markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], np.int8)
+ out = np.zeros(shape, dtype=np.int16)
+ out = out.transpose()
+ ndimage.watershed_ift(data, markers,
+ structure=[[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]],
+ output=out)
+ expected = [[-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, 1, 1, 1, 1, 1, -1],
+ [-1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift08(self):
+ # Test cost larger than uint8. See gh-10069.
+ data = np.array([[256, 0],
+ [0, 0]], np.uint16)
+ markers = np.array([[1, 0],
+ [0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[1, 1],
+ [1, 1]]
+ assert_array_almost_equal(out, expected)
+
+ def test_watershed_ift09(self):
+ # Test large cost. See gh-19575
+ data = np.array([[np.iinfo(np.uint16).max, 0],
+ [0, 0]], np.uint16)
+ markers = np.array([[1, 0],
+ [0, 0]], np.int8)
+ out = ndimage.watershed_ift(data, markers)
+ expected = [[1, 1],
+ [1, 1]]
+ assert_allclose(out, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0f47d651f32143c1594b1fe833e51f0ec4f5fb7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_morphology.py
@@ -0,0 +1,2395 @@
+import numpy
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import ndimage
+
+from . import types
+
+
+class TestNdimageMorphology:
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf01(self, dtype):
+ # brute-force (bf) distance transform
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_bf(data, 'euclidean',
+ return_indices=True)
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 2, 4, 2, 1, 0, 0],
+ [0, 0, 1, 4, 8, 4, 1, 0, 0],
+ [0, 0, 1, 2, 4, 2, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ assert_array_almost_equal(out * out, expected)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 1, 2, 2, 2, 2],
+ [3, 3, 3, 2, 1, 2, 3, 3, 3],
+ [4, 4, 4, 4, 6, 4, 4, 4, 4],
+ [5, 5, 6, 6, 7, 6, 6, 5, 5],
+ [6, 6, 6, 7, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 1, 2, 4, 6, 7, 7, 8],
+ [0, 1, 1, 1, 6, 7, 7, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf02(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_bf(data, 'cityblock',
+ return_indices=True)
+
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 2, 2, 2, 1, 0, 0],
+ [0, 0, 1, 2, 3, 2, 1, 0, 0],
+ [0, 0, 1, 2, 2, 2, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ assert_array_almost_equal(out, expected)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 1, 2, 2, 2, 2],
+ [3, 3, 3, 3, 1, 3, 3, 3, 3],
+ [4, 4, 4, 4, 7, 4, 4, 4, 4],
+ [5, 5, 6, 7, 7, 7, 6, 5, 5],
+ [6, 6, 6, 7, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 1, 1, 4, 7, 7, 7, 8],
+ [0, 1, 1, 1, 4, 7, 7, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(expected, ft)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf03(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_bf(data, 'chessboard',
+ return_indices=True)
+
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 2, 1, 1, 0, 0],
+ [0, 0, 1, 2, 2, 2, 1, 0, 0],
+ [0, 0, 1, 1, 2, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ assert_array_almost_equal(out, expected)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 1, 2, 2, 2, 2],
+ [3, 3, 4, 2, 2, 2, 4, 3, 3],
+ [4, 4, 5, 6, 6, 6, 5, 4, 4],
+ [5, 5, 6, 6, 7, 6, 6, 5, 5],
+ [6, 6, 6, 7, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 5, 6, 6, 7, 8],
+ [0, 1, 1, 2, 6, 6, 7, 7, 8],
+ [0, 1, 1, 2, 6, 7, 7, 7, 8],
+ [0, 1, 2, 2, 6, 6, 7, 7, 8],
+ [0, 1, 2, 4, 5, 6, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf04(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ tdt, tft = ndimage.distance_transform_bf(data, return_indices=1)
+ dts = []
+ fts = []
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ndimage.distance_transform_bf(data, distances=dt)
+ dts.append(dt)
+ ft = ndimage.distance_transform_bf(
+ data, return_distances=False, return_indices=1)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_bf(
+ data, return_distances=False, return_indices=True, indices=ft)
+ fts.append(ft)
+ dt, ft = ndimage.distance_transform_bf(
+ data, return_indices=1)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ft = ndimage.distance_transform_bf(
+ data, distances=dt, return_indices=True)
+ dts.append(dt)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ dt = ndimage.distance_transform_bf(
+ data, return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_bf(
+ data, distances=dt, return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ for dt in dts:
+ assert_array_almost_equal(tdt, dt)
+ for ft in fts:
+ assert_array_almost_equal(tft, ft)
+
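+ # Hedged recap of the calling conventions checked in bf04 above: the
+ # distance transforms may allocate their outputs or fill preallocated
+ # arrays passed via distances=/indices=, and every combination must agree
+ # with the plain two-return form, e.g.:
+ #
+ #     dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ #     ndimage.distance_transform_bf(data, distances=dt)  # fills dt
+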
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf05(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_bf(
+ data, 'euclidean', return_indices=True, sampling=[2, 2])
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 4, 4, 4, 0, 0, 0],
+ [0, 0, 4, 8, 16, 8, 4, 0, 0],
+ [0, 0, 4, 16, 32, 16, 4, 0, 0],
+ [0, 0, 4, 8, 16, 8, 4, 0, 0],
+ [0, 0, 0, 4, 4, 4, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ assert_array_almost_equal(out * out, expected)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 1, 2, 2, 2, 2],
+ [3, 3, 3, 2, 1, 2, 3, 3, 3],
+ [4, 4, 4, 4, 6, 4, 4, 4, 4],
+ [5, 5, 6, 6, 7, 6, 6, 5, 5],
+ [6, 6, 6, 7, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 1, 2, 4, 6, 7, 7, 8],
+ [0, 1, 1, 1, 6, 7, 7, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_bf06(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_bf(
+ data, 'euclidean', return_indices=True, sampling=[2, 1])
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 4, 1, 0, 0, 0],
+ [0, 0, 1, 4, 8, 4, 1, 0, 0],
+ [0, 0, 1, 4, 9, 4, 1, 0, 0],
+ [0, 0, 1, 4, 8, 4, 1, 0, 0],
+ [0, 0, 0, 1, 4, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ assert_array_almost_equal(out * out, expected)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 2, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4, 4, 4, 4, 4],
+ [5, 5, 5, 5, 6, 5, 5, 5, 5],
+ [6, 6, 6, 6, 7, 6, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 6, 6, 6, 7, 8],
+ [0, 1, 1, 1, 6, 7, 7, 7, 8],
+ [0, 1, 1, 1, 7, 7, 7, 7, 8],
+ [0, 1, 1, 1, 6, 7, 7, 7, 8],
+ [0, 1, 2, 2, 4, 6, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ def test_distance_transform_bf07(self):
+ # test input validation per discussion on PR #13302
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+ with assert_raises(RuntimeError):
+ ndimage.distance_transform_bf(
+ data, return_distances=False, return_indices=False
+ )
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_cdt01(self, dtype):
+ # chamfer-type distance transform (cdt)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_cdt(
+ data, 'cityblock', return_indices=True)
+ bf = ndimage.distance_transform_bf(data, 'cityblock')
+ assert_array_almost_equal(bf, out)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 1, 1, 1, 2, 2, 2],
+ [3, 3, 2, 1, 1, 1, 2, 3, 3],
+ [4, 4, 4, 4, 1, 4, 4, 4, 4],
+ [5, 5, 5, 5, 7, 7, 6, 5, 5],
+ [6, 6, 6, 6, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 1, 1, 4, 7, 7, 7, 8],
+ [0, 1, 1, 1, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_cdt02(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
+ return_indices=True)
+ bf = ndimage.distance_transform_bf(data, 'chessboard')
+ assert_array_almost_equal(bf, out)
+
+ expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [2, 2, 2, 1, 1, 1, 2, 2, 2],
+ [3, 3, 2, 2, 1, 2, 2, 3, 3],
+ [4, 4, 3, 2, 2, 2, 3, 4, 4],
+ [5, 5, 4, 6, 7, 6, 4, 5, 5],
+ [6, 6, 6, 6, 7, 7, 6, 6, 6],
+ [7, 7, 7, 7, 7, 7, 7, 7, 7],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 2, 3, 4, 6, 7, 8],
+ [0, 1, 1, 2, 2, 6, 6, 7, 8],
+ [0, 1, 1, 1, 2, 6, 7, 7, 8],
+ [0, 1, 1, 2, 6, 6, 7, 7, 8],
+ [0, 1, 2, 2, 5, 6, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8],
+ [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+ assert_array_almost_equal(ft, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_cdt03(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True)
+ dts = []
+ fts = []
+ dt = numpy.zeros(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_cdt(data, distances=dt)
+ dts.append(dt)
+ ft = ndimage.distance_transform_cdt(
+ data, return_distances=False, return_indices=True)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_cdt(
+ data, return_distances=False, return_indices=True, indices=ft)
+ fts.append(ft)
+ dt, ft = ndimage.distance_transform_cdt(
+ data, return_indices=True)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.int32)
+ ft = ndimage.distance_transform_cdt(
+ data, distances=dt, return_indices=True)
+ dts.append(dt)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ dt = ndimage.distance_transform_cdt(
+ data, return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.int32)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_cdt(data, distances=dt,
+ return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ for dt in dts:
+ assert_array_almost_equal(tdt, dt)
+ for ft in fts:
+ assert_array_almost_equal(tft, ft)
+
+ def test_distance_transform_cdt04(self):
+ # test input validation per discussion on PR #13302
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+ indices_out = numpy.zeros((data.ndim,) + data.shape, dtype=numpy.int32)
+ with assert_raises(RuntimeError):
+ ndimage.distance_transform_bf(
+ data,
+ return_distances=True,
+ return_indices=False,
+ indices=indices_out
+ )
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_cdt05(self, dtype):
+ # test custom metric type per discussion on issue #17381
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ metric_arg = np.ones((3, 3))
+ actual = ndimage.distance_transform_cdt(data, metric=metric_arg)
+ assert actual.sum() == -21
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_edt01(self, dtype):
+ # Euclidean distance transform (edt)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out, ft = ndimage.distance_transform_edt(data, return_indices=True)
+ bf = ndimage.distance_transform_bf(data, 'euclidean')
+ assert_array_almost_equal(bf, out)
+
+ dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
+ dt = dt.astype(numpy.float64)
+ numpy.multiply(dt, dt, dt)
+ dt = numpy.add.reduce(dt, axis=0)
+ numpy.sqrt(dt, dt)
+
+ assert_array_almost_equal(bf, dt)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_edt02(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ tdt, tft = ndimage.distance_transform_edt(data, return_indices=True)
+ dts = []
+ fts = []
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ndimage.distance_transform_edt(data, distances=dt)
+ dts.append(dt)
+ ft = ndimage.distance_transform_edt(
+ data, return_distances=0, return_indices=True)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_edt(
+ data, return_distances=False, return_indices=True, indices=ft)
+ fts.append(ft)
+ dt, ft = ndimage.distance_transform_edt(
+ data, return_indices=True)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ft = ndimage.distance_transform_edt(
+ data, distances=dt, return_indices=True)
+ dts.append(dt)
+ fts.append(ft)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ dt = ndimage.distance_transform_edt(
+ data, return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ dt = numpy.zeros(data.shape, dtype=numpy.float64)
+ ft = numpy.indices(data.shape, dtype=numpy.int32)
+ ndimage.distance_transform_edt(
+ data, distances=dt, return_indices=True, indices=ft)
+ dts.append(dt)
+ fts.append(ft)
+ for dt in dts:
+ assert_array_almost_equal(tdt, dt)
+ for ft in fts:
+ assert_array_almost_equal(tft, ft)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_edt03(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2])
+ out = ndimage.distance_transform_edt(data, sampling=[2, 2])
+ assert_array_almost_equal(ref, out)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_distance_transform_edt04(self, dtype):
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1])
+ out = ndimage.distance_transform_edt(data, sampling=[2, 1])
+ assert_array_almost_equal(ref, out)
+
+ def test_distance_transform_edt05(self):
+ # Ticket #954 regression test
+ out = ndimage.distance_transform_edt(False)
+ assert_array_almost_equal(out, [0.])
+
+ def test_distance_transform_edt06(self):
+ # test input validation per discussion on PR #13302
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+ distances_out = numpy.zeros(data.shape, dtype=numpy.float64)
+ with assert_raises(RuntimeError):
+ ndimage.distance_transform_bf(
+ data,
+ return_indices=True,
+ return_distances=False,
+ distances=distances_out
+ )
+
+ def test_generate_structure01(self):
+ struct = ndimage.generate_binary_structure(0, 1)
+ assert_array_almost_equal(struct, 1)
+
+ def test_generate_structure02(self):
+ struct = ndimage.generate_binary_structure(1, 1)
+ assert_array_almost_equal(struct, [1, 1, 1])
+
+ def test_generate_structure03(self):
+ struct = ndimage.generate_binary_structure(2, 1)
+ assert_array_almost_equal(struct, [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]])
+
+ def test_generate_structure04(self):
+ struct = ndimage.generate_binary_structure(2, 2)
+ assert_array_almost_equal(struct, [[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+
+ def test_iterate_structure01(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ out = ndimage.iterate_structure(struct, 2)
+ assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0]])
+
+ def test_iterate_structure02(self):
+ struct = [[0, 1],
+ [1, 1],
+ [0, 1]]
+ out = ndimage.iterate_structure(struct, 2)
+ assert_array_almost_equal(out, [[0, 0, 1],
+ [0, 1, 1],
+ [1, 1, 1],
+ [0, 1, 1],
+ [0, 0, 1]])
+
+ def test_iterate_structure03(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ out = ndimage.iterate_structure(struct, 2, 1)
+ expected = [[0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0]]
+ assert_array_almost_equal(out[0], expected)
+ assert_equal(out[1], [2, 2])
+
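+    # Binary erosion keeps an element only where the structuring
+    # element, centred on it, fits entirely inside the foreground.
+    # border_value controls whether the region outside the array counts
+    # as foreground, which is why the border_value=1 variants below
+    # retain edge elements.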
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion01(self, dtype):
+ data = numpy.ones([], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion02(self, dtype):
+ data = numpy.ones([], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion03(self, dtype):
+ data = numpy.ones([1], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion04(self, dtype):
+ data = numpy.ones([1], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion05(self, dtype):
+ data = numpy.ones([3], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [0, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion06(self, dtype):
+ data = numpy.ones([3], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [1, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion07(self, dtype):
+ data = numpy.ones([5], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [0, 1, 1, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion08(self, dtype):
+ data = numpy.ones([5], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion09(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [0, 0, 0, 0, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion10(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [1, 0, 0, 0, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion11(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ struct = [1, 0, 1]
+ out = ndimage.binary_erosion(data, struct, border_value=1)
+ assert_array_almost_equal(out, [1, 0, 1, 0, 1])
+
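+    # A nonzero ``origin`` offsets the structuring element relative to
+    # the element being processed, shifting the output pattern, as the
+    # following cases show.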
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion12(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ struct = [1, 0, 1]
+ out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
+ assert_array_almost_equal(out, [0, 1, 0, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion13(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ struct = [1, 0, 1]
+ out = ndimage.binary_erosion(data, struct, border_value=1, origin=1)
+ assert_array_almost_equal(out, [1, 1, 0, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion14(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ struct = [1, 1]
+ out = ndimage.binary_erosion(data, struct, border_value=1)
+ assert_array_almost_equal(out, [1, 1, 0, 0, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion15(self, dtype):
+ data = numpy.ones([5], dtype)
+ data[2] = 0
+ struct = [1, 1]
+ out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
+ assert_array_almost_equal(out, [1, 0, 0, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion16(self, dtype):
+ data = numpy.ones([1, 1], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [[1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion17(self, dtype):
+ data = numpy.ones([1, 1], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [[0]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion18(self, dtype):
+ data = numpy.ones([1, 3], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [[0, 0, 0]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion19(self, dtype):
+ data = numpy.ones([1, 3], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [[1, 1, 1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion20(self, dtype):
+ data = numpy.ones([3, 3], dtype)
+ out = ndimage.binary_erosion(data)
+ assert_array_almost_equal(out, [[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion21(self, dtype):
+ data = numpy.ones([3, 3], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, [[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion22(self, dtype):
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_erosion(data, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion23(self, dtype):
+ struct = ndimage.generate_binary_structure(2, 2)
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_erosion(data, struct, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion24(self, dtype):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_erosion(data, struct, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion25(self, dtype):
+ struct = [[0, 1, 0],
+ [1, 0, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 0, 1, 1],
+ [0, 0, 1, 0, 1, 1, 0, 0],
+ [0, 1, 0, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_erosion(data, struct, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_erosion26(self, dtype):
+ struct = [[0, 1, 0],
+ [1, 0, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 1],
+ [0, 0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 0, 1, 1],
+ [0, 0, 1, 0, 1, 1, 0, 0],
+ [0, 1, 0, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_erosion(data, struct, border_value=1,
+ origin=(-1, -1))
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion27(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=2)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion28(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=2, output=out)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion29(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ out = ndimage.binary_erosion(data, struct,
+ border_value=1, iterations=3)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion30(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=3, output=out)
+ assert_array_almost_equal(out, expected)
+
+ # test with output memory overlap
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=3, output=data)
+ assert_array_almost_equal(data, expected)
+
+ def test_binary_erosion31(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 1, 0, 1],
+ [0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 1]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=1, output=out, origin=(-1, -1))
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion32(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_erosion(data, struct,
+ border_value=1, iterations=2)
+ assert_array_almost_equal(out, expected)
+
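+    # iterations=-1 repeats the erosion until the result stops changing;
+    # the mask restricts which elements may be updated on each pass.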
+ def test_binary_erosion33(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 0, 0, 0, 1],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ mask = [[1, 1, 1, 1, 1, 0, 0],
+ [1, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
+ [0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_erosion(data, struct,
+ border_value=1, mask=mask, iterations=-1)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion34(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ mask = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_erosion(data, struct,
+ border_value=1, mask=mask)
+ assert_array_almost_equal(out, expected)
+
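+    # Elements outside the mask keep their input values, so the expected
+    # result is assembled from the masked erosion inside the mask and
+    # the original data outside it.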
+ def test_binary_erosion35(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ mask = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ tmp = [[0, 0, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 1, 0, 1],
+ [0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 1]]
+ expected = numpy.logical_and(tmp, mask)
+ tmp = numpy.logical_and(data, numpy.logical_not(mask))
+ expected = numpy.logical_or(expected, tmp)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=1, output=out,
+ origin=(-1, -1), mask=mask)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion36(self):
+ struct = [[0, 1, 0],
+ [1, 0, 1],
+ [0, 1, 0]]
+ mask = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 1],
+ [0, 0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 1, 1, 1, 0, 1, 1],
+ [0, 0, 1, 0, 1, 1, 0, 0],
+ [0, 1, 0, 1, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]])
+ expected = numpy.logical_and(tmp, mask)
+ tmp = numpy.logical_and(data, numpy.logical_not(mask))
+ expected = numpy.logical_or(expected, tmp)
+ out = ndimage.binary_erosion(data, struct, mask=mask,
+ border_value=1, origin=(-1, -1))
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion37(self):
+ a = numpy.array([[1, 0, 1],
+ [0, 1, 0],
+ [1, 0, 1]], dtype=bool)
+ b = numpy.zeros_like(a)
+ out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0,
+ border_value=True, brute_force=True)
+ assert_(out is b)
+ assert_array_equal(
+ ndimage.binary_erosion(a, structure=a, iterations=0,
+ border_value=True),
+ b)
+
+ def test_binary_erosion38(self):
+ data = numpy.array([[1, 0, 1],
+ [0, 1, 0],
+ [1, 0, 1]], dtype=bool)
+ iterations = 2.0
+ with assert_raises(TypeError):
+ _ = ndimage.binary_erosion(data, iterations=iterations)
+
+ def test_binary_erosion39(self):
+ iterations = numpy.int32(3)
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=iterations, output=out)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_erosion40(self):
+ iterations = numpy.int64(3)
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_erosion(data, struct, border_value=1,
+ iterations=iterations, output=out)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation01(self, dtype):
+ data = numpy.ones([], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, 1)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation02(self, dtype):
+ data = numpy.zeros([], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, 0)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation03(self, dtype):
+ data = numpy.ones([1], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation04(self, dtype):
+ data = numpy.zeros([1], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation05(self, dtype):
+ data = numpy.ones([3], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [1, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation06(self, dtype):
+ data = numpy.zeros([3], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [0, 0, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation07(self, dtype):
+ data = numpy.zeros([3], dtype)
+ data[1] = 1
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [1, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation08(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ data[3] = 1
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation09(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [1, 1, 1, 0, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation10(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ out = ndimage.binary_dilation(data, origin=-1)
+ assert_array_almost_equal(out, [0, 1, 1, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation11(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ out = ndimage.binary_dilation(data, origin=1)
+ assert_array_almost_equal(out, [1, 1, 0, 0, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation12(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ struct = [1, 0, 1]
+ out = ndimage.binary_dilation(data, struct)
+ assert_array_almost_equal(out, [1, 0, 1, 0, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation13(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ struct = [1, 0, 1]
+ out = ndimage.binary_dilation(data, struct, border_value=1)
+ assert_array_almost_equal(out, [1, 0, 1, 0, 1])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation14(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ struct = [1, 0, 1]
+ out = ndimage.binary_dilation(data, struct, origin=-1)
+ assert_array_almost_equal(out, [0, 1, 0, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation15(self, dtype):
+ data = numpy.zeros([5], dtype)
+ data[1] = 1
+ struct = [1, 0, 1]
+ out = ndimage.binary_dilation(data, struct,
+ origin=-1, border_value=1)
+ assert_array_almost_equal(out, [1, 1, 0, 1, 0])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation16(self, dtype):
+ data = numpy.ones([1, 1], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [[1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation17(self, dtype):
+ data = numpy.zeros([1, 1], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [[0]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation18(self, dtype):
+ data = numpy.ones([1, 3], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [[1, 1, 1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation19(self, dtype):
+ data = numpy.ones([3, 3], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation20(self, dtype):
+ data = numpy.zeros([3, 3], dtype)
+ data[1, 1] = 1
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation21(self, dtype):
+ struct = ndimage.generate_binary_structure(2, 2)
+ data = numpy.zeros([3, 3], dtype)
+ data[1, 1] = 1
+ out = ndimage.binary_dilation(data, struct)
+ assert_array_almost_equal(out, [[1, 1, 1],
+ [1, 1, 1],
+ [1, 1, 1]])
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation22(self, dtype):
+ expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation23(self, dtype):
+ expected = [[1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 0, 0, 0, 0, 1],
+ [1, 1, 0, 0, 0, 1, 0, 1],
+ [1, 0, 0, 1, 1, 1, 1, 1],
+ [1, 0, 1, 1, 1, 1, 0, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 0, 1, 0, 0, 1, 0, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation24(self, dtype):
+ expected = [[1, 1, 0, 0, 0, 0, 0, 0],
+ [1, 0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 0, 0],
+ [0, 1, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, origin=(1, 1))
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation25(self, dtype):
+ expected = [[1, 1, 0, 0, 0, 0, 1, 1],
+ [1, 0, 0, 0, 1, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 0, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 0, 0, 1, 0, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation26(self, dtype):
+ struct = ndimage.generate_binary_structure(2, 2)
+ expected = [[1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, struct)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation27(self, dtype):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, struct)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation28(self, dtype):
+ expected = [[1, 1, 1, 1],
+ [1, 0, 0, 1],
+ [1, 0, 0, 1],
+ [1, 1, 1, 1]]
+ data = numpy.array([[0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_dilation29(self):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]]
+
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_dilation(data, struct, iterations=2)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_dilation30(self):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]]
+
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_dilation(data, struct, iterations=2, output=out)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_dilation31(self):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 0, 0, 1, 0],
+ [0, 0, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]]
+
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_dilation(data, struct, iterations=3)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_dilation32(self):
+ struct = [[0, 1],
+ [1, 1]]
+ expected = [[0, 0, 0, 1, 0],
+ [0, 0, 1, 1, 0],
+ [0, 1, 1, 1, 0],
+ [1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0]]
+
+ data = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0]], bool)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_dilation(data, struct, iterations=3, output=out)
+ assert_array_almost_equal(out, expected)
+
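+    # Dilation with iterations=-1 under a mask is reconstruction by
+    # dilation: the seed points grow through the mask until the result
+    # is stable.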
+ def test_binary_dilation33(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 1, 1, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 1, 1, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+
+ out = ndimage.binary_dilation(data, struct, iterations=-1,
+ mask=mask, border_value=0)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_dilation34(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.zeros(mask.shape, bool)
+ out = ndimage.binary_dilation(data, struct, iterations=-1,
+ mask=mask, border_value=1)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_dilation35(self, dtype):
+ tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
+ [1, 0, 0, 0, 1, 0, 1, 1],
+ [0, 0, 1, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 0, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 1, 0, 0, 1, 0, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1]]
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]])
+ mask = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ expected = numpy.logical_and(tmp, mask)
+ tmp = numpy.logical_and(data, numpy.logical_not(mask))
+ expected = numpy.logical_or(expected, tmp)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_dilation(data, mask=mask,
+ origin=(1, 1), border_value=1)
+ assert_array_almost_equal(out, expected)
+
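+    # binary_propagation is documented as equivalent to binary_dilation
+    # with iterations=-1: the input acts as a seed that is propagated
+    # through the mask (morphological reconstruction by dilation).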
+ def test_binary_propagation01(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 1, 1, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 0, 0],
+ [0, 1, 1, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+
+ out = ndimage.binary_propagation(data, struct,
+ mask=mask, border_value=0)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_propagation02(self):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.zeros(mask.shape, bool)
+ out = ndimage.binary_propagation(data, struct,
+ mask=mask, border_value=1)
+ assert_array_almost_equal(out, expected)
+
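+    # Opening is an erosion followed by a dilation and removes features
+    # smaller than the structuring element; closing is the reverse
+    # composition and fills gaps smaller than the element.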
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_opening01(self, dtype):
+ expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 0, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_opening(data)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_opening02(self, dtype):
+ struct = ndimage.generate_binary_structure(2, 2)
+ expected = [[1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_opening(data, struct)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_closing01(self, dtype):
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 0, 1, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_closing(data)
+ assert_array_almost_equal(out, expected)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_binary_closing02(self, dtype):
+ struct = ndimage.generate_binary_structure(2, 2)
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_closing(data, struct)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_fill_holes01(self):
+ expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_fill_holes(data)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_fill_holes02(self):
+ expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 1, 0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_fill_holes(data)
+ assert_array_almost_equal(out, expected)
+
+ def test_binary_fill_holes03(self):
+ expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0, 1, 1, 1],
+ [0, 1, 1, 1, 0, 1, 1, 1],
+ [0, 1, 1, 1, 0, 1, 1, 1],
+ [0, 0, 1, 0, 0, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 1, 0, 1, 1, 1],
+ [0, 1, 0, 1, 0, 1, 0, 1],
+ [0, 1, 0, 1, 0, 1, 0, 1],
+ [0, 0, 1, 0, 0, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+ out = ndimage.binary_fill_holes(data)
+ assert_array_almost_equal(out, expected)
+
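+    # Grey-scale erosion takes, at each position, the minimum of
+    # input - structure over the footprint.  A zero (flat) structure
+    # therefore reduces to a moving-minimum filter, so the
+    # footprint-only and zero-structure calls below must agree, while a
+    # structure of ones lowers every output value by one.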
+ def test_grey_erosion01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ output = ndimage.grey_erosion(array, footprint=footprint)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1]], output)
+
+ def test_grey_erosion01_overlap(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ ndimage.grey_erosion(array, footprint=footprint, output=array)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1]], array)
+
+ def test_grey_erosion02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ output = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal([[2, 2, 1, 1, 1],
+ [2, 3, 1, 3, 1],
+ [5, 5, 3, 3, 1]], output)
+
+ def test_grey_erosion03(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[1, 1, 1], [1, 1, 1]]
+ output = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal([[1, 1, 0, 0, 0],
+ [1, 2, 0, 2, 0],
+ [4, 4, 2, 2, 0]], output)
+
+ def test_grey_dilation01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[0, 1, 1], [1, 0, 1]]
+ output = ndimage.grey_dilation(array, footprint=footprint)
+ assert_array_almost_equal([[7, 7, 9, 9, 5],
+ [7, 9, 8, 9, 7],
+ [8, 8, 8, 7, 7]], output)
+
+ def test_grey_dilation02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[0, 1, 1], [1, 0, 1]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ output = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal([[7, 7, 9, 9, 5],
+ [7, 9, 8, 9, 7],
+ [8, 8, 8, 7, 7]], output)
+
+ def test_grey_dilation03(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[0, 1, 1], [1, 0, 1]]
+ structure = [[1, 1, 1], [1, 1, 1]]
+ output = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal([[8, 8, 10, 10, 6],
+ [8, 10, 9, 10, 8],
+ [9, 9, 9, 8, 8]], output)
+
+ def test_grey_opening01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ tmp = ndimage.grey_erosion(array, footprint=footprint)
+ expected = ndimage.grey_dilation(tmp, footprint=footprint)
+ output = ndimage.grey_opening(array, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ def test_grey_opening02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ expected = ndimage.grey_dilation(tmp, footprint=footprint,
+ structure=structure)
+ output = ndimage.grey_opening(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
+ def test_grey_closing01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ tmp = ndimage.grey_dilation(array, footprint=footprint)
+ expected = ndimage.grey_erosion(tmp, footprint=footprint)
+ output = ndimage.grey_closing(array, footprint=footprint)
+ assert_array_almost_equal(expected, output)
+
+ def test_grey_closing02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ expected = ndimage.grey_erosion(tmp, footprint=footprint,
+ structure=structure)
+ output = ndimage.grey_closing(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
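+    # The morphological gradient is dilation - erosion and the
+    # morphological Laplace is dilation + erosion - 2 * input; the fused
+    # routines are checked against those compositions.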
+ def test_morphological_gradient01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ expected = tmp1 - tmp2
+ output = numpy.zeros(array.shape, array.dtype)
+ ndimage.morphological_gradient(array, footprint=footprint,
+ structure=structure, output=output)
+ assert_array_almost_equal(expected, output)
+
+ def test_morphological_gradient02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ expected = tmp1 - tmp2
+ output = ndimage.morphological_gradient(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
+ def test_morphological_laplace01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ expected = tmp1 + tmp2 - 2 * array
+ output = numpy.zeros(array.shape, array.dtype)
+ ndimage.morphological_laplace(array, footprint=footprint,
+ structure=structure, output=output)
+ assert_array_almost_equal(expected, output)
+
+ def test_morphological_laplace02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+ structure=structure)
+ tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+ structure=structure)
+ expected = tmp1 + tmp2 - 2 * array
+ output = ndimage.morphological_laplace(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
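+    # The white tophat, input - opening, extracts bright details smaller
+    # than the structuring element; the black tophat, closing - input,
+    # extracts the corresponding dark details.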
+ def test_white_tophat01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_opening(array, footprint=footprint,
+ structure=structure)
+ expected = array - tmp
+ output = numpy.zeros(array.shape, array.dtype)
+ ndimage.white_tophat(array, footprint=footprint,
+ structure=structure, output=output)
+ assert_array_almost_equal(expected, output)
+
+ def test_white_tophat02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_opening(array, footprint=footprint,
+ structure=structure)
+ expected = array - tmp
+ output = ndimage.white_tophat(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
+ def test_white_tophat03(self):
+ array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
+ structure = numpy.ones((3, 3), dtype=numpy.bool_)
+ expected = numpy.array([[0, 1, 1, 0, 0, 0, 0],
+ [1, 0, 0, 1, 1, 1, 0],
+ [1, 0, 0, 1, 1, 1, 0],
+ [0, 1, 1, 0, 0, 0, 1],
+ [0, 1, 1, 0, 1, 0, 1],
+ [0, 1, 1, 0, 0, 0, 1],
+ [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_)
+
+ output = ndimage.white_tophat(array, structure=structure)
+ assert_array_equal(expected, output)
+
+ def test_white_tophat04(self):
+ array = numpy.eye(5, dtype=numpy.bool_)
+ structure = numpy.ones((3, 3), dtype=numpy.bool_)
+
+        # A boolean input with a float output array must be accepted
+        # without raising
+ output = numpy.empty_like(array, dtype=numpy.float64)
+ ndimage.white_tophat(array, structure=structure, output=output)
+
+ def test_black_tophat01(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_closing(array, footprint=footprint,
+ structure=structure)
+ expected = tmp - array
+ output = numpy.zeros(array.shape, array.dtype)
+ ndimage.black_tophat(array, footprint=footprint,
+ structure=structure, output=output)
+ assert_array_almost_equal(expected, output)
+
+ def test_black_tophat02(self):
+ array = numpy.array([[3, 2, 5, 1, 4],
+ [7, 6, 9, 3, 5],
+ [5, 8, 3, 7, 1]])
+ footprint = [[1, 0, 1], [1, 1, 0]]
+ structure = [[0, 0, 0], [0, 0, 0]]
+ tmp = ndimage.grey_closing(array, footprint=footprint,
+ structure=structure)
+ expected = tmp - array
+ output = ndimage.black_tophat(array, footprint=footprint,
+ structure=structure)
+ assert_array_almost_equal(expected, output)
+
+ def test_black_tophat03(self):
+ array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0, 1, 0],
+ [0, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
+ structure = numpy.ones((3, 3), dtype=numpy.bool_)
+ expected = numpy.array([[0, 1, 1, 1, 1, 1, 1],
+ [1, 0, 0, 0, 0, 0, 1],
+ [1, 0, 0, 0, 0, 0, 1],
+ [1, 0, 0, 0, 0, 0, 1],
+ [1, 0, 0, 0, 1, 0, 1],
+ [1, 0, 0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_)
+
+ output = ndimage.black_tophat(array, structure=structure)
+ assert_array_equal(expected, output)
+
+ def test_black_tophat04(self):
+ array = numpy.eye(5, dtype=numpy.bool_)
+ structure = numpy.ones((3, 3), dtype=numpy.bool_)
+
+        # A boolean input with a float output array must be accepted
+        # without raising
+ output = numpy.empty_like(array, dtype=numpy.float64)
+ ndimage.black_tophat(array, structure=structure, output=output)
+
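+    # binary_hit_or_miss marks positions where ``structure1`` fits the
+    # foreground and ``structure2`` fits the background; when
+    # ``structure2`` is omitted, it defaults to the logical complement
+    # of ``structure1``.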
+ @pytest.mark.parametrize('dtype', types)
+ def test_hit_or_miss01(self, dtype):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 1, 0, 0, 0],
+ [1, 1, 1, 0, 0],
+ [0, 1, 0, 1, 1],
+ [0, 0, 1, 1, 1],
+ [0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0]], dtype)
+ out = numpy.zeros(data.shape, bool)
+ ndimage.binary_hit_or_miss(data, struct, output=out)
+ assert_array_almost_equal(expected, out)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_hit_or_miss02(self, dtype):
+ struct = [[0, 1, 0],
+ [1, 1, 1],
+ [0, 1, 0]]
+ expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
+ [1, 1, 1, 0, 0, 1, 0, 0],
+ [0, 1, 0, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_hit_or_miss(data, struct)
+ assert_array_almost_equal(expected, out)
+
+ @pytest.mark.parametrize('dtype', types)
+ def test_hit_or_miss03(self, dtype):
+ struct1 = [[0, 0, 0],
+ [1, 1, 1],
+ [0, 0, 0]]
+ struct2 = [[1, 1, 1],
+ [0, 0, 0],
+ [1, 1, 1]]
+ expected = [[0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]]
+ data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
+ [1, 1, 1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 1, 1, 1, 1, 0],
+ [0, 0, 1, 1, 1, 1, 1, 0],
+ [0, 1, 1, 1, 0, 1, 1, 0],
+ [0, 0, 0, 0, 1, 1, 1, 0],
+ [0, 1, 1, 1, 1, 1, 1, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+ out = ndimage.binary_hit_or_miss(data, struct1, struct2)
+ assert_array_almost_equal(expected, out)
+
+
+class TestDilateFix:
+
+ def setup_method(self):
+ # dilation related setup
+ self.array = numpy.array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0],
+ [0, 0, 1, 1, 0],
+ [0, 0, 0, 0, 0]], dtype=numpy.uint8)
+
+ self.sq3x3 = numpy.ones((3, 3))
+ dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3)
+ self.dilated3x3 = dilated3x3.view(numpy.uint8)
+
+ def test_dilation_square_structure(self):
+ result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
+        # grey dilation adds the structure values (here all ones), so
+        # the result is the binary dilation plus 1
+ assert_array_almost_equal(result, self.dilated3x3 + 1)
+
+ def test_dilation_scalar_size(self):
+ result = ndimage.grey_dilation(self.array, size=3)
+ assert_array_almost_equal(result, self.dilated3x3)
+
+
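+# The mask, border_value and brute_force parameters were appended after
+# the original (input, structure, iterations, output, origin) signature;
+# passing them positionally with their documented defaults must
+# reproduce the old behaviour.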
+class TestBinaryOpeningClosing:
+
+ def setup_method(self):
+ a = numpy.zeros((5, 5), dtype=bool)
+ a[1:4, 1:4] = True
+ a[4, 4] = True
+ self.array = a
+ self.sq3x3 = numpy.ones((3, 3))
+ self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
+ 1, None, 0)
+ self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
+ 1, None, 0)
+
+ def test_opening_new_arguments(self):
+ opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
+ 0, None, 0, False)
+ assert_array_equal(opened_new, self.opened_old)
+
+ def test_closing_new_arguments(self):
+ closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
+ 0, None, 0, False)
+ assert_array_equal(closed_new, self.closed_old)
+
+
+def test_binary_erosion_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: non-integer iterations
+    # (formerly a ValueError) must raise TypeError
+ data = numpy.ones([1])
+ assert_raises(TypeError, ndimage.binary_erosion, data, iterations=0.5)
+ assert_raises(TypeError, ndimage.binary_erosion, data, iterations=1.5)
+
+
+def test_binary_dilation_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: non-integer iterations
+    # (formerly a ValueError) must raise TypeError
+ data = numpy.ones([1])
+ assert_raises(TypeError, ndimage.binary_dilation, data, iterations=0.5)
+ assert_raises(TypeError, ndimage.binary_dilation, data, iterations=1.5)
+
+
+def test_binary_opening_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: non-integer iterations
+    # (formerly a ValueError) must raise TypeError
+ data = numpy.ones([1])
+ assert_raises(TypeError, ndimage.binary_opening, data, iterations=0.5)
+ assert_raises(TypeError, ndimage.binary_opening, data, iterations=1.5)
+
+
+def test_binary_closing_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: non-integer iterations
+    # (formerly a ValueError) must raise TypeError
+ data = numpy.ones([1])
+ assert_raises(TypeError, ndimage.binary_closing, data, iterations=0.5)
+ assert_raises(TypeError, ndimage.binary_closing, data, iterations=1.5)
+
+
+def test_binary_erosion_noninteger_brute_force_passes_when_true():
+    # regression test for gh-9905, gh-9909: a non-integer brute_force
+    # value is not rejected but coerced to bool
+ data = numpy.ones([1])
+
+ assert ndimage.binary_erosion(
+ data, iterations=2, brute_force=1.5
+ ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(1.5))
+ assert ndimage.binary_erosion(
+ data, iterations=2, brute_force=0.0
+ ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(0.0))
+
+
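+# The binary morphology routines accept the input array itself as the
+# ``output`` argument; the in-place result must match the out-of-place
+# one, and a call without ``output`` must leave its input unmodified.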
+@pytest.mark.parametrize(
+ 'function',
+ ['binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing'],
+)
+@pytest.mark.parametrize('iterations', [1, 5])
+@pytest.mark.parametrize('brute_force', [False, True])
+def test_binary_input_as_output(function, iterations, brute_force):
+ rstate = numpy.random.RandomState(123)
+ data = rstate.randint(low=0, high=2, size=100).astype(bool)
+ ndi_func = getattr(ndimage, function)
+
+ # input data is not modified
+ data_orig = data.copy()
+ expected = ndi_func(data, brute_force=brute_force, iterations=iterations)
+ assert_array_equal(data, data_orig)
+
+ # data should now contain the expected result
+ ndi_func(data, brute_force=brute_force, iterations=iterations, output=data)
+ assert_array_equal(expected, data)
+
+
+def test_binary_hit_or_miss_input_as_output():
+ rstate = numpy.random.RandomState(123)
+ data = rstate.randint(low=0, high=2, size=100).astype(bool)
+
+ # input data is not modified
+ data_orig = data.copy()
+ expected = ndimage.binary_hit_or_miss(data)
+ assert_array_equal(data, data_orig)
+
+ # data should now contain the expected result
+ ndimage.binary_hit_or_miss(data, output=data)
+ assert_array_equal(expected, data)
+
+
+def test_distance_transform_cdt_invalid_metric():
+ msg = 'invalid metric provided'
+ with pytest.raises(ValueError, match=msg):
+        ndimage.distance_transform_cdt(numpy.ones((5, 5)),
+                                       metric="garbage")
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..a25429eebc8b3739e00465b43fd28ba24b320b45
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_ni_support.py
@@ -0,0 +1,77 @@
+import pytest
+
+import numpy as np
+from .._ni_support import _get_output
+
+
+@pytest.mark.parametrize(
+ 'dtype',
+ [
+ # String specifiers
+ 'f4', 'float32', 'complex64', 'complex128',
+ # Type and dtype specifiers
+ np.float32, float, np.dtype('f4'),
+ # Derive from input
+ None,
+ ],
+)
+def test_get_output_basic(dtype):
+ shape = (2, 3)
+
+ input_ = np.zeros(shape, 'float32')
+
+ # For None, derive dtype from input
+ expected_dtype = 'float32' if dtype is None else dtype
+
+ # Output is dtype-specifier, retrieve shape from input
+ result = _get_output(dtype, input_)
+ assert result.shape == shape
+ assert result.dtype == np.dtype(expected_dtype)
+
+ # Output is dtype specifier, with explicit shape, overriding input
+ result = _get_output(dtype, input_, shape=(3, 2))
+ assert result.shape == (3, 2)
+ assert result.dtype == np.dtype(expected_dtype)
+
+ # Output is pre-allocated array, return directly
+ output = np.zeros(shape, dtype)
+ result = _get_output(output, input_)
+ assert result is output
+
+
+def test_get_output_complex():
+ shape = (2, 3)
+
+ input_ = np.zeros(shape)
+
+ # None, promote input type to complex
+ result = _get_output(None, input_, complex_output=True)
+ assert result.shape == shape
+ assert result.dtype == np.dtype('complex128')
+
+ # Explicit type, promote type to complex
+ with pytest.warns(UserWarning, match='promoting specified output dtype to complex'):
+ result = _get_output(float, input_, complex_output=True)
+ assert result.shape == shape
+ assert result.dtype == np.dtype('complex128')
+
+ # String specifier, simply verify complex output
+ result = _get_output('complex64', input_, complex_output=True)
+ assert result.shape == shape
+ assert result.dtype == np.dtype('complex64')
+
+
+def test_get_output_error_cases():
+ input_ = np.zeros((2, 3), 'float32')
+
+ # Two separate paths can raise the same error
+ with pytest.raises(RuntimeError, match='output must have complex dtype'):
+ _get_output('float32', input_, complex_output=True)
+ with pytest.raises(RuntimeError, match='output must have complex dtype'):
+ _get_output(np.zeros((2, 3)), input_, complex_output=True)
+
+ with pytest.raises(RuntimeError, match='output must have numeric dtype'):
+ _get_output('void', input_)
+
+ with pytest.raises(RuntimeError, match='shape not correct'):
+ _get_output(np.zeros((3, 2)), input_)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py
new file mode 100644
index 0000000000000000000000000000000000000000..a74e55111f8fac906f58a947db4a214da82a3cae
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/tests/test_splines.py
@@ -0,0 +1,65 @@
+"""Tests for spline filtering."""
+import numpy as np
+import pytest
+
+from numpy.testing import assert_almost_equal
+
+from scipy import ndimage
+
+
+def get_spline_knot_values(order):
+ """Knot values to the right of a B-spline's center."""
+ knot_values = {0: [1],
+ 1: [1],
+ 2: [6, 1],
+ 3: [4, 1],
+ 4: [230, 76, 1],
+ 5: [66, 26, 1]}
+
+ return knot_values[order]
+
+
+def make_spline_knot_matrix(n, order, mode='mirror'):
+ """Matrix to invert to find the spline coefficients."""
+ knot_values = get_spline_knot_values(order)
+
+ matrix = np.zeros((n, n))
+ for diag, knot_value in enumerate(knot_values):
+ indices = np.arange(diag, n)
+ if diag == 0:
+ matrix[indices, indices] = knot_value
+ else:
+ matrix[indices, indices - diag] = knot_value
+ matrix[indices - diag, indices] = knot_value
+
+ knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
+
+ if mode == 'mirror':
+ start, step = 1, 1
+ elif mode == 'reflect':
+ start, step = 0, 1
+ elif mode == 'grid-wrap':
+ start, step = -1, -1
+ else:
+ raise ValueError(f'unsupported mode {mode}')
+
+ for row in range(len(knot_values) - 1):
+ for idx, knot_value in enumerate(knot_values[row + 1:]):
+ matrix[row, start + step*idx] += knot_value
+ matrix[-row - 1, -start - 1 - step*idx] += knot_value
+
+ return matrix / knot_values_sum
+
+
+@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
+@pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect'])
+def test_spline_filter_vs_matrix_solution(order, mode):
+ n = 100
+ eye = np.eye(n, dtype=float)
+ spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order,
+ mode=mode)
+ spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order,
+ mode=mode)
+ matrix = make_spline_knot_matrix(n, order, mode=mode)
+ assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
+ assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffb2d87fe0c5349bf5ca14fcb17bf8b9038bfa3b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_dcsrch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..517a7350735dd5a9fbe37833d8eedeb1ecfe28b2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfbabc652f19ac47eb29f86371c3822001604517
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_isotonic.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e92436ede2aaf48a95c71b959d81a0a04b65c367
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linesearch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e67f2009259b76eef9a4679217d2c34aa36eabe1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..220e81be9289d8bd38af7b945449e52a6704da38
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d4b79e6570650fff0be3981a26d426d20e23dba
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_milp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5957c8ea043e2038d68388bfa44423e2d05aaa19
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_numdiff.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2630cb4bc3c2bb22beb1f79bc44c95e0d6c44e79
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_root.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26afd1028f9bec9ca6a48aa3aa4728b1c736e99a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_shgo.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41e526ad9ea2a2d893d4dce3d1df380f0d9efa2e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ddb0a0530d9cd3c5c91a312903881af48aa94de
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8cbe0194d148cffce4deed3c35ca9ddb2c6a76d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fa6ebaa1abef92d3665c3ebcffb0730c9765b27
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2aa8879b560a551ab844f7db4497d3b97604f106
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41c10e28cb51583b5f0c6faefd20c634785b6876
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/cobyla.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6a99c4eaeba9df85819bad2ededb1072a878f34
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a1a2022760c08de74637ae40c3227d6cb314369
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/minpack2.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0489adbb5565df5e622529303b538fc3fc9241a0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f60adcc891304e34ac9d85d108b6a232b4bf0c93
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__init__.py
@@ -0,0 +1,5 @@
+"""This module contains least-squares algorithms."""
+from .least_squares import least_squares
+from .lsq_linear import lsq_linear
+
+__all__ = ['least_squares', 'lsq_linear']
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1eaed8c36dd513435aefd3b4d26430fbc96bac97
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2e058750db17f4e60b4cc81f508074bf7936598
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..890e2d3c57a0739af06068c19e1f0c3b75c9ff44
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74c255fad5c213a940659bb8f25a3fc3b490e2c9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7440669b50a7c84bea2da0cec79c59f56d3146b5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6f1ddd61ef497114893aed94c925436a5739f2a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae4de7472171cdda36c597b357988591e1e7f6ce
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8bcf95c04864172a44b3c549f88c60e9e231738b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f34ead4a1fc4edbb3c2ab50a204aa9a3cc21cff
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/bvls.py
@@ -0,0 +1,183 @@
+"""Bounded-variable least-squares algorithm."""
+import numpy as np
+from numpy.linalg import norm, lstsq
+from scipy.optimize import OptimizeResult
+
+from .common import print_header_linear, print_iteration_linear
+
+
+def compute_kkt_optimality(g, on_bound):
+ """Compute the maximum violation of KKT conditions."""
+ g_kkt = g * on_bound
+ free_set = on_bound == 0
+ g_kkt[free_set] = np.abs(g[free_set])
+ return np.max(g_kkt)
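+
+# Illustrative example (editor's sketch with made-up values, not part of the
+# original source): a free variable contributes |g[i]| to the measure, while
+# a variable on a bound contributes g[i] * on_bound[i], which is positive
+# exactly when the gradient pushes the variable out of the feasible region:
+#
+#     compute_kkt_optimality(np.array([0.5, -2.0, 1.0]),
+#                            np.array([0, -1, 1]))
+#     # -> 2.0 (the lower-bounded variable with g = -2 is the worst violator)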
+
+
+def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None):
+ m, n = A.shape
+
+ x = x_lsq.copy()
+ on_bound = np.zeros(n)
+
+ mask = x <= lb
+ x[mask] = lb[mask]
+ on_bound[mask] = -1
+
+ mask = x >= ub
+ x[mask] = ub[mask]
+ on_bound[mask] = 1
+
+ free_set = on_bound == 0
+ active_set = ~free_set
+ free_set, = np.nonzero(free_set)
+
+ r = A.dot(x) - b
+ cost = 0.5 * np.dot(r, r)
+ initial_cost = cost
+ g = A.T.dot(r)
+
+ cost_change = None
+ step_norm = None
+ iteration = 0
+
+ if verbose == 2:
+ print_header_linear()
+
+ # This is the initialization loop. The requirement is that the
+ # least-squares solution on the free variables be feasible before BVLS
+ # starts. One possible initialization is to set all variables to lower or
+ # upper bounds, but many iterations may then be required later on. The
+ # ad-hoc procedure implemented here should intuitively give a better
+ # initial state: find the least-squares solution on the current free
+ # variables; if it is feasible, stop; otherwise, set the violating
+ # variables to their corresponding bounds and continue on the reduced set
+ # of free variables.
+
+ while free_set.size > 0:
+ if verbose == 2:
+ optimality = compute_kkt_optimality(g, on_bound)
+ print_iteration_linear(iteration, cost, cost_change, step_norm,
+ optimality)
+
+ iteration += 1
+ x_free_old = x[free_set].copy()
+
+ A_free = A[:, free_set]
+ b_free = b - A.dot(x * active_set)
+ z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+ lbv = z < lb[free_set]
+ ubv = z > ub[free_set]
+ v = lbv | ubv
+
+ if np.any(lbv):
+ ind = free_set[lbv]
+ x[ind] = lb[ind]
+ active_set[ind] = True
+ on_bound[ind] = -1
+
+ if np.any(ubv):
+ ind = free_set[ubv]
+ x[ind] = ub[ind]
+ active_set[ind] = True
+ on_bound[ind] = 1
+
+ ind = free_set[~v]
+ x[ind] = z[~v]
+
+ r = A.dot(x) - b
+ cost_new = 0.5 * np.dot(r, r)
+ cost_change = cost - cost_new
+ cost = cost_new
+ g = A.T.dot(r)
+ step_norm = norm(x[free_set] - x_free_old)
+
+ if np.any(v):
+ free_set = free_set[~v]
+ else:
+ break
+
+ if max_iter is None:
+ max_iter = n
+ max_iter += iteration
+
+ termination_status = None
+
+ # Main BVLS loop.
+
+ optimality = compute_kkt_optimality(g, on_bound)
+ for iteration in range(iteration, max_iter): # BVLS Loop A
+ if verbose == 2:
+ print_iteration_linear(iteration, cost, cost_change,
+ step_norm, optimality)
+
+ if optimality < tol:
+ termination_status = 1
+
+ if termination_status is not None:
+ break
+
+ move_to_free = np.argmax(g * on_bound)
+ on_bound[move_to_free] = 0
+
+ while True: # BVLS Loop B
+
+ free_set = on_bound == 0
+ active_set = ~free_set
+ free_set, = np.nonzero(free_set)
+
+ x_free = x[free_set]
+ x_free_old = x_free.copy()
+ lb_free = lb[free_set]
+ ub_free = ub[free_set]
+
+ A_free = A[:, free_set]
+ b_free = b - A.dot(x * active_set)
+ z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+ lbv, = np.nonzero(z < lb_free)
+ ubv, = np.nonzero(z > ub_free)
+ v = np.hstack((lbv, ubv))
+
+ if v.size > 0:
+ alphas = np.hstack((
+ lb_free[lbv] - x_free[lbv],
+ ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])
+
+ i = np.argmin(alphas)
+ i_free = v[i]
+ alpha = alphas[i]
+
+ x_free *= 1 - alpha
+ x_free += alpha * z
+ x[free_set] = x_free
+
+ if i < lbv.size:
+ on_bound[free_set[i_free]] = -1
+ else:
+ on_bound[free_set[i_free]] = 1
+ else:
+ x_free = z
+ x[free_set] = x_free
+ break
+
+ step_norm = norm(x_free - x_free_old)
+
+ r = A.dot(x) - b
+ cost_new = 0.5 * np.dot(r, r)
+ cost_change = cost - cost_new
+
+ if cost_change < tol * cost:
+ termination_status = 2
+ cost = cost_new
+
+ g = A.T.dot(r)
+ optimality = compute_kkt_optimality(g, on_bound)
+
+ if termination_status is None:
+ termination_status = 0
+
+ return OptimizeResult(
+ x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
+ nit=iteration + 1, status=termination_status,
+ initial_cost=initial_cost)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..995c3b64ea64670463083ae41ba038f61338cdef
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/common.py
@@ -0,0 +1,733 @@
+"""Functions used by least-squares algorithms."""
+from math import copysign
+
+import numpy as np
+from numpy.linalg import norm
+
+from scipy.linalg import cho_factor, cho_solve, LinAlgError
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+
+
+EPS = np.finfo(float).eps
+
+
+# Functions related to a trust-region problem.
+
+
+def intersect_trust_region(x, s, Delta):
+ """Find the intersection of a line with the boundary of a trust region.
+
+ This function solves the quadratic equation with respect to t:
+ ||(x + s*t)||**2 = Delta**2.
+
+ Returns
+ -------
+ t_neg, t_pos : tuple of float
+ Negative and positive roots.
+
+ Raises
+ ------
+ ValueError
+ If `s` is zero or `x` is not within the trust region.
+ """
+ a = np.dot(s, s)
+ if a == 0:
+ raise ValueError("`s` is zero.")
+
+ b = np.dot(x, s)
+
+ c = np.dot(x, x) - Delta**2
+ if c > 0:
+ raise ValueError("`x` is not within the trust region.")
+
+ d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
+
+ # Computations below avoid loss of significance, see "Numerical Recipes".
+ q = -(b + copysign(d, b))
+ t1 = q / a
+ t2 = c / q
+
+ if t1 < t2:
+ return t1, t2
+ else:
+ return t2, t1
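+
+# Illustrative example (editor's sketch, not part of the original source):
+# for x = [1, 0], s = [0, 1] and Delta = 2 the equation reduces to
+# 1 + t**2 = 4, so the roots are -sqrt(3) and +sqrt(3):
+#
+#     intersect_trust_region(np.array([1., 0.]), np.array([0., 1.]), 2.0)
+#     # -> approximately (-1.7320508, 1.7320508)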
+
+
+def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
+ rtol=0.01, max_iter=10):
+ """Solve a trust-region problem arising in least-squares minimization.
+
+ This function implements a method described by J. J. More [1]_ and used
+ in MINPACK, but it relies on a single SVD of the Jacobian instead of a
+ series of Cholesky decompositions. Before running this function, compute:
+ ``U, s, VT = svd(J, full_matrices=False)``.
+
+ Parameters
+ ----------
+ n : int
+ Number of variables.
+ m : int
+ Number of residuals.
+ uf : ndarray
+ Computed as U.T.dot(f).
+ s : ndarray
+ Singular values of J.
+ V : ndarray
+ Transpose of VT.
+ Delta : float
+ Radius of a trust region.
+ initial_alpha : float, optional
+ Initial guess for alpha, which might be available from a previous
+ iteration. If None, determined automatically.
+ rtol : float, optional
+ Stopping tolerance for the root-finding procedure. Namely, the
+ solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
+ max_iter : int, optional
+ Maximum allowed number of iterations for the root-finding procedure.
+
+ Returns
+ -------
+ p : ndarray, shape (n,)
+ Found solution of a trust-region problem.
+ alpha : float
+ Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
+ Sometimes called Levenberg-Marquardt parameter.
+ n_iter : int
+ Number of iterations made by the root-finding procedure. Zero means
+ that the Gauss-Newton step was selected as the solution.
+
+ References
+ ----------
+ .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
+ in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+ """
+ def phi_and_derivative(alpha, suf, s, Delta):
+ """Function of which to find zero.
+
+ It is defined as "norm of regularized (by alpha) least-squares
+ solution minus `Delta`". Refer to [1]_.
+ """
+ denom = s**2 + alpha
+ p_norm = norm(suf / denom)
+ phi = p_norm - Delta
+ phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
+ return phi, phi_prime
+
+ suf = s * uf
+
+ # Check if J has full rank and try Gauss-Newton step.
+ if m >= n:
+ threshold = EPS * m * s[0]
+ full_rank = s[-1] > threshold
+ else:
+ full_rank = False
+
+ if full_rank:
+ p = -V.dot(uf / s)
+ if norm(p) <= Delta:
+ return p, 0.0, 0
+
+ alpha_upper = norm(suf) / Delta
+
+ if full_rank:
+ phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
+ alpha_lower = -phi / phi_prime
+ else:
+ alpha_lower = 0.0
+
+ if initial_alpha is None or not full_rank and initial_alpha == 0:
+ alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+ else:
+ alpha = initial_alpha
+
+ for it in range(max_iter):
+ if alpha < alpha_lower or alpha > alpha_upper:
+ alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+
+ phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
+
+ if phi < 0:
+ alpha_upper = alpha
+
+ ratio = phi / phi_prime
+ alpha_lower = max(alpha_lower, alpha - ratio)
+ alpha -= (phi + Delta) * ratio / Delta
+
+ if np.abs(phi) < rtol * Delta:
+ break
+
+ p = -V.dot(suf / (s**2 + alpha))
+
+ # Make the norm of p equal to Delta; p is changed only slightly during
+ # this. It is done to prevent p from lying outside the trust region
+ # (which can cause problems later).
+ p *= Delta / norm(p)
+
+ return p, alpha, it + 1
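+
+# Usage sketch (editor's addition with made-up data, not part of the
+# original source): the SVD of the Jacobian is computed once up front,
+# exactly as the docstring requires, and the returned step never leaves
+# the trust region:
+#
+#     rng = np.random.default_rng(0)
+#     J = rng.standard_normal((10, 3))
+#     f = rng.standard_normal(10)
+#     U, s, VT = np.linalg.svd(J, full_matrices=False)
+#     p, alpha, n_iter = solve_lsq_trust_region(3, 10, U.T.dot(f), s, VT.T,
+#                                               Delta=1.0)
+#     assert norm(p) <= 1.0 + 1e-10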
+
+
+def solve_trust_region_2d(B, g, Delta):
+ """Solve a general trust-region problem in 2 dimensions.
+
+ The problem is reformulated as a 4th order algebraic equation,
+ the solution of which is found by numpy.roots.
+
+ Parameters
+ ----------
+ B : ndarray, shape (2, 2)
+ Symmetric matrix, defines a quadratic term of the function.
+ g : ndarray, shape (2,)
+ Defines a linear term of the function.
+ Delta : float
+ Radius of a trust region.
+
+ Returns
+ -------
+ p : ndarray, shape (2,)
+ Found solution.
+ newton_step : bool
+ Whether the returned solution is the Newton step which lies within
+ the trust region.
+ """
+ try:
+ R, lower = cho_factor(B)
+ p = -cho_solve((R, lower), g)
+ if np.dot(p, p) <= Delta**2:
+ return p, True
+ except LinAlgError:
+ pass
+
+ a = B[0, 0] * Delta**2
+ b = B[0, 1] * Delta**2
+ c = B[1, 1] * Delta**2
+
+ d = g[0] * Delta
+ f = g[1] * Delta
+
+ coeffs = np.array(
+ [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
+ t = np.roots(coeffs) # Can handle leading zeros.
+ t = np.real(t[np.isreal(t)])
+
+ p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
+ value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
+ i = np.argmin(value)
+ p = p[:, i]
+
+ return p, False
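+
+# Illustrative example (editor's sketch): for B = I, g = [1, 0] and
+# Delta = 1 the Newton step -g is feasible, so it is returned directly
+# with newton_step=True:
+#
+#     solve_trust_region_2d(np.eye(2), np.array([1., 0.]), 1.0)
+#     # -> (array([-1., -0.]), True)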
+
+
+def update_tr_radius(Delta, actual_reduction, predicted_reduction,
+ step_norm, bound_hit):
+ """Update the radius of a trust region based on the cost reduction.
+
+ Returns
+ -------
+ Delta : float
+ New radius.
+ ratio : float
+ Ratio between actual and predicted reductions.
+ """
+ if predicted_reduction > 0:
+ ratio = actual_reduction / predicted_reduction
+ elif predicted_reduction == actual_reduction == 0:
+ ratio = 1
+ else:
+ ratio = 0
+
+ if ratio < 0.25:
+ Delta = 0.25 * step_norm
+ elif ratio > 0.75 and bound_hit:
+ Delta *= 2.0
+
+ return Delta, ratio
+
+
+# Construction and minimization of quadratic functions.
+
+
+def build_quadratic_1d(J, g, s, diag=None, s0=None):
+ """Parameterize a multivariate quadratic function along a line.
+
+ The resulting univariate quadratic function is given as follows::
+
+ f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
+ g.T * (s0 + s*t)
+
+ Parameters
+ ----------
+ J : ndarray, sparse matrix or LinearOperator shape (m, n)
+ Jacobian matrix, affects the quadratic term.
+ g : ndarray, shape (n,)
+ Gradient, defines the linear term.
+ s : ndarray, shape (n,)
+ Direction vector of a line.
+ diag : None or ndarray with shape (n,), optional
+ Additional diagonal part, affects the quadratic term.
+ If None, assumed to be 0.
+ s0 : None or ndarray with shape (n,), optional
+ Initial point. If None, assumed to be 0.
+
+ Returns
+ -------
+ a : float
+ Coefficient for t**2.
+ b : float
+ Coefficient for t.
+ c : float
+ Free term. Returned only if `s0` is provided.
+ """
+ v = J.dot(s)
+ a = np.dot(v, v)
+ if diag is not None:
+ a += np.dot(s * diag, s)
+ a *= 0.5
+
+ b = np.dot(g, s)
+
+ if s0 is not None:
+ u = J.dot(s0)
+ b += np.dot(u, v)
+ c = 0.5 * np.dot(u, u) + np.dot(g, s0)
+ if diag is not None:
+ b += np.dot(s0 * diag, s)
+ c += 0.5 * np.dot(s0 * diag, s0)
+ return a, b, c
+ else:
+ return a, b
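+
+# Worked example (editor's sketch): with J = I and g = [1, 1], moving along
+# s = [1, 0] gives f(t) = 0.5 * t**2 + t, i.e. a = 0.5 and b = 1:
+#
+#     build_quadratic_1d(np.eye(2), np.array([1., 1.]), np.array([1., 0.]))
+#     # -> (0.5, 1.0)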
+
+
+def minimize_quadratic_1d(a, b, lb, ub, c=0):
+ """Minimize a 1-D quadratic function subject to bounds.
+
+ The free term `c` is 0 by default. Bounds must be finite.
+
+ Returns
+ -------
+ t : float
+ Minimum point.
+ y : float
+ Minimum value.
+ """
+ t = [lb, ub]
+ if a != 0:
+ extremum = -0.5 * b / a
+ if lb < extremum < ub:
+ t.append(extremum)
+ t = np.asarray(t)
+ y = t * (a * t + b) + c
+ min_index = np.argmin(y)
+ return t[min_index], y[min_index]
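+
+# Worked example (editor's sketch): minimizing t**2 - 4*t (a = 1, b = -4)
+# picks the unconstrained extremum t = 2 when it lies inside the bounds,
+# and the nearest boundary point otherwise:
+#
+#     minimize_quadratic_1d(1.0, -4.0, 0.0, 3.0)  # -> (2.0, -4.0)
+#     minimize_quadratic_1d(1.0, -4.0, 0.0, 1.0)  # -> (1.0, -3.0)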
+
+
+def evaluate_quadratic(J, g, s, diag=None):
+ """Compute values of a quadratic function arising in least squares.
+
+ The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
+
+ Parameters
+ ----------
+ J : ndarray, sparse matrix or LinearOperator, shape (m, n)
+ Jacobian matrix, affects the quadratic term.
+ g : ndarray, shape (n,)
+ Gradient, defines the linear term.
+ s : ndarray, shape (k, n) or (n,)
+ Array containing steps as rows.
+ diag : ndarray, shape (n,), optional
+ Additional diagonal part, affects the quadratic term.
+ If None, assumed to be 0.
+
+ Returns
+ -------
+ values : ndarray with shape (k,) or float
+ Values of the function. If `s` was 2-D, then an ndarray is
+ returned; otherwise, a float is returned.
+ """
+ if s.ndim == 1:
+ Js = J.dot(s)
+ q = np.dot(Js, Js)
+ if diag is not None:
+ q += np.dot(s * diag, s)
+ else:
+ Js = J.dot(s.T)
+ q = np.sum(Js**2, axis=0)
+ if diag is not None:
+ q += np.sum(diag * s**2, axis=1)
+
+ l = np.dot(s, g)
+
+ return 0.5 * q + l
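+
+# Worked example (editor's sketch): for J = I, g = [1, 0] and s = [1, 1]
+# the value is 0.5 * ||s||**2 + g.T * s = 1 + 1 = 2:
+#
+#     evaluate_quadratic(np.eye(2), np.array([1., 0.]), np.array([1., 1.]))
+#     # -> 2.0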
+
+
+# Utility functions to work with bound constraints.
+
+
+def in_bounds(x, lb, ub):
+ """Check if a point lies within bounds."""
+ return np.all((x >= lb) & (x <= ub))
+
+
+def step_size_to_bound(x, s, lb, ub):
+ """Compute a min_step size required to reach a bound.
+
+ The function computes a positive scalar t, such that x + s * t is on
+ the bound.
+
+ Returns
+ -------
+ step : float
+ Computed step. Non-negative value.
+ hits : ndarray of int with shape of x
+ Each element indicates whether a corresponding variable reaches the
+ bound:
+
+ * 0 - the bound was not hit.
+ * -1 - the lower bound was hit.
+ * 1 - the upper bound was hit.
+ """
+ non_zero = np.nonzero(s)
+ s_non_zero = s[non_zero]
+ steps = np.empty_like(x)
+ steps.fill(np.inf)
+ with np.errstate(over='ignore'):
+ steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
+ (ub - x)[non_zero] / s_non_zero)
+ min_step = np.min(steps)
+ return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
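+
+# Worked example (editor's sketch): stepping from the origin along
+# s = [1, 1] with ub = [0.5, 2], the first variable reaches its upper
+# bound after t = 0.5, so the step is 0.5 and hits = [1, 0]:
+#
+#     step_size_to_bound(np.zeros(2), np.ones(2),
+#                        np.array([-1., -1.]), np.array([0.5, 2.]))
+#     # -> (0.5, array([1, 0]))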
+
+
+def find_active_constraints(x, lb, ub, rtol=1e-10):
+ """Determine which constraints are active in a given point.
+
+ The threshold is computed using `rtol` and the absolute value of the
+ closest bound.
+
+ Returns
+ -------
+ active : ndarray of int with shape of x
+ Each component shows whether the corresponding constraint is active:
+
+ * 0 - a constraint is not active.
+ * -1 - a lower bound is active.
+ * 1 - an upper bound is active.
+ """
+ active = np.zeros_like(x, dtype=int)
+
+ if rtol == 0:
+ active[x <= lb] = -1
+ active[x >= ub] = 1
+ return active
+
+ lower_dist = x - lb
+ upper_dist = ub - x
+
+ lower_threshold = rtol * np.maximum(1, np.abs(lb))
+ upper_threshold = rtol * np.maximum(1, np.abs(ub))
+
+ lower_active = (np.isfinite(lb) &
+ (lower_dist <= np.minimum(upper_dist, lower_threshold)))
+ active[lower_active] = -1
+
+ upper_active = (np.isfinite(ub) &
+ (upper_dist <= np.minimum(lower_dist, upper_threshold)))
+ active[upper_active] = 1
+
+ return active
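+
+# Worked example (editor's sketch): with lb = 0 and ub = 1, points sitting
+# exactly on each kind of bound are flagged accordingly:
+#
+#     find_active_constraints(np.array([0., 0.5, 1.]),
+#                             np.zeros(3), np.ones(3))
+#     # -> array([-1,  0,  1])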
+
+
+def make_strictly_feasible(x, lb, ub, rstep=1e-10):
+ """Shift a point to the interior of a feasible region.
+
+ Each element of the returned vector is at least at a relative distance
+ `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
+ """
+ x_new = x.copy()
+
+ active = find_active_constraints(x, lb, ub, rstep)
+ lower_mask = np.equal(active, -1)
+ upper_mask = np.equal(active, 1)
+
+ if rstep == 0:
+ x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
+ x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
+ else:
+ x_new[lower_mask] = (lb[lower_mask] +
+ rstep * np.maximum(1, np.abs(lb[lower_mask])))
+ x_new[upper_mask] = (ub[upper_mask] -
+ rstep * np.maximum(1, np.abs(ub[upper_mask])))
+
+ tight_bounds = (x_new < lb) | (x_new > ub)
+ x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
+
+ return x_new
+
+
+def CL_scaling_vector(x, g, lb, ub):
+ """Compute Coleman-Li scaling vector and its derivatives.
+
+ Components of a vector v are defined as follows::
+
+ | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
+ v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
+ | 1, otherwise
+
+ According to this definition v[i] >= 0 for all i. It differs from the
+ definition in the paper [1]_ (eq. (2.2)), where the absolute value of v is
+ used. Both definitions are equivalent down the line.
+ Derivatives of v with respect to x take value 1, -1 or 0 depending on a
+ case.
+
+ Returns
+ -------
+ v : ndarray with shape of x
+ Scaling vector.
+ dv : ndarray with shape of x
+ Derivatives of v[i] with respect to x[i], diagonal elements of v's
+ Jacobian.
+
+ References
+ ----------
+ .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
+ Minimization Problems," SIAM Journal on Scientific Computing,
+ Vol. 21, Number 1, pp 1-23, 1999.
+ """
+ v = np.ones_like(x)
+ dv = np.zeros_like(x)
+
+ mask = (g < 0) & np.isfinite(ub)
+ v[mask] = ub[mask] - x[mask]
+ dv[mask] = -1
+
+ mask = (g > 0) & np.isfinite(lb)
+ v[mask] = x[mask] - lb[mask]
+ dv[mask] = 1
+
+ return v, dv
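+
+# Worked example (editor's sketch of the definition above): with lb = 0,
+# ub = 1 and x = 0.5, a negative gradient component selects the distance
+# to the upper bound and a positive one the distance to the lower bound:
+#
+#     CL_scaling_vector(np.array([0.5, 0.5]), np.array([-1., 1.]),
+#                       np.zeros(2), np.ones(2))
+#     # -> (array([0.5, 0.5]), array([-1.,  1.]))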
+
+
+def reflective_transformation(y, lb, ub):
+ """Compute reflective transformation and its gradient."""
+ if in_bounds(y, lb, ub):
+ return y, np.ones_like(y)
+
+ lb_finite = np.isfinite(lb)
+ ub_finite = np.isfinite(ub)
+
+ x = y.copy()
+ g_negative = np.zeros_like(y, dtype=bool)
+
+ mask = lb_finite & ~ub_finite
+ x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
+ g_negative[mask] = y[mask] < lb[mask]
+
+ mask = ~lb_finite & ub_finite
+ x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
+ g_negative[mask] = y[mask] > ub[mask]
+
+ mask = lb_finite & ub_finite
+ d = ub - lb
+ t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
+ x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
+ g_negative[mask] = t > d[mask]
+
+ g = np.ones_like(y)
+ g[g_negative] = -1
+
+ return x, g
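+
+# Worked example (editor's sketch): with lb = 0 and ub = 1, the point
+# y = 1.3 is reflected off the upper bound back to x = 0.7 and the
+# gradient sign flips:
+#
+#     reflective_transformation(np.array([1.3]), np.zeros(1), np.ones(1))
+#     # -> (array([0.7]), array([-1.])), up to floating-point rounding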
+
+
+# Functions to display algorithm's progress.
+
+
+def print_header_nonlinear():
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}"
+ .format("Iteration", "Total nfev", "Cost", "Cost reduction",
+ "Step norm", "Optimality"))
+
+
+def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
+ step_norm, optimality):
+ if cost_reduction is None:
+ cost_reduction = " " * 15
+ else:
+ cost_reduction = f"{cost_reduction:^15.2e}"
+
+ if step_norm is None:
+ step_norm = " " * 15
+ else:
+ step_norm = f"{step_norm:^15.2e}"
+
+ print("{:^15}{:^15}{:^15.4e}{}{}{:^15.2e}"
+ .format(iteration, nfev, cost, cost_reduction,
+ step_norm, optimality))
+
+
+def print_header_linear():
+ print("{:^15}{:^15}{:^15}{:^15}{:^15}"
+ .format("Iteration", "Cost", "Cost reduction", "Step norm",
+ "Optimality"))
+
+
+def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
+ optimality):
+ if cost_reduction is None:
+ cost_reduction = " " * 15
+ else:
+ cost_reduction = f"{cost_reduction:^15.2e}"
+
+ if step_norm is None:
+ step_norm = " " * 15
+ else:
+ step_norm = f"{step_norm:^15.2e}"
+
+ print(f"{iteration:^15}{cost:^15.4e}{cost_reduction}{step_norm}{optimality:^15.2e}")
+
+
+# Simple helper functions.
+
+
+def compute_grad(J, f):
+ """Compute gradient of the least-squares cost function."""
+ if isinstance(J, LinearOperator):
+ return J.rmatvec(f)
+ else:
+ return J.T.dot(f)
+
+
+def compute_jac_scale(J, scale_inv_old=None):
+ """Compute variables scale based on the Jacobian matrix."""
+ if issparse(J):
+ scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
+ else:
+ scale_inv = np.sum(J**2, axis=0)**0.5
+
+ if scale_inv_old is None:
+ scale_inv[scale_inv == 0] = 1
+ else:
+ scale_inv = np.maximum(scale_inv, scale_inv_old)
+
+ return 1 / scale_inv, scale_inv
+
+
+def left_multiplied_operator(J, d):
+ """Return diag(d) J as LinearOperator."""
+ J = aslinearoperator(J)
+
+ def matvec(x):
+ return d * J.matvec(x)
+
+ def matmat(X):
+ return d[:, np.newaxis] * J.matmat(X)
+
+ def rmatvec(x):
+ return J.rmatvec(x.ravel() * d)
+
+ return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+ rmatvec=rmatvec)
+
+
+def right_multiplied_operator(J, d):
+ """Return J diag(d) as LinearOperator."""
+ J = aslinearoperator(J)
+
+ def matvec(x):
+ return J.matvec(np.ravel(x) * d)
+
+ def matmat(X):
+ return J.matmat(X * d[:, np.newaxis])
+
+ def rmatvec(x):
+ return d * J.rmatvec(x)
+
+ return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+ rmatvec=rmatvec)
+
+
+def regularized_lsq_operator(J, diag):
+ """Return a matrix arising in regularized least squares as LinearOperator.
+
+ The matrix is
+ [ J ]
+ [ D ]
+ where D is a diagonal matrix with elements from `diag`.
+ """
+ J = aslinearoperator(J)
+ m, n = J.shape
+
+ def matvec(x):
+ return np.hstack((J.matvec(x), diag * x))
+
+ def rmatvec(x):
+ x1 = x[:m]
+ x2 = x[m:]
+ return J.rmatvec(x1) + diag * x2
+
+ return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
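+
+# Illustrative check (editor's sketch): the operator's matvec agrees with
+# stacking J on top of diag(diag) explicitly:
+#
+#     J = np.arange(6.).reshape(3, 2)
+#     d = np.array([2., 3.])
+#     op = regularized_lsq_operator(J, d)
+#     x = np.array([1., -1.])
+#     assert np.allclose(op.matvec(x), np.hstack((J.dot(x), d * x)))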
+
+
+def right_multiply(J, d, copy=True):
+ """Compute J diag(d).
+
+ If `copy` is False, `J` is modified in place (unless being LinearOperator).
+ """
+ if copy and not isinstance(J, LinearOperator):
+ J = J.copy()
+
+ if issparse(J):
+ J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.
+ elif isinstance(J, LinearOperator):
+ J = right_multiplied_operator(J, d)
+ else:
+ J *= d
+
+ return J
+
+
+def left_multiply(J, d, copy=True):
+ """Compute diag(d) J.
+
+ If `copy` is False, `J` is modified in place (unless being LinearOperator).
+ """
+ if copy and not isinstance(J, LinearOperator):
+ J = J.copy()
+
+ if issparse(J):
+ J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.
+ elif isinstance(J, LinearOperator):
+ J = left_multiplied_operator(J, d)
+ else:
+ J *= d[:, np.newaxis]
+
+ return J
+
+
+def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
+ """Check termination condition for nonlinear least squares."""
+ ftol_satisfied = dF < ftol * F and ratio > 0.25
+ xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
+
+ if ftol_satisfied and xtol_satisfied:
+ return 4
+ elif ftol_satisfied:
+ return 2
+ elif xtol_satisfied:
+ return 3
+ else:
+ return None
+
+
+def scale_for_robust_loss_function(J, f, rho):
+ """Scale Jacobian and residuals for a robust loss function.
+
+ Arrays are modified in place.
+ """
+ J_scale = rho[1] + 2 * rho[2] * f**2
+ J_scale[J_scale < EPS] = EPS
+ J_scale **= 0.5
+
+ f *= rho[1] / J_scale
+
+ return left_multiply(J, J_scale, copy=False), f
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bb5abbe79028afed7b110603a0d5dfd6affae7f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/dogbox.py
@@ -0,0 +1,331 @@
+"""
+Dogleg algorithm with rectangular trust regions for least-squares minimization.
+
+The description of the algorithm can be found in [Voglis]_. The algorithm does
+trust-region iterations, but the shape of trust regions is rectangular as
+opposed to the conventional elliptical shape. The intersection of a trust
+region and the initial feasible region is again a rectangle. Thus, on each
+iteration a bound-constrained quadratic optimization problem is solved.
+
+The quadratic problem is solved by the well-known dogleg approach, where the
+function is minimized along a piecewise-linear "dogleg" path [NumOpt]_,
+Chapter 4. If the Jacobian is not rank-deficient then the function is
+decreasing along this path, and optimization amounts to simply following it
+as long as a point stays within the bounds. A constrained Cauchy step (along
+the anti-gradient) is considered for safety in rank-deficient cases, in which
+the convergence might be slow.
+
+If during the iterations some variable hits its initial bound and the
+corresponding component of the anti-gradient points outside the feasible
+region, then the next dogleg step won't make any progress. Such variables
+satisfy first-order optimality conditions and are excluded before the next
+dogleg step is computed.
+
+The Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for
+dense Jacobian matrices) or by the iterative procedure
+`scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or for a Jacobian
+given as a LinearOperator). The second option allows solving very large
+problems (up to a couple of million residuals on a regular PC), provided the
+Jacobian matrix is sufficiently sparse. Note, however, that dogbox is not
+well suited to problems with a large number of constraints, because variables
+are excluded and re-included on each iteration (the required number of
+function evaluations might be high, or the accuracy of the solution will be
+poor); its large-scale usage is therefore probably limited to unconstrained
+problems.
+
+References
+----------
+.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
+ Approach for Unconstrained and Bound Constrained Nonlinear
+ Optimization", WSEAS International Conference on Applied
+ Mathematics, Corfu, Greece, 2004.
+.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
+"""
+import numpy as np
+from numpy.linalg import lstsq, norm
+
+from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
+from scipy.optimize import OptimizeResult
+
+from .common import (
+ step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
+ build_quadratic_1d, minimize_quadratic_1d, compute_grad,
+ compute_jac_scale, check_termination, scale_for_robust_loss_function,
+ print_header_nonlinear, print_iteration_nonlinear)
+
+
+def lsmr_operator(Jop, d, active_set):
+ """Compute LinearOperator to use in LSMR by dogbox algorithm.
+
+ The `active_set` mask is used to exclude active variables from the
+ computation of matrix-vector products.
+ """
+ m, n = Jop.shape
+
+ def matvec(x):
+ x_free = x.ravel().copy()
+ x_free[active_set] = 0
+ # Use the masked copy so that active variables do not contribute,
+ # matching the behavior documented above and mirrored in rmatvec.
+ return Jop.matvec(x_free * d)
+
+ def rmatvec(x):
+ r = d * Jop.rmatvec(x)
+ r[active_set] = 0
+ return r
+
+ return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
+
+
+def find_intersection(x, tr_bounds, lb, ub):
+ """Find intersection of trust-region bounds and initial bounds.
+
+ Returns
+ -------
+ lb_total, ub_total : ndarray with shape of x
+ Lower and upper bounds of the intersection region.
+ orig_l, orig_u : ndarray of bool with shape of x
+ True means that an original bound is taken as a corresponding bound
+ in the intersection region.
+ tr_l, tr_u : ndarray of bool with shape of x
+ True means that a trust-region bound is taken as a corresponding bound
+ in the intersection region.
+ """
+ lb_centered = lb - x
+ ub_centered = ub - x
+
+ lb_total = np.maximum(lb_centered, -tr_bounds)
+ ub_total = np.minimum(ub_centered, tr_bounds)
+
+ orig_l = np.equal(lb_total, lb_centered)
+ orig_u = np.equal(ub_total, ub_centered)
+
+ tr_l = np.equal(lb_total, -tr_bounds)
+ tr_u = np.equal(ub_total, tr_bounds)
+
+ return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
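+
+# Worked example (editor's sketch): centered at x = 0 with trust-region
+# half-width 1, the tighter of the two bounds wins component-wise:
+#
+#     find_intersection(np.zeros(2), np.ones(2),
+#                       np.array([-0.5, -2.]), np.array([2., 0.5]))
+#     # lb_total = [-0.5, -1.], ub_total = [1., 0.5]; the boolean masks
+#     # record whether the original or the trust-region bound was binding.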
+
+
+def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
+ """Find dogleg step in a rectangular region.
+
+ Returns
+ -------
+ step : ndarray, shape (n,)
+ Computed dogleg step.
+ bound_hits : ndarray of int, shape (n,)
+ Each component shows whether a corresponding variable hits the
+ initial bound after the step is taken:
+ * 0 - a variable doesn't hit the bound.
+ * -1 - lower bound is hit.
+ * 1 - upper bound is hit.
+ tr_hit : bool
+ Whether the step hit the boundary of the trust-region.
+ """
+ lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
+ x, tr_bounds, lb, ub
+ )
+ bound_hits = np.zeros_like(x, dtype=int)
+
+ if in_bounds(newton_step, lb_total, ub_total):
+ return newton_step, bound_hits, False
+
+ to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
+
+ # The classical dogleg algorithm would check whether the Cauchy step fits
+ # into the bounds, and just return its constrained version if not. But in
+ # a rectangular trust region it makes sense to try to improve the
+ # constrained Cauchy step too. Thus, we don't distinguish these two cases.
+
+ cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
+
+ step_diff = newton_step - cauchy_step
+ step_size, hits = step_size_to_bound(cauchy_step, step_diff,
+ lb_total, ub_total)
+ bound_hits[(hits < 0) & orig_l] = -1
+ bound_hits[(hits > 0) & orig_u] = 1
+ tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
+
+ return cauchy_step + step_size * step_diff, bound_hits, tr_hit
+
+
+def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+ loss_function, tr_solver, tr_options, verbose):
+ f = f0
+ f_true = f.copy()
+ nfev = 1
+
+ J = J0
+ njev = 1
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ cost = 0.5 * np.sum(rho[0])
+ J, f = scale_for_robust_loss_function(J, f, rho)
+ else:
+ cost = 0.5 * np.dot(f, f)
+
+ g = compute_grad(J, f)
+
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J)
+ else:
+ scale, scale_inv = x_scale, 1 / x_scale
+
+ Delta = norm(x0 * scale_inv, ord=np.inf)
+ if Delta == 0:
+ Delta = 1.0
+
+ on_bound = np.zeros_like(x0, dtype=int)
+ on_bound[np.equal(x0, lb)] = -1
+ on_bound[np.equal(x0, ub)] = 1
+
+ x = x0
+ step = np.empty_like(x0)
+
+ if max_nfev is None:
+ max_nfev = x0.size * 100
+
+ termination_status = None
+ iteration = 0
+ step_norm = None
+ actual_reduction = None
+
+ if verbose == 2:
+ print_header_nonlinear()
+
+ while True:
+ active_set = on_bound * g < 0
+ free_set = ~active_set
+
+ g_free = g[free_set]
+ g_full = g.copy()
+ g[active_set] = 0
+
+ g_norm = norm(g, ord=np.inf)
+ if g_norm < gtol:
+ termination_status = 1
+
+ if verbose == 2:
+ print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+ step_norm, g_norm)
+
+ if termination_status is not None or nfev == max_nfev:
+ break
+
+ x_free = x[free_set]
+ lb_free = lb[free_set]
+ ub_free = ub[free_set]
+ scale_free = scale[free_set]
+
+ # Compute the (Gauss-)Newton step and build a quadratic model for the
+ # Cauchy step.
+ if tr_solver == 'exact':
+ J_free = J[:, free_set]
+ newton_step = lstsq(J_free, -f, rcond=-1)[0]
+
+ # Coefficients for the quadratic model along the anti-gradient.
+ a, b = build_quadratic_1d(J_free, g_free, -g_free)
+ elif tr_solver == 'lsmr':
+ Jop = aslinearoperator(J)
+
+ # We compute the lsmr step in scaled variables and then
+ # transform back to normal variables. If lsmr gave the exact lsq
+ # solution, this would be equivalent to not doing any
+ # transformations, but from experience it's better this way.
+
+ # We pass active_set to make computations as if we selected
+ # the free subset of J columns, but without actually doing any
+ # slicing, which is expensive for sparse matrices and impossible
+ # for LinearOperator.
+
+ lsmr_op = lsmr_operator(Jop, scale, active_set)
+ newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
+ newton_step *= scale_free
+
+ # Components of g for active variables were zeroed, so this call
+ # is correct and equivalent to using J_free and g_free.
+ a, b = build_quadratic_1d(Jop, g, -g)
+
+ actual_reduction = -1.0
+ while actual_reduction <= 0 and nfev < max_nfev:
+ tr_bounds = Delta * scale_free
+
+ step_free, on_bound_free, tr_hit = dogleg_step(
+ x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
+
+ step.fill(0.0)
+ step[free_set] = step_free
+
+ if tr_solver == 'exact':
+ predicted_reduction = -evaluate_quadratic(J_free, g_free,
+ step_free)
+ elif tr_solver == 'lsmr':
+ predicted_reduction = -evaluate_quadratic(Jop, g, step)
+
+ # gh11403: ensure that the solution is fully within bounds.
+ x_new = np.clip(x + step, lb, ub)
+
+ f_new = fun(x_new)
+ nfev += 1
+
+ step_h_norm = norm(step * scale_inv, ord=np.inf)
+
+ if not np.all(np.isfinite(f_new)):
+ Delta = 0.25 * step_h_norm
+ continue
+
+ # Usual trust-region step quality estimation.
+ if loss_function is not None:
+ cost_new = loss_function(f_new, cost_only=True)
+ else:
+ cost_new = 0.5 * np.dot(f_new, f_new)
+ actual_reduction = cost - cost_new
+
+ Delta, ratio = update_tr_radius(
+ Delta, actual_reduction, predicted_reduction,
+ step_h_norm, tr_hit
+ )
+
+ step_norm = norm(step)
+ termination_status = check_termination(
+ actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+
+ if termination_status is not None:
+ break
+
+ if actual_reduction > 0:
+ on_bound[free_set] = on_bound_free
+
+ x = x_new
+ # Set variables exactly at the boundary.
+ mask = on_bound == -1
+ x[mask] = lb[mask]
+ mask = on_bound == 1
+ x[mask] = ub[mask]
+
+ f = f_new
+ f_true = f.copy()
+
+ cost = cost_new
+
+ J = jac(x, f)
+ njev += 1
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ J, f = scale_for_robust_loss_function(J, f, rho)
+
+ g = compute_grad(J, f)
+
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J, scale_inv)
+ else:
+ step_norm = 0
+ actual_reduction = 0
+
+ iteration += 1
+
+ if termination_status is None:
+ termination_status = 0
+
+ return OptimizeResult(
+ x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
+ active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fe54821d9e631ec5a1c8da8b3057ad997af8de00
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py
new file mode 100644
index 0000000000000000000000000000000000000000..db8bb31c7b1530fd48ac7ae58cf501e2b0081a91
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/least_squares.py
@@ -0,0 +1,967 @@
+"""Generic interface for least-squares minimization."""
+from warnings import warn
+
+import numpy as np
+from numpy.linalg import norm
+
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator
+from scipy.optimize import _minpack, OptimizeResult
+from scipy.optimize._numdiff import approx_derivative, group_columns
+from scipy.optimize._minimize import Bounds
+
+from .trf import trf
+from .dogbox import dogbox
+from .common import EPS, in_bounds, make_strictly_feasible
+
+
+TERMINATION_MESSAGES = {
+ -1: "Improper input parameters status returned from `leastsq`",
+ 0: "The maximum number of function evaluations is exceeded.",
+ 1: "`gtol` termination condition is satisfied.",
+ 2: "`ftol` termination condition is satisfied.",
+ 3: "`xtol` termination condition is satisfied.",
+ 4: "Both `ftol` and `xtol` termination conditions are satisfied."
+}
+
+
+FROM_MINPACK_TO_COMMON = {
+ 0: -1, # Improper input parameters from MINPACK.
+ 1: 2,
+ 2: 3,
+ 3: 4,
+ 4: 1,
+ 5: 0
+ # There are 6, 7, 8 for too small tolerance parameters,
+ # but we guard against it by checking ftol, xtol, gtol beforehand.
+}
+
+
+def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
+ n = x0.size
+
+ if diff_step is None:
+ epsfcn = EPS
+ else:
+ epsfcn = diff_step**2
+
+ # Compute MINPACK's `diag`, which is the inverse of our `x_scale`;
+ # ``x_scale='jac'`` corresponds to ``diag=None``.
+ if isinstance(x_scale, str) and x_scale == 'jac':
+ diag = None
+ else:
+ diag = 1 / x_scale
+
+ full_output = True
+ col_deriv = False
+ factor = 100.0
+
+ if jac is None:
+ if max_nfev is None:
+ # n squared to account for Jacobian evaluations.
+ max_nfev = 100 * n * (n + 1)
+ x, info, status = _minpack._lmdif(
+ fun, x0, (), full_output, ftol, xtol, gtol,
+ max_nfev, epsfcn, factor, diag)
+ else:
+ if max_nfev is None:
+ max_nfev = 100 * n
+ x, info, status = _minpack._lmder(
+ fun, jac, x0, (), full_output, col_deriv,
+ ftol, xtol, gtol, max_nfev, factor, diag)
+
+ f = info['fvec']
+
+ if callable(jac):
+ J = jac(x)
+ else:
+ J = np.atleast_2d(approx_derivative(fun, x))
+
+ cost = 0.5 * np.dot(f, f)
+ g = J.T.dot(f)
+ g_norm = norm(g, ord=np.inf)
+
+ nfev = info['nfev']
+ njev = info.get('njev', None)
+
+ status = FROM_MINPACK_TO_COMMON[status]
+ active_mask = np.zeros_like(x0, dtype=int)
+
+ return OptimizeResult(
+ x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
+ active_mask=active_mask, nfev=nfev, njev=njev, status=status)
+
+
+def prepare_bounds(bounds, n):
+ lb, ub = (np.asarray(b, dtype=float) for b in bounds)
+ if lb.ndim == 0:
+ lb = np.resize(lb, n)
+
+ if ub.ndim == 0:
+ ub = np.resize(ub, n)
+
+ return lb, ub
+
+
+def check_tolerance(ftol, xtol, gtol, method):
+ def check(tol, name):
+ if tol is None:
+ tol = 0
+ elif tol < EPS:
+ warn(f"Setting `{name}` below the machine epsilon ({EPS:.2e}) effectively "
+ f"disables the corresponding termination condition.",
+ stacklevel=3)
+ return tol
+
+ ftol = check(ftol, "ftol")
+ xtol = check(xtol, "xtol")
+ gtol = check(gtol, "gtol")
+
+ if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
+ raise ValueError("All tolerances must be higher than machine epsilon "
+ f"({EPS:.2e}) for method 'lm'.")
+ elif ftol < EPS and xtol < EPS and gtol < EPS:
+ raise ValueError("At least one of the tolerances must be higher than "
+ f"machine epsilon ({EPS:.2e}).")
+
+ return ftol, xtol, gtol
+
+
+def check_x_scale(x_scale, x0):
+ if isinstance(x_scale, str) and x_scale == 'jac':
+ return x_scale
+
+ try:
+ x_scale = np.asarray(x_scale, dtype=float)
+ valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
+ except (ValueError, TypeError):
+ valid = False
+
+ if not valid:
+ raise ValueError("`x_scale` must be 'jac' or array_like with "
+ "positive numbers.")
+
+ if x_scale.ndim == 0:
+ x_scale = np.resize(x_scale, x0.shape)
+
+ if x_scale.shape != x0.shape:
+ raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
+
+ return x_scale
+
+
+def check_jac_sparsity(jac_sparsity, m, n):
+ if jac_sparsity is None:
+ return None
+
+ if not issparse(jac_sparsity):
+ jac_sparsity = np.atleast_2d(jac_sparsity)
+
+ if jac_sparsity.shape != (m, n):
+ raise ValueError("`jac_sparsity` has wrong shape.")
+
+ return jac_sparsity, group_columns(jac_sparsity)
+
+
+# Loss functions.
+
+
+def huber(z, rho, cost_only):
+ mask = z <= 1
+ rho[0, mask] = z[mask]
+ rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
+ if cost_only:
+ return
+ rho[1, mask] = 1
+ rho[1, ~mask] = z[~mask]**-0.5
+ rho[2, mask] = 0
+ rho[2, ~mask] = -0.5 * z[~mask]**-1.5
+
+
+def soft_l1(z, rho, cost_only):
+ t = 1 + z
+ rho[0] = 2 * (t**0.5 - 1)
+ if cost_only:
+ return
+ rho[1] = t**-0.5
+ rho[2] = -0.5 * t**-1.5
+
+
+def cauchy(z, rho, cost_only):
+ rho[0] = np.log1p(z)
+ if cost_only:
+ return
+ t = 1 + z
+ rho[1] = 1 / t
+ rho[2] = -1 / t**2
+
+
+def arctan(z, rho, cost_only):
+ rho[0] = np.arctan(z)
+ if cost_only:
+ return
+ t = 1 + z**2
+ rho[1] = 1 / t
+ rho[2] = -2 * z / t**2
+
+
+IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
+ cauchy=cauchy, arctan=arctan)
+
+
+def construct_loss_function(m, loss, f_scale):
+ if loss == 'linear':
+ return None
+
+ if not callable(loss):
+ loss = IMPLEMENTED_LOSSES[loss]
+ rho = np.empty((3, m))
+
+ def loss_function(f, cost_only=False):
+ z = (f / f_scale) ** 2
+ loss(z, rho, cost_only=cost_only)
+ if cost_only:
+ return 0.5 * f_scale ** 2 * np.sum(rho[0])
+ rho[0] *= f_scale ** 2
+ rho[2] /= f_scale ** 2
+ return rho
+ else:
+ def loss_function(f, cost_only=False):
+ z = (f / f_scale) ** 2
+ rho = loss(z)
+ if cost_only:
+ return 0.5 * f_scale ** 2 * np.sum(rho[0])
+ rho[0] *= f_scale ** 2
+ rho[2] /= f_scale ** 2
+ return rho
+
+ return loss_function
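+
+# Usage sketch (editor's addition): for the built-in 'huber' loss with
+# f_scale = 1, residuals with z = f**2 <= 1 keep the quadratic cost while
+# larger ones are down-weighted; cost_only=True returns just the scalar:
+#
+#     loss = construct_loss_function(3, 'huber', 1.0)
+#     loss(np.array([0.1, 0.5, 3.0]), cost_only=True)
+#     # -> 0.5 * (0.01 + 0.25 + (2*3 - 1)) = 2.63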
+
+
+def least_squares(
+ fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
+ ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
+ f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
+ jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
+ """Solve a nonlinear least-squares problem with bounds on the variables.
+
+ Given the residuals f(x) (an m-D real function of n real
+ variables) and the loss function rho(s) (a scalar function), `least_squares`
+ finds a local minimum of the cost function F(x)::
+
+ minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
+ subject to lb <= x <= ub
+
+ The purpose of the loss function rho(s) is to reduce the influence of
+ outliers on the solution.
+
+ Parameters
+ ----------
+ fun : callable
+ Function which computes the vector of residuals, with the signature
+ ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
+ respect to its first argument. The argument ``x`` passed to this
+ function is an ndarray of shape (n,) (never a scalar, even for n=1).
+ It must allocate and return a 1-D array_like of shape (m,) or a scalar.
+ If the argument ``x`` is complex or the function ``fun`` returns
+ complex residuals, it must be wrapped in a real function of real
+ arguments, as shown at the end of the Examples section.
+ x0 : array_like with shape (n,) or float
+ Initial guess on independent variables. If float, it will be treated
+ as a 1-D array with one element. When `method` is 'trf', the initial
+ guess might be slightly adjusted to lie sufficiently within the given
+ `bounds`.
+ jac : {'2-point', '3-point', 'cs', callable}, optional
+ Method of computing the Jacobian matrix (an m-by-n matrix, where
+ element (i, j) is the partial derivative of f[i] with respect to
+ x[j]). The keywords select a finite difference scheme for numerical
+ estimation. The scheme '3-point' is more accurate, but requires
+ twice as many operations as '2-point' (default). The scheme 'cs'
+ uses complex steps, and while potentially the most accurate, it is
+ applicable only when `fun` correctly handles complex inputs and
+ can be analytically continued to the complex plane. Method 'lm'
+ always uses the '2-point' scheme. If callable, it is used as
+ ``jac(x, *args, **kwargs)`` and should return a good approximation
+ (or the exact value) for the Jacobian as an array_like (np.atleast_2d
+ is applied), a sparse matrix (csr_matrix preferred for performance) or
+ a `scipy.sparse.linalg.LinearOperator`.
+ bounds : 2-tuple of array_like or `Bounds`, optional
+ There are two ways to specify bounds:
+
+ 1. Instance of `Bounds` class
+ 2. Lower and upper bounds on independent variables. Defaults to no
+ bounds. Each array must match the size of `x0` or be a scalar,
+ in the latter case a bound will be the same for all variables.
+ Use ``np.inf`` with an appropriate sign to disable bounds on all
+ or some variables.
+ method : {'trf', 'dogbox', 'lm'}, optional
+ Algorithm to perform minimization.
+
+ * 'trf' : Trust Region Reflective algorithm, particularly suitable
+ for large sparse problems with bounds. Generally robust method.
+ * 'dogbox' : dogleg algorithm with rectangular trust regions,
+ typical use case is small problems with bounds. Not recommended
+ for problems with rank-deficient Jacobian.
+ * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
+ Doesn't handle bounds and sparse Jacobians. Usually the most
+ efficient method for small unconstrained problems.
+
+ Default is 'trf'. See Notes for more information.
+ ftol : float or None, optional
+ Tolerance for termination by the change of the cost function. Default
+ is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
+ and there was an adequate agreement between a local quadratic model and
+ the true model in the last step.
+
+ If None and 'method' is not 'lm', the termination by this condition is
+ disabled. If 'method' is 'lm', this tolerance must be higher than
+ machine epsilon.
+ xtol : float or None, optional
+ Tolerance for termination by the change of the independent variables.
+ Default is 1e-8. The exact condition depends on the `method` used:
+
+ * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
+ * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
+ a trust-region radius and ``xs`` is the value of ``x``
+ scaled according to `x_scale` parameter (see below).
+
+ If None and 'method' is not 'lm', the termination by this condition is
+ disabled. If 'method' is 'lm', this tolerance must be higher than
+ machine epsilon.
+ gtol : float or None, optional
+ Tolerance for termination by the norm of the gradient. Default is 1e-8.
+ The exact condition depends on the `method` used:
+
+ * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
+ ``g_scaled`` is the value of the gradient scaled to account for
+ the presence of the bounds [STIR]_.
+ * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
+ ``g_free`` is the gradient with respect to the variables which
+ are not in the optimal state on the boundary.
+ * For 'lm' : the maximum absolute value of the cosine of angles
+ between columns of the Jacobian and the residual vector is less
+ than `gtol`, or the residual vector is zero.
+
+ If None and 'method' is not 'lm', the termination by this condition is
+ disabled. If 'method' is 'lm', this tolerance must be higher than
+ machine epsilon.
+ x_scale : array_like or 'jac', optional
+ Characteristic scale of each variable. Setting `x_scale` is equivalent
+ to reformulating the problem in scaled variables ``xs = x / x_scale``.
+ An alternative view is that the size of a trust region along the jth
+ dimension is proportional to ``x_scale[j]``. Improved convergence may
+ be achieved by setting `x_scale` such that a step of a given size
+ along any of the scaled variables has a similar effect on the cost
+ function. If set to 'jac', the scale is iteratively updated using the
+ inverse norms of the columns of the Jacobian matrix (as described in
+ [JJMore]_).
+ loss : str or callable, optional
+ Determines the loss function. The following keyword values are allowed:
+
+ * 'linear' (default) : ``rho(z) = z``. Gives a standard
+ least-squares problem.
+ * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
+ approximation of l1 (absolute value) loss. Usually a good
+ choice for robust least squares.
+ * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
+ similarly to 'soft_l1'.
+ * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the influence
+ of outliers, but may cause difficulties in the optimization process.
+ * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
+ a single residual, has properties similar to 'cauchy'.
+
+ If callable, it must take a 1-D ndarray ``z=f**2`` and return an
+ array_like with shape (3, m) where row 0 contains function values,
+ row 1 contains first derivatives and row 2 contains second
+ derivatives. Method 'lm' supports only 'linear' loss.
+ f_scale : float, optional
+ Value of soft margin between inlier and outlier residuals, default
+ is 1.0. The loss function is evaluated as follows
+ ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
+ and ``rho`` is determined by `loss` parameter. This parameter has
+ no effect with ``loss='linear'``, but for other `loss` values it is
+ of crucial importance.
+ max_nfev : None or int, optional
+ Maximum number of function evaluations before the termination.
+ If None (default), the value is chosen automatically:
+
+ * For 'trf' and 'dogbox' : 100 * n.
+ * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
+ otherwise (because 'lm' counts function calls in Jacobian
+ estimation).
+
+ diff_step : None or array_like, optional
+ Determines the relative step size for the finite difference
+ approximation of the Jacobian. The actual step is computed as
+ ``x * diff_step``. If None (default), then `diff_step` is taken to be
+ a conventional "optimal" power of machine epsilon for the finite
+ difference scheme used [NR]_.
+ tr_solver : {None, 'exact', 'lsmr'}, optional
+ Method for solving trust-region subproblems, relevant only for 'trf'
+ and 'dogbox' methods.
+
+ * 'exact' is suitable for not very large problems with dense
+ Jacobian matrices. The computational complexity per iteration is
+ comparable to a singular value decomposition of the Jacobian
+ matrix.
+ * 'lsmr' is suitable for problems with sparse and large Jacobian
+ matrices. It uses the iterative procedure
+ `scipy.sparse.linalg.lsmr` for finding a solution of a linear
+ least-squares problem and only requires matrix-vector product
+ evaluations.
+
+ If None (default), the solver is chosen based on the type of Jacobian
+ returned on the first iteration.
+ tr_options : dict, optional
+ Keyword options passed to trust-region solver.
+
+ * ``tr_solver='exact'``: `tr_options` are ignored.
+ * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
+ Additionally, ``method='trf'`` supports a 'regularize' option
+ (bool, default is True), which adds a regularization term to the
+ normal equation; this improves convergence if the Jacobian is
+ rank-deficient [Byrd]_ (eq. 3.4).
+
+ jac_sparsity : {None, array_like, sparse matrix}, optional
+ Defines the sparsity structure of the Jacobian matrix for finite
+ difference estimation, its shape must be (m, n). If the Jacobian has
+ only a few non-zero elements in *each* row, providing the sparsity
+ structure will greatly speed up the computations [Curtis]_. A zero
+ entry means that a corresponding element in the Jacobian is identically
+ zero. If provided, forces the use of 'lsmr' trust-region solver.
+ If None (default), then dense differencing will be used. Has no effect
+ for 'lm' method.
+ verbose : {0, 1, 2}, optional
+ Level of algorithm's verbosity:
+
+ * 0 (default) : work silently.
+ * 1 : display a termination report.
+ * 2 : display progress during iterations (not supported by 'lm'
+ method).
+
+ args, kwargs : tuple and dict, optional
+ Additional arguments passed to `fun` and `jac`. Both empty by default.
+ The calling signature is ``fun(x, *args, **kwargs)`` and the same for
+ `jac`.
+
+ Returns
+ -------
+ result : OptimizeResult
+ `OptimizeResult` with the following fields defined:
+
+ x : ndarray, shape (n,)
+ Solution found.
+ cost : float
+ Value of the cost function at the solution.
+ fun : ndarray, shape (m,)
+ Vector of residuals at the solution.
+ jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
+ Modified Jacobian matrix at the solution, in the sense that J^T J
+ is a Gauss-Newton approximation of the Hessian of the cost function.
+ The type is the same as the one used by the algorithm.
+ grad : ndarray, shape (n,)
+ Gradient of the cost function at the solution.
+ optimality : float
+ First-order optimality measure. In unconstrained problems, it is
+ always the uniform norm of the gradient. In constrained problems,
+ it is the quantity which was compared with `gtol` during iterations.
+ active_mask : ndarray of int, shape (n,)
+ Each component shows whether a corresponding constraint is active
+ (that is, whether a variable is at the bound):
+
+ * 0 : a constraint is not active.
+ * -1 : a lower bound is active.
+ * 1 : an upper bound is active.
+
+ Might be somewhat arbitrary for 'trf' method as it generates a
+ sequence of strictly feasible iterates and `active_mask` is
+ determined within a tolerance threshold.
+ nfev : int
+ Number of function evaluations done. Methods 'trf' and 'dogbox' do
+ not count function calls for numerical Jacobian approximation, as
+ opposed to 'lm' method.
+ njev : int or None
+ Number of Jacobian evaluations done. If numerical Jacobian
+ approximation is used in 'lm' method, it is set to None.
+ status : int
+ The reason for algorithm termination:
+
+ * -1 : improper input parameters status returned by MINPACK.
+ * 0 : the maximum number of function evaluations is exceeded.
+ * 1 : `gtol` termination condition is satisfied.
+ * 2 : `ftol` termination condition is satisfied.
+ * 3 : `xtol` termination condition is satisfied.
+ * 4 : Both `ftol` and `xtol` termination conditions are satisfied.
+
+ message : str
+ Verbal description of the termination reason.
+ success : bool
+ True if one of the convergence criteria is satisfied (`status` > 0).
+
+ See Also
+ --------
+ leastsq : A legacy wrapper for the MINPACK implementation of the
+ Levenberg-Marquardt algorithm.
+ curve_fit : Least-squares minimization applied to a curve-fitting problem.
+
+ Notes
+ -----
+ Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
+ algorithms implemented in MINPACK (lmder, lmdif). It runs the
+ Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
+ The implementation is based on paper [JJMore]_, it is very robust and
+ efficient with a lot of smart tricks. It should be your first choice
+ for unconstrained problems. Note that it doesn't support bounds. Also,
+ it doesn't work when m < n.
+
+ Method 'trf' (Trust Region Reflective) is motivated by the process of
+ solving a system of equations, which constitute the first-order optimality
+ condition for a bound-constrained minimization problem as formulated in
+ [STIR]_. The algorithm iteratively solves trust-region subproblems
+ augmented by a special diagonal quadratic term and with trust-region shape
+ determined by the distance from the bounds and the direction of the
+ gradient. These enhancements help to avoid steps directly into the bounds
+ and to explore the whole space of variables efficiently. To further improve
+ convergence, the algorithm considers search directions reflected from the
+ bounds. To obey theoretical requirements, the algorithm keeps iterates
+ strictly feasible. With dense Jacobians trust-region subproblems are
+ solved by an exact method very similar to the one described in [JJMore]_
+ (and implemented in MINPACK). The difference from the MINPACK
+ implementation is that a singular value decomposition of a Jacobian
+ matrix is done once per iteration, instead of a QR decomposition and series
+ of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
+ approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
+ The subspace is spanned by a scaled gradient and an approximate
+ Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
+ constraints are imposed the algorithm is very similar to MINPACK and has
+ generally comparable performance. The algorithm is quite robust in both
+ unbounded and bounded problems, which is why it is chosen as the default
+ algorithm.
+
+ Method 'dogbox' operates in a trust-region framework, but considers
+ rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
+ The intersection of a current trust region and initial bounds is again
+ rectangular, so on each iteration a quadratic minimization problem subject
+ to bound constraints is solved approximately by Powell's dogleg method
+ [NumOpt]_. The required Gauss-Newton step can be computed exactly for
+ dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
+ sparse Jacobians. The algorithm is likely to exhibit slow convergence when
+ the rank of Jacobian is less than the number of variables. The algorithm
+ often outperforms 'trf' in bounded problems with a small number of
+ variables.
+
+ Robust loss functions are implemented as described in [BA]_. The idea
+ is to modify a residual vector and a Jacobian matrix on each iteration
+ such that computed gradient and Gauss-Newton Hessian approximation match
+ the true gradient and Hessian approximation of the cost function. Then
+ the algorithm proceeds in a normal way, i.e., robust loss functions are
+ implemented as a simple wrapper over standard least-squares algorithms.
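+
+ As a sketch of this construction (see ``scale_for_robust_loss_function``
+ in the implementation): with ``z = f**2`` and ``rho1, rho2`` denoting the
+ first two derivatives of rho, each residual is scaled as
+ ``f_i * rho1_i / sqrt(rho1_i + 2 * rho2_i * f_i**2)`` and each Jacobian
+ row by ``sqrt(rho1_i + 2 * rho2_i * f_i**2)`` (with the scale clipped
+ away from zero), which makes ``J.T @ f`` the true gradient and
+ ``J.T @ J`` the Gauss-Newton approximation of the true Hessian.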
+
+ .. versionadded:: 0.17.0
+
+ References
+ ----------
+ .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
+ Minimization Problems," SIAM Journal on Scientific Computing,
+ Vol. 21, Number 1, pp 1-23, 1999.
+ .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
+ Computing. 3rd edition", Sec. 5.7.
+ .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
+ solution of the trust region problem by minimization over
+ two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
+ 1988.
+ .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+ sparse Jacobian matrices", Journal of the Institute of
+ Mathematics and its Applications, 13, pp. 117-120, 1974.
+ .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
+ Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+ .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
+ Dogleg Approach for Unconstrained and Bound Constrained
+ Nonlinear Optimization", WSEAS International Conference on
+ Applied Mathematics, Corfu, Greece, 2004.
+ .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
+ 2nd edition", Chapter 4.
+ .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
+ Proceedings of the International Workshop on Vision Algorithms:
+ Theory and Practice, pp. 298-372, 1999.
+
+ Examples
+ --------
+ In this example we find a minimum of the Rosenbrock function without bounds
+ on independent variables.
+
+ >>> import numpy as np
+ >>> def fun_rosenbrock(x):
+ ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
+
+ Notice that we only provide the vector of the residuals. The algorithm
+ constructs the cost function as a sum of squares of the residuals, which
+ gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
+
+ >>> from scipy.optimize import least_squares
+ >>> x0_rosenbrock = np.array([2, 2])
+ >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
+ >>> res_1.x
+ array([ 1., 1.])
+ >>> res_1.cost
+ 9.8669242910846867e-30
+ >>> res_1.optimality
+ 8.8928864934219529e-14
+
+ We now constrain the variables, in such a way that the previous solution
+ becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, while
+ ``x[0]`` is left unconstrained. To this end, we specify the `bounds` parameter
+ to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
+
+ We also provide the analytic Jacobian:
+
+ >>> def jac_rosenbrock(x):
+ ... return np.array([
+ ... [-20 * x[0], 10],
+ ... [-1, 0]])
+
+ Putting this all together, we see that the new solution lies on the bound:
+
+ >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
+ ... bounds=([-np.inf, 1.5], np.inf))
+ >>> res_2.x
+ array([ 1.22437075, 1.5 ])
+ >>> res_2.cost
+ 0.025213093946805685
+ >>> res_2.optimality
+ 1.5885401433157753e-07
+
+ Now we solve a system of equations (i.e., the cost function should be zero
+ at a minimum) for a Broyden tridiagonal vector-valued function of 100000
+ variables:
+
+ >>> def fun_broyden(x):
+ ... f = (3 - x) * x + 1
+ ... f[1:] -= x[:-1]
+ ... f[:-1] -= 2 * x[1:]
+ ... return f
+
+ The corresponding Jacobian matrix is sparse. We tell the algorithm to
+ estimate it by finite differences and provide the sparsity structure of
+ the Jacobian to significantly speed up this process.
+
+ >>> from scipy.sparse import lil_matrix
+ >>> def sparsity_broyden(n):
+ ... sparsity = lil_matrix((n, n), dtype=int)
+ ... i = np.arange(n)
+ ... sparsity[i, i] = 1
+ ... i = np.arange(1, n)
+ ... sparsity[i, i - 1] = 1
+ ... i = np.arange(n - 1)
+ ... sparsity[i, i + 1] = 1
+ ... return sparsity
+ ...
+ >>> n = 100000
+ >>> x0_broyden = -np.ones(n)
+ ...
+ >>> res_3 = least_squares(fun_broyden, x0_broyden,
+ ... jac_sparsity=sparsity_broyden(n))
+ >>> res_3.cost
+ 4.5687069299604613e-23
+ >>> res_3.optimality
+ 1.1650454296851518e-11
+
+ Let's also solve a curve fitting problem using a robust loss function to
+ take care of outliers in the data. Define the model function as
+ ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
+ observation and a, b, c are parameters to estimate.
+
+ First, define the function which generates the data with noise and
+ outliers, define the model parameters, and generate data:
+
+ >>> from numpy.random import default_rng
+ >>> rng = default_rng()
+ >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
+ ... rng = default_rng(seed)
+ ...
+ ... y = a + b * np.exp(t * c)
+ ...
+ ... error = noise * rng.standard_normal(t.size)
+ ... outliers = rng.integers(0, t.size, n_outliers)
+ ... error[outliers] *= 10
+ ...
+ ... return y + error
+ ...
+ >>> a = 0.5
+ >>> b = 2.0
+ >>> c = -1
+ >>> t_min = 0
+ >>> t_max = 10
+ >>> n_points = 15
+ ...
+ >>> t_train = np.linspace(t_min, t_max, n_points)
+ >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
+
+ Define function for computing residuals and initial estimate of
+ parameters.
+
+ >>> def fun(x, t, y):
+ ... return x[0] + x[1] * np.exp(x[2] * t) - y
+ ...
+ >>> x0 = np.array([1.0, 1.0, 0.0])
+
+ Compute a standard least-squares solution:
+
+ >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
+
+ Now compute two solutions with two different robust loss functions. The
+ parameter `f_scale` is set to 0.1, meaning that inlier residuals should
+ not significantly exceed 0.1 (the noise level used).
+
+ >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
+ ... args=(t_train, y_train))
+ >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
+ ... args=(t_train, y_train))
+
+ And, finally, plot all the curves. We see that by selecting an appropriate
+ `loss` we can get estimates close to optimal even in the presence of
+ strong outliers. But keep in mind that generally it is recommended to try
+ 'soft_l1' or 'huber' losses first (if at all necessary) as the other two
+ options may cause difficulties in the optimization process.
+
+ >>> t_test = np.linspace(t_min, t_max, n_points * 10)
+ >>> y_true = gen_data(t_test, a, b, c)
+ >>> y_lsq = gen_data(t_test, *res_lsq.x)
+ >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
+ >>> y_log = gen_data(t_test, *res_log.x)
+ ...
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(t_train, y_train, 'o')
+ >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
+ >>> plt.plot(t_test, y_lsq, label='linear loss')
+ >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
+ >>> plt.plot(t_test, y_log, label='cauchy loss')
+ >>> plt.xlabel("t")
+ >>> plt.ylabel("y")
+ >>> plt.legend()
+ >>> plt.show()
+
+ In the next example, we show how complex-valued residual functions of
+ complex variables can be optimized with ``least_squares()``. Consider the
+ following function:
+
+ >>> def f(z):
+ ... return z - (0.5 + 0.5j)
+
+ We wrap it into a function of real variables that returns real residuals
+ by simply handling the real and imaginary parts as independent variables:
+
+ >>> def f_wrap(x):
+ ... fx = f(x[0] + 1j*x[1])
+ ... return np.array([fx.real, fx.imag])
+
+ Thus, instead of the original m-D complex function of n complex
+ variables we optimize a 2m-D real function of 2n real variables:
+
+ >>> from scipy.optimize import least_squares
+ >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
+ >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
+ >>> z
+ (0.49999999999925893+0.49999999999925893j)
+
+ """
+ if method not in ['trf', 'dogbox', 'lm']:
+ raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
+
+ if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
+ raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
+ "callable.")
+
+ if tr_solver not in [None, 'exact', 'lsmr']:
+ raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
+
+ if loss not in IMPLEMENTED_LOSSES and not callable(loss):
+ raise ValueError("`loss` must be one of {} or a callable."
+ .format(IMPLEMENTED_LOSSES.keys()))
+
+ if method == 'lm' and loss != 'linear':
+ raise ValueError("method='lm' supports only 'linear' loss function.")
+
+ if verbose not in [0, 1, 2]:
+ raise ValueError("`verbose` must be in [0, 1, 2].")
+
+ if max_nfev is not None and max_nfev <= 0:
+ raise ValueError("`max_nfev` must be None or positive integer.")
+
+ if np.iscomplexobj(x0):
+ raise ValueError("`x0` must be real.")
+
+ x0 = np.atleast_1d(x0).astype(float)
+
+ if x0.ndim > 1:
+ raise ValueError("`x0` must have at most 1 dimension.")
+
+ if isinstance(bounds, Bounds):
+ lb, ub = bounds.lb, bounds.ub
+ bounds = (lb, ub)
+ else:
+ if len(bounds) == 2:
+ lb, ub = prepare_bounds(bounds, x0.shape[0])
+ else:
+ raise ValueError("`bounds` must contain 2 elements.")
+
+ if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
+ raise ValueError("Method 'lm' doesn't support bounds.")
+
+ if lb.shape != x0.shape or ub.shape != x0.shape:
+ raise ValueError("Inconsistent shapes between bounds and `x0`.")
+
+ if np.any(lb >= ub):
+ raise ValueError("Each lower bound must be strictly less than each "
+ "upper bound.")
+
+ if not in_bounds(x0, lb, ub):
+ raise ValueError("`x0` is infeasible.")
+
+ x_scale = check_x_scale(x_scale, x0)
+
+ ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
+
+ if method == 'trf':
+ x0 = make_strictly_feasible(x0, lb, ub)
+
+ def fun_wrapped(x):
+ return np.atleast_1d(fun(x, *args, **kwargs))
+
+ f0 = fun_wrapped(x0)
+
+ if f0.ndim != 1:
+ raise ValueError("`fun` must return at most 1-d array_like. "
+ f"f0.shape: {f0.shape}")
+
+ if not np.all(np.isfinite(f0)):
+ raise ValueError("Residuals are not finite in the initial point.")
+
+ n = x0.size
+ m = f0.size
+
+ if method == 'lm' and m < n:
+ raise ValueError("Method 'lm' doesn't work when the number of "
+ "residuals is less than the number of variables.")
+
+ loss_function = construct_loss_function(m, loss, f_scale)
+ if callable(loss):
+ rho = loss_function(f0)
+ if rho.shape != (3, m):
+ raise ValueError("The return value of `loss` callable has wrong "
+ "shape.")
+ initial_cost = 0.5 * np.sum(rho[0])
+ elif loss_function is not None:
+ initial_cost = loss_function(f0, cost_only=True)
+ else:
+ initial_cost = 0.5 * np.dot(f0, f0)
+
+ if callable(jac):
+ J0 = jac(x0, *args, **kwargs)
+
+ if issparse(J0):
+ J0 = J0.tocsr()
+
+ def jac_wrapped(x, _=None):
+ return jac(x, *args, **kwargs).tocsr()
+
+ elif isinstance(J0, LinearOperator):
+ def jac_wrapped(x, _=None):
+ return jac(x, *args, **kwargs)
+
+ else:
+ J0 = np.atleast_2d(J0)
+
+ def jac_wrapped(x, _=None):
+ return np.atleast_2d(jac(x, *args, **kwargs))
+
+ else: # Estimate Jacobian by finite differences.
+ if method == 'lm':
+ if jac_sparsity is not None:
+ raise ValueError("method='lm' does not support "
+ "`jac_sparsity`.")
+
+ if jac != '2-point':
+ warn(f"jac='{jac}' works equivalently to '2-point' for method='lm'.",
+ stacklevel=2)
+
+ J0 = jac_wrapped = None
+ else:
+ if jac_sparsity is not None and tr_solver == 'exact':
+ raise ValueError("tr_solver='exact' is incompatible "
+ "with `jac_sparsity`.")
+
+ jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
+
+ def jac_wrapped(x, f):
+ J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
+ f0=f, bounds=bounds, args=args,
+ kwargs=kwargs, sparsity=jac_sparsity)
+ if J.ndim != 2: # J is guaranteed not sparse.
+ J = np.atleast_2d(J)
+
+ return J
+
+ J0 = jac_wrapped(x0, f0)
+
+ if J0 is not None:
+ if J0.shape != (m, n):
+ raise ValueError(
+ f"The return value of `jac` has wrong shape: expected {(m, n)}, "
+ f"actual {J0.shape}."
+ )
+
+ if not isinstance(J0, np.ndarray):
+ if method == 'lm':
+ raise ValueError("method='lm' works only with dense "
+ "Jacobian matrices.")
+
+ if tr_solver == 'exact':
+ raise ValueError(
+ "tr_solver='exact' works only with dense "
+ "Jacobian matrices.")
+
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+ if isinstance(J0, LinearOperator) and jac_scale:
+ raise ValueError("x_scale='jac' can't be used when `jac` "
+ "returns LinearOperator.")
+
+ if tr_solver is None:
+ if isinstance(J0, np.ndarray):
+ tr_solver = 'exact'
+ else:
+ tr_solver = 'lsmr'
+
+ if method == 'lm':
+ result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
+ max_nfev, x_scale, diff_step)
+
+ elif method == 'trf':
+ result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
+ gtol, max_nfev, x_scale, loss_function, tr_solver,
+ tr_options.copy(), verbose)
+
+ elif method == 'dogbox':
+ if tr_solver == 'lsmr' and 'regularize' in tr_options:
+ warn("The keyword 'regularize' in `tr_options` is not relevant "
+ "for 'dogbox' method.",
+ stacklevel=2)
+ tr_options = tr_options.copy()
+ del tr_options['regularize']
+
+ result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
+ xtol, gtol, max_nfev, x_scale, loss_function,
+ tr_solver, tr_options, verbose)
+
+ result.message = TERMINATION_MESSAGES[result.status]
+ result.success = result.status > 0
+
+ if verbose >= 1:
+ print(result.message)
+ print("Function evaluations {}, initial cost {:.4e}, final cost "
+ "{:.4e}, first-order optimality {:.2e}."
+ .format(result.nfev, initial_cost, result.cost,
+ result.optimality))
+
+ return result
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdf4d26020109d55a6aea2be3009181a388c722d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/lsq_linear.py
@@ -0,0 +1,362 @@
+"""Linear least squares with bound constraints on independent variables."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.sparse import issparse, csr_matrix
+from scipy.sparse.linalg import LinearOperator, lsmr
+from scipy.optimize import OptimizeResult
+from scipy.optimize._minimize import Bounds
+
+from .common import in_bounds, compute_grad
+from .trf_linear import trf_linear
+from .bvls import bvls
+
+
+def prepare_bounds(bounds, n):
+ if len(bounds) != 2:
+ raise ValueError("`bounds` must contain 2 elements.")
+ lb, ub = (np.asarray(b, dtype=float) for b in bounds)
+
+ if lb.ndim == 0:
+ lb = np.resize(lb, n)
+
+ if ub.ndim == 0:
+ ub = np.resize(ub, n)
+
+ return lb, ub
+
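+# Example (illustrative): prepare_bounds((0.0, 1.0), 3) broadcasts the
+# scalar bounds to lb = array([0., 0., 0.]) and ub = array([1., 1., 1.]).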
+
+TERMINATION_MESSAGES = {
+ -1: "The algorithm was not able to make progress on the last iteration.",
+ 0: "The maximum number of iterations is exceeded.",
+ 1: "The first-order optimality measure is less than `tol`.",
+ 2: "The relative change of the cost function is less than `tol`.",
+ 3: "The unconstrained solution is optimal."
+}
+
+
+def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
+ lsq_solver=None, lsmr_tol=None, max_iter=None,
+ verbose=0, *, lsmr_maxiter=None,):
+ r"""Solve a linear least-squares problem with bounds on the variables.
+
+ Given an m-by-n design matrix A and a target vector b with m elements,
+ `lsq_linear` solves the following optimization problem::
+
+ minimize 0.5 * ||A x - b||**2
+ subject to lb <= x <= ub
+
+ This optimization problem is convex, hence any minimum found (once the
+ iterations have converged) is guaranteed to be global.
+
+ Parameters
+ ----------
+ A : array_like, sparse matrix or LinearOperator, shape (m, n)
+ Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
+ b : array_like, shape (m,)
+ Target vector.
+ bounds : 2-tuple of array_like or `Bounds`, optional
+ Lower and upper bounds on parameters. Defaults to no bounds.
+ There are two ways to specify the bounds:
+
+ - Instance of `Bounds` class.
+
+ - 2-tuple of array_like: Each element of the tuple must be either
+ an array with the length equal to the number of parameters, or a
+ scalar (in which case the bound is taken to be the same for all
+ parameters). Use ``np.inf`` with an appropriate sign to disable
+ bounds on all or some parameters.
+
+ method : 'trf' or 'bvls', optional
+ Method to perform minimization.
+
+ * 'trf' : Trust Region Reflective algorithm adapted for a linear
+ least-squares problem. This is an interior-point-like method
+ and the required number of iterations is weakly correlated with
+ the number of variables.
+ * 'bvls' : Bounded-variable least-squares algorithm. This is
+ an active set method, which requires a number of iterations
+ comparable to the number of variables. Can't be used when `A` is
+ sparse or LinearOperator.
+
+ Default is 'trf'.
+ tol : float, optional
+ Tolerance parameter. The algorithm terminates if a relative change
+ of the cost function is less than `tol` on the last iteration.
+ Additionally, the first-order optimality measure is considered:
+
+ * ``method='trf'`` terminates if the uniform norm of the gradient,
+ scaled to account for the presence of the bounds, is less than
+ `tol`.
+ * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
+ are satisfied within `tol` tolerance.
+
+ lsq_solver : {None, 'exact', 'lsmr'}, optional
+ Method of solving unbounded least-squares problems throughout
+ iterations:
+
+ * 'exact' : Use dense QR or SVD decomposition approach. Can't be
+ used when `A` is sparse or LinearOperator.
+ * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
+ which requires only matrix-vector product evaluations. Can't
+ be used with ``method='bvls'``.
+
+ If None (default), the solver is chosen based on type of `A`.
+ lsmr_tol : None, float or 'auto', optional
+ Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
+ If None (default), it is set to ``1e-2 * tol``. If 'auto', the
+ tolerance will be adjusted based on the optimality of the current
+ iterate, which can speed up the optimization process, but is not always
+ reliable.
+ max_iter : None or int, optional
+ Maximum number of iterations before termination. If None (default), it
+ is set to 100 for ``method='trf'`` or to the number of variables for
+ ``method='bvls'`` (not counting iterations for 'bvls' initialization).
+ verbose : {0, 1, 2}, optional
+ Level of algorithm's verbosity:
+
+ * 0 : work silently (default).
+ * 1 : display a termination report.
+ * 2 : display progress during iterations.
+ lsmr_maxiter : None or int, optional
+ Maximum number of iterations for the lsmr least squares solver,
+ if it is used (by setting ``lsq_solver='lsmr'``). If None (default), it
+ uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the
+ number of rows and columns of `A`, respectively. Has no effect if
+ ``lsq_solver='exact'``.
+
+ Returns
+ -------
+ OptimizeResult with the following fields defined:
+ x : ndarray, shape (n,)
+ Solution found.
+ cost : float
+ Value of the cost function at the solution.
+ fun : ndarray, shape (m,)
+ Vector of residuals at the solution.
+ optimality : float
+ First-order optimality measure. The exact meaning depends on `method`,
+ refer to the description of `tol` parameter.
+ active_mask : ndarray of int, shape (n,)
+ Each component shows whether a corresponding constraint is active
+ (that is, whether a variable is at the bound):
+
+ * 0 : a constraint is not active.
+ * -1 : a lower bound is active.
+ * 1 : an upper bound is active.
+
+ Might be somewhat arbitrary for the `trf` method as it generates a
+ sequence of strictly feasible iterates and active_mask is determined
+ within a tolerance threshold.
+ unbounded_sol : tuple
+ Unbounded least squares solution tuple returned by the least squares
+ solver (set with `lsq_solver` option). If `lsq_solver` is not set or is
+ set to ``'exact'``, the tuple contains an ndarray of shape (n,) with
+ the unbounded solution, an ndarray with the sum of squared residuals,
+ an int with the rank of `A`, and an ndarray with the singular values
+ of `A` (see NumPy's ``linalg.lstsq`` for more information). If
+ `lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of
+ shape (n,) with the unbounded solution, an int with the exit code,
+ an int with the number of iterations, and five floats with
+ various norms and the condition number of `A` (see SciPy's
+ ``sparse.linalg.lsmr`` for more information). This output can be
+ useful for determining the convergence of the least squares solver,
+ particularly the iterative ``'lsmr'`` solver. The unbounded least
+ squares problem is to minimize ``0.5 * ||A x - b||**2``.
+ nit : int
+ Number of iterations. Zero if the unconstrained solution is optimal.
+ status : int
+ Reason for algorithm termination:
+
+ * -1 : the algorithm was not able to make progress on the last
+ iteration.
+ * 0 : the maximum number of iterations is exceeded.
+ * 1 : the first-order optimality measure is less than `tol`.
+ * 2 : the relative change of the cost function is less than `tol`.
+ * 3 : the unconstrained solution is optimal.
+
+ message : str
+ Verbal description of the termination reason.
+ success : bool
+ True if one of the convergence criteria is satisfied (`status` > 0).
+
+ See Also
+ --------
+ nnls : Linear least squares with non-negativity constraint.
+ least_squares : Nonlinear least squares with bounds on the variables.
+
+ Notes
+ -----
+ The algorithm first computes the unconstrained least-squares solution by
+ `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
+ `lsq_solver`. This solution is returned as optimal if it lies within the
+ bounds.
+
+ Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
+ a linear least-squares problem. The iterations are essentially the same as
+ in the nonlinear least-squares algorithm, but as the quadratic function
+ model is always accurate, we don't need to track or modify the radius of
+ a trust region. The line search (backtracking) is used as a safety net
+ when a selected step does not decrease the cost function. A more detailed
+ description of the algorithm is given in `scipy.optimize.least_squares`.
+
+ Method 'bvls' runs a Python implementation of the algorithm described in
+ [BVLS]_. The algorithm maintains active and free sets of variables, on
+ each iteration chooses a new variable to move from the active set to the
+ free set and then solves the unconstrained least-squares problem on free
+ variables. This algorithm is guaranteed to give an accurate solution
+ eventually, but may require up to n iterations for a problem with n
+ variables. Additionally, an ad hoc initialization procedure is
+ implemented that determines which variables to set free or active
+ initially. It takes some iterations before the actual BVLS starts,
+ but can significantly reduce the number of further iterations.
+
+ References
+ ----------
+ .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
+ Minimization Problems," SIAM Journal on Scientific Computing,
+ Vol. 21, Number 1, pp 1-23, 1999.
+ .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
+ an Algorithm and Applications", Computational Statistics, 10,
+ 129-141, 1995.
+
+ Examples
+ --------
+ In this example, a problem with a large sparse matrix and bounds on the
+ variables is solved.
+
+ >>> import numpy as np
+ >>> from scipy.sparse import rand
+ >>> from scipy.optimize import lsq_linear
+ >>> rng = np.random.default_rng()
+ ...
+ >>> m = 20000
+ >>> n = 10000
+ ...
+ >>> A = rand(m, n, density=1e-4, random_state=rng)
+ >>> b = rng.standard_normal(m)
+ ...
+ >>> lb = rng.standard_normal(n)
+ >>> ub = lb + 1
+ ...
+ >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
+ # may vary
+ The relative change of the cost function is less than `tol`.
+ Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
+ first-order optimality 4.66e-08.
+ """
+ if method not in ['trf', 'bvls']:
+ raise ValueError("`method` must be 'trf' or 'bvls'")
+
+ if lsq_solver not in [None, 'exact', 'lsmr']:
+ raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")
+
+ if verbose not in [0, 1, 2]:
+ raise ValueError("`verbose` must be in [0, 1, 2].")
+
+ if issparse(A):
+ A = csr_matrix(A)
+ elif not isinstance(A, LinearOperator):
+ A = np.atleast_2d(np.asarray(A))
+
+ if method == 'bvls':
+ if lsq_solver == 'lsmr':
+ raise ValueError("method='bvls' can't be used with "
+ "lsq_solver='lsmr'")
+
+ if not isinstance(A, np.ndarray):
+ raise ValueError("method='bvls' can't be used with `A` being "
+ "sparse or LinearOperator.")
+
+ if lsq_solver is None:
+ if isinstance(A, np.ndarray):
+ lsq_solver = 'exact'
+ else:
+ lsq_solver = 'lsmr'
+ elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
+ raise ValueError("`exact` solver can't be used when `A` is "
+ "sparse or LinearOperator.")
+
+ if len(A.shape) != 2: # No ndim for LinearOperator.
+ raise ValueError("`A` must have at most 2 dimensions.")
+
+ if max_iter is not None and max_iter <= 0:
+ raise ValueError("`max_iter` must be None or positive integer.")
+
+ m, n = A.shape
+
+ b = np.atleast_1d(b)
+ if b.ndim != 1:
+ raise ValueError("`b` must have at most 1 dimension.")
+
+ if b.size != m:
+ raise ValueError("Inconsistent shapes between `A` and `b`.")
+
+ if isinstance(bounds, Bounds):
+ lb = bounds.lb
+ ub = bounds.ub
+ else:
+ lb, ub = prepare_bounds(bounds, n)
+
+ if lb.shape != (n,) or ub.shape != (n,):
+ raise ValueError("Bounds have wrong shape.")
+
+ if np.any(lb >= ub):
+ raise ValueError("Each lower bound must be strictly less than each "
+ "upper bound.")
+
+ if lsmr_maxiter is not None and lsmr_maxiter < 1:
+ raise ValueError("`lsmr_maxiter` must be None or positive integer.")
+
+ if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or
+ lsmr_tol in ('auto', None)):
+ raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.")
+
+ if lsq_solver == 'exact':
+ unbd_lsq = np.linalg.lstsq(A, b, rcond=-1)
+ elif lsq_solver == 'lsmr':
+ first_lsmr_tol = lsmr_tol # tol of first call to lsmr
+ if lsmr_tol is None or lsmr_tol == 'auto':
+ first_lsmr_tol = 1e-2 * tol # default if lsmr_tol not defined
+ unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter,
+ atol=first_lsmr_tol, btol=first_lsmr_tol)
+ x_lsq = unbd_lsq[0] # extract the solution from the least squares solver
+
+ if in_bounds(x_lsq, lb, ub):
+ r = A @ x_lsq - b
+ cost = 0.5 * np.dot(r, r)
+ termination_status = 3
+ termination_message = TERMINATION_MESSAGES[termination_status]
+ g = compute_grad(A, r)
+ g_norm = norm(g, ord=np.inf)
+
+ if verbose > 0:
+ print(termination_message)
+ print(f"Final cost {cost:.4e}, first-order optimality {g_norm:.2e}")
+
+ return OptimizeResult(
+ x=x_lsq, fun=r, cost=cost, optimality=g_norm,
+ active_mask=np.zeros(n), unbounded_sol=unbd_lsq,
+ nit=0, status=termination_status,
+ message=termination_message, success=True)
+
+ if method == 'trf':
+ res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+ max_iter, verbose, lsmr_maxiter=lsmr_maxiter)
+ elif method == 'bvls':
+ res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)
+
+ res.unbounded_sol = unbd_lsq
+ res.message = TERMINATION_MESSAGES[res.status]
+ res.success = res.status > 0
+
+ if verbose > 0:
+ print(res.message)
+ print(
+ f"Number of iterations {res.nit}, initial cost {res.initial_cost:.4e}, "
+ f"final cost {res.cost:.4e}, first-order optimality {res.optimality:.2e}."
+ )
+
+ del res.initial_cost
+
+ return res
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py
new file mode 100644
index 0000000000000000000000000000000000000000..9154bdba5b2cc41883811ba1820dfc251e515d6c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf.py
@@ -0,0 +1,560 @@
+"""Trust Region Reflective algorithm for least-squares optimization.
+
+The algorithm is based on ideas from the paper [STIR]_. The main idea is to
+account for the presence of the bounds by appropriate scaling of the variables (or,
+equivalently, changing a trust-region shape). Let's introduce a vector v:
+
+ | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
+ v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
+ | 1, otherwise
+
+where g is the gradient of a cost function and lb, ub are the bounds. Its
+components are distances to the bounds at which the anti-gradient points (if
+this distance is finite). Define a scaling matrix D = diag(v**0.5).
+First-order optimality conditions can be stated as
+
+ D^2 g(x) = 0.
+
+This means that components of the gradient should be zero for strictly
+interior variables, while for variables on a bound the gradient components
+must point inside the feasible region.
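+
+For example (illustrative), with lb = [0, -inf], ub = [inf, inf],
+x = [0.1, 2] and g = [1, -1], we get v = [0.1, 1]: the first component is
+the distance to the lower bound (g[0] > 0 and lb[0] is finite), while the
+second is 1 because the bound the anti-gradient points at is infinite.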
+
+Now consider this system of equations as a new optimization problem. If the
+point x is strictly interior (not on the bound), then the left-hand side is
+differentiable and the Newton step for it satisfies
+
+ (D^2 H + diag(g) Jv) p = -D^2 g
+
+where H is the Hessian matrix (or its J^T J approximation in least squares),
+Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
+elements of matrix C = diag(g) Jv are non-negative. Introduce the change
+of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
+we have a Newton step satisfying
+
+ B_h p_h = -g_h,
+
+where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
+J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
+to "hat" variables. To guarantee global convergence we formulate a
+trust-region problem based on the Newton step in the new variables:
+
+ 0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
+
+In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
+problem is
+
+ 0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
+
+Here, the meaning of the matrix D becomes clearer: it alters the shape
+of a trust-region, such that large steps towards the bounds are not allowed.
+In the implementation, the trust-region problem is solved in "hat" space,
+but handling of the bounds is done in the original space (see below and read
+the code).
+
+The introduction of the matrix D doesn't make it possible to ignore the
+bounds: the algorithm must keep its iterates strictly feasible (to satisfy
+the aforementioned differentiability requirement), and the parameter theta
+controls the step back from the boundary (see the code for details).
+
+The algorithm does another important trick. If the trust-region solution
+doesn't fit into the bounds, then a search direction reflected from the
+first encountered bound is considered. For motivation and analysis refer to
+the [STIR]_ paper (and other papers by the authors). In practice, it doesn't
+need much justification: the algorithm simply chooses the best step among
+three: a constrained trust-region step, a reflected step and a constrained
+Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
+space).
+
+Another feature is that a trust-region radius control strategy is modified to
+account for the appearance of the diagonal C matrix (called diag_h in the code).
+
+Note that all of the described peculiarities vanish for problems without
+bounds (the algorithm becomes a standard trust-region type algorithm, very
+similar to the ones implemented in MINPACK).
+
+The implementation supports two methods of solving the trust-region problem.
+The first, called 'exact', applies SVD on Jacobian and then solves the problem
+very accurately using the algorithm described in [JJMore]_. It is not
+applicable to large problem. The second, called 'lsmr', uses the 2-D subspace
+approach (sometimes called "indefinite dogleg"), where the problem is solved
+in a subspace spanned by the gradient and the approximate Gauss-Newton step
+found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
+reformulated as a 4th order algebraic equation and solved very accurately by
+``numpy.roots``. The subspace approach makes it possible to solve very large
+problems (up to a couple of million residuals on a regular PC), provided the
+Jacobian matrix is sufficiently sparse.
+
+References
+----------
+.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
+ and Conjugate Gradient Method for Large-Scale Bound-Constrained
+ Minimization Problems," SIAM Journal on Scientific Computing,
+ Vol. 21, Number 1, pp 1-23, 1999.
+.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+ and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
+"""
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import svd, qr
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+
+from .common import (
+ step_size_to_bound, find_active_constraints, in_bounds,
+ make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
+ solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
+ evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
+ CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
+ update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
+ print_iteration_nonlinear)
+
+
+def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+ loss_function, tr_solver, tr_options, verbose):
+ # For efficiency, it makes sense to run the simplified version of the
+ # algorithm when no bounds are imposed. We decided to write two
+ # separate functions. It violates the DRY principle, but keeps the
+ # individual functions more readable.
+ if np.all(lb == -np.inf) and np.all(ub == np.inf):
+ return trf_no_bounds(
+ fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
+ loss_function, tr_solver, tr_options, verbose)
+ else:
+ return trf_bounds(
+ fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+ loss_function, tr_solver, tr_options, verbose)
+
+
+def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
+ """Select the best step according to Trust Region Reflective algorithm."""
+ if in_bounds(x + p, lb, ub):
+ p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
+ return p, p_h, -p_value
+
+ p_stride, hits = step_size_to_bound(x, p, lb, ub)
+
+ # Compute the reflected direction.
+ r_h = np.copy(p_h)
+ r_h[hits.astype(bool)] *= -1
+ r = d * r_h
+
+ # Restrict trust-region step, such that it hits the bound.
+ p *= p_stride
+ p_h *= p_stride
+ x_on_bound = x + p
+
+ # Reflected direction will cross first either feasible region or trust
+ # region boundary.
+ _, to_tr = intersect_trust_region(p_h, r_h, Delta)
+ to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+ # Find lower and upper bounds on a step size along the reflected
+ # direction, considering the strict feasibility requirement. There is no
+ # single correct way to do that, the chosen approach seems to work best
+ # on test problems.
+ r_stride = min(to_bound, to_tr)
+ if r_stride > 0:
+ r_stride_l = (1 - theta) * p_stride / r_stride
+ if r_stride == to_bound:
+ r_stride_u = theta * to_bound
+ else:
+ r_stride_u = to_tr
+ else:
+ r_stride_l = 0
+ r_stride_u = -1
+
+ # Check if reflection step is available.
+ if r_stride_l <= r_stride_u:
+ a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
+ r_stride, r_value = minimize_quadratic_1d(
+ a, b, r_stride_l, r_stride_u, c=c)
+ r_h *= r_stride
+ r_h += p_h
+ r = r_h * d
+ else:
+ r_value = np.inf
+
+ # Now correct p_h to make it strictly interior.
+ p *= theta
+ p_h *= theta
+ p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
+
+ ag_h = -g_h
+ ag = d * ag_h
+
+ to_tr = Delta / norm(ag_h)
+ to_bound, _ = step_size_to_bound(x, ag, lb, ub)
+ if to_bound < to_tr:
+ ag_stride = theta * to_bound
+ else:
+ ag_stride = to_tr
+
+ a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
+ ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
+ ag_h *= ag_stride
+ ag *= ag_stride
+
+ if p_value < r_value and p_value < ag_value:
+ return p, p_h, -p_value
+ elif r_value < p_value and r_value < ag_value:
+ return r, r_h, -r_value
+ else:
+ return ag, ag_h, -ag_value
+
+
+def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
+ x_scale, loss_function, tr_solver, tr_options, verbose):
+ x = x0.copy()
+
+ f = f0
+ f_true = f.copy()
+ nfev = 1
+
+ J = J0
+ njev = 1
+ m, n = J.shape
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ cost = 0.5 * np.sum(rho[0])
+ J, f = scale_for_robust_loss_function(J, f, rho)
+ else:
+ cost = 0.5 * np.dot(f, f)
+
+ g = compute_grad(J, f)
+
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J)
+ else:
+ scale, scale_inv = x_scale, 1 / x_scale
+
+ v, dv = CL_scaling_vector(x, g, lb, ub)
+ v[dv != 0] *= scale_inv[dv != 0]
+ Delta = norm(x0 * scale_inv / v**0.5)
+ if Delta == 0:
+ Delta = 1.0
+
+ g_norm = norm(g * v, ord=np.inf)
+
+ f_augmented = np.zeros(m + n)
+ if tr_solver == 'exact':
+ J_augmented = np.empty((m + n, n))
+ elif tr_solver == 'lsmr':
+ reg_term = 0.0
+ regularize = tr_options.pop('regularize', True)
+
+ if max_nfev is None:
+ max_nfev = x0.size * 100
+
+ alpha = 0.0 # "Levenberg-Marquardt" parameter
+
+ termination_status = None
+ iteration = 0
+ step_norm = None
+ actual_reduction = None
+
+ if verbose == 2:
+ print_header_nonlinear()
+
+ while True:
+ v, dv = CL_scaling_vector(x, g, lb, ub)
+
+ g_norm = norm(g * v, ord=np.inf)
+ if g_norm < gtol:
+ termination_status = 1
+
+ if verbose == 2:
+ print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+ step_norm, g_norm)
+
+ if termination_status is not None or nfev == max_nfev:
+ break
+
+ # Now compute variables in "hat" space. Here, we also account for
+ # scaling introduced by the `x_scale` parameter. This part is a bit tricky;
+ # you have to write down the formulas and see how the trust-region
+ # problem is formulated when the two types of scaling are applied.
+ # The idea is that first we apply `x_scale` and then apply Coleman-Li
+ # approach in the new variables.
+
+ # v is recomputed in the variables after applying `x_scale`, note that
+ # components which were identically 1 are not affected.
+ v[dv != 0] *= scale_inv[dv != 0]
+
+ # Here, we apply two types of scaling.
+ d = v**0.5 * scale
+
+ # C = diag(g * scale) Jv
+ diag_h = g * dv * scale
+
+ # After all this has been done, we continue normally.
+
+ # "hat" gradient.
+ g_h = d * g
+
+ f_augmented[:m] = f
+ if tr_solver == 'exact':
+ J_augmented[:m] = J * d
+ J_h = J_augmented[:m] # Memory view.
+ J_augmented[m:] = np.diag(diag_h**0.5)
+ U, s, V = svd(J_augmented, full_matrices=False)
+ V = V.T
+ uf = U.T.dot(f_augmented)
+ elif tr_solver == 'lsmr':
+ J_h = right_multiplied_operator(J, d)
+
+ if regularize:
+ a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
+ to_tr = Delta / norm(g_h)
+ ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
+ reg_term = -ag_value / Delta**2
+
+ lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
+ gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
+ S = np.vstack((g_h, gn_h)).T
+ S, _ = qr(S, mode='economic')
+ JS = J_h.dot(S) # LinearOperator does dot too.
+ B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
+ g_S = S.T.dot(g_h)
+
+ # theta controls the step-back ratio from the bounds.
+ theta = max(0.995, 1 - g_norm)
+
+ actual_reduction = -1
+ while actual_reduction <= 0 and nfev < max_nfev:
+ if tr_solver == 'exact':
+ p_h, alpha, n_iter = solve_lsq_trust_region(
+ n, m, uf, s, V, Delta, initial_alpha=alpha)
+ elif tr_solver == 'lsmr':
+ p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
+ p_h = S.dot(p_S)
+
+ p = d * p_h # Trust-region solution in the original space.
+ step, step_h, predicted_reduction = select_step(
+ x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
+
+ x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
+ f_new = fun(x_new)
+ nfev += 1
+
+ step_h_norm = norm(step_h)
+
+ if not np.all(np.isfinite(f_new)):
+ Delta = 0.25 * step_h_norm
+ continue
+
+ # Usual trust-region step quality estimation.
+ if loss_function is not None:
+ cost_new = loss_function(f_new, cost_only=True)
+ else:
+ cost_new = 0.5 * np.dot(f_new, f_new)
+ actual_reduction = cost - cost_new
+ Delta_new, ratio = update_tr_radius(
+ Delta, actual_reduction, predicted_reduction,
+ step_h_norm, step_h_norm > 0.95 * Delta)
+
+ step_norm = norm(step)
+ termination_status = check_termination(
+ actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+ if termination_status is not None:
+ break
+
+ alpha *= Delta / Delta_new
+ Delta = Delta_new
+
+ if actual_reduction > 0:
+ x = x_new
+
+ f = f_new
+ f_true = f.copy()
+
+ cost = cost_new
+
+ J = jac(x, f)
+ njev += 1
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ J, f = scale_for_robust_loss_function(J, f, rho)
+
+ g = compute_grad(J, f)
+
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J, scale_inv)
+ else:
+ step_norm = 0
+ actual_reduction = 0
+
+ iteration += 1
+
+ if termination_status is None:
+ termination_status = 0
+
+ active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
+ return OptimizeResult(
+ x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
+ active_mask=active_mask, nfev=nfev, njev=njev,
+ status=termination_status)
+
+
+def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
+ x_scale, loss_function, tr_solver, tr_options, verbose):
+ x = x0.copy()
+
+ f = f0
+ f_true = f.copy()
+ nfev = 1
+
+ J = J0
+ njev = 1
+ m, n = J.shape
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ cost = 0.5 * np.sum(rho[0])
+ J, f = scale_for_robust_loss_function(J, f, rho)
+ else:
+ cost = 0.5 * np.dot(f, f)
+
+ g = compute_grad(J, f)
+
+ jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J)
+ else:
+ scale, scale_inv = x_scale, 1 / x_scale
+
+ Delta = norm(x0 * scale_inv)
+ if Delta == 0:
+ Delta = 1.0
+
+ if tr_solver == 'lsmr':
+ reg_term = 0
+ damp = tr_options.pop('damp', 0.0)
+ regularize = tr_options.pop('regularize', True)
+
+ if max_nfev is None:
+ max_nfev = x0.size * 100
+
+ alpha = 0.0 # "Levenberg-Marquardt" parameter
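+    # Editorial note: alpha is the damping parameter of the equivalent
+    # Levenberg-Marquardt system (J_h^T J_h + alpha*I) p_h = -g_h. The exact
+    # trust-region solver adjusts alpha so that ||p_h|| is about Delta, and
+    # the rescaling alpha *= Delta / Delta_new below carries it over as a
+    # warm start for the next iteration.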
+
+ termination_status = None
+ iteration = 0
+ step_norm = None
+ actual_reduction = None
+
+ if verbose == 2:
+ print_header_nonlinear()
+
+ while True:
+ g_norm = norm(g, ord=np.inf)
+ if g_norm < gtol:
+ termination_status = 1
+
+ if verbose == 2:
+ print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+ step_norm, g_norm)
+
+ if termination_status is not None or nfev == max_nfev:
+ break
+
+ d = scale
+ g_h = d * g
+
+ if tr_solver == 'exact':
+ J_h = J * d
+ U, s, V = svd(J_h, full_matrices=False)
+ V = V.T
+ uf = U.T.dot(f)
+ elif tr_solver == 'lsmr':
+ J_h = right_multiplied_operator(J, d)
+
+ if regularize:
+ a, b = build_quadratic_1d(J_h, g_h, -g_h)
+ to_tr = Delta / norm(g_h)
+ ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
+ reg_term = -ag_value / Delta**2
+
+ damp_full = (damp**2 + reg_term)**0.5
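+            # Editorial note: lsmr with damp=c solves the Tikhonov-regularized
+            # problem min ||J_h p - f||^2 + c^2 * ||p||^2, so the user-supplied
+            # damp and the adaptive reg_term combine in quadrature.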
+ gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
+ S = np.vstack((g_h, gn_h)).T
+ S, _ = qr(S, mode='economic')
+ JS = J_h.dot(S)
+ B_S = np.dot(JS.T, JS)
+ g_S = S.T.dot(g_h)
+
+ actual_reduction = -1
+ while actual_reduction <= 0 and nfev < max_nfev:
+ if tr_solver == 'exact':
+ step_h, alpha, n_iter = solve_lsq_trust_region(
+ n, m, uf, s, V, Delta, initial_alpha=alpha)
+ elif tr_solver == 'lsmr':
+ p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
+ step_h = S.dot(p_S)
+
+ predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
+ step = d * step_h
+ x_new = x + step
+ f_new = fun(x_new)
+ nfev += 1
+
+ step_h_norm = norm(step_h)
+
+ if not np.all(np.isfinite(f_new)):
+ Delta = 0.25 * step_h_norm
+ continue
+
+ # Usual trust-region step quality estimation.
+ if loss_function is not None:
+ cost_new = loss_function(f_new, cost_only=True)
+ else:
+ cost_new = 0.5 * np.dot(f_new, f_new)
+ actual_reduction = cost - cost_new
+
+ Delta_new, ratio = update_tr_radius(
+ Delta, actual_reduction, predicted_reduction,
+ step_h_norm, step_h_norm > 0.95 * Delta)
+
+ step_norm = norm(step)
+ termination_status = check_termination(
+ actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+ if termination_status is not None:
+ break
+
+ alpha *= Delta / Delta_new
+ Delta = Delta_new
+
+ if actual_reduction > 0:
+ x = x_new
+
+ f = f_new
+ f_true = f.copy()
+
+ cost = cost_new
+
+ J = jac(x, f)
+ njev += 1
+
+ if loss_function is not None:
+ rho = loss_function(f)
+ J, f = scale_for_robust_loss_function(J, f, rho)
+
+ g = compute_grad(J, f)
+
+ if jac_scale:
+ scale, scale_inv = compute_jac_scale(J, scale_inv)
+ else:
+ step_norm = 0
+ actual_reduction = 0
+
+ iteration += 1
+
+ if termination_status is None:
+ termination_status = 0
+
+ active_mask = np.zeros_like(x)
+ return OptimizeResult(
+ x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
+ active_mask=active_mask, nfev=nfev, njev=njev,
+ status=termination_status)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd752763179bcf97945c7f34ce6a9e49e85c819e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize/_lsq/trf_linear.py
@@ -0,0 +1,249 @@
+"""The adaptation of Trust Region Reflective algorithm for a linear
+least-squares problem."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import qr, solve_triangular
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+
+from .givens_elimination import givens_elimination
+from .common import (
+ EPS, step_size_to_bound, find_active_constraints, in_bounds,
+ make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
+ minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
+ print_header_linear, print_iteration_linear, compute_grad,
+ regularized_lsq_operator, right_multiplied_operator)
+
+
+def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
+ """Solve regularized least squares using information from QR-decomposition.
+
+ The initial problem is to solve the following system in a least-squares
+ sense::
+
+ A x = b
+ D x = 0
+
+    where D is a diagonal matrix. The method is based on a QR decomposition
+    of the form A P = Q R, where P is a column permutation matrix, Q is an
+    orthogonal matrix, and R is an upper triangular matrix.
+
+ Parameters
+ ----------
+ m, n : int
+ Initial shape of A.
+ R : ndarray, shape (n, n)
+ Upper triangular matrix from QR decomposition of A.
+ QTb : ndarray, shape (n,)
+ First n components of Q^T b.
+ perm : ndarray, shape (n,)
+        Array defining the column permutation of A, such that the ith column
+        of P is the perm[i]-th column of the identity matrix.
+    diag : ndarray, shape (n,)
+        Array containing diagonal elements of D.
+    copy_R : bool, optional
+        If True (default), work on a copy of `R` so that the caller's array
+        is left unmodified.
+
+ Returns
+ -------
+ x : ndarray, shape (n,)
+        The computed least-squares solution.
+ """
+ if copy_R:
+ R = R.copy()
+ v = QTb.copy()
+
+ givens_elimination(R, v, diag[perm])
+
+ abs_diag_R = np.abs(np.diag(R))
+ threshold = EPS * max(m, n) * np.max(abs_diag_R)
+ nns, = np.nonzero(abs_diag_R > threshold)
+
+ R = R[np.ix_(nns, nns)]
+ v = v[nns]
+
+ x = np.zeros(n)
+ x[perm[nns]] = solve_triangular(R, v)
+
+ return x
+
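+# Editorial sanity check (hypothetical snippet, not used by the module): the
+# same solution can be obtained by a dense solve of the stacked augmented
+# system min ||A x - b||^2 + ||D x||^2:
+#
+#     >>> import numpy as np
+#     >>> from scipy.linalg import qr
+#     >>> rng = np.random.default_rng(0)
+#     >>> A = rng.standard_normal((5, 3))
+#     >>> b = rng.standard_normal(5)
+#     >>> diag = np.full(3, 0.5)
+#     >>> A_aug = np.vstack((A, np.diag(diag)))
+#     >>> b_aug = np.hstack((b, np.zeros(3)))
+#     >>> x_ref = np.linalg.lstsq(A_aug, b_aug, rcond=None)[0]
+#     >>> Q, R, perm = qr(A, mode='economic', pivoting=True)
+#     >>> x = regularized_lsq_with_qr(5, 3, R, Q.T.dot(b), perm, diag)
+#     >>> np.allclose(x, x_ref)
+#     True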
+
+def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
+ """Find an appropriate step size using backtracking line search."""
+ alpha = 1
+ while True:
+ x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
+ step = x_new - x
+ cost_change = -evaluate_quadratic(A, g, step)
+ if cost_change > -0.1 * alpha * p_dot_g:
+ break
+ alpha *= 0.5
+
+ active = find_active_constraints(x_new, lb, ub)
+ if np.any(active != 0):
+ x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
+ x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
+ step = x_new - x
+ cost_change = -evaluate_quadratic(A, g, step)
+
+ return x, step, cost_change
+
+
+def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
+ """Select the best step according to Trust Region Reflective algorithm."""
+ if in_bounds(x + p, lb, ub):
+ return p
+
+ p_stride, hits = step_size_to_bound(x, p, lb, ub)
+ r_h = np.copy(p_h)
+ r_h[hits.astype(bool)] *= -1
+ r = d * r_h
+
+    # Restrict the step so that it exactly hits the bound.
+ p *= p_stride
+ p_h *= p_stride
+ x_on_bound = x + p
+
+    # Find the step size along the reflected direction.
+ r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+ # Stay interior.
+ r_stride_l = (1 - theta) * r_stride_u
+ r_stride_u *= theta
+
+ if r_stride_u > 0:
+ a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
+ r_stride, r_value = minimize_quadratic_1d(
+ a, b, r_stride_l, r_stride_u, c=c)
+ r_h = p_h + r_h * r_stride
+ r = d * r_h
+ else:
+ r_value = np.inf
+
+ # Now correct p_h to make it strictly interior.
+ p_h *= theta
+ p *= theta
+ p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)
+
+ ag_h = -g_h
+ ag = d * ag_h
+ ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
+ ag_stride_u *= theta
+ a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
+ ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
+ ag *= ag_stride
+
+ if p_value < r_value and p_value < ag_value:
+ return p
+ elif r_value < p_value and r_value < ag_value:
+ return r
+ else:
+ return ag
+
+
+def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+ max_iter, verbose, *, lsmr_maxiter=None):
+ m, n = A.shape
+ x, _ = reflective_transformation(x_lsq, lb, ub)
+ x = make_strictly_feasible(x, lb, ub, rstep=0.1)
+
+ if lsq_solver == 'exact':
+ QT, R, perm = qr(A, mode='economic', pivoting=True)
+ QT = QT.T
+
+ if m < n:
+ R = np.vstack((R, np.zeros((n - m, n))))
+
+ QTr = np.zeros(n)
+ k = min(m, n)
+ elif lsq_solver == 'lsmr':
+ r_aug = np.zeros(m + n)
+ auto_lsmr_tol = False
+ if lsmr_tol is None:
+ lsmr_tol = 1e-2 * tol
+ elif lsmr_tol == 'auto':
+ auto_lsmr_tol = True
+
+ r = A.dot(x) - b
+ g = compute_grad(A, r)
+ cost = 0.5 * np.dot(r, r)
+ initial_cost = cost
+
+ termination_status = None
+ step_norm = None
+ cost_change = None
+
+ if max_iter is None:
+ max_iter = 100
+
+ if verbose == 2:
+ print_header_linear()
+
+ for iteration in range(max_iter):
+ v, dv = CL_scaling_vector(x, g, lb, ub)
+ g_scaled = g * v
+ g_norm = norm(g_scaled, ord=np.inf)
+ if g_norm < tol:
+ termination_status = 1
+
+ if verbose == 2:
+ print_iteration_linear(iteration, cost, cost_change,
+ step_norm, g_norm)
+
+ if termination_status is not None:
+ break
+
+ diag_h = g * dv
+ diag_root_h = diag_h ** 0.5
+ d = v ** 0.5
+ g_h = d * g
+
+ A_h = right_multiplied_operator(A, d)
+ if lsq_solver == 'exact':
+ QTr[:k] = QT.dot(r)
+ p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
+ diag_root_h, copy_R=False)
+ elif lsq_solver == 'lsmr':
+ lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
+ r_aug[:m] = r
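+            # Editorial note: the 'auto' tolerance acts like an inexact-Newton
+            # forcing sequence: the LSMR solve gets tighter as the scaled
+            # gradient norm g_norm shrinks.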
+ if auto_lsmr_tol:
+ eta = 1e-2 * min(0.5, g_norm)
+ lsmr_tol = max(EPS, min(0.1, eta * g_norm))
+ p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter,
+ atol=lsmr_tol, btol=lsmr_tol)[0]
+
+ p = d * p_h
+
+ p_dot_g = np.dot(p, g)
+ if p_dot_g > 0:
+ termination_status = -1
+
+ theta = 1 - min(0.005, g_norm)
+ step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
+ cost_change = -evaluate_quadratic(A, g, step)
+
+        # This branch is rarely, if ever, executed: `p` is a descent
+        # direction, so an acceptable cost decrease can be found by simple
+        # backtracking; without it the algorithm's logic would break down.
+ if cost_change < 0:
+ x, step, cost_change = backtracking(
+ A, g, x, p, theta, p_dot_g, lb, ub)
+ else:
+ x = make_strictly_feasible(x + step, lb, ub, rstep=0)
+
+ step_norm = norm(step)
+ r = A.dot(x) - b
+ g = compute_grad(A, r)
+
+ if cost_change < tol * cost:
+ termination_status = 2
+
+ cost = 0.5 * np.dot(r, r)
+
+ if termination_status is None:
+ termination_status = 0
+
+ active_mask = find_active_constraints(x, lb, ub, rtol=tol)
+
+ return OptimizeResult(
+ x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
+ nit=iteration + 1, status=termination_status,
+ initial_cost=initial_cost)