Dataset columns:

| column | dtype | values |
|---|---|---|
| repo | string | lengths 2 to 152 |
| file | string | lengths 15 to 239 |
| code | string | lengths 0 to 58.4M |
| file_length | int64 | 0 to 58.4M |
| avg_line_length | float64 | 0 to 1.81M |
| max_line_length | int64 | 0 to 12.7M |
| extension_type | string | 364 classes |
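The rows below are easiest to inspect programmatically. A minimal access sketch, assuming the `datasets` library is installed and using a placeholder dataset ID (the real Hub path is not shown on this page):

```python
# Hypothetical access sketch: "some-org/some-code-dataset" is a placeholder
# ID, not the real Hub path for this dataset. Column names match the schema
# table above.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train")
row = ds[0]
print(row["repo"], row["file"], row["extension_type"])
print(row["file_length"], row["avg_line_length"], row["max_line_length"])
print(row["code"][:200])  # first 200 characters of the stored source file
```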
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/setup.py

```python
from __future__ import division, print_function


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('polynomial', parent_package, top_path)
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
```

file_length: 385
avg_line_length: 31.166667
max_line_length: 66
extension_type: py
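For context, a minimal sketch of how a `configuration()` function like the one above is consumed; `numpy.distutils` is assumed to be importable (it ships with older NumPy and is deprecated in recent releases), and the inspection lines are illustrative, not part of the original file:

```python
# Sketch only: build the same Configuration object this setup.py defines
# and inspect it. Assumes numpy.distutils is available.
from numpy.distutils.misc_util import Configuration

config = Configuration('polynomial', parent_package='', top_path=None)
config.add_data_dir('tests')   # registers the tests/ data directory
print(config.name)             # -> 'polynomial'
```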
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/laguerre.py

```python
"""
Objects for dealing with Laguerre series.

This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations.  (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).

Constants
---------
- `lagdomain` -- Laguerre series default domain, [0,1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.

Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.

Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.

Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.

Classes
-------
- `Laguerre` -- A Laguerre series class.

See also
--------
`numpy.polynomial`

"""
from __future__ import division, absolute_import, print_function

import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index

from . import polyutils as pu
from ._polybase import ABCPolyBase

__all__ = [
    'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
    'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
    'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
    'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
    'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
    'laggauss', 'lagweight']

lagtrim = pu.trimcoef


def poly2lag(pol):
    """
    poly2lag(pol)

    Convert a polynomial to a Laguerre series.

    Convert an array representing the coefficients of a polynomial
    (relative to the "standard" basis) ordered from lowest degree to
    highest, to an array of the coefficients of the equivalent Laguerre
    series, ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Laguerre
        series.

    See Also
    --------
    lag2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import poly2lag
    >>> poly2lag(np.arange(4))
    array([ 23., -63.,  58., -18.])

    """
    [pol] = pu.as_series([pol])
    deg = len(pol) - 1
    res = 0
    for i in range(deg, -1, -1):
        res = lagadd(lagmulx(res), pol[i])
    return res


def lag2poly(c):
    """
    Convert a Laguerre series to a polynomial.

    Convert an array representing the coefficients of a Laguerre series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Laguerre series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2lag

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lag2poly
    >>> lag2poly([ 23., -63.,  58., -18.])
    array([ 0.,  1.,  2.,  3.])

    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], (c1*(i - 1))/i)
            c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
        return polyadd(c0, polysub(c1, polymulx(c1)))

#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Laguerre default domain.
lagdomain = np.array([0, 1])

# Laguerre coefficients representing zero.
lagzero = np.array([0])

# Laguerre coefficients representing one.
lagone = np.array([1])

# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])


def lagline(off, scl):
    """
    Laguerre series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Laguerre series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagline, lagval
    >>> lagval(0,lagline(3, 2))
    3.0
    >>> lagval(1,lagline(3, 2))
    5.0

    """
    if scl != 0:
        return np.array([off + scl, -scl])
    else:
        return np.array([off])


def lagfromroots(roots):
    """
    Generate a Laguerre series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Laguerre form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... +  c_n * L_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Laguerre form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, chebfromroots, hermfromroots, hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfromroots, lagval
    >>> coef = lagfromroots((-1, 0, 1))
    >>> lagval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = lagfromroots((-1j, 1j))
    >>> lagval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])

    """
    if len(roots) == 0:
        return np.ones(1)
    else:
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        p = [lagline(-r, 1) for r in roots]
        n = len(p)
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
            if r:
                tmp[0] = lagmul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]


def lagadd(c1, c2):
    """
    Add one Laguerre series to another.

    Returns the sum of two Laguerre series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Laguerre series of their sum.

    See Also
    --------
    lagsub, lagmul, lagdiv, lagpow

    Notes
    -----
    Unlike multiplication, division, etc., the sum of two Laguerre series
    is a Laguerre series (without having to "reproject" the result onto
    the basis set) so addition, just like that of "standard" polynomials,
    is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagadd
    >>> lagadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] += c2
        ret = c1
    else:
        c2[:c1.size] += c1
        ret = c2
    return pu.trimseq(ret)


def lagsub(c1, c2):
    """
    Subtract one Laguerre series from another.

    Returns the difference of two Laguerre series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Laguerre series coefficients representing their difference.

    See Also
    --------
    lagadd, lagmul, lagdiv, lagpow

    Notes
    -----
    Unlike multiplication, division, etc., the difference of two Laguerre
    series is a Laguerre series (without having to "reproject" the result
    onto the basis set) so subtraction, just like that of "standard"
    polynomials, is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagsub
    >>> lagsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        ret = c1
    else:
        c2 = -c2
        c2[:c1.size] += c1
        ret = c2
    return pu.trimseq(ret)


def lagmulx(c):
    """Multiply a Laguerre series by x.

    Multiply the Laguerre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Laguerre
    polynomials in the form

    .. math::

        xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagmulx
    >>> lagmulx([1, 2, 3])
    array([ -1.,  -1.,  11.,  -9.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series needs special treatment
    if len(c) == 1 and c[0] == 0:
        return c

    prd = np.empty(len(c) + 1, dtype=c.dtype)
    prd[0] = c[0]
    prd[1] = -c[0]
    for i in range(1, len(c)):
        prd[i + 1] = -c[i]*(i + 1)
        prd[i] += c[i]*(2*i + 1)
        prd[i - 1] -= c[i]*i
    return prd


def lagmul(c1, c2):
    """
    Multiply one Laguerre series by another.

    Returns the product of two Laguerre series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Laguerre series coefficients representing their product.

    See Also
    --------
    lagadd, lagsub, lagdiv, lagpow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Laguerre polynomial basis set.  Thus, to express
    the product as a Laguerre series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagmul
    >>> lagmul([1, 2, 3], [0, 1, 2])
    array([  8., -13.,  38., -51.,  36.])

    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])

    if len(c1) > len(c2):
        c = c2
        xs = c1
    else:
        c = c1
        xs = c2

    if len(c) == 1:
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else:
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
            c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
    return lagadd(c0, lagsub(c1, lagmulx(c1)))


def lagdiv(c1, c2):
    """
    Divide one Laguerre series by another.

    Returns the quotient-with-remainder of two Laguerre series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Laguerre series coefficients representing the quotient and
        remainder.

    See Also
    --------
    lagadd, lagsub, lagmul, lagpow

    Notes
    -----
    In general, the (polynomial) division of one Laguerre series by another
    results in quotient and remainder terms that are not in the Laguerre
    polynomial basis set.  Thus, to express these results as a Laguerre
    series, it is necessary to "reproject" the results onto the Laguerre
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagdiv
    >>> lagdiv([  8., -13.,  38., -51.,  36.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> lagdiv([  9., -12.,  38., -51.,  36.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  1.]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2:
        return c1[:1]*0, c1
    elif lc2 == 1:
        return c1/c2[-1], c1[:1]*0
    else:
        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
        rem = c1
        for i in range(lc1 - lc2, - 1, -1):
            p = lagmul([0]*i + [1], c2)
            q = rem[-1]/p[-1]
            rem = rem[:-1] - q*p[:-1]
            quo[i] = q
        return quo, pu.trimseq(rem)


def lagpow(c, pow, maxpower=16):
    """Raise a Laguerre series to a power.

    Returns the Laguerre series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series  ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the
        series to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Laguerre series of power.

    See Also
    --------
    lagadd, lagsub, lagmul, lagdiv

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagpow
    >>> lagpow([1, 2, 3], 2)
    array([ 14., -16.,  56., -72.,  54.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    elif maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    elif power == 0:
        return np.array([1], dtype=c.dtype)
    elif power == 1:
        return c
    else:
        # This can be made more efficient by using powers of two
        # in the usual way.
        prd = c
        for i in range(2, power + 1):
            prd = lagmul(prd, c)
        return prd


def lagder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Laguerre series.

    Returns the Laguerre series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
    while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
    2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.

    Parameters
    ----------
    c : array_like
        Array of Laguerre series coefficients. If `c` is multidimensional
        the different axes correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Laguerre series of the derivative.

    See Also
    --------
    lagint

    Notes
    -----
    In general, the result of differentiating a Laguerre series does not
    resemble the same operation on a power series. Thus the result of this
    function may be "unintuitive," albeit correct; see Examples section
    below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagder
    >>> lagder([ 1.,  1.,  1., -3.])
    array([ 1.,  2.,  3.])
    >>> lagder([ 1.,  0.,  0., -4.,  3.], m=2)
    array([ 1.,  2.,  3.])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            for j in range(n, 1, -1):
                der[j - 1] = -c[j]
                c[j - 1] += c[j]
            der[0] = -c[1]
            c = der
    c = np.moveaxis(c, 0, iaxis)
    return c


def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Laguerre series.

    Returns the Laguerre series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
    represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
    2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Laguerre series coefficients. If `c` is multidimensional
        the different axes correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Laguerre series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.

    See Also
    --------
    lagder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagint
    >>> lagint([1,2,3])
    array([ 1.,  1.,  1., -3.])
    >>> lagint([1,2,3], m=2)
    array([ 1.,  0.,  0., -4.,  3.])
    >>> lagint([1,2,3], k=1)
    array([ 2.,  1.,  1., -3.])
    >>> lagint([1,2,3], lbnd=-1)
    array([ 11.5,   1. ,   1. ,  -3. ])
    >>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
    array([ 11.16666667,  -5.        ,  -3.        ,   2.        ])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]
            tmp[1] = -c[0]
            for j in range(1, n):
                tmp[j] += c[j]
                tmp[j + 1] = -c[j]
            tmp[0] += k[i] - lagval(lbnd, tmp)
            c = tmp
    c = np.moveaxis(c, 0, iaxis)
    return c


def lagval(x, c, tensor=True):
    """
    Evaluate a Laguerre series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`.  If
    `c` is multidimensional, then the shape of the result depends on the
    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape (,).

    Trailing zeros in the coefficients will be used in the evaluation, so
    they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n]. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`. Scalars have dimension
        0 for this action. The result is that every column of coefficients
        in `c` is evaluated for every element of `x`. If False, `x` is
        broadcast over the columns of `c` for the evaluation.  This keyword
        is useful when `c` is multidimensional. The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    lagval2d, laggrid2d, lagval3d, laggrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagval
    >>> coef = [1,2,3]
    >>> lagval(1, coef)
    -0.5
    >>> lagval([[1,2],[3,4]], coef)
    array([[-0.5, -4. ],
           [-4.5, -2. ]])

    """
    c = np.array(c, ndmin=1, copy=0)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        c = c.reshape(c.shape + (1,)*x.ndim)

    if len(c) == 1:
        c0 = c[0]
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]
        c1 = c[1]
    else:
        nd = len(c)
        c0 = c[-2]
        c1 = c[-1]
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = c[-i] - (c1*(nd - 1))/nd
            c1 = tmp + (c1*((2*nd - 1) - x))/nd
    return c0 + c1*(1 - x)


def lagval2d(x, y, c):
    """
    Evaluate a 2-D Laguerre series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they must
    have the same shape after conversion. In either case, either `x` and
    `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    See Also
    --------
    lagval, laggrid2d, lagval3d, laggrid3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        raise ValueError('x, y are incompatible')

    c = lagval(x, c)
    c = lagval(y, c, tensor=False)
    return c


def laggrid2d(x, y, c):
    """
    Evaluate a 2-D Laguerre series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)

    where the points `(a, b)` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`.  If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Laguerre series at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    lagval, lagval2d, lagval3d, laggrid3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    c = lagval(x, c)
    c = lagval(y, c)
    return c


def lagval3d(x, y, z, c):
    """
    Evaluate a 3-D Laguerre series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape.  If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than 3 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    lagval, lagval2d, laggrid2d, laggrid3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        raise ValueError('x, y, z are incompatible')

    c = lagval(x, c)
    c = lagval(y, c, tensor=False)
    c = lagval(z, c, tensor=False)
    return c


def laggrid3d(x, y, z, c):
    """
    Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than three the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    lagval, lagval2d, laggrid2d, lagval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    c = lagval(x, c)
    c = lagval(y, c)
    c = lagval(z, c)
    return c


def lagvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = L_i(x)

    where `0 <= i <= deg`. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the Laguerre polynomial.

    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
    array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
    ``lagval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of Laguerre series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
        corresponding Laguerre polynomial.  The dtype will be the same as
        the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagvander
    >>> x = np.array([0, 1, 2])
    >>> lagvander(x, 3)
    array([[ 1.        ,  1.        ,  1.        ,  1.        ],
           [ 1.        ,  0.        , -0.5       , -0.66666667],
           [ 1.        , -1.        , -1.        , -0.33333333]])

    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    x = np.array(x, copy=0, ndmin=1) + 0.0
    dims = (ideg + 1,) + x.shape
    dtyp = x.dtype
    v = np.empty(dims, dtype=dtyp)
    v[0] = x*0 + 1
    if ideg > 0:
        v[1] = 1 - x
        for i in range(2, ideg + 1):
            v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
    return np.moveaxis(v, 0, -1)


def lagvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
    `V` index the points `(x, y)` and the last index encodes the degrees of
    the Laguerre polynomials.

    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Laguerre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    lagvander, lagvander3d, lagval2d, lagval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
    if is_valid != [1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    x, y = np.array((x, y), copy=0) + 0.0

    vx = lagvander(x, degx)
    vy = lagvander(y, degy)
    v = vx[..., None]*vy[..., None,:]
    return v.reshape(v.shape[:-2] + (-1,))


def lagvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the Laguerre polynomials.

    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Laguerre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    lagvander, lagvander2d, lagval2d, lagval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
    if is_valid != [1, 1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = lagvander(x, degx)
    vy = lagvander(y, degy)
    vz = lagvander(z, degz)
    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-3] + (-1,))


def lagfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Laguerre series to data.

    Return the coefficients of a Laguerre series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
        degrees of the terms to include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance.  The default value is None.

    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Laguerre coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column
        `k`.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.  The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, polyfit, hermfit, hermefit
    lagval : Evaluates a Laguerre series.
    lagvander : pseudo Vandermonde matrix of Laguerre series.
    lagweight : Laguerre weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the Laguerre series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up as the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, and `y` are the
    observed values.  This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning.  The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using Laguerre series are probably most useful when the data can
    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function
    is available as `lagweight`.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfit, lagval
    >>> x = np.linspace(0, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = lagval(x, [1, 2, 3]) + err
    >>> lagfit(x, y, 2)
    array([ 0.96971004,  2.00193749,  3.00288744])

    """
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    # check arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    if deg.ndim == 0:
        lmax = deg
        order = lmax + 1
        van = lagvander(x, lmax)
    else:
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = lagvander(x, lmax)[:, deg]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # Solve the least squares problem.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning, stacklevel=2)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c


def lagcompanion(c):
    """
    Return the companion matrix of c.

    The usual companion matrix of the Laguerre polynomials is already
    symmetric when `c` is a basis Laguerre polynomial, so no scaling is
    applied.

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Companion matrix of dimensions (deg, deg).

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        return np.array([[1 + c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    top = mat.reshape(-1)[1::n+1]
    mid = mat.reshape(-1)[0::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = -np.arange(1, n)
    mid[...] = 2.*np.arange(n) + 1.
    bot[...] = top
    mat[:, -1] += (c[:-1]/c[-1])*n
    return mat


def lagroots(c):
    """
    Compute the roots of a Laguerre series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * L_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, chebroots, hermroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
    errors due to the numerical instability of the series for such values.
    Roots with multiplicity greater than 1 will also show larger errors as
    the value of the series near such points is relatively insensitive to
    errors in the roots. Isolated roots near the origin can be improved by
    a few iterations of Newton's method.

    The Laguerre series basis polynomials aren't powers of `x` so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
    >>> coef = lagfromroots([0, 1, 2])
    >>> coef
    array([  2.,  -8.,  12.,  -6.])
    >>> lagroots(coef)
    array([ -4.44089210e-16,   1.00000000e+00,   2.00000000e+00])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) <= 1:
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        return np.array([1 + c[0]/c[1]])

    m = lagcompanion(c)
    r = la.eigvals(m)
    r.sort()
    return r


def laggauss(deg):
    """
    Gauss-Laguerre quadrature.

    Computes the sample points and weights for Gauss-Laguerre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100; higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = lagcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = lagval(x, c)
    df = lagval(x, lagder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = lagval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # scale w to get the right value, 1 in this case
    w /= w.sum()

    return x, w


def lagweight(x):
    """Weight function of the Laguerre polynomials.

    The weight function is :math:`exp(-x)` and the interval of integration
    is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not
    normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    w = np.exp(-x)
    return w

#
# Laguerre series class
#

class Laguerre(ABCPolyBase):
    """A Laguerre series class.

    The Laguerre class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        Laguerre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [0, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [0, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions
    _add = staticmethod(lagadd)
    _sub = staticmethod(lagsub)
    _mul = staticmethod(lagmul)
    _div = staticmethod(lagdiv)
    _pow = staticmethod(lagpow)
    _val = staticmethod(lagval)
    _int = staticmethod(lagint)
    _der = staticmethod(lagder)
    _fit = staticmethod(lagfit)
    _line = staticmethod(lagline)
    _roots = staticmethod(lagroots)
    _fromroots = staticmethod(lagfromroots)

    # Virtual properties
    nickname = 'lag'
    domain = np.array(lagdomain)
    window = np.array(lagdomain)
```

file_length: 56309
avg_line_length: 30.213969
max_line_length: 79
extension_type: py
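A quick demonstration of the public API defined in this file; the expected values are taken from the docstring examples above, so this is a consistency check rather than new behavior:

```python
# Exercise the laguerre module as documented in its own docstrings.
import numpy as np
from numpy.polynomial import laguerre as L

coef = L.lagfromroots((-1, 0, 1))      # series with roots -1, 0, 1
print(L.lagval((-1, 0, 1), coef))      # ~[0., 0., 0.] per the lagfromroots docs

p = L.Laguerre([1, 2, 3])              # 1*L_0(x) + 2*L_1(x) + 3*L_2(x)
print(p(1))                            # -0.5, matching the lagval example

quo, rem = L.lagdiv(L.lagmul([1, 2, 3], [0, 1, 2]), [0, 1, 2])
print(quo, rem)                        # ([1., 2., 3.], [0.]) per the lagdiv docs
```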
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/hermite_e.py
""" Objects for dealing with Hermite_e series. This module provides a number of objects (mostly functions) useful for dealing with Hermite_e series, including a `HermiteE` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with such polynomials is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `hermedomain` -- Hermite_e series default domain, [-1,1]. - `hermezero` -- Hermite_e series that evaluates identically to 0. - `hermeone` -- Hermite_e series that evaluates identically to 1. - `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. Arithmetic ---------- - `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. - `hermeadd` -- add two Hermite_e series. - `hermesub` -- subtract one Hermite_e series from another. - `hermemul` -- multiply two Hermite_e series. - `hermediv` -- divide one Hermite_e series by another. - `hermeval` -- evaluate a Hermite_e series at given points. - `hermeval2d` -- evaluate a 2D Hermite_e series at given points. - `hermeval3d` -- evaluate a 3D Hermite_e series at given points. - `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. - `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. Calculus -------- - `hermeder` -- differentiate a Hermite_e series. - `hermeint` -- integrate a Hermite_e series. Misc Functions -------------- - `hermefromroots` -- create a Hermite_e series with specified roots. - `hermeroots` -- find the roots of a Hermite_e series. - `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. - `hermevander2d` -- Vandermonde-like matrix for 2D power series. - `hermevander3d` -- Vandermonde-like matrix for 3D power series. - `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. - `hermeweight` -- Hermite_e weight function. - `hermecompanion` -- symmetrized companion matrix in Hermite_e form. - `hermefit` -- least-squares fit returning a Hermite_e series. - `hermetrim` -- trim leading coefficients from a Hermite_e series. - `hermeline` -- Hermite_e series of given straight line. - `herme2poly` -- convert a Hermite_e series to a polynomial. - `poly2herme` -- convert a polynomial to a Hermite_e series. Classes ------- - `HermiteE` -- A Hermite_e series class. See also -------- `numpy.polynomial` """ from __future__ import division, absolute_import, print_function import warnings import numpy as np import numpy.linalg as la from numpy.core.multiarray import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase __all__ = [ 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight'] hermetrim = pu.trimcoef def poly2herme(pol): """ poly2herme(pol) Convert a polynomial to a Hermite series. Convert an array representing the coefficients of a polynomial (relative to the "standard" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Hermite series, ordered from lowest to highest degree. 
Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Hermite series. See Also -------- herme2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.]) """ [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 for i in range(deg, -1, -1): res = hermeadd(hermemulx(res), pol[i]) return res def herme2poly(c): """ Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest order term to highest. See Also -------- poly2herme Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite_e import herme2poly >>> herme2poly([ 2., 10., 2., 3.]) array([ 0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx [c] = pu.as_series([c]) n = len(c) if n == 1: return c if n == 2: return c else: c0 = c[-2] c1 = c[-1] # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1*(i - 1)) c1 = polyadd(tmp, polymulx(c1)) return polyadd(c0, polymulx(c1)) # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Hermite hermedomain = np.array([-1, 1]) # Hermite coefficients representing zero. hermezero = np.array([0]) # Hermite coefficients representing one. hermeone = np.array([1]) # Hermite coefficients representing the identity x. hermex = np.array([0, 1]) def hermeline(off, scl): """ Hermite series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Hermite series for ``off + scl*x``. See Also -------- polyline, chebline Examples -------- >>> from numpy.polynomial.hermite_e import hermeline >>> from numpy.polynomial.hermite_e import hermeline, hermeval >>> hermeval(0,hermeline(3, 2)) 3.0 >>> hermeval(1,hermeline(3, 2)) 5.0 """ if scl != 0: return np.array([off, scl]) else: return np.array([off]) def hermefromroots(roots): """ Generate a HermiteE series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in HermiteE form, where the `r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) The coefficient of the last term is not generally 1 for monic polynomials in HermiteE form. 
Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then `out` is a real array, if some of the roots are complex, then `out` is complex even if all the coefficients in the result are real (see Examples below). See Also -------- polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots. Examples -------- >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval >>> coef = hermefromroots((-1, 0, 1)) >>> hermeval((-1, 0, 1), coef) array([ 0., 0., 0.]) >>> coef = hermefromroots((-1j, 1j)) >>> hermeval((-1j, 1j), coef) array([ 0.+0.j, 0.+0.j]) """ if len(roots) == 0: return np.ones(1) else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [hermeline(-r, 1) for r in roots] n = len(p) while n > 1: m, r = divmod(n, 2) tmp = [hermemul(p[i], p[i+m]) for i in range(m)] if r: tmp[0] = hermemul(tmp[0], p[-1]) p = tmp n = m return p[0] def hermeadd(c1, c2): """ Add one Hermite series to another. Returns the sum of two Hermite series `c1` + `c2`. The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermesub, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so addition, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite_e import hermeadd >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) array([ 2., 4., 6., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermesub(c1, c2): """ Subtract one Hermite series from another. Returns the difference of two Hermite series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. See Also -------- hermeadd, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite_e import hermesub >>> hermesub([1, 2, 3, 4], [1, 2, 3]) array([ 0., 0., 0., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 else: c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermemulx(c): """Multiply a Hermite series by x. Multiply the Hermite series `c` by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. Notes ----- The multiplication uses the recursion relationship for Hermite polynomials in the form .. 
math:: xHe_i(x) = (He_{i + 1}(x) + iHe_{i - 1}(x)) Examples -------- >>> from numpy.polynomial.hermite_e import hermemulx >>> hermemulx([1, 2, 3]) array([ 2., 7., 2., 3.]) """ # c is a trimmed copy [c] = pu.as_series([c]) # The zero series needs special treatment if len(c) == 1 and c[0] == 0: return c prd = np.empty(len(c) + 1, dtype=c.dtype) prd[0] = c[0]*0 prd[1] = c[0] for i in range(1, len(c)): prd[i + 1] = c[i] prd[i - 1] += c[i]*i return prd def hermemul(c1, c2): """ Multiply one Hermite series by another. Returns the product of two Hermite series `c1` * `c2`. The arguments are sequences of coefficients, from lowest order "term" to highest, e.g., [1,2,3] represents the series ``He_0 + 2*He_1 + 3*He_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermeadd, hermesub, hermediv, hermepow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to "reproject" the product onto said basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermemul >>> hermemul([1, 2, 3], [0, 1, 2]) array([ 14., 15., 28., 7., 6.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) == 1: c0 = c[0]*xs c1 = 0 elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = hermesub(c[-i]*xs, c1*(nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1)) def hermediv(c1, c2): """ Divide one Hermite series by another. Returns the quotient-with-remainder of two Hermite series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``He_0 + 2*He_1 + 3*He_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of Hermite series coefficients representing the quotient and remainder. See Also -------- hermeadd, hermesub, hermemul, hermepow Notes ----- In general, the (polynomial) division of one Hermite series by another results in quotient and remainder terms that are not in the Hermite polynomial basis set. Thus, to express these results as a Hermite series, it is necessary to "reproject" the results onto the Hermite basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermediv >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 0.])) >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 1., 2.])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: return c1[:1]*0, c1 elif lc2 == 1: return c1/c2[-1], c1[:1]*0 else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): p = hermemul([0]*i + [1], c2) q = rem[-1]/p[-1] rem = rem[:-1] - q*p[:-1] quo[i] = q return quo, pu.trimseq(rem) def hermepow(c, pow, maxpower=16): """Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high, i.e., [1,2,3] is the series ``He_0 + 2*He_1 + 3*He_2``. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised. maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16. Returns ------- coef : ndarray Hermite series of the power. See Also -------- hermeadd, hermesub, hermemul, hermediv Examples -------- >>> from numpy.polynomial.hermite_e import hermepow >>> hermepow([1, 2, 3], 2) array([ 23., 28., 46., 12., 9.]) """ # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") elif power == 0: return np.array([1], dtype=c.dtype) elif power == 1: return c else: # This can be made more efficient by using powers of two # in the usual way. prd = c for i in range(2, power + 1): prd = hermemul(prd, c) return prd def hermeder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite_e series. Returns the series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite_e series coefficients. If `c` is multidimensional the different axes correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Hermite series of the derivative. See Also -------- hermeint Notes ----- In general, the result of differentiating a Hermite series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below.
Examples -------- >>> from numpy.polynomial.hermite_e import hermeder >>> hermeder([ 1., 1., 1., 1.]) array([ 1., 2., 3.]) >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) array([ 1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of derivation must be integer") if cnt < 0: raise ValueError("The order of derivation must be non-negative") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: return c[:1]*0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der c = np.moveaxis(c, 0, iaxis) return c def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): """ Integrate a Hermite_e series. Returns the Hermite_e series coefficients `c` integrated `m` times from `lbnd` along `axis`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.) The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite_e series coefficients. If c is multidimensional the different axes correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Order of integration, must be non-negative. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at ``lbnd`` is the first value in the list, the value of the second integral at ``lbnd`` is the second value, etc. If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. (Default: 1) axis : int, optional Axis over which the integral is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- S : ndarray Hermite_e series coefficients of the integral. Raises ------ ValueError If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or ``np.ndim(scl) != 0``. See Also -------- hermeder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Also note that, in general, the result of integrating a C-series needs to be "reprojected" onto the C-series basis set. Thus, typically, the result of this function is "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermeint >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. array([ 2., 1., 1., 1.]) >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 array([-1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if not np.iterable(k): k = [k] cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of integration must be integer") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") if np.ndim(lbnd) != 0: raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) tmp[0] = c[0]*0 tmp[1] = c[0] for j in range(1, n): tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - hermeval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) return c def hermeval(x, c, tensor=True): """ Evaluate an HermiteE series at points x. If `c` is of length `n + 1`, this function returns the value: .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, compatible object The shape of the return value is described above.
See Also -------- hermeval2d, hermegrid2d, hermeval3d, hermegrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. Examples -------- >>> from numpy.polynomial.hermite_e import hermeval >>> coef = [1,2,3] >>> hermeval(1, coef) 3.0 >>> hermeval([[1,2],[3,4]], coef) array([[ 3., 14.], [ 31., 54.]]) """ c = np.array(c, ndmin=1, copy=0) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: nd = len(c) c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = c[-i] - c1*(nd - 1) c1 = tmp + c1*x return c0 + c1*x def hermeval2d(x, y, c): """ Evaluate a 2-D HermiteE series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) The parameters `x` and `y` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `(x, y)`, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- hermeval, hermegrid2d, hermeval3d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y = np.array((x, y), copy=0) except Exception: raise ValueError('x, y are incompatible') c = hermeval(x, c) c = hermeval(y, c, tensor=False) return c def hermegrid2d(x, y, c): """ Evaluate a 2-D HermiteE series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b) where the points `(a, b)` consist of all pairs formed by taking `a` from `x` and `b` from `y`. The resulting points form a grid with `x` in the first dimension and `y` in the second. The parameters `x` and `y` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape + y.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points in the Cartesian product of `x` and `y`. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- hermeval, hermeval2d, hermeval3d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ c = hermeval(x, c) c = hermeval(y, c) return c def hermeval3d(x, y, z, c): """ Evaluate a 3-D Hermite_e series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and they must have the same shape after conversion. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If any of `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension greater than 3 the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also -------- hermeval, hermeval2d, hermegrid2d, hermegrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y, z = np.array((x, y, z), copy=0) except Exception: raise ValueError('x, y, z are incompatible') c = hermeval(x, c) c = hermeval(y, c, tensor=False) c = hermeval(z, c, tensor=False) return c def hermegrid3d(x, y, z, c): """ Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension greater than three the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the three dimensional polynomial at points in the Cartesian product of `x`, `y`, and `z`. See Also -------- hermeval, hermeval2d, hermegrid2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ c = hermeval(x, c) c = hermeval(y, c) c = hermeval(z, c) return c def hermevander(x, deg): """Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points `x`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = He_i(x), where `0 <= i <= deg`. The leading indices of `V` index the elements of `x` and the last index is the degree of the HermiteE polynomial. If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and ``hermeval(x, c)`` are the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of HermiteE series of the same degree and sample points. Parameters ---------- x : array_like Array of points. The dtype is converted to float64 or complex128 depending on whether any of the elements are complex. If `x` is scalar it is converted to a 1-D array. deg : int Degree of the resulting matrix. Returns ------- vander : ndarray The pseudo-Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg + 1,)``, where the last index is the degree of the corresponding HermiteE polynomial. The dtype will be the same as the converted `x`. Examples -------- >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 0, 1]) >>> hermevander(x, 3) array([[ 1., -1., 0., 2.], [ 1., 0., -1., -0.], [ 1., 1., 0., -2.]]) """ ideg = int(deg) if ideg != deg: raise ValueError("deg must be integer") if ideg < 0: raise ValueError("deg must be non-negative") x = np.array(x, copy=0, ndmin=1) + 0.0 dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): v[i] = (v[i-1]*x - v[i-2]*(i - 1)) return np.moveaxis(v, 0, -1) def hermevander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y)`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of `V` index the points `(x, y)` and the last index encodes the degrees of the HermiteE polynomials. If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 2-D HermiteE series of the same degrees and sample points. Parameters ---------- x, y : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg].
Returns ------- vander2d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same as the converted `x` and `y`. See Also -------- hermevander, hermevander3d, hermeval2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy = ideg x, y = np.array((x, y), copy=0) + 0.0 vx = hermevander(x, degx) vy = hermevander(y, degy) v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) def hermevander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, then the pseudo-Vandermonde matrix is defined by .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading indices of `V` index the points `(x, y, z)` and the last index encodes the degrees of the HermiteE polynomials. If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns of `V` correspond to the elements of a 3-D coefficient array `c` of shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 3-D HermiteE series of the same degrees and sample points. Parameters ---------- x, y, z : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg, z_deg]. Returns ------- vander3d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also -------- hermevander, hermevander2d, hermeval2d, hermeval3d Notes ----- .. versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy, degz = ideg x, y, z = np.array((x, y, z), copy=0) + 0.0 vx = hermevander(x, degx) vy = hermevander(y, degy) vz = hermevander(z, degz) v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) def hermefit(x, y, deg, rcond=None, full=False, w=None): """ Least squares fit of Hermite series to data. Return the coefficients of a HermiteE series of degree `deg` that is the least squares fit to the data values `y` given at points `x`. If `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple fits are done, one for each column of `y`, and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), where `n` is `deg`. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points.
Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. deg : int or 1-D array_like Degree(s) of the fitting polynomials. If `deg` is a single integer all terms up to and including the `deg`'th term are included in the fit. For NumPy versions >= 1.11.0 a list of integers specifying the degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. Returns ------- coef : ndarray, shape (deg + 1,) or (deg + 1, K) Hermite coefficients ordered from low to high. If `y` was 2-D, the coefficients for the data in column k of `y` are in column `k`. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True. resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) See Also -------- chebfit, legfit, polyfit, hermfit hermeval : Evaluates a Hermite series. hermevander : pseudo Vandermonde matrix of Hermite series. hermeweight : HermiteE weight function. linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution is the coefficients of the HermiteE series `p` that minimizes the sum of the weighted squared errors .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, where the :math:`w_j` are the weights. This problem is solved by setting up the (typically) overdetermined matrix equation .. math:: V(x) * c = w * y, where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` are the coefficients to be solved for, and the elements of `y` are the observed values. This equation is then solved using the singular value decomposition of `V`. If some of the singular values of `V` are so small that they are neglected, then a `RankWarning` will be issued. This means that the coefficient values may be poorly determined. Using a lower order fit will usually get rid of the warning. The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Fits using HermiteE series are probably most useful when the data can be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE weight. In that case the weight ``sqrt(w(x[i]))`` should be used together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is available as `hermeweight`.
References ---------- .. [1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting Examples -------- >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> err = np.random.randn(len(x))/10 >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) array([ 1.01690445, 1.99951418, 2.99948696]) """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 deg = np.asarray(deg) # check arguments. if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") if deg.ndim == 0: lmax = deg order = lmax + 1 van = hermevander(x, lmax) else: deg = np.sort(deg) lmax = deg[-1] order = len(deg) van = hermevander(x, lmax)[:, deg] # set up the least squares matrices in transposed form lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected 1D vector for w") if len(x) != len(w): raise TypeError("expected x and w to have same length") # apply weights. Don't use inplace operations as they # can cause problems with NA. lhs = lhs * w rhs = rhs * w # set rcond if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) else: scl = np.sqrt(np.square(lhs).sum(1)) scl[scl == 0] = 1 # Solve the least squares problem. c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: cc = np.zeros(lmax+1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] else: return c def hermecompanion(c): """ Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when `c` is an HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. Parameters ---------- c : array_like 1-D array of HermiteE series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). Notes ----- .. versionadded:: 1.7.0 """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-c[0]/c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] top[...] = np.sqrt(np.arange(1, n)) bot[...] = top mat[:, -1] -= scl*c[:-1]/c[-1] return mat def hermeroots(c): """ Compute the roots of a HermiteE series. Return the roots (a.k.a. "zeros") of the polynomial .. math:: p(x) = \\sum_i c[i] * He_i(x). 
Parameters ---------- c : 1-D array_like 1-D array of coefficients. Returns ------- out : ndarray Array of the roots of the series. If all the roots are real, then `out` is also real, otherwise it is complex. See Also -------- polyroots, legroots, lagroots, hermroots, chebroots Notes ----- The root estimates are obtained as the eigenvalues of the companion matrix. Roots far from the origin of the complex plane may have large errors due to the numerical instability of the series for such values. Roots with multiplicity greater than 1 will also show larger errors as the value of the series near such points is relatively insensitive to errors in the roots. Isolated roots near the origin can be improved by a few iterations of Newton's method. The HermiteE series basis polynomials aren't powers of `x` so the results of this function may seem unintuitive. Examples -------- >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots >>> coef = hermefromroots([-1, 0, 1]) >>> coef array([ 0., 2., 0., 1.]) >>> hermeroots(coef) array([-1., 0., 1.]) """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: return np.array([-c[0]/c[1]]) m = hermecompanion(c) r = la.eigvals(m) r.sort() return r def _normed_hermite_e_n(x, n): """ Evaluate a normalized HermiteE polynomial. Compute the value of the normalized HermiteE polynomial of degree ``n`` at the points ``x``. Parameters ---------- x : ndarray of double Points at which to evaluate the function. n : int Degree of the normalized HermiteE function to be evaluated. Returns ------- values : ndarray The computed values, with the same shape as ``x``. Notes ----- .. versionadded:: 1.10.0 This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard HermiteE functions overflow when n >= 207. """ if n == 0: return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi)) c0 = 0. c1 = 1./np.sqrt(np.sqrt(2*np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 c0 = -c1*np.sqrt((nd - 1.)/nd) c1 = tmp + c1*x*np.sqrt(1./nd) nd = nd - 1.0 return c0 + c1*x def hermegauss(deg): """ Gauss-HermiteE quadrature. Computes the sample points and weights for Gauss-HermiteE quadrature. These sample points and weights will correctly integrate polynomials of degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` with the weight function :math:`f(x) = \\exp(-x^2/2)`. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. w : ndarray 1-D ndarray containing the weights. Notes ----- .. versionadded:: 1.7.0 The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) where :math:`c` is a constant independent of :math:`k` and :math:`x_k` is the k'th root of :math:`He_n`, and then scaling the results to get the right value when integrating 1. """ ideg = int(deg) if ideg != deg or ideg < 1: raise ValueError("deg must be a positive integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0]*deg + [1]) m = hermecompanion(c) x = la.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) x -= dy/df # compute the weights.
# We scale the factor to avoid possible numerical overflow. fm = _normed_hermite_e_n(x, ideg - 1) fm /= np.abs(fm).max() w = 1/(fm * fm) # for Hermite_e we can also symmetrize w = (w + w[::-1])/2 x = (x - x[::-1])/2 # scale w to get the right value w *= np.sqrt(2*np.pi) / w.sum() return x, w def hermeweight(x): """Weight function of the Hermite_e polynomials. The weight function is :math:`\\exp(-x^2/2)` and the interval of integration is :math:`[-\\inf, \\inf]`. The HermiteE polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at `x`. Notes ----- .. versionadded:: 1.7.0 """ w = np.exp(-.5*x**2) return w # # HermiteE series class # class HermiteE(ABCPolyBase): """An HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed in the `ABCPolyBase` documentation. Parameters ---------- coef : array_like HermiteE coefficients in order of increasing degree, i.e., ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. The default value is [-1, 1]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1, 1]. .. versionadded:: 1.6.0 """ # Virtual Functions _add = staticmethod(hermeadd) _sub = staticmethod(hermesub) _mul = staticmethod(hermemul) _div = staticmethod(hermediv) _pow = staticmethod(hermepow) _val = staticmethod(hermeval) _int = staticmethod(hermeint) _der = staticmethod(hermeder) _fit = staticmethod(hermefit) _line = staticmethod(hermeline) _roots = staticmethod(hermeroots) _fromroots = staticmethod(hermefromroots) # Virtual properties nickname = 'herme' domain = np.array(hermedomain) window = np.array(hermedomain)
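# Example usage sketch (a minimal illustration, assuming the module is
# importable as numpy.polynomial.hermite_e): a degree-20 Gauss-HermiteE
# rule is exact for polynomials up to degree 39, so integrating x**2
# against the weight exp(-x**2/2) over (-inf, inf) should recover the
# Gaussian second moment sqrt(2*pi) up to roundoff:
#
#     >>> from numpy.polynomial.hermite_e import hermegauss
#     >>> x, w = hermegauss(20)
#     >>> abs(np.sum(w * x**2) - np.sqrt(2*np.pi)) < 1e-13
#     True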
58,086
30.381415
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/polynomial.py
""" Objects for dealing with polynomials. This module provides a number of objects (mostly functions) useful for dealing with polynomials, including a `Polynomial` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with polynomial objects is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `polydomain` -- Polynomial default domain, [-1,1]. - `polyzero` -- (Coefficients of the) "zero polynomial." - `polyone` -- (Coefficients of the) constant polynomial 1. - `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. Arithmetic ---------- - `polyadd` -- add two polynomials. - `polysub` -- subtract one polynomial from another. - `polymul` -- multiply two polynomials. - `polydiv` -- divide one polynomial by another. - `polypow` -- raise a polynomial to an positive integer power - `polyval` -- evaluate a polynomial at given points. - `polyval2d` -- evaluate a 2D polynomial at given points. - `polyval3d` -- evaluate a 3D polynomial at given points. - `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. - `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. Calculus -------- - `polyder` -- differentiate a polynomial. - `polyint` -- integrate a polynomial. Misc Functions -------------- - `polyfromroots` -- create a polynomial with specified roots. - `polyroots` -- find the roots of a polynomial. - `polyvalfromroots` -- evalute a polynomial at given points from roots. - `polyvander` -- Vandermonde-like matrix for powers. - `polyvander2d` -- Vandermonde-like matrix for 2D power series. - `polyvander3d` -- Vandermonde-like matrix for 3D power series. - `polycompanion` -- companion matrix in power series form. - `polyfit` -- least-squares fit returning a polynomial. - `polytrim` -- trim leading coefficients from a polynomial. - `polyline` -- polynomial representing given straight line. Classes ------- - `Polynomial` -- polynomial class. See Also -------- `numpy.polynomial` """ from __future__ import division, absolute_import, print_function __all__ = [ 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] import warnings import numpy as np import numpy.linalg as la from numpy.core.multiarray import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase polytrim = pu.trimcoef # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Polynomial default domain. polydomain = np.array([-1, 1]) # Polynomial coefficients representing zero. polyzero = np.array([0]) # Polynomial coefficients representing one. polyone = np.array([1]) # Polynomial coefficients representing the identity x. polyx = np.array([0, 1]) # # Polynomial series functions # def polyline(off, scl): """ Returns an array representing a linear polynomial. Parameters ---------- off, scl : scalars The "y-intercept" and "slope" of the line, respectively. Returns ------- y : ndarray This module's representation of the linear polynomial ``off + scl*x``. 
See Also -------- chebline Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polyline(1,-1) array([ 1, -1]) >>> P.polyval(1, P.polyline(1,-1)) # should be 0 0.0 """ if scl != 0: return np.array([off, scl]) else: return np.array([off]) def polyfromroots(roots): """ Generate a monic polynomial with given roots. Return the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), where the `r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * x + ... + x^n The coefficient of the last term is 1 for monic polynomials in this form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of the polynomial's coefficients. If all the roots are real, then `out` is also real, otherwise it is complex (see Examples below). See Also -------- chebfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots Notes ----- The coefficients are determined by multiplying together linear factors of the form `(x - r_i)`, i.e. .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) where ``n == len(roots) - 1``; note that this implies that `1` is always returned for :math:`a_n`. Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x array([ 0., -1., 0., 1.]) >>> j = complex(0,1) >>> P.polyfromroots((-j,j)) # complex returned, though values are real array([ 1.+0.j, 0.+0.j, 1.+0.j]) """ if len(roots) == 0: return np.ones(1) else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [polyline(-r, 1) for r in roots] n = len(p) while n > 1: m, r = divmod(n, 2) tmp = [polymul(p[i], p[i+m]) for i in range(m)] if r: tmp[0] = polymul(tmp[0], p[-1]) p = tmp n = m return p[0] def polyadd(c1, c2): """ Add one polynomial to another. Returns the sum of two polynomials `c1` + `c2`. The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray The coefficient array representing their sum. See Also -------- polysub, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> sum = P.polyadd(c1,c2); sum array([ 4., 4., 4.]) >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) 28.0 """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def polysub(c1, c2): """ Subtract one polynomial from another. Returns the difference of two polynomials `c1` - `c2`. The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Of coefficients representing their difference.
See Also -------- polyadd, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polysub(c1,c2) array([-2., 0., 2.]) >>> P.polysub(c2,c1) # -P.polysub(c1,c2) array([ 2., 0., -2.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 else: c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def polymulx(c): """Multiply a polynomial by x. Multiply the polynomial `c` by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. Notes ----- .. versionadded:: 1.5.0 """ # c is a trimmed copy [c] = pu.as_series([c]) # The zero series needs special treatment if len(c) == 1 and c[0] == 0: return c prd = np.empty(len(c) + 1, dtype=c.dtype) prd[0] = c[0]*0 prd[1:] = c return prd def polymul(c1, c2): """ Multiply one polynomial by another. Returns the product of two polynomials `c1` * `c2`. The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` Parameters ---------- c1, c2 : array_like 1-D arrays of coefficients representing a polynomial, relative to the "standard" basis, and ordered from lowest order term to highest. Returns ------- out : ndarray Of the coefficients of their product. See Also -------- polyadd, polysub, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polymul(c1,c2) array([ 3., 8., 14., 8., 3.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) ret = np.convolve(c1, c2) return pu.trimseq(ret) def polydiv(c1, c2): """ Divide one polynomial by another. Returns the quotient-with-remainder of two polynomials `c1` / `c2`. The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of coefficient series representing the quotient and remainder. See Also -------- polyadd, polysub, polymul, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polydiv(c1,c2) (array([ 3.]), array([-8., -4.])) >>> P.polydiv(c2,c1) (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError() len1 = len(c1) len2 = len(c2) if len2 == 1: return c1/c2[-1], c1[:1]*0 elif len1 < len2: return c1[:1]*0, c1 else: dlen = len1 - len2 scl = c2[-1] c2 = c2[:-1]/scl i = dlen j = len1 - 1 while i >= 0: c1[i:j] -= c2*c1[j] i -= 1 j -= 1 return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) def polypow(c, pow, maxpower=None): """Raise a polynomial to a power. Returns the polynomial `c` raised to the power `pow`. The argument `c` is a sequence of coefficients ordered from low to high, i.e., [1,2,3] is the series ``1 + 2*x + 3*x**2``. Parameters ---------- c : array_like 1-D array of series coefficients ordered from low to high degree. pow : integer Power to which the series will be raised. maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is None (no limit). Returns ------- coef : ndarray Power series of the power.
See Also -------- polyadd, polysub, polymul, polydiv Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polypow([1, 2, 3], 2) # (1 + 2*x + 3*x**2)**2 array([ 1., 4., 10., 12., 9.]) """ # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") elif power == 0: return np.array([1], dtype=c.dtype) elif power == 1: return c else: # This can be made more efficient by using powers of two # in the usual way. prd = c for i in range(2, power + 1): prd = np.convolve(prd, c) return prd def polyder(c, m=1, scl=1, axis=0): """ Differentiate a polynomial. Returns the polynomial coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of polynomial coefficients. If c is multidimensional the different axes correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Polynomial coefficients of the derivative. See Also -------- polyint Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 array([ 2., 6., 12.]) >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 array([ 24.]) >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 array([ -2., -6., -12.]) >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x array([ 6., 24.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': # astype fails with NA c = c + 0.0 cdt = c.dtype cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of derivation must be integer") if cnt < 0: raise ValueError("The order of derivation must be non-negative") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=cdt) for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der c = np.moveaxis(c, 0, iaxis) return c def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): """ Integrate a polynomial. Returns the polynomial coefficients `c` integrated `m` times from `lbnd` along `axis`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.)
The argument `c` is an array of coefficients, from low to high degree along each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like 1-D array of polynomial coefficients, ordered from low to high. m : int, optional Order of integration, must be non-negative. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at ``lbnd`` is the first value in the list, the value of the second integral at ``lbnd`` is the second value, etc. If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. (Default: 1) axis : int, optional Axis over which the integral is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- S : ndarray Coefficient array of the integral. Raises ------ ValueError If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or ``np.ndim(scl) != 0``. See Also -------- polyder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3) >>> P.polyint(c) # should return array([0, 1, 1, 1]) array([ 0., 1., 1., 1.]) >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) array([ 0. , 0. , 0. , 0.16666667, 0.08333333, 0.05 ]) >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) array([ 3., 1., 1., 1.]) >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) array([ 6., 1., 1., 1.]) >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) array([ 0., -2., -2., -2.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': # astype doesn't preserve mask attribute. c = c + 0.0 cdt = c.dtype if not np.iterable(k): k = [k] cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of integration must be integer") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") if np.ndim(lbnd) != 0: raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c k = list(k) + [0]*(cnt - len(k)) c = np.moveaxis(c, iaxis, 0) for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) tmp[0] = c[0]*0 tmp[1] = c[0] for j in range(1, n): tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - polyval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) return c def polyval(x, c, tensor=True): """ Evaluate a polynomial at points x. If `c` is of length `n + 1`, this function returns the value .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar.
In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, compatible object The shape of the returned array is described above. See Also -------- polyval2d, polygrid2d, polyval3d, polygrid3d Notes ----- The evaluation uses Horner's method. Examples -------- >>> from numpy.polynomial.polynomial import polyval >>> polyval(1, [1,2,3]) 6.0 >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> polyval(a, [1,2,3]) array([[ 1., 6.], [ 17., 34.]]) >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients >>> coef array([[0, 1], [2, 3]]) >>> polyval([1,2], coef, tensor=True) array([[ 2., 4.], [ 4., 7.]]) >>> polyval([1,2], coef, tensor=False) array([ 2., 7.]) """ c = np.array(c, ndmin=1, copy=0) if c.dtype.char in '?bBhHiIlLqQpP': # astype fails with NA c = c + 0.0 if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) c0 = c[-1] + x*0 for i in range(2, len(c) + 1): c0 = c[-i] + c0*x return c0 def polyvalfromroots(x, r, tensor=True): """ Evaluate a polynomial specified by its roots at points x. If `r` is of length `N`, this function returns the value .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `r`. If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor is ``True`` the shape will be r.shape[1:] + x.shape; that is, each polynomial is evaluated at every value of `x`. If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each polynomial is evaluated only for the corresponding broadcast value of `x`. Note that scalars have shape (,). .. 
versionadded:: 1.12 Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `r`. r : array_like Array of roots. If `r` is multidimensional the first index is the root index, while the remaining indices enumerate multiple polynomials. For instance, in the two dimensional case the roots of each polynomial may be thought of as stored in the columns of `r`. tensor : boolean, optional If True, the shape of the roots array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `r` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `r` for the evaluation. This keyword is useful when `r` is multidimensional. The default value is True. Returns ------- values : ndarray, compatible object The shape of the returned array is described above. See Also -------- polyroots, polyfromroots, polyval Examples -------- >>> from numpy.polynomial.polynomial import polyvalfromroots >>> polyvalfromroots(1, [1,2,3]) 0.0 >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> polyvalfromroots(a, [-1, 0, 1]) array([[ -0., 0.], [ 6., 24.]]) >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients >>> r # each column of r defines one polynomial array([[-2, -1], [ 0, 1]]) >>> b = [-2, 1] >>> polyvalfromroots(b, r, tensor=True) array([[-0., 3.], [ 3., 0.]]) >>> polyvalfromroots(b, r, tensor=False) array([-0., 0.]) """ r = np.array(r, ndmin=1, copy=0) if r.dtype.char in '?bBhHiIlLqQpP': r = r.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray): if tensor: r = r.reshape(r.shape + (1,)*x.ndim) elif x.ndim >= r.ndim: raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). This function returns the value .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `(x, y)`, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in `c[i,j]`. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- polyval, polygrid2d, polyval3d, polygrid3d Notes ----- .. 
versionadded:: 1.7.0 """ try: x, y = np.array((x, y), copy=0) except Exception: raise ValueError('x, y are incompatible') c = polyval(x, c) c = polyval(y, c, tensor=False) return c def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j where the points `(a, b)` consist of all pairs formed by taking `a` from `x` and `b` from `y`. The resulting points form a grid with `x` in the first dimension and `y` in the second. The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape + y.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points in the Cartesian product of `x` and `y`. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- polyval, polyval2d, polyval3d, polygrid3d Notes ----- .. versionadded:: 1.7.0 """ c = polyval(x, c) c = polyval(y, c) return c def polyval3d(x, y, z, c): """ Evaluate a 3-D polynomial at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If any of `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension greater than 3 the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also -------- polyval, polyval2d, polygrid2d, polygrid3d Notes ----- .. 
versionadded:: 1.7.0 """ try: x, y, z = np.array((x, y, z), copy=0) except Exception: raise ValueError('x, y, z are incompatible') c = polyval(x, c) c = polyval(y, c, tensor=False) c = polyval(z, c, tensor=False) return c def polygrid3d(x, y, z, c): """ Evaluate a 3-D polynomial on the Cartesian product of x, y and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- polyval, polyval2d, polygrid2d, polyval3d Notes ----- .. versionadded:: 1.7.0 """ c = polyval(x, c) c = polyval(y, c) c = polyval(z, c) return c def polyvander(x, deg): """Vandermonde matrix of given degree. Returns the Vandermonde matrix of degree `deg` and sample points `x`. The Vandermonde matrix is defined by .. math:: V[..., i] = x^i, where `0 <= i <= deg`. The leading indices of `V` index the elements of `x` and the last index is the power of `x`. If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and ``polyval(x, c)`` are the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of polynomials of the same degree and sample points. Parameters ---------- x : array_like Array of points. The dtype is converted to float64 or complex128 depending on whether any of the elements are complex. If `x` is scalar it is converted to a 1-D array. deg : int Degree of the resulting matrix. Returns ------- vander : ndarray. The Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg + 1,)``, where the last index is the power of `x`. The dtype will be the same as the converted `x`. See Also -------- polyvander2d, polyvander3d """ ideg = int(deg) if ideg != deg: raise ValueError("deg must be integer") if ideg < 0: raise ValueError("deg must be non-negative") x = np.array(x, copy=0, ndmin=1) + 0.0 dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) v[0] = x*0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): v[i] = v[i-1]*x return np.moveaxis(v, 0, -1) def polyvander2d(x, y, deg): """Pseudo-Vandermonde matrix of given degrees. 
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y)`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j, where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of `V` index the points `(x, y)` and the last index encodes the powers of `x` and `y`. If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 2-D polynomials of the same degrees and sample points. Parameters ---------- x, y : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg]. Returns ------- vander2d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same as the converted `x` and `y`. See Also -------- polyvander, polyvander3d, polyval2d, polyval3d """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy = ideg x, y = np.array((x, y), copy=0) + 0.0 vx = polyvander(x, degx) vy = polyvander(y, degy) v = vx[..., None]*vy[..., None,:] # einsum bug #v = np.einsum("...i,...j->...ij", vx, vy) return v.reshape(v.shape[:-2] + (-1,)) def polyvander3d(x, y, z, deg): """Pseudo-Vandermonde matrix of given degrees. Returns the pseudo-Vandermonde matrix of degrees `deg` and sample points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, then the pseudo-Vandermonde matrix is defined by .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading indices of `V` index the points `(x, y, z)` and the last index encodes the powers of `x`, `y`, and `z`. If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns of `V` correspond to the elements of a 3-D coefficient array `c` of shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of 3-D polynomials of the same degrees and sample points. Parameters ---------- x, y, z : array_like Arrays of point coordinates, all of the same shape. The dtypes will be converted to either float64 or complex128 depending on whether any of the elements are complex. Scalars are converted to 1-D arrays. deg : list of ints List of maximum degrees of the form [x_deg, y_deg, z_deg]. Returns ------- vander3d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also -------- polyvander, polyvander2d, polyval2d, polyval3d Notes ----- ..
versionadded:: 1.7.0 """ ideg = [int(d) for d in deg] is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] if is_valid != [1, 1, 1]: raise ValueError("degrees must be non-negative integers") degx, degy, degz = ideg x, y, z = np.array((x, y, z), copy=0) + 0.0 vx = polyvander(x, degx) vy = polyvander(y, degy) vz = polyvander(z, degz) v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] # einsum bug #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz) return v.reshape(v.shape[:-3] + (-1,)) def polyfit(x, y, deg, rcond=None, full=False, w=None): """ Least-squares fit of a polynomial to data. Return the coefficients of a polynomial of degree `deg` that is the least squares fit to the data values `y` given at points `x`. If `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple fits are done, one for each column of `y`, and the resulting coefficients are stored in the corresponding columns of a 2-D return. The fitted polynomial(s) are in the form .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, where `n` is `deg`. Parameters ---------- x : array_like, shape (`M`,) x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. y : array_like, shape (`M`,) or (`M`, `K`) y-coordinates of the sample points. Several sets of sample points sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. deg : int or 1-D array_like Degree(s) of the fitting polynomials. If `deg` is a single integer all terms up to and including the `deg`'th term are included in the fit. For NumPy versions >= 1.11.0 a list of integers specifying the degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be ignored. The default value is ``len(x)*eps``, where `eps` is the relative precision of the platform's float type, about 2e-16 in most cases. full : bool, optional Switch determining the nature of the return value. When ``False`` (the default) just the coefficients are returned; when ``True``, diagnostic information from the singular value decomposition (used to solve the fit's matrix equation) is also returned. w : array_like, shape (`M`,), optional Weights. If not None, the contribution of each point ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the weights are chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. The default value is None. .. versionadded:: 1.5.0 Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) Polynomial coefficients ordered from low to high. If `y` was 2-D, the coefficients in column `k` of `coef` represent the polynomial fit to the data in `y`'s `k`-th column. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True resid -- sum of squared residuals of the least squares fit rank -- the numerical rank of the scaled Vandermonde matrix sv -- singular values of the scaled Vandermonde matrix rcond -- value of `rcond`. For more details, see `linalg.lstsq`. Raises ------ RankWarning Raised if the matrix in the least-squares fit is rank deficient. The warning is only raised if `full` == False. The warnings can be turned off by: >>> import warnings >>> warnings.simplefilter('ignore', RankWarning) See Also -------- chebfit, legfit, lagfit, hermfit, hermefit polyval : Evaluates a polynomial. 
polyvander : Vandermonde matrix for powers. linalg.lstsq : Computes a least-squares fit from the matrix. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution is the coefficients of the polynomial `p` that minimizes the sum of the weighted squared errors .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, where the :math:`w_j` are the weights. This problem is solved by setting up the (typically) over-determined matrix equation: .. math :: V(x) * c = w * y, where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the coefficients to be solved for, `w` are the weights, and `y` are the observed values. This equation is then solved using the singular value decomposition of `V`. If some of the singular values of `V` are so small that they are neglected (and `full` == ``False``), a `RankWarning` will be raised. This means that the coefficient values may be poorly determined. Fitting to a lower order polynomial will usually get rid of the warning (but may not be what you want, of course; if you have independent reason(s) for choosing the degree which isn't working, you may have to: a) reconsider those reasons, and/or b) reconsider the quality of your data). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious and have large contributions from roundoff error. Polynomial fits using double precision tend to "fail" at about (polynomial) degree 20. Fits using Chebyshev or Legendre series are generally better conditioned, but much can still depend on the distribution of the sample points and the smoothness of the data. If the quality of the fit is inadequate, splines may be a good alternative. Examples -------- >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) >>> stats # note the large SSR, explaining the rather poor results [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-014] Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, 1.00000000e+00]) >>> stats # note the minuscule SSR [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 deg = np.asarray(deg) # check arguments. 
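# deg may be a scalar (include all terms up to and including degree deg) or, since NumPy 1.11, a 1-D array listing the specific term degrees to include; both forms are validated below and then used to build the (pseudo) Vandermonde matrix for the least squares solve.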
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") if deg.ndim == 0: lmax = deg order = lmax + 1 van = polyvander(x, lmax) else: deg = np.sort(deg) lmax = deg[-1] order = len(deg) van = polyvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected 1D vector for w") if len(x) != len(w): raise TypeError("expected x and w to have same length") # apply weights. Don't use inplace operations as they # can cause problems with NA. lhs = lhs * w rhs = rhs * w # set rcond if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) else: scl = np.sqrt(np.square(lhs).sum(1)) scl[scl == 0] = 1 # Solve the least squares problem. c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim == 1: if c.ndim == 2: cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) else: cc = np.zeros(lmax + 1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] else: return c def polycompanion(c): """ Return the companion matrix of c. The companion matrix for power series cannot be made symmetric by scaling the basis, so this function differs from those for the orthogonal polynomials. Parameters ---------- c : array_like 1-D array of polynomial coefficients ordered from low to high degree. Returns ------- mat : ndarray Companion matrix of dimensions (deg, deg). Notes ----- .. versionadded:: 1.7.0 """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-c[0]/c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) bot = mat.reshape(-1)[n::n+1] bot[...] = 1 mat[:, -1] -= c[:-1]/c[-1] return mat def polyroots(c): """ Compute the roots of a polynomial. Return the roots (a.k.a. "zeros") of the polynomial .. math:: p(x) = \\sum_i c[i] * x^i. Parameters ---------- c : 1-D array_like 1-D array of polynomial coefficients. Returns ------- out : ndarray Array of the roots of the polynomial. If all the roots are real, then `out` is also real, otherwise it is complex. See Also -------- chebroots Notes ----- The root estimates are obtained as the eigenvalues of the companion matrix, Roots far from the origin of the complex plane may have large errors due to the numerical instability of the power series for such values. Roots with multiplicity greater than 1 will also show larger errors as the value of the series near such points is relatively insensitive to errors in the roots. Isolated roots near the origin can be improved by a few iterations of Newton's method. 
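A minimal sketch of one such Newton step (illustrative only; ``_newton_step`` is a hypothetical helper, not part of this module), given series coefficients ``c`` and a root estimate ``r``: >>> def _newton_step(r, c): ...     return r - polyval(r, c)/polyval(r, polyder(c)) Repeating this a few times on an estimate near the origin sharpens the root.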
Examples -------- >>> import numpy.polynomial.polynomial as poly >>> poly.polyroots(poly.polyfromroots((-1,0,1))) array([-1., 0., 1.]) >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype dtype('float64') >>> j = complex(0,1) >>> poly.polyroots(poly.polyfromroots((-j,0,j))) array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: return np.array([-c[0]/c[1]]) m = polycompanion(c) r = la.eigvals(m) r.sort() return r # # polynomial class # class Polynomial(ABCPolyBase): """A power series class. The Polynomial class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed in the `ABCPolyBase` documentation. Parameters ---------- coef : array_like Polynomial coefficients in order of increasing degree, i.e., ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. The default value is [-1, 1]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1, 1]. .. versionadded:: 1.6.0 """ # Virtual Functions _add = staticmethod(polyadd) _sub = staticmethod(polysub) _mul = staticmethod(polymul) _div = staticmethod(polydiv) _pow = staticmethod(polypow) _val = staticmethod(polyval) _int = staticmethod(polyint) _der = staticmethod(polyder) _fit = staticmethod(polyfit) _line = staticmethod(polyline) _roots = staticmethod(polyroots) _fromroots = staticmethod(polyfromroots) # Virtual properties nickname = 'poly' domain = np.array(polydomain) window = np.array(polydomain)
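# Illustrative usage of the Polynomial class defined above (a sketch;
# the values are arbitrary example data):
#
#     >>> p = Polynomial([1, 2, 3])   # represents 1 + 2*x + 3*x**2
#     >>> p(0.5)                      # evaluate via __call__
#     2.75
#     >>> p.deriv().coef              # derivative coefficients, 2 + 6*x
#     array([ 2.,  6.])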
52808
31.083232
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/hermite.py
""" Objects for dealing with Hermite series. This module provides a number of objects (mostly functions) useful for dealing with Hermite series, including a `Hermite` class that encapsulates the usual arithmetic operations. (General information on how this module represents and works with such polynomials is in the docstring for its "parent" sub-package, `numpy.polynomial`). Constants --------- - `hermdomain` -- Hermite series default domain, [-1,1]. - `hermzero` -- Hermite series that evaluates identically to 0. - `hermone` -- Hermite series that evaluates identically to 1. - `hermx` -- Hermite series for the identity map, ``f(x) = x``. Arithmetic ---------- - `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. - `hermadd` -- add two Hermite series. - `hermsub` -- subtract one Hermite series from another. - `hermmul` -- multiply two Hermite series. - `hermdiv` -- divide one Hermite series by another. - `hermval` -- evaluate a Hermite series at given points. - `hermval2d` -- evaluate a 2D Hermite series at given points. - `hermval3d` -- evaluate a 3D Hermite series at given points. - `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product. - `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product. Calculus -------- - `hermder` -- differentiate a Hermite series. - `hermint` -- integrate a Hermite series. Misc Functions -------------- - `hermfromroots` -- create a Hermite series with specified roots. - `hermroots` -- find the roots of a Hermite series. - `hermvander` -- Vandermonde-like matrix for Hermite polynomials. - `hermvander2d` -- Vandermonde-like matrix for 2D power series. - `hermvander3d` -- Vandermonde-like matrix for 3D power series. - `hermgauss` -- Gauss-Hermite quadrature, points and weights. - `hermweight` -- Hermite weight function. - `hermcompanion` -- symmetrized companion matrix in Hermite form. - `hermfit` -- least-squares fit returning a Hermite series. - `hermtrim` -- trim leading coefficients from a Hermite series. - `hermline` -- Hermite series of given straight line. - `herm2poly` -- convert a Hermite series to a polynomial. - `poly2herm` -- convert a polynomial to a Hermite series. Classes ------- - `Hermite` -- A Hermite series class. See also -------- `numpy.polynomial` """ from __future__ import division, absolute_import, print_function import warnings import numpy as np import numpy.linalg as la from numpy.core.multiarray import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase __all__ = [ 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] hermtrim = pu.trimcoef def poly2herm(pol): """ poly2herm(pol) Convert a polynomial to a Hermite series. Convert an array representing the coefficients of a polynomial (relative to the "standard" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Hermite series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Hermite series. 
See Also -------- herm2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite import poly2herm >>> poly2herm(np.arange(4)) array([ 1. , 2.75 , 0.5 , 0.375]) """ [pol] = pu.as_series([pol]) deg = len(pol) - 1 res = 0 for i in range(deg, -1, -1): res = hermadd(hermmulx(res), pol[i]) return res def herm2poly(c): """ Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the "standard" basis) ordered from lowest order term to highest. See Also -------- poly2herm Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite import herm2poly >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) array([ 0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx [c] = pu.as_series([c]) n = len(c) if n == 1: return c if n == 2: c[1] *= 2 return c else: c0 = c[-2] c1 = c[-1] # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1*(2*(i - 1))) c1 = polyadd(tmp, polymulx(c1)*2) return polyadd(c0, polymulx(c1)*2) # # These are constant arrays are of integer type so as to be compatible # with the widest range of other types, such as Decimal. # # Hermite hermdomain = np.array([-1, 1]) # Hermite coefficients representing zero. hermzero = np.array([0]) # Hermite coefficients representing one. hermone = np.array([1]) # Hermite coefficients representing the identity x. hermx = np.array([0, 1/2]) def hermline(off, scl): """ Hermite series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Hermite series for ``off + scl*x``. See Also -------- polyline, chebline Examples -------- >>> from numpy.polynomial.hermite import hermline, hermval >>> hermval(0,hermline(3, 2)) 3.0 >>> hermval(1,hermline(3, 2)) 5.0 """ if scl != 0: return np.array([off, scl/2]) else: return np.array([off]) def hermfromroots(roots): """ Generate a Hermite series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in Hermite form, where the `r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) The coefficient of the last term is not generally 1 for monic polynomials in Hermite form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. 
If all roots are real then `out` is a real array, if some of the roots are complex, then `out` is complex even if all the coefficients in the result are real (see Examples below). See Also -------- polyfromroots, legfromroots, lagfromroots, chebfromroots, hermefromroots. Examples -------- >>> from numpy.polynomial.hermite import hermfromroots, hermval >>> coef = hermfromroots((-1, 0, 1)) >>> hermval((-1, 0, 1), coef) array([ 0., 0., 0.]) >>> coef = hermfromroots((-1j, 1j)) >>> hermval((-1j, 1j), coef) array([ 0.+0.j, 0.+0.j]) """ if len(roots) == 0: return np.ones(1) else: [roots] = pu.as_series([roots], trim=False) roots.sort() p = [hermline(-r, 1) for r in roots] n = len(p) while n > 1: m, r = divmod(n, 2) tmp = [hermmul(p[i], p[i+m]) for i in range(m)] if r: tmp[0] = hermmul(tmp[0], p[-1]) p = tmp n = m return p[0] def hermadd(c1, c2): """ Add one Hermite series to another. Returns the sum of two Hermite series `c1` + `c2`. The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermsub, hermmul, hermdiv, hermpow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so addition, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite import hermadd >>> hermadd([1, 2, 3], [1, 2, 3, 4]) array([ 2., 4., 6., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] += c2 ret = c1 else: c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermsub(c1, c2): """ Subtract one Hermite series from another. Returns the difference of two Hermite series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their difference. See Also -------- hermadd, hermmul, hermdiv, hermpow Notes ----- Unlike multiplication, division, etc., the difference of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite import hermsub >>> hermsub([1, 2, 3, 4], [1, 2, 3]) array([ 0., 0., 0., 4.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 else: c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret) def hermmulx(c): """Multiply a Hermite series by x. Multiply the Hermite series `c` by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. Notes ----- The multiplication uses the recursion relationship for Hermite polynomials in the form .. math:: xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) Examples -------- >>> from numpy.polynomial.hermite import hermmulx >>> hermmulx([1, 2, 3]) array([ 2. , 6.5, 1. 
, 1.5]) """ # c is a trimmed copy [c] = pu.as_series([c]) # The zero series needs special treatment if len(c) == 1 and c[0] == 0: return c prd = np.empty(len(c) + 1, dtype=c.dtype) prd[0] = c[0]*0 prd[1] = c[0]/2 for i in range(1, len(c)): prd[i + 1] = c[i]/2 prd[i - 1] += c[i]*i return prd def hermmul(c1, c2): """ Multiply one Hermite series by another. Returns the product of two Hermite series `c1` * `c2`. The arguments are sequences of coefficients, from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermadd, hermsub, hermdiv, hermpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to "reproject" the product onto said basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermmul >>> hermmul([1, 2, 3], [0, 1, 2]) array([ 52., 29., 52., 7., 6.]) """ # s1, s2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) == 1: c0 = c[0]*xs c1 = 0 elif len(c) == 2: c0 = c[0]*xs c1 = c[1]*xs else: nd = len(c) c0 = c[-2]*xs c1 = c[-1]*xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) c1 = hermadd(tmp, hermmulx(c1)*2) return hermadd(c0, hermmulx(c1)*2) def hermdiv(c1, c2): """ Divide one Hermite series by another. Returns the quotient-with-remainder of two Hermite series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of Hermite series coefficients representing the quotient and remainder. See Also -------- hermadd, hermsub, hermmul, hermpow Notes ----- In general, the (polynomial) division of one Hermite series by another results in quotient and remainder terms that are not in the Hermite polynomial basis set. Thus, to express these results as a Hermite series, it is necessary to "reproject" the results onto the Hermite basis set, which may produce "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermdiv >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 0.])) >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 2., 2.])) >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) (array([ 1., 2., 3.]), array([ 1., 1.])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError() lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: return c1[:1]*0, c1 elif lc2 == 1: return c1/c2[-1], c1[:1]*0 else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): p = hermmul([0]*i + [1], c2) q = rem[-1]/p[-1] rem = rem[:-1] - q*p[:-1] quo[i] = q return quo, pu.trimseq(rem) def hermpow(c, pow, maxpower=16): """Raise a Hermite series to a power. Returns the Hermite series `c` raised to the power `pow`. 
The argument `c` is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Hermite series of power. See Also -------- hermadd, hermsub, hermmul, hermdiv Examples -------- >>> from numpy.polynomial.hermite import hermpow >>> hermpow([1, 2, 3], 2) array([ 81., 52., 82., 12., 9.]) """ # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) if power != pow or power < 0: raise ValueError("Power must be a non-negative integer.") elif maxpower is not None and power > maxpower: raise ValueError("Power is too large") elif power == 0: return np.array([1], dtype=c.dtype) elif power == 1: return c else: # This can be made more efficient by using powers of two # in the usual way. prd = c for i in range(2, power + 1): prd = hermmul(prd, c) return prd def hermder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite series. Returns the Hermite series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite series coefficients. If `c` is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Hermite series of the derivative. See Also -------- hermint Notes ----- In general, the result of differentiating a Hermite series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermder >>> hermder([ 1. , 0.5, 0.5, 0.5]) array([ 1., 2., 3.]) >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) array([ 1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of derivation must be integer") if cnt < 0: raise ValueError("The order of derivation must be non-negative") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): der[j - 1] = (2*j)*c[j] c = der c = np.moveaxis(c, 0, iaxis) return c def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): """ Integrate a Hermite series. 
Returns the Hermite series coefficients `c` integrated `m` times from `lbnd` along `axis`. At each iteration the resulting series is **multiplied** by `scl` and an integration constant, `k`, is added. The scaling factor is for use in a linear change of variable. ("Buyer beware": note that, depending on what one is doing, one may want `scl` to be the reciprocal of what one might expect; for more information, see the Notes section below.) The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite series coefficients. If c is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Order of integration, must be positive. (Default: 1) k : {[], list, scalar}, optional Integration constant(s). The value of the first integral at ``lbnd`` is the first value in the list, the value of the second integral at ``lbnd`` is the second value, etc. If ``k == []`` (the default), all constants are set to zero. If ``m == 1``, a single scalar can be given instead of a list. lbnd : scalar, optional The lower bound of the integral. (Default: 0) scl : scalar, optional Following each integration the result is *multiplied* by `scl` before the integration constant is added. (Default: 1) axis : int, optional Axis over which the integral is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- S : ndarray Hermite series coefficients of the integral. Raises ------ ValueError If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or ``np.ndim(scl) != 0``. See Also -------- hermder Notes ----- Note that the result of each integration is *multiplied* by `scl`. Why is this important to note? Say one is making a linear change of variable :math:`u = ax + b` in an integral relative to `x`. Then :math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a` - perhaps not what one would have first thought. Also note that, in general, the result of integrating a C-series needs to be "reprojected" onto the C-series basis set. Thus, typically, the result of this function is "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermint >>> hermint([1,2,3]) # integrate once, value 0 at 0. array([ 1. , 0.5, 0.5, 0.5]) >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. array([ 2. , 0.5, 0.5, 0.5]) >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 array([-2. 
, 0.5, 0.5, 0.5]) >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) """ c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if not np.iterable(k): k = [k] cnt, iaxis = [int(t) for t in [m, axis]] if cnt != m: raise ValueError("The order of integration must be integer") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") if np.ndim(lbnd) != 0: raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) c *= scl if n == 1 and np.all(c[0] == 0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) tmp[0] = c[0]*0 tmp[1] = c[0]/2 for j in range(1, n): tmp[j + 1] = c[j]/(2*(j + 1)) tmp[0] += k[i] - hermval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) return c def hermval(x, c, tensor=True): """ Evaluate an Hermite series at points x. If `c` is of length `n + 1`, this function returns the value: .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- hermval2d, hermgrid2d, hermval3d, hermgrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. 
Examples -------- >>> from numpy.polynomial.hermite import hermval >>> coef = [1,2,3] >>> hermval(1, coef) 11.0 >>> hermval([[1,2],[3,4]], coef) array([[ 11., 51.], [ 115., 203.]]) """ c = np.array(c, ndmin=1, copy=0) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) x2 = x*2 if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: nd = len(c) c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = c[-i] - c1*(2*(nd - 1)) c1 = tmp + c1*x2 return c0 + c1*x2 def hermval2d(x, y, c): """ Evaluate a 2-D Hermite series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `(x, y)`, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- hermval, hermgrid2d, hermval3d, hermgrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y = np.array((x, y), copy=0) except Exception: raise ValueError('x, y are incompatible') c = hermval(x, c) c = hermval(y, c, tensor=False) return c def hermgrid2d(x, y, c): """ Evaluate a 2-D Hermite series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) where the points `(a, b)` consist of all pairs formed by taking `a` from `x` and `b` from `y`. The resulting points form a grid with `x` in the first dimension and `y` in the second. The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than two dimensions, ones are implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points in the Cartesian product of `x` and `y`. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. 
If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points in the Cartesian product of `x` and `y`. See Also -------- hermval, hermval2d, hermval3d, hermgrid3d Notes ----- .. versionadded:: 1.7.0 """ c = hermval(x, c) c = hermval(y, c) return c def hermval3d(x, y, z, c): """ Evaluate a 3-D Hermite series at points (x, y, z). This function returns the values: .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape. Parameters ---------- x, y, z : array_like, compatible object The three dimensional series is evaluated at the points `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If any of `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension greater than 3 the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the multidimensional polynomial on points formed with triples of corresponding values from `x`, `y`, and `z`. See Also -------- hermval, hermval2d, hermgrid2d, hermgrid3d Notes ----- .. versionadded:: 1.7.0 """ try: x, y, z = np.array((x, y, z), copy=0) except Exception: raise ValueError('x, y, z are incompatible') c = hermval(x, c) c = hermval(y, c, tensor=False) c = hermval(z, c, tensor=False) return c def hermgrid3d(x, y, z, c): """ Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. 
        If `c` has dimension
        greater than three the remaining indices enumerate multiple sets
        of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    c = hermval(x, c)
    c = hermval(y, c)
    c = hermval(z, c)
    return c


def hermvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample
    points `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = H_i(x),

    where `0 <= i <= deg`. The leading indices of `V` index the elements
    of `x` and the last index is the degree of the Hermite polynomial.

    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
    ``hermval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a
    large number of Hermite series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix
        is ``x.shape + (deg + 1,)``, where the last index is the degree
        of the corresponding Hermite polynomial.  The dtype will be the
        same as the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermvander
    >>> x = np.array([-1, 0, 1])
    >>> hermvander(x, 3)
    array([[ 1., -2.,  2.,  4.],
           [ 1.,  0., -2., -0.],
           [ 1.,  2.,  2., -4.]])

    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    x = np.array(x, copy=0, ndmin=1) + 0.0
    dims = (ideg + 1,) + x.shape
    dtyp = x.dtype
    v = np.empty(dims, dtype=dtyp)
    v[0] = x*0 + 1
    if ideg > 0:
        x2 = x*2
        v[1] = x2
        for i in range(2, ideg + 1):
            v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
    return np.moveaxis(v, 0, -1)


def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices
    of `V` index the points `(x, y)` and the last index encodes the
    degrees of the Hermite polynomials.

    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Hermite
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`.
        The dtype will be the
        same as the converted `x` and `y`.

    See Also
    --------
    hermvander, hermvander3d, hermval2d, hermval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
    if is_valid != [1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    x, y = np.array((x, y), copy=0) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    v = vx[..., None]*vy[..., None,:]
    return v.reshape(v.shape[:-2] + (-1,))


def hermvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the Hermite polynomials.

    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Hermite
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    hermvander, hermvander2d, hermval2d, hermval3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
    if is_valid != [1, 1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    vz = hermvander(z, degz)
    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-3] + (-1,))


def hermfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a Hermite series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in
        the fit. For NumPy versions >= 1.11.0 a list of integers
        specifying the degrees of the terms to include may be used
        instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller
        than this relative to the largest singular value will be ignored.
        The default value is len(x)*eps, where eps is the relative
        precision of the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also
        returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products
        ``w[i]*y[i]`` all have the same variance.  The default value is
        None.

    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column `k` of `y` are in column
        `k`.

    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.  The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, lagfit, polyfit, hermefit
    hermval : Evaluates a Hermite series.
    hermvander : Vandermonde matrix of Hermite series.
    hermweight : Hermite weight function
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the Hermite series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are
    the coefficients to be solved for, `w` are the weights, `y` are the
    observed values.  This equation is then solved using the singular
    value decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning.  The `rcond` parameter can also
    be set to a value smaller than its default, but the resulting fit may
    be spurious and have large contributions from roundoff error.

    Fits using Hermite series are probably most useful when the data can
    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function
    is available as `hermweight`.

    References
    ----------
    ..
[1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting Examples -------- >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) >>> err = np.random.randn(len(x))/10 >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) array([ 0.97902637, 1.99849131, 3.00006 ]) """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 deg = np.asarray(deg) # check arguments. if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if len(x) != len(y): raise TypeError("expected x and y to have same length") if deg.ndim == 0: lmax = deg order = lmax + 1 van = hermvander(x, lmax) else: deg = np.sort(deg) lmax = deg[-1] order = len(deg) van = hermvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected 1D vector for w") if len(x) != len(w): raise TypeError("expected x and w to have same length") # apply weights. Don't use inplace operations as they # can cause problems with NA. lhs = lhs * w rhs = rhs * w # set rcond if rcond is None: rcond = len(x)*np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) else: scl = np.sqrt(np.square(lhs).sum(1)) scl[scl == 0] = 1 # Solve the least squares problem. c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: cc = np.zeros(lmax+1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" warnings.warn(msg, pu.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] else: return c def hermcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when `c` is an Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). Notes ----- .. versionadded:: 1.7.0 """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: return np.array([[-.5*c[0]/c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] top[...] = np.sqrt(.5*np.arange(1, n)) bot[...] = top mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) return mat def hermroots(c): """ Compute the roots of a Hermite series. Return the roots (a.k.a. "zeros") of the polynomial .. math:: p(x) = \\sum_i c[i] * H_i(x). Parameters ---------- c : 1-D array_like 1-D array of coefficients. 
    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, chebroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
    errors due to the numerical instability of the series for such
    values. Roots with multiplicity greater than 1 will also show larger
    errors as the value of the series near such points is relatively
    insensitive to errors in the roots. Isolated roots near the origin
    can be improved by a few iterations of Newton's method.

    The Hermite series basis polynomials aren't powers of `x` so the
    results of this function may seem unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermroots, hermfromroots
    >>> coef = hermfromroots([-1, 0, 1])
    >>> coef
    array([ 0.   ,  0.25 ,  0.   ,  0.125])
    >>> hermroots(coef)
    array([ -1.00000000e+00,  -1.38777878e-17,   1.00000000e+00])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) <= 1:
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        return np.array([-.5*c[0]/c[1]])

    m = hermcompanion(c)
    r = la.eigvals(m)
    r.sort()
    return r


def _normed_hermite_n(x, n):
    """
    Evaluate a normalized Hermite polynomial.

    Compute the value of the normalized Hermite polynomial of degree
    ``n`` at the points ``x``.

    Parameters
    ----------
    x : ndarray of double
        Points at which to evaluate the function
    n : int
        Degree of the normalized Hermite function to be evaluated.

    Returns
    -------
    values : ndarray
        The values of the normalized Hermite polynomial at the points
        ``x``; the shape of the return value is the same as that of
        ``x``.

    Notes
    -----
    .. versionadded:: 1.10.0

    This function is needed for finding the Gauss points and integration
    weights for high degrees. The values of the standard Hermite
    functions overflow when n >= 207.

    """
    if n == 0:
        return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))

    c0 = 0.
    c1 = 1./np.sqrt(np.sqrt(np.pi))
    nd = float(n)
    for i in range(n - 1):
        tmp = c0
        c0 = -c1*np.sqrt((nd - 1.)/nd)
        c1 = tmp + c1*x*np.sqrt(2./nd)
        nd = nd - 1.0
    return c0 + c1*x*np.sqrt(2)


def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials
    of degree :math:`2*deg - 1` or less over the interval
    :math:`[-\\infty, \\infty]` with the weight function
    :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    w : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees
    may be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1], dtype=np.float64)
    m = hermcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = _normed_hermite_n(x, ideg)
    df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
    x -= dy/df

    # compute the weights.
    # We scale the factor to avoid possible numerical overflow.
    fm = _normed_hermite_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()

    return x, w


def hermweight(x):
    """
    Weight function of the Hermite polynomials.

    The weight function is :math:`\\exp(-x^2)` and the interval of
    integration is :math:`[-\\infty, \\infty]`. The Hermite polynomials
    are orthogonal, but not normalized, with respect to this weight
    function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    w = np.exp(-x**2)
    return w


#
# Hermite series class
#

class Hermite(ABCPolyBase):
    """A Hermite series class.

    The Hermite class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        Hermite coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and
        scaling. The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions
    _add = staticmethod(hermadd)
    _sub = staticmethod(hermsub)
    _mul = staticmethod(hermmul)
    _div = staticmethod(hermdiv)
    _pow = staticmethod(hermpow)
    _val = staticmethod(hermval)
    _int = staticmethod(hermint)
    _der = staticmethod(hermder)
    _fit = staticmethod(hermfit)
    _line = staticmethod(hermline)
    _roots = staticmethod(hermroots)
    _fromroots = staticmethod(hermfromroots)

    # Virtual properties
    nickname = 'herm'
    domain = np.array(hermdomain)
    window = np.array(hermdomain)
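# Illustrative usage sketch (not part of the original module or the numpy
# API): exercises hermval, hermfit, and hermgauss from this file.  The
# expected numbers follow from H_0 = 1, H_1 = 2x, H_2 = 4x**2 - 2, and
# from an n-point Gauss-Hermite rule being exact through degree 2n - 1.
if __name__ == "__main__":
    xs = np.linspace(-3, 3, 50)
    ys = hermval(xs, [1, 2, 3])          # 1*H_0(x) + 2*H_1(x) + 3*H_2(x)
    coef = hermfit(xs, ys, 2)            # exact data, so the fit recovers
    assert np.allclose(coef, [1, 2, 3])  # the coefficients up to roundoff

    pts, wts = hermgauss(3)              # 3-point rule, exact to degree 5
    quad = (wts * pts**2).sum()          # integral of x**2 * exp(-x**2)
    assert np.allclose(quad, np.sqrt(np.pi)/2)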
57,896
30.228155
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/polyutils.py
""" Utility classes and functions for the polynomial modules. This module provides: error and warning objects; a polynomial base class; and some routines used in both the `polynomial` and `chebyshev` modules. Error objects ------------- .. autosummary:: :toctree: generated/ PolyError base class for this sub-package's errors. PolyDomainError raised when domains are mismatched. Warning objects --------------- .. autosummary:: :toctree: generated/ RankWarning raised in least-squares fit for rank-deficient matrix. Base class ---------- .. autosummary:: :toctree: generated/ PolyBase Obsolete base class for the polynomial classes. Do not use. Functions --------- .. autosummary:: :toctree: generated/ as_series convert list of array_likes into 1-D arrays of common type. trimseq remove trailing zeros. trimcoef remove small trailing coefficients. getdomain return the domain appropriate for a given set of abscissae. mapdomain maps points between domains. mapparms parameters of the linear map between domains. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = [ 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] # # Warnings and Exceptions # class RankWarning(UserWarning): """Issued by chebfit when the design matrix is rank deficient.""" pass class PolyError(Exception): """Base class for errors in this module.""" pass class PolyDomainError(PolyError): """Issued by the generic Poly class when two domains don't match. This is raised when an binary operation is passed Poly objects with different domains. """ pass # # Base class for all polynomial types # class PolyBase(object): """ Base class for all polynomial types. Deprecated in numpy 1.9.0, use the abstract ABCPolyBase class instead. Note that the latter requires a number of virtual functions to be implemented. """ pass # # Helper functions to convert inputs to 1-D arrays # def trimseq(seq): """Remove small Poly series coefficients. Parameters ---------- seq : sequence Sequence of Poly series coefficients. This routine fails for empty sequences. Returns ------- series : sequence Subsequence with trailing zeros removed. If the resulting sequence would be empty, return the first element. The returned sequence may or may not be a view. Notes ----- Do not lose the type info if the sequence contains unknown objects. """ if len(seq) == 0: return seq else: for i in range(len(seq) - 1, -1, -1): if seq[i] != 0: break return seq[:i+1] def as_series(alist, trim=True): """ Return argument as a list of 1-d arrays. The returned list contains array(s) of dtype double, complex double, or object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array raises a Value Error if it is not first reshaped into either a 1-d or 2-d array. Parameters ---------- alist : array_like A 1- or 2-d array_like trim : boolean, optional When True, trailing zeros are removed from the inputs. When False, the inputs are passed through intact. Returns ------- [a1, a2,...] : list of 1-D arrays A copy of the input data as a list of 1-d arrays. Raises ------ ValueError Raised when `as_series` cannot convert its input to 1-d arrays, or at least one of the resulting arrays is empty. 
Examples -------- >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) >>> pu.as_series(a) [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] >>> b = np.arange(6).reshape((2,3)) >>> pu.as_series(b) [array([ 0., 1., 2.]), array([ 3., 4., 5.])] >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])] >>> pu.as_series([2, [1.1, 0.]]) [array([ 2.]), array([ 1.1])] >>> pu.as_series([2, [1.1, 0.]], trim=False) [array([ 2.]), array([ 1.1, 0. ])] """ arrays = [np.array(a, ndmin=1, copy=0) for a in alist] if min([a.size for a in arrays]) == 0: raise ValueError("Coefficient array is empty") if any([a.ndim != 1 for a in arrays]): raise ValueError("Coefficient array is not 1-d") if trim: arrays = [trimseq(a) for a in arrays] if any([a.dtype == np.dtype(object) for a in arrays]): ret = [] for a in arrays: if a.dtype != np.dtype(object): tmp = np.empty(len(a), dtype=np.dtype(object)) tmp[:] = a[:] ret.append(tmp) else: ret.append(a.copy()) else: try: dtype = np.common_type(*arrays) except Exception: raise ValueError("Coefficient arrays have no common type") ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] return ret def trimcoef(c, tol=0): """ Remove "small" "trailing" coefficients from a polynomial. "Small" means "small in absolute value" and is controlled by the parameter `tol`; "trailing" means highest order coefficient(s), e.g., in ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) both the 3-rd and 4-th order coefficients would be "trimmed." Parameters ---------- c : array_like 1-d array of coefficients, ordered from lowest order to highest. tol : number, optional Trailing (i.e., highest order) elements with absolute value less than or equal to `tol` (default value is zero) are removed. Returns ------- trimmed : ndarray 1-d array with trailing zeros removed. If the resulting series would be empty, a series containing a single zero is returned. Raises ------ ValueError If `tol` < 0 See Also -------- trimseq Examples -------- >>> from numpy.polynomial import polyutils as pu >>> pu.trimcoef((0,0,3,0,5,0,0)) array([ 0., 0., 3., 0., 5.]) >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed array([ 0.]) >>> i = complex(0,1) # works for complex >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) array([ 0.0003+0.j , 0.0010-0.001j]) """ if tol < 0: raise ValueError("tol must be non-negative") [c] = as_series([c]) [ind] = np.nonzero(np.abs(c) > tol) if len(ind) == 0: return c[:1]*0 else: return c[:ind[-1] + 1].copy() def getdomain(x): """ Return a domain suitable for given abscissae. Find a domain suitable for a polynomial or Chebyshev series defined at the values supplied. Parameters ---------- x : array_like 1-d array of abscissae whose domain will be determined. Returns ------- domain : ndarray 1-d array containing two values. If the inputs are complex, then the two returned points are the lower left and upper right corners of the smallest rectangle (aligned with the axes) in the complex plane containing the points `x`. If the inputs are real, then the two points are the ends of the smallest interval containing the points `x`. 
    See Also
    --------
    mapparms, mapdomain

    Examples
    --------
    >>> from numpy.polynomial import polyutils as pu
    >>> points = np.arange(4)**2 - 5; points
    array([-5, -4, -1,  4])
    >>> pu.getdomain(points)
    array([-5.,  4.])
    >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle
    >>> pu.getdomain(c)
    array([-1.-1.j,  1.+1.j])

    """
    [x] = as_series([x], trim=False)
    if x.dtype.char in np.typecodes['Complex']:
        rmin, rmax = x.real.min(), x.real.max()
        imin, imax = x.imag.min(), x.imag.max()
        return np.array((complex(rmin, imin), complex(rmax, imax)))
    else:
        return np.array((x.min(), x.max()))


def mapparms(old, new):
    """
    Linear map parameters between domains.

    Return the parameters of the linear map ``offset + scale*x`` that
    maps `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.

    Parameters
    ----------
    old, new : array_like
        Domains. Each domain must (successfully) convert to a 1-d array
        containing precisely two values.

    Returns
    -------
    offset, scale : scalars
        The map ``L(x) = offset + scale*x`` maps the first domain to the
        second.

    See Also
    --------
    getdomain, mapdomain

    Notes
    -----
    Also works for complex numbers, and thus can be used to calculate the
    parameters required to map any line in the complex plane to any other
    line therein.

    Examples
    --------
    >>> from numpy.polynomial import polyutils as pu
    >>> pu.mapparms((-1,1),(-1,1))
    (0.0, 1.0)
    >>> pu.mapparms((1,-1),(-1,1))
    (0.0, -1.0)
    >>> i = complex(0,1)
    >>> pu.mapparms((-i,-1),(1,i))
    ((1+1j), (1+0j))

    """
    oldlen = old[1] - old[0]
    newlen = new[1] - new[0]
    off = (old[1]*new[0] - old[0]*new[1])/oldlen
    scl = newlen/oldlen
    return off, scl

def mapdomain(x, old, new):
    """
    Apply linear map to input points.

    The linear map ``offset + scale*x`` that maps the domain `old` to
    the domain `new` is applied to the points `x`.

    Parameters
    ----------
    x : array_like
        Points to be mapped. If `x` is a subtype of ndarray the subtype
        will be preserved.
    old, new : array_like
        The two domains that determine the map.  Each must (successfully)
        convert to 1-d arrays containing precisely two values.

    Returns
    -------
    x_out : ndarray
        Array of points of the same shape as `x`, after application of
        the linear map between the two domains.

    See Also
    --------
    getdomain, mapparms

    Notes
    -----
    Effectively, this implements:

    .. math ::
        x\\_out = new[0] + m(x - old[0])

    where

    .. math ::
        m = \\frac{new[1]-new[0]}{old[1]-old[0]}

    Examples
    --------
    >>> from numpy.polynomial import polyutils as pu
    >>> old_domain = (-1,1)
    >>> new_domain = (0,2*np.pi)
    >>> x = np.linspace(-1,1,6); x
    array([-1. , -0.6, -0.2,  0.2,  0.6,  1. ])
    >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
    array([ 0.        ,  1.25663706,  2.51327412,  3.76991118,  5.02654825,
            6.28318531])
    >>> x - pu.mapdomain(x_out, new_domain, old_domain)
    array([ 0.,  0.,  0.,  0.,  0.,  0.])

    Also works for complex numbers (and thus can be used to map any line
    in the complex plane to any other line therein).

    >>> i = complex(0,1)
    >>> old = (-1 - i, 1 + i)
    >>> new = (-1 + i, 1 - i)
    >>> z = np.linspace(old[0], old[1], 6); z
    array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j,  0.2+0.2j,  0.6+0.6j,  1.0+1.j ])
    >>> new_z = pu.mapdomain(z, old, new); new_z
    array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j,  0.2-0.2j,  0.6-0.6j,  1.0-1.j ])

    """
    x = np.asanyarray(x)
    off, scl = mapparms(old, new)
    return off + scl*x
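# Illustrative sketch (not part of the original module): the three map
# helpers above compose as documented -- getdomain picks an interval,
# mapparms returns the affine parameters, and mapdomain applies them.
if __name__ == "__main__":
    pts = np.arange(4)**2 - 5.0          # sample abscissae
    dom = getdomain(pts)                 # array([-5., 4.])
    off, scl = mapparms(dom, [-1, 1])    # parameters of L(x) = off + scl*x
    mapped = mapdomain(pts, dom, [-1, 1])
    assert np.allclose(mapped, off + scl*pts)
    # the round trip through the window is the identity
    assert np.allclose(mapdomain(mapped, [-1, 1], dom), pts)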
11,529
26.917676
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/__init__.py
""" A sub-package for efficiently dealing with polynomials. Within the documentation for this sub-package, a "finite power series," i.e., a polynomial (also referred to simply as a "series") is represented by a 1-D numpy array of the polynomial's coefficients, ordered from lowest order term to highest. For example, array([1,2,3]) represents ``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial applicable to the specific module in question, e.g., `polynomial` (which "wraps" the "standard" basis) or `chebyshev`. For optimal performance, all operations on polynomials, including evaluation at an argument, are implemented as operations on the coefficients. Additional (module-specific) information can be found in the docstring for the module of interest. """ from __future__ import division, absolute_import, print_function from .polynomial import Polynomial from .chebyshev import Chebyshev from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench
1,140
39.75
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/_polybase.py
""" Abstract base class for the various polynomial Classes. The ABCPolyBase class provides the methods needed to implement the common API for the various polynomial classes. It operates as a mixin, but uses the abc module from the stdlib, hence it is only available for Python >= 2.6. """ from __future__ import division, absolute_import, print_function from abc import ABCMeta, abstractmethod, abstractproperty from numbers import Number import numpy as np from . import polyutils as pu __all__ = ['ABCPolyBase'] class ABCPolyBase(object): """An abstract base class for series classes. ABCPolyBase provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the methods listed below. .. versionadded:: 1.9.0 Parameters ---------- coef : array_like Series coefficients in order of increasing degree, i.e., ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where ``P_i`` is the basis polynomials of degree ``i``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. The default value is the derived class domain. window : (2,) array_like, optional Window, see domain for its use. The default value is the derived class window. Attributes ---------- coef : (N,) ndarray Series coefficients in order of increasing degree. domain : (2,) ndarray Domain that is mapped to window. window : (2,) ndarray Window that domain is mapped to. Class Attributes ---------------- maxpower : int Maximum power allowed, i.e., the largest number ``n`` such that ``p(x)**n`` is allowed. This is to limit runaway polynomial size. domain : (2,) ndarray Default domain of the class. window : (2,) ndarray Default window of the class. """ __metaclass__ = ABCMeta # Not hashable __hash__ = None # Opt out of numpy ufuncs and Python ops with ndarray subclasses. __array_ufunc__ = None # Limit runaway size. T_n^m has degree n*m maxpower = 100 @abstractproperty def domain(self): pass @abstractproperty def window(self): pass @abstractproperty def nickname(self): pass @abstractmethod def _add(self): pass @abstractmethod def _sub(self): pass @abstractmethod def _mul(self): pass @abstractmethod def _div(self): pass @abstractmethod def _pow(self): pass @abstractmethod def _val(self): pass @abstractmethod def _int(self): pass @abstractmethod def _der(self): pass @abstractmethod def _fit(self): pass @abstractmethod def _line(self): pass @abstractmethod def _roots(self): pass @abstractmethod def _fromroots(self): pass def has_samecoef(self, other): """Check if coefficients match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``coef`` attribute. Returns ------- bool : boolean True if the coefficients are the same, False otherwise. """ if len(self.coef) != len(other.coef): return False elif not np.all(self.coef == other.coef): return False else: return True def has_samedomain(self, other): """Check if domains match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``domain`` attribute. Returns ------- bool : boolean True if the domains are the same, False otherwise. """ return np.all(self.domain == other.domain) def has_samewindow(self, other): """Check if windows match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``window`` attribute. Returns ------- bool : boolean True if the windows are the same, False otherwise. 
""" return np.all(self.window == other.window) def has_sametype(self, other): """Check if types match. .. versionadded:: 1.7.0 Parameters ---------- other : object Class instance. Returns ------- bool : boolean True if other is same class as self """ return isinstance(other, self.__class__) def _get_coefficients(self, other): """Interpret other as polynomial coefficients. The `other` argument is checked to see if it is of the same class as self with identical domain and window. If so, return its coefficients, otherwise return `other`. .. versionadded:: 1.9.0 Parameters ---------- other : anything Object to be checked. Returns ------- coef The coefficients of`other` if it is a compatible instance, of ABCPolyBase, otherwise `other`. Raises ------ TypeError When `other` is an incompatible instance of ABCPolyBase. """ if isinstance(other, ABCPolyBase): if not isinstance(other, self.__class__): raise TypeError("Polynomial types differ") elif not np.all(self.domain == other.domain): raise TypeError("Domains differ") elif not np.all(self.window == other.window): raise TypeError("Windows differ") return other.coef return other def __init__(self, coef, domain=None, window=None): [coef] = pu.as_series([coef], trim=False) self.coef = coef if domain is not None: [domain] = pu.as_series([domain], trim=False) if len(domain) != 2: raise ValueError("Domain has wrong number of elements.") self.domain = domain if window is not None: [window] = pu.as_series([window], trim=False) if len(window) != 2: raise ValueError("Window has wrong number of elements.") self.window = window def __repr__(self): format = "%s(%s, domain=%s, window=%s)" coef = repr(self.coef)[6:-1] domain = repr(self.domain)[6:-1] window = repr(self.window)[6:-1] name = self.__class__.__name__ return format % (name, coef, domain, window) def __str__(self): format = "%s(%s)" coef = str(self.coef) name = self.nickname return format % (name, coef) # Pickle and copy def __getstate__(self): ret = self.__dict__.copy() ret['coef'] = self.coef.copy() ret['domain'] = self.domain.copy() ret['window'] = self.window.copy() return ret def __setstate__(self, dict): self.__dict__ = dict # Call def __call__(self, arg): off, scl = pu.mapparms(self.domain, self.window) arg = off + scl*arg return self._val(arg, self.coef) def __iter__(self): return iter(self.coef) def __len__(self): return len(self.coef) # Numeric properties. def __neg__(self): return self.__class__(-self.coef, self.domain, self.window) def __pos__(self): return self def __add__(self, other): othercoef = self._get_coefficients(other) try: coef = self._add(self.coef, othercoef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __sub__(self, other): othercoef = self._get_coefficients(other) try: coef = self._sub(self.coef, othercoef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __mul__(self, other): othercoef = self._get_coefficients(other) try: coef = self._mul(self.coef, othercoef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __div__(self, other): # set to __floordiv__, /, for now. return self.__floordiv__(other) def __truediv__(self, other): # there is no true divide if the rhs is not a Number, although it # could return the first n elements of an infinite series. # It is hard to see where n would come from, though. 
if not isinstance(other, Number) or isinstance(other, bool): form = "unsupported types for true division: '%s', '%s'" raise TypeError(form % (type(self), type(other))) return self.__floordiv__(other) def __floordiv__(self, other): res = self.__divmod__(other) if res is NotImplemented: return res return res[0] def __mod__(self, other): res = self.__divmod__(other) if res is NotImplemented: return res return res[1] def __divmod__(self, other): othercoef = self._get_coefficients(other) try: quo, rem = self._div(self.coef, othercoef) except ZeroDivisionError as e: raise e except Exception: return NotImplemented quo = self.__class__(quo, self.domain, self.window) rem = self.__class__(rem, self.domain, self.window) return quo, rem def __pow__(self, other): coef = self._pow(self.coef, other, maxpower=self.maxpower) res = self.__class__(coef, self.domain, self.window) return res def __radd__(self, other): try: coef = self._add(other, self.coef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __rsub__(self, other): try: coef = self._sub(other, self.coef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __rmul__(self, other): try: coef = self._mul(other, self.coef) except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __rdiv__(self, other): # set to __floordiv__ /. return self.__rfloordiv__(other) def __rtruediv__(self, other): # An instance of ABCPolyBase is not considered a # Number. return NotImplemented def __rfloordiv__(self, other): res = self.__rdivmod__(other) if res is NotImplemented: return res return res[0] def __rmod__(self, other): res = self.__rdivmod__(other) if res is NotImplemented: return res return res[1] def __rdivmod__(self, other): try: quo, rem = self._div(other, self.coef) except ZeroDivisionError as e: raise e except Exception: return NotImplemented quo = self.__class__(quo, self.domain, self.window) rem = self.__class__(rem, self.domain, self.window) return quo, rem # Enhance me # some augmented arithmetic operations could be added here def __eq__(self, other): res = (isinstance(other, self.__class__) and np.all(self.domain == other.domain) and np.all(self.window == other.window) and (self.coef.shape == other.coef.shape) and np.all(self.coef == other.coef)) return res def __ne__(self, other): return not self.__eq__(other) # # Extra methods. # def copy(self): """Return a copy. Returns ------- new_series : series Copy of self. """ return self.__class__(self.coef, self.domain, self.window) def degree(self): """The degree of the series. .. versionadded:: 1.5.0 Returns ------- degree : int Degree of the series, one less than the number of coefficients. """ return len(self) - 1 def cutdeg(self, deg): """Truncate series to the given degree. Reduce the degree of the series to `deg` by discarding the high order terms. If `deg` is greater than the current degree a copy of the current series is returned. This can be useful in least squares where the coefficients of the high degree terms may be very small. .. versionadded:: 1.5.0 Parameters ---------- deg : non-negative int The series is reduced to degree `deg` by discarding the high order terms. The value of `deg` must be a non-negative integer. Returns ------- new_series : series New instance of series with reduced degree. 
""" return self.truncate(deg + 1) def trim(self, tol=0): """Remove trailing coefficients Remove trailing coefficients until a coefficient is reached whose absolute value greater than `tol` or the beginning of the series is reached. If all the coefficients would be removed the series is set to ``[0]``. A new series instance is returned with the new coefficients. The current instance remains unchanged. Parameters ---------- tol : non-negative number. All trailing coefficients less than `tol` will be removed. Returns ------- new_series : series Contains the new set of coefficients. """ coef = pu.trimcoef(self.coef, tol) return self.__class__(coef, self.domain, self.window) def truncate(self, size): """Truncate series to length `size`. Reduce the series to length `size` by discarding the high degree terms. The value of `size` must be a positive integer. This can be useful in least squares where the coefficients of the high degree terms may be very small. Parameters ---------- size : positive int The series is reduced to length `size` by discarding the high degree terms. The value of `size` must be a positive integer. Returns ------- new_series : series New instance of series with truncated coefficients. """ isize = int(size) if isize != size or isize < 1: raise ValueError("size must be a positive integer") if isize >= len(self.coef): coef = self.coef else: coef = self.coef[:isize] return self.__class__(coef, self.domain, self.window) def convert(self, domain=None, kind=None, window=None): """Convert series to a different kind and/or domain and/or window. Parameters ---------- domain : array_like, optional The domain of the converted series. If the value is None, the default domain of `kind` is used. kind : class, optional The polynomial series type class to which the current instance should be converted. If kind is None, then the class of the current instance is used. window : array_like, optional The window of the converted series. If the value is None, the default window of `kind` is used. Returns ------- new_series : series The returned class can be of different type than the current instance and/or have a different domain and/or different window. Notes ----- Conversion between domains and class types can result in numerically ill defined series. Examples -------- """ if kind is None: kind = self.__class__ if domain is None: domain = kind.domain if window is None: window = kind.window return self(kind.identity(domain, window=window)) def mapparms(self): """Return the mapping parameters. The returned values define a linear map ``off + scl*x`` that is applied to the input arguments before the series is evaluated. The map depends on the ``domain`` and ``window``; if the current ``domain`` is equal to the ``window`` the resulting map is the identity. If the coefficients of the series instance are to be used by themselves outside this class, then the linear function must be substituted for the ``x`` in the standard representation of the base polynomials. Returns ------- off, scl : float or complex The mapping function is defined by ``off + scl*x``. Notes ----- If the current domain is the interval ``[l1, r1]`` and the window is ``[l2, r2]``, then the linear mapping function ``L`` is defined by the equations:: L(l1) = l2 L(r1) = r2 """ return pu.mapparms(self.domain, self.window) def integ(self, m=1, k=[], lbnd=None): """Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. 
        k : array_like
            Integration constants. The first constant is applied to the
            first integration, the second to the second, and so on. The
            list of values must be of length less than or equal to `m`
            and any missing values are set to zero.
        lbnd : Scalar
            The lower bound of the definite integral.

        Returns
        -------
        new_series : series
            A new series representing the integral. The domain is the
            same as the domain of the integrated series.

        """
        off, scl = self.mapparms()
        if lbnd is None:
            lbnd = 0
        else:
            lbnd = off + scl*lbnd
        coef = self._int(self.coef, m, k, lbnd, 1./scl)
        return self.__class__(coef, self.domain, self.window)

    def deriv(self, m=1):
        """Differentiate.

        Return a series instance that is the derivative of the current
        series.

        Parameters
        ----------
        m : non-negative int
            Find the derivative of order `m`.

        Returns
        -------
        new_series : series
            A new series representing the derivative. The domain is the
            same as the domain of the differentiated series.

        """
        off, scl = self.mapparms()
        coef = self._der(self.coef, m, scl)
        return self.__class__(coef, self.domain, self.window)

    def roots(self):
        """Return the roots of the series polynomial.

        Compute the roots for the series. Note that the accuracy of the
        roots decrease the further outside the domain they lie.

        Returns
        -------
        roots : ndarray
            Array containing the roots of the series.

        """
        roots = self._roots(self.coef)
        return pu.mapdomain(roots, self.window, self.domain)

    def linspace(self, n=100, domain=None):
        """Return x, y values at equally spaced points in domain.

        Returns the x, y values at `n` linearly spaced points across the
        domain.  Here y is the value of the polynomial at the points x.
        By default the domain is the same as that of the series instance.
        This method is intended mostly as a plotting aid.

        .. versionadded:: 1.5.0

        Parameters
        ----------
        n : int, optional
            Number of point pairs to return. The default value is 100.
        domain : {None, array_like}, optional
            If not None, the specified domain is used instead of that of
            the calling instance. It should be of the form ``[beg,end]``.
            The default is None, in which case the class domain is used.

        Returns
        -------
        x, y : ndarray
            x is equal to linspace(self.domain[0], self.domain[1], n) and
            y is the series evaluated at each element of x.

        """
        if domain is None:
            domain = self.domain
        x = np.linspace(domain[0], domain[1], n)
        y = self(x)
        return x, y

    @classmethod
    def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
            window=None):
        """Least squares fit to data.

        Return a series instance that is the least squares fit to the
        data `y` sampled at `x`. The domain of the returned instance can
        be specified and this will often result in a superior fit with
        less chance of ill conditioning.

        Parameters
        ----------
        x : array_like, shape (M,)
            x-coordinates of the M sample points ``(x[i], y[i])``.
        y : array_like, shape (M,) or (M, K)
            y-coordinates of the sample points. Several data sets of
            sample points sharing the same x-coordinates can be fitted at
            once by passing in a 2D-array that contains one dataset per
            column.
        deg : int or 1-D array_like
            Degree(s) of the fitting polynomials. If `deg` is a single
            integer all terms up to and including the `deg`'th term are
            included in the fit. For NumPy versions >= 1.11.0 a list of
            integers specifying the degrees of the terms to include may
            be used instead.
        domain : {None, [beg, end], []}, optional
            Domain to use for the returned series. If ``None``,
            then a minimal domain that covers the points `x` is chosen.
            If ``[]`` the class domain is used.
            The default value was the class domain in NumPy 1.4 and
            ``None`` in later versions. The ``[]`` option was added in
            numpy 1.5.0.
        rcond : float, optional
            Relative condition number of the fit. Singular values smaller
            than this relative to the largest singular value will be
            ignored. The default value is len(x)*eps, where eps is the
            relative precision of the float type, about 2e-16 in most
            cases.
        full : bool, optional
            Switch determining nature of return value. When it is False
            (the default) just the coefficients are returned, when True
            diagnostic information from the singular value decomposition
            is also returned.
        w : array_like, shape (M,), optional
            Weights. If not None the contribution of each point
            ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
            weights are chosen so that the errors of the products
            ``w[i]*y[i]`` all have the same variance.  The default value
            is None.

            .. versionadded:: 1.5.0
        window : {[beg, end]}, optional
            Window to use for the returned series. The default
            value is the default class domain

            .. versionadded:: 1.6.0

        Returns
        -------
        new_series : series
            A series that represents the least squares fit to the data
            and has the domain specified in the call.

        [resid, rank, sv, rcond] : list
            These values are only returned if `full` = True

            resid -- sum of squared residuals of the least squares fit
            rank -- the numerical rank of the scaled Vandermonde matrix
            sv -- singular values of the scaled Vandermonde matrix
            rcond -- value of `rcond`.

            For more details, see `linalg.lstsq`.

        """
        if domain is None:
            domain = pu.getdomain(x)
        elif type(domain) is list and len(domain) == 0:
            domain = cls.domain

        if window is None:
            window = cls.window

        xnew = pu.mapdomain(x, domain, window)
        res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
        if full:
            [coef, status] = res
            return cls(coef, domain=domain, window=window), status
        else:
            coef = res
            return cls(coef, domain=domain, window=window)

    @classmethod
    def fromroots(cls, roots, domain=[], window=None):
        """Return series instance that has the specified roots.

        Returns a series representing the product
        ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
        list of roots.

        Parameters
        ----------
        roots : array_like
            List of roots.
        domain : {[], None, array_like}, optional
            Domain for the resulting series. If None the domain is the
            interval from the smallest root to the largest. If [] the
            domain is the class domain. The default is [].
        window : {None, array_like}, optional
            Window for the returned series. If None the class window is
            used. The default is None.

        Returns
        -------
        new_series : series
            Series with the specified roots.

        """
        [roots] = pu.as_series([roots], trim=False)
        if domain is None:
            domain = pu.getdomain(roots)
        elif type(domain) is list and len(domain) == 0:
            domain = cls.domain

        if window is None:
            window = cls.window

        deg = len(roots)
        off, scl = pu.mapparms(domain, window)
        rnew = off + scl*roots
        coef = cls._fromroots(rnew) / scl**deg
        return cls(coef, domain=domain, window=window)

    @classmethod
    def identity(cls, domain=None, window=None):
        """Identity function.

        If ``p`` is the returned series, then ``p(x) == x`` for all
        values of x.

        Parameters
        ----------
        domain : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the domain. If None
            is given then the class domain is used. The default is None.
        window : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the window.
            If None is given then the class window is used. The default
            is None.

        Returns
        -------
        new_series : series
            Series representing the identity.

        """
        if domain is None:
            domain = cls.domain
        if window is None:
            window = cls.window
        off, scl = pu.mapparms(window, domain)
        coef = cls._line(off, scl)
        return cls(coef, domain, window)

    @classmethod
    def basis(cls, deg, domain=None, window=None):
        """Series basis polynomial of degree `deg`.

        Returns the series representing the basis polynomial of degree
        `deg`.

        .. versionadded:: 1.7.0

        Parameters
        ----------
        deg : int
            Degree of the basis polynomial for the series. Must be >= 0.
        domain : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the domain. If None
            is given then the class domain is used. The default is None.
        window : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the window. If None
            is given then the class window is used. The default is None.

        Returns
        -------
        new_series : series
            A series with the coefficient of the `deg` term set to one
            and all others zero.

        """
        if domain is None:
            domain = cls.domain
        if window is None:
            window = cls.window
        ideg = int(deg)

        if ideg != deg or ideg < 0:
            raise ValueError("deg must be non-negative integer")
        return cls([0]*ideg + [1], domain, window)

    @classmethod
    def cast(cls, series, domain=None, window=None):
        """Convert series to series of this class.

        The `series` is expected to be an instance of some polynomial
        series of one of the types supported by the numpy.polynomial
        module, but could be some other class that supports the convert
        method.

        .. versionadded:: 1.7.0

        Parameters
        ----------
        series : series
            The series instance to be converted.
        domain : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the domain. If None
            is given then the class domain is used. The default is None.
        window : {None, array_like}, optional
            If given, the array must be of the form ``[beg, end]``, where
            ``beg`` and ``end`` are the endpoints of the window. If None
            is given then the class window is used. The default is None.

        Returns
        -------
        new_series : series
            A series of the same kind as the calling class and equal to
            `series` when evaluated.

        See Also
        --------
        convert : similar instance method

        """
        if domain is None:
            domain = cls.domain
        if window is None:
            window = cls.window
        return series.convert(domain, cls, window)
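# Illustrative sketch (not part of the original module): how the
# domain/window machinery declared above behaves through a concrete
# subclass.  Polynomial is one of the stock subclasses that supplies the
# virtual functions required by ABCPolyBase.
if __name__ == "__main__":
    from numpy.polynomial import Polynomial

    p = Polynomial([0, 1], domain=[0, 10], window=[-1, 1])
    off, scl = p.mapparms()     # (-1.0, 0.2): L(0) = -1, L(10) = 1
    # __call__ maps the argument into the window before evaluating,
    # so the domain midpoint lands on the window midpoint.
    assert p(5.0) == off + scl*5.0 == 0.0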
30,092
30.346875
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_hermite_e.py
"""Tests for hermite_e module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) He0 = np.array([1]) He1 = np.array([0, 1]) He2 = np.array([-1, 0, 1]) He3 = np.array([0, -3, 0, 1]) He4 = np.array([3, 0, -6, 0, 1]) He5 = np.array([0, 15, 0, -10, 0, 1]) He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] def trim(x): return herme.hermetrim(x, tol=1e-6) class TestConstants(object): def test_hermedomain(self): assert_equal(herme.hermedomain, [-1, 1]) def test_hermezero(self): assert_equal(herme.hermezero, [0]) def test_hermeone(self): assert_equal(herme.hermeone, [1]) def test_hermex(self): assert_equal(herme.hermex, [0, 1]) class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_hermeadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = herme.hermeadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermesub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = herme.hermesub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermemulx(self): assert_equal(herme.hermemulx([0]), [0]) assert_equal(herme.hermemulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [i, 0, 1] assert_equal(herme.hermemulx(ser), tgt) def test_hermemul(self): # check values of result for i in range(5): pol1 = [0]*i + [1] val1 = herme.hermeval(self.x, pol1) for j in range(5): msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = herme.hermeval(self.x, pol2) pol3 = herme.hermemul(pol1, pol2) val3 = herme.hermeval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) assert_almost_equal(val3, val1*val2, err_msg=msg) def test_hermediv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = herme.hermeadd(ci, cj) quo, rem = herme.hermediv(tgt, ci) res = herme.hermeadd(herme.hermemul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([4., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermeval(self): #check empty input assert_equal(herme.hermeval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): msg = "At i=%d" % i tgt = y[i] res = herme.hermeval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(herme.hermeval(x, [1]).shape, dims) assert_equal(herme.hermeval(x, [1, 0]).shape, dims) assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) def test_hermeval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = herme.hermeval2d(x1, x2, self.c2d) 
assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_hermeval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = herme.hermeval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = herme.hermegrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herme.hermegrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_hermegrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herme.hermegrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_hermeint(self): # check exceptions assert_raises(ValueError, herme.hermeint, [0], .5) assert_raises(ValueError, herme.hermeint, [0], -1) assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) assert_raises(ValueError, herme.hermeint, [0], scl=[0]) assert_raises(ValueError, herme.hermeint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = herme.hermeint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i]) res = herme.herme2poly(hermeint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) assert_almost_equal(herme.hermeval(-1, hermeint), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) res = herme.herme2poly(hermeint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1) res = herme.hermeint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k]) res = herme.hermeint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def 
test_hermeint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T res = herme.hermeint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([herme.hermeint(c) for c in c2d]) res = herme.hermeint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) res = herme.hermeint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative(object): def test_hermeder(self): # check exceptions assert_raises(ValueError, herme.hermeder, [0], .5) assert_raises(ValueError, herme.hermeder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = herme.hermeder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = herme.hermeder( herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_hermeder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T res = herme.hermeder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([herme.hermeder(c) for c in c2d]) res = herme.hermeder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_hermevander(self): # check for 1d x x = np.arange(3) v = herme.hermevander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = herme.hermevander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) def test_hermevander2d(self): # also tests hermeval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = herme.hermevander2d(x1, x2, [1, 2]) tgt = herme.hermeval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = herme.hermevander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_hermevander3d(self): # also tests hermeval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) tgt = herme.hermeval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestFitting(object): def test_hermefit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, herme.hermefit, [1], [1], -1) assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) assert_raises(TypeError, herme.hermefit, [], [1], 0) assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) assert_raises(TypeError, herme.hermefit, [1], [1], []) # Test fit x = 
np.linspace(0, 2) y = f(x) # coef3 = herme.hermefit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(herme.hermeval(x, coef3), y) coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) assert_almost_equal(herme.hermeval(x, coef3), y) # coef4 = herme.hermefit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(herme.hermeval(x, coef4), y) coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) assert_equal(len(coef4), 5) assert_almost_equal(herme.hermeval(x, coef4), y) # check things still work if deg is not in strict increasing order coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) assert_equal(len(coef4), 5) assert_almost_equal(herme.hermeval(x, coef4), y) # coef2d = herme.hermefit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 wcoef3 = herme.hermefit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values: x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) # test fitting only even HermiteE polynomials x = np.linspace(-1, 1) y = f2(x) coef1 = herme.hermefit(x, y, 4) assert_almost_equal(herme.hermeval(x, coef1), y) coef2 = herme.hermefit(x, y, [0, 2, 4]) assert_almost_equal(herme.hermeval(x, coef2), y) assert_almost_equal(coef1, coef2) class TestCompanion(object): def test_raises(self): assert_raises(ValueError, herme.hermecompanion, []) assert_raises(ValueError, herme.hermecompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(herme.hermecompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) class TestGauss(object): def test_100(self): x, w = herme.hermegauss(100) # test orthogonality. Note that the results need to be normalized, # otherwise the huge values that can arise from fast growing # functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct tgt = np.sqrt(2*np.pi) assert_almost_equal(w.sum(), tgt) class TestMisc(object): def test_hermefromroots(self): res = herme.hermefromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = herme.hermefromroots(roots) res = herme.hermeval(roots, pol) tgt = 0 assert_(len(pol) == i + 1) assert_almost_equal(herme.herme2poly(pol)[-1], 1) assert_almost_equal(res, tgt) def test_hermeroots(self): assert_almost_equal(herme.hermeroots([1]), []) assert_almost_equal(herme.hermeroots([1, 1]), [-1]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = herme.hermeroots(herme.hermefromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_hermetrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, herme.hermetrim, coef, -1) # Test results assert_equal(herme.hermetrim(coef), coef[:-1]) assert_equal(herme.hermetrim(coef, 1), coef[:-3]) assert_equal(herme.hermetrim(coef, 2), [0]) def test_hermeline(self): assert_equal(herme.hermeline(3, 4), [3, 4]) def test_herme2poly(self): for i in range(10): assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) def test_poly2herme(self): for i in range(10): assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) tgt = np.exp(-.5*x**2) res = herme.hermeweight(x) assert_almost_equal(res, tgt) if __name__ == "__main__": run_module_suite()
18789
32.9783
77
py
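The file above pins down the HermiteE ("probabilists' Hermite") basis end to end: the explicit He_n power-coefficient tables, arithmetic, evaluation, calculus, Vandermonde matrices, fitting, and Gauss quadrature. A minimal usage sketch of the same public API (not repository code; it assumes only that numpy.polynomial.hermite_e is importable, as these tests do):

import numpy as np
from numpy.polynomial import hermite_e as herme

# He2(x) = x**2 - 1, matching the He2 = [-1, 0, 1] table above.
x = np.linspace(-3, 3, 7)
assert np.allclose(herme.hermeval(x, [0, 0, 1]), x**2 - 1)

# Differentiation undoes integration, the identity TestDerivative checks.
c = np.array([1.0, 2.0, 3.0])
assert np.allclose(herme.hermeder(herme.hermeint(c, m=2), m=2), c)

# Gauss-HermiteE weights integrate against exp(-x**2/2) over the real line,
# so they sum to sqrt(2*pi), exactly as TestGauss asserts.
_, w = herme.hermegauss(50)
assert np.isclose(w.sum(), np.sqrt(2*np.pi))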
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_laguerre.py
"""Tests for laguerre module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) L0 = np.array([1])/1 L1 = np.array([1, -1])/1 L2 = np.array([2, -4, 1])/2 L3 = np.array([6, -18, 9, -1])/6 L4 = np.array([24, -96, 72, -16, 1])/24 L5 = np.array([120, -600, 600, -200, 25, -1])/120 L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 Llist = [L0, L1, L2, L3, L4, L5, L6] def trim(x): return lag.lagtrim(x, tol=1e-6) class TestConstants(object): def test_lagdomain(self): assert_equal(lag.lagdomain, [0, 1]) def test_lagzero(self): assert_equal(lag.lagzero, [0]) def test_lagone(self): assert_equal(lag.lagone, [1]) def test_lagx(self): assert_equal(lag.lagx, [1, -1]) class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_lagadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = lag.lagadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagsub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = lag.lagsub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagmulx(self): assert_equal(lag.lagmulx([0]), [0]) assert_equal(lag.lagmulx([1]), [1, -1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] assert_almost_equal(lag.lagmulx(ser), tgt) def test_lagmul(self): # check values of result for i in range(5): pol1 = [0]*i + [1] val1 = lag.lagval(self.x, pol1) for j in range(5): msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = lag.lagval(self.x, pol2) pol3 = lag.lagmul(pol1, pol2) val3 = lag.lagval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) assert_almost_equal(val3, val1*val2, err_msg=msg) def test_lagdiv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = lag.lagadd(ci, cj) quo, rem = lag.lagdiv(tgt, ci) res = lag.lagadd(lag.lagmul(quo, ci), rem) assert_almost_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([9., -14., 6.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_lagval(self): #check empty input assert_equal(lag.lagval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7): msg = "At i=%d" % i tgt = y[i] res = lag.lagval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(lag.lagval(x, [1]).shape, dims) assert_equal(lag.lagval(x, [1, 0]).shape, dims) assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) def test_lagval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = lag.lagval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_lagval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, lag.lagval3d, x1, 
x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = lag.lagval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = lag.laggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_laggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = lag.laggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_lagint(self): # check exceptions assert_raises(ValueError, lag.lagint, [0], .5) assert_raises(ValueError, lag.lagint, [0], -1) assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) assert_raises(ValueError, lag.lagint, [0], scl=[0]) assert_raises(ValueError, lag.lagint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = lag.lagint([0], m=i, k=k) assert_almost_equal(res, [1, -1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i]) res = lag.lag2poly(lagint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) assert_almost_equal(lag.lagval(-1, lagint), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) res = lag.lag2poly(lagint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1) res = lag.lagint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k]) res = lag.lagint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], scl=2) res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_lagint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T res = lag.lagint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([lag.lagint(c) for c in c2d]) res = lag.lagint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) res = lag.lagint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) 
class TestDerivative(object): def test_lagder(self): # check exceptions assert_raises(ValueError, lag.lagder, [0], .5) assert_raises(ValueError, lag.lagder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = lag.lagder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = lag.lagder(lag.lagint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_lagder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T res = lag.lagder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([lag.lagder(c) for c in c2d]) res = lag.lagder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_lagvander(self): # check for 1d x x = np.arange(3) v = lag.lagvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = lag.lagvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) def test_lagvander2d(self): # also tests lagval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = lag.lagvander2d(x1, x2, [1, 2]) tgt = lag.lagval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = lag.lagvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_lagvander3d(self): # also tests lagval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) tgt = lag.lagval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestFitting(object): def test_lagfit(self): def f(x): return x*(x - 1)*(x - 2) # Test exceptions assert_raises(ValueError, lag.lagfit, [1], [1], -1) assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) assert_raises(TypeError, lag.lagfit, [], [1], 0) assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) assert_raises(TypeError, lag.lagfit, [1], [1], []) # Test fit x = np.linspace(0, 2) y = f(x) # coef3 = lag.lagfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(lag.lagval(x, coef3), y) coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) assert_almost_equal(lag.lagval(x, coef3), y) # coef4 = lag.lagfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(lag.lagval(x, coef4), y) coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) assert_equal(len(coef4), 5) assert_almost_equal(lag.lagval(x, coef4), y) # coef2d = lag.lagfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) 
assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 wcoef3 = lag.lagfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values: x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) class TestCompanion(object): def test_raises(self): assert_raises(ValueError, lag.lagcompanion, []) assert_raises(ValueError, lag.lagcompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(lag.lagcompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) class TestGauss(object): def test_100(self): x, w = lag.laggauss(100) # test orthogonality. Note that the results need to be normalized, # otherwise the huge values that can arise from fast growing # functions like Laguerre can be very confusing. v = lag.lagvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct tgt = 1.0 assert_almost_equal(w.sum(), tgt) class TestMisc(object): def test_lagfromroots(self): res = lag.lagfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = lag.lagfromroots(roots) res = lag.lagval(roots, pol) tgt = 0 assert_(len(pol) == i + 1) assert_almost_equal(lag.lag2poly(pol)[-1], 1) assert_almost_equal(res, tgt) def test_lagroots(self): assert_almost_equal(lag.lagroots([1]), []) assert_almost_equal(lag.lagroots([0, 1]), [1]) for i in range(2, 5): tgt = np.linspace(0, 3, i) res = lag.lagroots(lag.lagfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_lagtrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, lag.lagtrim, coef, -1) # Test results assert_equal(lag.lagtrim(coef), coef[:-1]) assert_equal(lag.lagtrim(coef, 1), coef[:-3]) assert_equal(lag.lagtrim(coef, 2), [0]) def test_lagline(self): assert_equal(lag.lagline(3, 4), [7, -4]) def test_lag2poly(self): for i in range(7): assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) def test_poly2lag(self): for i in range(7): assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) def test_weight(self): x = np.linspace(0, 10, 11) tgt = np.exp(-x) res = lag.lagweight(x) assert_almost_equal(res, tgt) if __name__ == "__main__": run_module_suite()
17398
31.582397
74
py
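The Laguerre suite mirrors the HermiteE one but over [0, inf) with weight exp(-x); note that lagdomain is [0, 1] and lagx is [1, -1], because x = L0(x) - L1(x). A short sketch of the quadrature and line-construction behaviour those tests rely on (illustrative only, same public API):

import numpy as np
from numpy.polynomial import laguerre as lag

# laggauss gives nodes/weights for integrating f(x)*exp(-x) over [0, inf).
x, w = lag.laggauss(50)
assert np.isclose(w.sum(), 1.0)         # integral of exp(-x) is 1
assert np.isclose(np.sum(w * x), 1.0)   # integral of x*exp(-x) is Gamma(2) = 1

# lagline(3, 4) is 3 + 4x in Laguerre form: 3*L0 + 4*(L0 - L1) = [7, -4],
# which is why test_lagline above expects [7, -4].
assert np.isclose(lag.lagval(2.0, lag.lagline(3, 4)), 3 + 4*2.0)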
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_printing.py
from __future__ import division, absolute_import, print_function import numpy.polynomial as poly from numpy.testing import run_module_suite, assert_equal class TestStr(object): def test_polynomial_str(self): res = str(poly.Polynomial([0, 1])) tgt = 'poly([0. 1.])' assert_equal(res, tgt) def test_chebyshev_str(self): res = str(poly.Chebyshev([0, 1])) tgt = 'cheb([0. 1.])' assert_equal(res, tgt) def test_legendre_str(self): res = str(poly.Legendre([0, 1])) tgt = 'leg([0. 1.])' assert_equal(res, tgt) def test_hermite_str(self): res = str(poly.Hermite([0, 1])) tgt = 'herm([0. 1.])' assert_equal(res, tgt) def test_hermiteE_str(self): res = str(poly.HermiteE([0, 1])) tgt = 'herme([0. 1.])' assert_equal(res, tgt) def test_laguerre_str(self): res = str(poly.Laguerre([0, 1])) tgt = 'lag([0. 1.])' assert_equal(res, tgt) class TestRepr(object): def test_polynomial_str(self): res = repr(poly.Polynomial([0, 1])) tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])' assert_equal(res, tgt) def test_chebyshev_str(self): res = repr(poly.Chebyshev([0, 1])) tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])' assert_equal(res, tgt) def test_legendre_repr(self): res = repr(poly.Legendre([0, 1])) tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])' assert_equal(res, tgt) def test_hermite_repr(self): res = repr(poly.Hermite([0, 1])) tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])' assert_equal(res, tgt) def test_hermiteE_repr(self): res = repr(poly.HermiteE([0, 1])) tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])' assert_equal(res, tgt) def test_laguerre_repr(self): res = repr(poly.Laguerre([0, 1])) tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])' assert_equal(res, tgt) # if __name__ == "__main__": run_module_suite()
2140
27.546667
70
py
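test_printing hard-codes strings produced by the NumPy >= 1.14 array print style ('[0. 1.]' with no commas in str, commas in repr). A quick interactive check, hedged on that print style being the active one:

import numpy.polynomial as poly

p = poly.Chebyshev([0, 1])
print(str(p))    # cheb([0. 1.])  under the >= 1.14 (non-legacy) printing
print(repr(p))   # Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])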
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_legendre.py
"""Tests for legendre module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) L0 = np.array([1]) L1 = np.array([0, 1]) L2 = np.array([-1, 0, 3])/2 L3 = np.array([0, -3, 0, 5])/2 L4 = np.array([3, 0, -30, 0, 35])/8 L5 = np.array([0, 15, 0, -70, 0, 63])/8 L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] def trim(x): return leg.legtrim(x, tol=1e-6) class TestConstants(object): def test_legdomain(self): assert_equal(leg.legdomain, [-1, 1]) def test_legzero(self): assert_equal(leg.legzero, [0]) def test_legone(self): assert_equal(leg.legone, [1]) def test_legx(self): assert_equal(leg.legx, [0, 1]) class TestArithmetic(object): x = np.linspace(-1, 1, 100) def test_legadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = leg.legadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legsub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = leg.legsub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legmulx(self): assert_equal(leg.legmulx([0]), [0]) assert_equal(leg.legmulx([1]), [0, 1]) for i in range(1, 5): tmp = 2*i + 1 ser = [0]*i + [1] tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] assert_equal(leg.legmulx(ser), tgt) def test_legmul(self): # check values of result for i in range(5): pol1 = [0]*i + [1] val1 = leg.legval(self.x, pol1) for j in range(5): msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = leg.legval(self.x, pol2) pol3 = leg.legmul(pol1, pol2) val3 = leg.legval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) assert_almost_equal(val3, val1*val2, err_msg=msg) def test_legdiv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = leg.legadd(ci, cj) quo, rem = leg.legdiv(tgt, ci) res = leg.legadd(leg.legmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2., 2., 2.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_legval(self): #check empty input assert_equal(leg.legval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): msg = "At i=%d" % i tgt = y[i] res = leg.legval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(leg.legval(x, [1]).shape, dims) assert_equal(leg.legval(x, [1, 0]).shape, dims) assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, 
self.c2d) assert_(res.shape == (2, 3)) def test_legval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_legint(self): # check exceptions assert_raises(ValueError, leg.legint, [0], .5) assert_raises(ValueError, leg.legint, [0], -1) assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) assert_raises(ValueError, leg.legint, [0], lbnd=[0]) assert_raises(ValueError, leg.legint, [0], scl=[0]) assert_raises(ValueError, leg.legint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = leg.legint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i]) res = leg.leg2poly(legint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) assert_almost_equal(leg.legval(-1, legint), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], scl=2) res = leg.leg2poly(legint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1) res = leg.legint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k]) res = leg.legint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], scl=2) res = leg.legint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_legint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([leg.legint(c) for c in c2d.T]).T res = leg.legint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([leg.legint(c) for c in c2d]) res = 
leg.legint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) res = leg.legint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative(object): def test_legder(self): # check exceptions assert_raises(ValueError, leg.legder, [0], .5) assert_raises(ValueError, leg.legder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = leg.legder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = leg.legder(leg.legint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_legder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([leg.legder(c) for c in c2d.T]).T res = leg.legder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([leg.legder(c) for c in c2d]) res = leg.legder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_legvander(self): # check for 1d x x = np.arange(3) v = leg.legvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = leg.legvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) def test_legvander2d(self): # also tests polyval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = leg.legvander2d(x1, x2, [1, 2]) tgt = leg.legval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = leg.legvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_legvander3d(self): # also tests polyval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) tgt = leg.legval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestFitting(object): def test_legfit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, leg.legfit, [1], [1], -1) assert_raises(TypeError, leg.legfit, [[1]], [1], 0) assert_raises(TypeError, leg.legfit, [], [1], 0) assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) assert_raises(TypeError, leg.legfit, [1], [1], []) # Test fit x = np.linspace(0, 2) y = f(x) # coef3 = leg.legfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(leg.legval(x, coef3), y) coef3 = leg.legfit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) assert_almost_equal(leg.legval(x, coef3), y) # coef4 = leg.legfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(leg.legval(x, coef4), y) coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) assert_equal(len(coef4), 5) 
assert_almost_equal(leg.legval(x, coef4), y) # check things still work if deg is not in strict increasing order coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) assert_equal(len(coef4), 5) assert_almost_equal(leg.legval(x, coef4), y) # coef2d = leg.legfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 wcoef3 = leg.legfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values: x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) # test fitting only even Legendre polynomials x = np.linspace(-1, 1) y = f2(x) coef1 = leg.legfit(x, y, 4) assert_almost_equal(leg.legval(x, coef1), y) coef2 = leg.legfit(x, y, [0, 2, 4]) assert_almost_equal(leg.legval(x, coef2), y) assert_almost_equal(coef1, coef2) class TestCompanion(object): def test_raises(self): assert_raises(ValueError, leg.legcompanion, []) assert_raises(ValueError, leg.legcompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(leg.legcompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(leg.legcompanion([1, 2])[0, 0] == -.5) class TestGauss(object): def test_100(self): x, w = leg.leggauss(100) # test orthogonality. Note that the results need to be normalized, # otherwise the huge values that can arise from fast growing # functions like Laguerre can be very confusing. v = leg.legvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct tgt = 2.0 assert_almost_equal(w.sum(), tgt) class TestMisc(object): def test_legfromroots(self): res = leg.legfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = leg.legfromroots(roots) res = leg.legval(roots, pol) tgt = 0 assert_(len(pol) == i + 1) assert_almost_equal(leg.leg2poly(pol)[-1], 1) assert_almost_equal(res, tgt) def test_legroots(self): assert_almost_equal(leg.legroots([1]), []) assert_almost_equal(leg.legroots([1, 2]), [-.5]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = leg.legroots(leg.legfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_legtrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, leg.legtrim, coef, -1) # Test results assert_equal(leg.legtrim(coef), coef[:-1]) assert_equal(leg.legtrim(coef, 1), coef[:-3]) assert_equal(leg.legtrim(coef, 2), [0]) def test_legline(self): assert_equal(leg.legline(3, 4), [3, 4]) def test_leg2poly(self): for i in range(10): assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) def test_poly2leg(self): for i in range(10): assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11) tgt = 1. res = leg.legweight(x) assert_almost_equal(res, tgt) if __name__ == "__main__": run_module_suite()
18162
31.844485
74
py
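The Legendre fitting tests exercise two legfit behaviours worth calling out: an exact least-squares fit when the requested degree matches the data's true degree, and deg passed as an explicit list of term indices (e.g. even terms only). A sketch with made-up data, using only calls the tests themselves make:

import numpy as np
from numpy.polynomial import legendre as leg

x = np.linspace(0, 2)
y = x * (x - 1) * (x - 2)                # a cubic, so a degree-3 fit is exact
coef = leg.legfit(x, y, 3)
assert np.allclose(leg.legval(x, coef), y)

# deg as a list restricts which Legendre terms enter the fit.
x2 = np.linspace(-1, 1)
y2 = x2**4 + x2**2 + 1                   # even function: indices 0, 2, 4 suffice
coef_even = leg.legfit(x2, y2, [0, 2, 4])
assert np.allclose(leg.legval(x2, coef_even), y2)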
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_chebyshev.py
"""Tests for chebyshev module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) def trim(x): return cheb.chebtrim(x, tol=1e-6) T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] T3 = [0, -3, 0, 4] T4 = [1, 0, -8, 0, 8] T5 = [0, 5, 0, -20, 0, 16] T6 = [-1, 0, 18, 0, -48, 0, 32] T7 = [0, -7, 0, 56, 0, -112, 0, 64] T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] class TestPrivate(object): def test__cseries_to_zseries(self): for i in range(5): inp = np.array([2] + [1]*i, np.double) tgt = np.array([.5]*i + [2] + [.5]*i, np.double) res = cheb._cseries_to_zseries(inp) assert_equal(res, tgt) def test__zseries_to_cseries(self): for i in range(5): inp = np.array([.5]*i + [2] + [.5]*i, np.double) tgt = np.array([2] + [1]*i, np.double) res = cheb._zseries_to_cseries(inp) assert_equal(res, tgt) class TestConstants(object): def test_chebdomain(self): assert_equal(cheb.chebdomain, [-1, 1]) def test_chebzero(self): assert_equal(cheb.chebzero, [0]) def test_chebone(self): assert_equal(cheb.chebone, [1]) def test_chebx(self): assert_equal(cheb.chebx, [0, 1]) class TestArithmetic(object): def test_chebadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = cheb.chebadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebsub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = cheb.chebsub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebmulx(self): assert_equal(cheb.chebmulx([0]), [0]) assert_equal(cheb.chebmulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [.5, 0, .5] assert_equal(cheb.chebmulx(ser), tgt) def test_chebmul(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(i + j + 1) tgt[i + j] += .5 tgt[abs(i - j)] += .5 res = cheb.chebmul([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebdiv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = cheb.chebadd(ci, cj) quo, rem = cheb.chebdiv(tgt, ci) res = cheb.chebadd(cheb.chebmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2.5, 2., 1.5]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_chebval(self): #check empty input assert_equal(cheb.chebval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Tlist] for i in range(10): msg = "At i=%d" % i tgt = y[i] res = cheb.chebval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(cheb.chebval(x, [1]).shape, dims) assert_equal(cheb.chebval(x, [1, 0]).shape, dims) assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) def test_chebval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, cheb.chebval2d, x1, 
x2[:2], self.c2d) #test values tgt = y1*y2 res = cheb.chebval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = cheb.chebval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_chebval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = cheb.chebval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = cheb.chebgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_chebgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = cheb.chebgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_chebint(self): # check exceptions assert_raises(ValueError, cheb.chebint, [0], .5) assert_raises(ValueError, cheb.chebint, [0], -1) assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) assert_raises(ValueError, cheb.chebint, [0], scl=[0]) assert_raises(ValueError, cheb.chebint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = cheb.chebint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i]) res = cheb.cheb2poly(chebint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) assert_almost_equal(cheb.chebval(-1, chebint), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) res = cheb.cheb2poly(chebint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1) res = cheb.chebint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k]) res = cheb.chebint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def 
test_chebint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T res = cheb.chebint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([cheb.chebint(c) for c in c2d]) res = cheb.chebint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) res = cheb.chebint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative(object): def test_chebder(self): # check exceptions assert_raises(ValueError, cheb.chebder, [0], .5) assert_raises(ValueError, cheb.chebder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = cheb.chebder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_chebder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T res = cheb.chebder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([cheb.chebder(c) for c in c2d]) res = cheb.chebder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_chebvander(self): # check for 1d x x = np.arange(3) v = cheb.chebvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = cheb.chebvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) def test_chebvander2d(self): # also tests chebval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = cheb.chebvander2d(x1, x2, [1, 2]) tgt = cheb.chebval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = cheb.chebvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_chebvander3d(self): # also tests chebval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) tgt = cheb.chebval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestFitting(object): def test_chebfit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, cheb.chebfit, [1], [1], -1) assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) assert_raises(TypeError, cheb.chebfit, [], [1], 0) assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) assert_raises(TypeError, cheb.chebfit, [1], [1], []) # Test fit x = np.linspace(0, 2) y = f(x) # coef3 = cheb.chebfit(x, y, 3) assert_equal(len(coef3), 4) 
assert_almost_equal(cheb.chebval(x, coef3), y) coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) assert_almost_equal(cheb.chebval(x, coef3), y) # coef4 = cheb.chebfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(cheb.chebval(x, coef4), y) coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) assert_equal(len(coef4), 5) assert_almost_equal(cheb.chebval(x, coef4), y) # check things still work if deg is not in strict increasing order coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) assert_equal(len(coef4), 5) assert_almost_equal(cheb.chebval(x, coef4), y) # coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() w[1::2] = 1 yw[0::2] = 0 wcoef3 = cheb.chebfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef3, coef3) # wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values: x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) # test fitting only even polynomials x = np.linspace(-1, 1) y = f2(x) coef1 = cheb.chebfit(x, y, 4) assert_almost_equal(cheb.chebval(x, coef1), y) coef2 = cheb.chebfit(x, y, [0, 2, 4]) assert_almost_equal(cheb.chebval(x, coef2), y) assert_almost_equal(coef1, coef2) class TestInterpolate(object): def f(self, x): return x * (x - 1) * (x - 2) def test_raises(self): assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) def test_dimensions(self): for deg in range(1, 5): assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) def test_approximation(self): def powx(x, p): return x**p x = np.linspace(-1, 1, 10) for deg in range(0, 10): for p in range(0, deg + 1): c = cheb.chebinterpolate(powx, deg, (p,)) assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) class TestCompanion(object): def test_raises(self): assert_raises(ValueError, cheb.chebcompanion, []) assert_raises(ValueError, cheb.chebcompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(cheb.chebcompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) class TestGauss(object): def test_100(self): x, w = cheb.chebgauss(100) # test orthogonality. Note that the results need to be normalized, # otherwise the huge values that can arise from fast growing # functions like Laguerre can be very confusing.
v = cheb.chebvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct tgt = np.pi assert_almost_equal(w.sum(), tgt) class TestMisc(object): def test_chebfromroots(self): res = cheb.chebfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = [0]*i + [1] res = cheb.chebfromroots(roots)*2**(i-1) assert_almost_equal(trim(res), trim(tgt)) def test_chebroots(self): assert_almost_equal(cheb.chebroots([1]), []) assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = cheb.chebroots(cheb.chebfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_chebtrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, cheb.chebtrim, coef, -1) # Test results assert_equal(cheb.chebtrim(coef), coef[:-1]) assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) assert_equal(cheb.chebtrim(coef, 2), [0]) def test_chebline(self): assert_equal(cheb.chebline(3, 4), [3, 4]) def test_cheb2poly(self): for i in range(10): assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) def test_poly2cheb(self): for i in range(10): assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11)[1:-1] tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) res = cheb.chebweight(x) assert_almost_equal(res, tgt) def test_chebpts1(self): #test exceptions assert_raises(ValueError, cheb.chebpts1, 1.5) assert_raises(ValueError, cheb.chebpts1, 0) #test points tgt = [0] assert_almost_equal(cheb.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] assert_almost_equal(cheb.chebpts1(2), tgt) tgt = [-0.86602540378443871, 0, 0.86602540378443871] assert_almost_equal(cheb.chebpts1(3), tgt) tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): #test exceptions assert_raises(ValueError, cheb.chebpts2, 1.5) assert_raises(ValueError, cheb.chebpts2, 1) #test points tgt = [-1, 1] assert_almost_equal(cheb.chebpts2(2), tgt) tgt = [-1, 0, 1] assert_almost_equal(cheb.chebpts2(3), tgt) tgt = [-1, -0.5, .5, 1] assert_almost_equal(cheb.chebpts2(4), tgt) tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] assert_almost_equal(cheb.chebpts2(5), tgt) if __name__ == "__main__": run_module_suite()
20,420
32.204878
79
py
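Aside (an illustrative sketch, not part of the dumped source file above): the TestGauss case in that file asserts that the chebgauss weights sum to pi, i.e. that the Gauss-Chebyshev rule integrates f(x) = 1 against the weight 1/sqrt(1 - x**2). The degree and test function below are arbitrary choices for illustration; the same rule applied to f(x) = x**2 gives the weighted integral pi/2 over [-1, 1]:

import numpy as np
import numpy.polynomial.chebyshev as cheb

# Gauss-Chebyshev quadrature: sum(w*f(x)) approximates the integral of
# f(x)/sqrt(1 - x**2) over [-1, 1], and is exact for polynomial f of
# degree up to 2*deg - 1.
x, w = cheb.chebgauss(10)
approx = np.sum(w * x**2)
assert np.allclose(approx, np.pi/2)  # exact weighted integral of x**2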
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_polyutils.py
"""Tests for polyutils module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.polyutils as pu from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) class TestMisc(object): def test_trimseq(self): for i in range(5): tgt = [1] res = pu.trimseq([1] + [0]*5) assert_equal(res, tgt) def test_as_series(self): # check exceptions assert_raises(ValueError, pu.as_series, [[]]) assert_raises(ValueError, pu.as_series, [[[1, 2]]]) assert_raises(ValueError, pu.as_series, [[1], ['a']]) # check common types types = ['i', 'd', 'O'] for i in range(len(types)): for j in range(i): ci = np.ones(1, types[i]) cj = np.ones(1, types[j]) [resi, resj] = pu.as_series([ci, cj]) assert_(resi.dtype.char == resj.dtype.char) assert_(resj.dtype.char == types[i]) def test_trimcoef(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, pu.trimcoef, coef, -1) # Test results assert_equal(pu.trimcoef(coef), coef[:-1]) assert_equal(pu.trimcoef(coef, 1), coef[:-3]) assert_equal(pu.trimcoef(coef, 2), [0]) class TestDomain(object): def test_getdomain(self): # test for real values x = [1, 10, 3, -1] tgt = [-1, 10] res = pu.getdomain(x) assert_almost_equal(res, tgt) # test for complex values x = [1 + 1j, 1 - 1j, 0, 2] tgt = [-1j, 2 + 1j] res = pu.getdomain(x) assert_almost_equal(res, tgt) def test_mapdomain(self): # test for real values dom1 = [0, 4] dom2 = [1, 3] tgt = dom2 res = pu. mapdomain(dom1, dom1, dom2) assert_almost_equal(res, tgt) # test for complex values dom1 = [0 - 1j, 2 + 1j] dom2 = [-2, 2] tgt = dom2 x = dom1 res = pu.mapdomain(x, dom1, dom2) assert_almost_equal(res, tgt) # test for multidimensional arrays dom1 = [0, 4] dom2 = [1, 3] tgt = np.array([dom2, dom2]) x = np.array([dom1, dom1]) res = pu.mapdomain(x, dom1, dom2) assert_almost_equal(res, tgt) # test that subtypes are preserved. dom1 = [0, 4] dom2 = [1, 3] x = np.matrix([dom1, dom1]) res = pu.mapdomain(x, dom1, dom2) assert_(isinstance(res, np.matrix)) def test_mapparms(self): # test for real values dom1 = [0, 4] dom2 = [1, 3] tgt = [1, .5] res = pu. mapparms(dom1, dom2) assert_almost_equal(res, tgt) # test for complex values dom1 = [0 - 1j, 2 + 1j] dom2 = [-2, 2] tgt = [-1 + 1j, 1 - 1j] res = pu.mapparms(dom1, dom2) assert_almost_equal(res, tgt) if __name__ == "__main__": run_module_suite()
3,085
26.801802
64
py
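Aside (an illustrative sketch, not part of the dumped source file above): the mapdomain/mapparms pair tested in that file implements the affine change of variable the polynomial classes use to move data between a domain and a window. The arrays and domains below are arbitrary illustrations; mapparms returns the (offset, scale) of the map and mapdomain applies it, as the test's [1, .5] target shows:

import numpy as np
import numpy.polynomial.polyutils as pu

off, scl = pu.mapparms([0, 4], [1, 3])  # new = off + scl*old, here (1, 0.5)
x = np.array([0., 2., 4.])
res = pu.mapdomain(x, [0, 4], [1, 3])
assert np.allclose(res, off + scl*x)    # mapdomain is exactly this affine map
assert np.allclose(res, [1., 2., 3.])   # endpoints land on the new domain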
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_polynomial.py
"""Tests for polynomial module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) def trim(x): return poly.polytrim(x, tol=1e-6) T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] T3 = [0, -3, 0, 4] T4 = [1, 0, -8, 0, 8] T5 = [0, 5, 0, -20, 0, 16] T6 = [-1, 0, 18, 0, -48, 0, 32] T7 = [0, -7, 0, 56, 0, -112, 0, 64] T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] class TestConstants(object): def test_polydomain(self): assert_equal(poly.polydomain, [-1, 1]) def test_polyzero(self): assert_equal(poly.polyzero, [0]) def test_polyone(self): assert_equal(poly.polyone, [1]) def test_polyx(self): assert_equal(poly.polyx, [0, 1]) class TestArithmetic(object): def test_polyadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = poly.polyadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polysub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = poly.polysub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polymulx(self): assert_equal(poly.polymulx([0]), [0]) assert_equal(poly.polymulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i + 1) + [1] assert_equal(poly.polymulx(ser), tgt) def test_polymul(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(i + j + 1) tgt[i + j] += 1 res = poly.polymul([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polydiv(self): # check zero division assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) # check scalar division quo, rem = poly.polydiv([2], [2]) assert_equal((quo, rem), (1, 0)) quo, rem = poly.polydiv([2, 2], [2]) assert_equal((quo, rem), ((1, 1), 0)) # check rest. 
for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1, 2] cj = [0]*j + [1, 2] tgt = poly.polyadd(ci, cj) quo, rem = poly.polydiv(tgt, ci) res = poly.polyadd(poly.polymul(quo, ci), rem) assert_equal(res, tgt, err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([1., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): #check empty input assert_equal(poly.polyval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): tgt = y[i] res = poly.polyval(x, [0]*i + [1]) assert_almost_equal(res, tgt) tgt = x*(x**2 - 1) res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(poly.polyval(x, [1]).shape, dims) assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) def test_polyvalfromroots(self): # check exception for broadcasting x values over root array with # too few dimensions assert_raises(ValueError, poly.polyvalfromroots, [1], [1], tensor=False) # check empty input assert_equal(poly.polyvalfromroots([], [1]).size, 0) assert_(poly.polyvalfromroots([], [1]).shape == (0,)) # check empty input + multidimensional roots assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) # check scalar input assert_equal(poly.polyvalfromroots(1, 1), 0) assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(1, 5): tgt = y[i] res = poly.polyvalfromroots(x, [0]*i) assert_almost_equal(res, tgt) tgt = x*(x - 1)*(x + 1) res = poly.polyvalfromroots(x, [-1, 0, 1]) assert_almost_equal(res, tgt) # check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) # check compatibility with factorization ptest = [15, 2, -16, -2, 1] r = poly.polyroots(ptest) x = np.linspace(-1, 1) assert_almost_equal(poly.polyval(x, ptest), poly.polyvalfromroots(x, r)) # check multidimensional arrays of roots and values # check tensor=False rshape = (3, 5) x = np.arange(-3, 2) r = np.random.randint(-5, 5, size=rshape) res = poly.polyvalfromroots(x, r, tensor=False) tgt = np.empty(r.shape[1:]) for ii in range(tgt.size): tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) assert_equal(res, tgt) # check tensor=True x = np.vstack([x, 2*x]) res = poly.polyvalfromroots(x, r, tensor=True) tgt = np.empty(r.shape[1:] + x.shape) for ii in range(r.shape[1]): for jj in range(x.shape[0]): tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) assert_equal(res, tgt) def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 
res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_polyint(self): # check exceptions assert_raises(ValueError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) assert_raises(ValueError, poly.polyint, [0], scl=[0]) assert_raises(ValueError, poly.polyint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = poly.polyint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] res = poly.polyint(pol, m=1, k=[i]) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] res = poly.polyint(pol, m=1, k=[i], lbnd=-1) assert_almost_equal(poly.polyval(-1, res), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] res = poly.polyint(pol, m=1, k=[i], scl=2) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1) res = poly.polyint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k]) res = poly.polyint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], scl=2) res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_polyint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T res = poly.polyint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyint(c) for c in c2d]) res = poly.polyint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) res = poly.polyint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative(object): def test_polyder(self): # check exceptions assert_raises(ValueError, poly.polyder, [0], .5) 
assert_raises(ValueError, poly.polyder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = poly.polyder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_polyder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T res = poly.polyder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([poly.polyder(c) for c in c2d]) res = poly.polyder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_polyvander(self): # check for 1d x x = np.arange(3) v = poly.polyvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = poly.polyvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) def test_polyvander2d(self): # also tests polyval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = poly.polyvander2d(x1, x2, [1, 2]) tgt = poly.polyval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = poly.polyvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_polyvander3d(self): # also tests polyval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) tgt = poly.polyval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestCompanion(object): def test_raises(self): assert_raises(ValueError, poly.polycompanion, []) assert_raises(ValueError, poly.polycompanion, [1]) def test_dimensions(self): for i in range(1, 5): coef = [0]*i + [1] assert_(poly.polycompanion(coef).shape == (i, i)) def test_linear_root(self): assert_(poly.polycompanion([1, 2])[0, 0] == -.5) class TestMisc(object): def test_polyfromroots(self): res = poly.polyfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = Tlist[i] res = poly.polyfromroots(roots)*2**(i-1) assert_almost_equal(trim(res), trim(tgt)) def test_polyroots(self): assert_almost_equal(poly.polyroots([1]), []) assert_almost_equal(poly.polyroots([1, 2]), [-.5]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_polyfit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, poly.polyfit, [1], [1], -1) assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) assert_raises(TypeError, poly.polyfit, [], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) 
        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
        assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])
        assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])
        assert_raises(TypeError, poly.polyfit, [1], [1], [])
        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        #
        coef3 = poly.polyfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(poly.polyval(x, coef3), y)
        coef3 = poly.polyfit(x, y, [0, 1, 2, 3])
        assert_equal(len(coef3), 4)
        assert_almost_equal(poly.polyval(x, coef3), y)
        #
        coef4 = poly.polyfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(poly.polyval(x, coef4), y)
        coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])
        assert_equal(len(coef4), 5)
        assert_almost_equal(poly.polyval(x, coef4), y)
        #
        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        wcoef3 = poly.polyfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
        # test fitting only even polynomials
        x = np.linspace(-1, 1)
        y = f2(x)
        coef1 = poly.polyfit(x, y, 4)
        assert_almost_equal(poly.polyval(x, coef1), y)
        coef2 = poly.polyfit(x, y, [0, 2, 4])
        assert_almost_equal(poly.polyval(x, coef2), y)
        assert_almost_equal(coef1, coef2)

    def test_polytrim(self):
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, poly.polytrim, coef, -1)
        # Test results
        assert_equal(poly.polytrim(coef), coef[:-1])
        assert_equal(poly.polytrim(coef, 1), coef[:-3])
        assert_equal(poly.polytrim(coef, 2), [0])

    def test_polyline(self):
        assert_equal(poly.polyline(3, 4), [3, 4])


if __name__ == "__main__":
    run_module_suite()
19,253
32.602094
78
py
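Aside (a small usage sketch, not part of the dumped source file above; it leans on the list-of-degrees form of deg that the file's own tests exercise, and the data is an arbitrary illustration): passing a list restricts the fit to the named terms while still returning a full-length coefficient array, so fitting an even function with only even powers matches the unrestricted fit:

import numpy as np
import numpy.polynomial.polynomial as poly

x = np.linspace(-1, 1, 50)
y = x**4 + x**2 + 1                   # an even function
full = poly.polyfit(x, y, 4)          # all terms through degree 4
even = poly.polyfit(x, y, [0, 2, 4])  # only the even-degree terms
assert np.allclose(poly.polyval(x, even), y)
assert np.allclose(full, even)        # odd coefficients come out ~0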
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_classes.py
"""Test inter-conversion of different polynomial classes. This tests the convert and cast methods of all the polynomial classes. """ from __future__ import division, absolute_import, print_function import operator as op from numbers import Number import numpy as np from numpy.polynomial import ( Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite) from numpy.compat import long classes = ( Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) def test_class_methods(): for Poly1 in classes: for Poly2 in classes: yield check_conversion, Poly1, Poly2 yield check_cast, Poly1, Poly2 for Poly in classes: yield check_call, Poly yield check_identity, Poly yield check_basis, Poly yield check_fromroots, Poly yield check_fit, Poly yield check_equal, Poly yield check_not_equal, Poly yield check_add, Poly yield check_sub, Poly yield check_mul, Poly yield check_floordiv, Poly yield check_truediv, Poly yield check_mod, Poly yield check_divmod, Poly yield check_pow, Poly yield check_integ, Poly yield check_deriv, Poly yield check_roots, Poly yield check_linspace, Poly yield check_mapparms, Poly yield check_degree, Poly yield check_copy, Poly yield check_cutdeg, Poly yield check_truncate, Poly yield check_trim, Poly yield check_ufunc_override, Poly # # helper functions # random = np.random.random def assert_poly_almost_equal(p1, p2, msg=""): try: assert_(np.all(p1.domain == p2.domain)) assert_(np.all(p1.window == p2.window)) assert_almost_equal(p1.coef, p2.coef) except AssertionError: msg = "Result: %s\nTarget: %s", (p1, p2) raise AssertionError(msg) # # conversion methods that depend on two classes # def check_conversion(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) d1 = Poly1.domain + random((2,))*.25 w1 = Poly1.window + random((2,))*.25 p1 = Poly1(coef, domain=d1, window=w1) d2 = Poly2.domain + random((2,))*.25 w2 = Poly2.window + random((2,))*.25 p2 = p1.convert(kind=Poly2, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) assert_almost_equal(p2.window, w2) assert_almost_equal(p2(x), p1(x)) def check_cast(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) d1 = Poly1.domain + random((2,))*.25 w1 = Poly1.window + random((2,))*.25 p1 = Poly1(coef, domain=d1, window=w1) d2 = Poly2.domain + random((2,))*.25 w2 = Poly2.window + random((2,))*.25 p2 = Poly2.cast(p1, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) assert_almost_equal(p2.window, w2) assert_almost_equal(p2(x), p1(x)) # # methods that depend on one class # def check_identity(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 x = np.linspace(d[0], d[1], 11) p = Poly.identity(domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) assert_almost_equal(p(x), x) def check_basis(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p = Poly.basis(5, domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) assert_equal(p.coef, [0]*5 + [1]) def check_fromroots(Poly): # check that requested roots are zeros of a polynomial # of correct degree, domain, and window. 
d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 r = random((5,)) p1 = Poly.fromroots(r, domain=d, window=w) assert_equal(p1.degree(), len(r)) assert_equal(p1.domain, d) assert_equal(p1.window, w) assert_almost_equal(p1(r), 0) # check that polynomial is monic pdom = Polynomial.domain pwin = Polynomial.window p2 = Polynomial.cast(p1, domain=pdom, window=pwin) assert_almost_equal(p2.coef[-1], 1) def check_fit(Poly): def f(x): return x*(x - 1)*(x - 2) x = np.linspace(0, 3) y = f(x) # check default value of domain and window p = Poly.fit(x, y, 3) assert_almost_equal(p.domain, [0, 3]) assert_almost_equal(p(x), y) assert_equal(p.degree(), 3) # check with given domains and window d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p = Poly.fit(x, y, 3, domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) assert_almost_equal(p.window, w) p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) assert_almost_equal(p.window, w) # check with class domain default p = Poly.fit(x, y, 3, []) assert_equal(p.domain, Poly.domain) assert_equal(p.window, Poly.window) p = Poly.fit(x, y, [0, 1, 2, 3], []) assert_equal(p.domain, Poly.domain) assert_equal(p.window, Poly.window) # check that fit accepts weights. w = np.zeros_like(x) z = y + random(y.shape)*.25 w[::2] = 1 p1 = Poly.fit(x[::2], z[::2], 3) p2 = Poly.fit(x, z, 3, w=w) p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) assert_almost_equal(p1(x), p2(x)) assert_almost_equal(p2(x), p3(x)) def check_equal(Poly): p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) assert_(p1 == p1) assert_(not p1 == p2) assert_(not p1 == p3) assert_(not p1 == p4) def check_not_equal(Poly): p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) assert_(not p1 != p1) assert_(p1 != p2) assert_(p1 != p3) assert_(p1 != p4) def check_add(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 + p2 assert_poly_almost_equal(p2 + p1, p3) assert_poly_almost_equal(p1 + c2, p3) assert_poly_almost_equal(c2 + p1, p3) assert_poly_almost_equal(p1 + tuple(c2), p3) assert_poly_almost_equal(tuple(c2) + p1, p3) assert_poly_almost_equal(p1 + np.array(c2), p3) assert_poly_almost_equal(np.array(c2) + p1, p3) assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.add, p1, Chebyshev([0])) else: assert_raises(TypeError, op.add, p1, Polynomial([0])) def check_sub(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 - p2 assert_poly_almost_equal(p2 - p1, -p3) assert_poly_almost_equal(p1 - c2, p3) assert_poly_almost_equal(c2 - p1, -p3) assert_poly_almost_equal(p1 - tuple(c2), p3) assert_poly_almost_equal(tuple(c2) - p1, -p3) assert_poly_almost_equal(p1 - np.array(c2), p3) assert_poly_almost_equal(np.array(c2) - p1, -p3) assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) 
if Poly is Polynomial: assert_raises(TypeError, op.sub, p1, Chebyshev([0])) else: assert_raises(TypeError, op.sub, p1, Polynomial([0])) def check_mul(Poly): c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 * p2 assert_poly_almost_equal(p2 * p1, p3) assert_poly_almost_equal(p1 * c2, p3) assert_poly_almost_equal(c2 * p1, p3) assert_poly_almost_equal(p1 * tuple(c2), p3) assert_poly_almost_equal(tuple(c2) * p1, p3) assert_poly_almost_equal(p1 * np.array(c2), p3) assert_poly_almost_equal(np.array(c2) * p1, p3) assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) assert_poly_almost_equal(2 * p1, p1 * Poly([2])) assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.mul, p1, Chebyshev([0])) else: assert_raises(TypeError, op.mul, p1, Polynomial([0])) def check_floordiv(Poly): c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) assert_poly_almost_equal(p4 // p2, p1) assert_poly_almost_equal(p4 // c2, p1) assert_poly_almost_equal(c4 // p2, p1) assert_poly_almost_equal(p4 // tuple(c2), p1) assert_poly_almost_equal(tuple(c4) // p2, p1) assert_poly_almost_equal(p4 // np.array(c2), p1) assert_poly_almost_equal(np.array(c4) // p2, p1) assert_poly_almost_equal(2 // p2, Poly([0])) assert_poly_almost_equal(p2 // 2, 0.5*p2) assert_raises( TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) assert_raises( TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) else: assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) def check_truediv(Poly): # true division is valid only if the denominator is a Number and # not a python bool. 
p1 = Poly([1,2,3]) p2 = p1 * 5 for stype in np.ScalarType: if not issubclass(stype, Number) or issubclass(stype, bool): continue s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for stype in (int, long, float): s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for stype in [complex]: s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for s in [tuple(), list(), dict(), bool(), np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) for ptype in classes: assert_raises(TypeError, op.truediv, p2, ptype(1)) def check_mod(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) assert_poly_almost_equal(p4 % p2, p3) assert_poly_almost_equal(p4 % c2, p3) assert_poly_almost_equal(c4 % p2, p3) assert_poly_almost_equal(p4 % tuple(c2), p3) assert_poly_almost_equal(tuple(c4) % p2, p3) assert_poly_almost_equal(p4 % np.array(c2), p3) assert_poly_almost_equal(np.array(c4) % p2, p3) assert_poly_almost_equal(2 % p2, Poly([2])) assert_poly_almost_equal(p2 % 2, Poly([0])) assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.mod, p1, Chebyshev([0])) else: assert_raises(TypeError, op.mod, p1, Polynomial([0])) def check_divmod(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) quo, rem = divmod(p4, p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, c2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(c4, p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, tuple(c2)) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(tuple(c4), p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, np.array(c2)) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(np.array(c4), p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p2, 2) assert_poly_almost_equal(quo, 0.5*p2) assert_poly_almost_equal(rem, Poly([0])) quo, rem = divmod(2, p2) assert_poly_almost_equal(quo, Poly([0])) assert_poly_almost_equal(rem, Poly([2])) assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, divmod, p1, Chebyshev([0])) else: assert_raises(TypeError, divmod, p1, Polynomial([0])) def check_roots(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 tgt = np.sort(random((5,))) res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) assert_almost_equal(res, tgt) # default domain and window res = np.sort(Poly.fromroots(tgt).roots()) assert_almost_equal(res, tgt) def check_degree(Poly): p = Poly.basis(5) assert_equal(p.degree(), 5) def check_copy(Poly): p1 = Poly.basis(5) p2 = p1.copy() assert_(p1 == p2) assert_(p1 is not p2) assert_(p1.coef 
is not p2.coef)
    assert_(p1.domain is not p2.domain)
    assert_(p1.window is not p2.window)


def check_integ(Poly):
    P = Polynomial
    # Check defaults
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ())
    p2 = P.cast(p0.integ(2))
    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
    # Check with k
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ(k=1))
    p2 = P.cast(p0.integ(2, k=[1, 1]))
    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
    # Check with lbnd
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ(lbnd=1))
    p2 = P.cast(p0.integ(2, lbnd=1))
    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
    # Check scaling
    d = 2*Poly.domain
    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
    p1 = P.cast(p0.integ())
    p2 = P.cast(p0.integ(2))
    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))


def check_deriv(Poly):
    # Check that the derivative is the inverse of integration. It is
    # assumed that the integration has been checked elsewhere.
    d = Poly.domain + random((2,))*.25
    w = Poly.window + random((2,))*.25
    p1 = Poly([1, 2, 3], domain=d, window=w)
    p2 = p1.integ(2, k=[1, 2])
    p3 = p1.integ(1, k=[1])
    assert_almost_equal(p2.deriv(1).coef, p3.coef)
    assert_almost_equal(p2.deriv(2).coef, p1.coef)
    # default domain and window
    p1 = Poly([1, 2, 3])
    p2 = p1.integ(2, k=[1, 2])
    p3 = p1.integ(1, k=[1])
    assert_almost_equal(p2.deriv(1).coef, p3.coef)
    assert_almost_equal(p2.deriv(2).coef, p1.coef)


def check_linspace(Poly):
    d = Poly.domain + random((2,))*.25
    w = Poly.window + random((2,))*.25
    p = Poly([1, 2, 3], domain=d, window=w)
    # check default domain
    xtgt = np.linspace(d[0], d[1], 20)
    ytgt = p(xtgt)
    xres, yres = p.linspace(20)
    assert_almost_equal(xres, xtgt)
    assert_almost_equal(yres, ytgt)
    # check specified domain
    xtgt = np.linspace(0, 2, 20)
    ytgt = p(xtgt)
    xres, yres = p.linspace(20, domain=[0, 2])
    assert_almost_equal(xres, xtgt)
    assert_almost_equal(yres, ytgt)


def check_pow(Poly):
    d = Poly.domain + random((2,))*.25
    w = Poly.window + random((2,))*.25
    tgt = Poly([1], domain=d, window=w)
    tst = Poly([1, 2, 3], domain=d, window=w)
    for i in range(5):
        assert_poly_almost_equal(tst**i, tgt)
        tgt = tgt * tst
    # default domain and window
    tgt = Poly([1])
    tst = Poly([1, 2, 3])
    for i in range(5):
        assert_poly_almost_equal(tst**i, tgt)
        tgt = tgt * tst
    # check error for invalid powers
    assert_raises(ValueError, op.pow, tgt, 1.5)
    assert_raises(ValueError, op.pow, tgt, -1)


def check_call(Poly):
    P = Polynomial
    d = Poly.domain
    x = np.linspace(d[0], d[1], 11)

    # Check defaults
    p = Poly.cast(P([1, 2, 3]))
    tgt = 1 + x*(2 + 3*x)
    res = p(x)
    assert_almost_equal(res, tgt)


def check_cutdeg(Poly):
    p = Poly([1, 2, 3])
    assert_raises(ValueError, p.cutdeg, .5)
    assert_raises(ValueError, p.cutdeg, -1)
    assert_equal(len(p.cutdeg(3)), 3)
    assert_equal(len(p.cutdeg(2)), 3)
    assert_equal(len(p.cutdeg(1)), 2)
    assert_equal(len(p.cutdeg(0)), 1)


def check_truncate(Poly):
    p = Poly([1, 2, 3])
    assert_raises(ValueError, p.truncate, .5)
    assert_raises(ValueError, p.truncate, 0)
    assert_equal(len(p.truncate(4)), 3)
    assert_equal(len(p.truncate(3)), 3)
    assert_equal(len(p.truncate(2)), 2)
    assert_equal(len(p.truncate(1)), 1)


def check_trim(Poly):
    c = [1, 1e-6, 1e-12, 0]
    p = Poly(c)
    assert_equal(p.trim().coef, c[:3])
    assert_equal(p.trim(1e-10).coef, c[:2])
    assert_equal(p.trim(1e-5).coef, c[:1])


def check_mapparms(Poly):
    # check with defaults. Should be identity.
d = Poly.domain w = Poly.window p = Poly([1], domain=d, window=w) assert_almost_equal([0, 1], p.mapparms()) # w = 2*d + 1 p = Poly([1], domain=d, window=w) assert_almost_equal([1, 2], p.mapparms()) def check_ufunc_override(Poly): p = Poly([1, 2, 3]) x = np.ones(3) assert_raises(TypeError, np.add, p, x) assert_raises(TypeError, np.add, x, p) class TestInterpolate(object): def f(self, x): return x * (x - 1) * (x - 2) def test_raises(self): assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) def test_dimensions(self): for deg in range(1, 5): assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) def test_approximation(self): def powx(x, p): return x**p x = np.linspace(0, 2, 10) for deg in range(0, 10): for t in range(0, deg + 1): p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) assert_almost_equal(p(x), powx(x, t), decimal=12) if __name__ == "__main__": run_module_suite()
19,014
30.019576
78
py
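Aside (an illustrative sketch, not part of the dumped source file above; the series and sample points are arbitrary choices): the convert/cast round-trips exercised in that file can be seen concretely by re-expressing the Chebyshev series T_2 in the power basis:

import numpy as np
from numpy.polynomial import Chebyshev, Polynomial

t2 = Chebyshev([0, 0, 1])               # the series 1*T_2(x)
p = t2.convert(kind=Polynomial)         # same function in the power basis
assert np.allclose(p.coef, [-1, 0, 2])  # T_2(x) = 2*x**2 - 1
xs = np.linspace(-1, 1, 7)
assert np.allclose(t2(xs), p(xs))       # the two representations agree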
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/polynomial/tests/test_hermite.py
"""Tests for hermite module. """ from __future__ import division, absolute_import, print_function import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, run_module_suite ) H0 = np.array([1]) H1 = np.array([0, 2]) H2 = np.array([-2, 0, 4]) H3 = np.array([0, -12, 0, 8]) H4 = np.array([12, 0, -48, 0, 16]) H5 = np.array([0, 120, 0, -160, 0, 32]) H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] def trim(x): return herm.hermtrim(x, tol=1e-6) class TestConstants(object): def test_hermdomain(self): assert_equal(herm.hermdomain, [-1, 1]) def test_hermzero(self): assert_equal(herm.hermzero, [0]) def test_hermone(self): assert_equal(herm.hermone, [1]) def test_hermx(self): assert_equal(herm.hermx, [0, .5]) class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_hermadd(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = herm.hermadd([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermsub(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = herm.hermsub([0]*i + [1], [0]*j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermmulx(self): assert_equal(herm.hermmulx([0]), [0]) assert_equal(herm.hermmulx([1]), [0, .5]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [i, 0, .5] assert_equal(herm.hermmulx(ser), tgt) def test_hermmul(self): # check values of result for i in range(5): pol1 = [0]*i + [1] val1 = herm.hermval(self.x, pol1) for j in range(5): msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = herm.hermval(self.x, pol2) pol3 = herm.hermmul(pol1, pol2) val3 = herm.hermval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) assert_almost_equal(val3, val1*val2, err_msg=msg) def test_hermdiv(self): for i in range(5): for j in range(5): msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = herm.hermadd(ci, cj) quo, rem = herm.hermdiv(tgt, ci) res = herm.hermadd(herm.hermmul(quo, ci), rem) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2.5, 1., .75]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermval(self): #check empty input assert_equal(herm.hermval([], [1]).size, 0) #check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10): msg = "At i=%d" % i tgt = y[i] res = herm.hermval(x, [0]*i + [1]) assert_almost_equal(res, tgt, err_msg=msg) #check that shape is preserved for i in range(3): dims = [2]*i x = np.zeros(dims) assert_equal(herm.hermval(x, [1]).shape, dims) assert_equal(herm.hermval(x, [1, 0]).shape, dims) assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) def test_hermval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) #test values tgt = y1*y2 res = herm.hermval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) 
res = herm.hermval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_hermval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test exceptions assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) #test values tgt = y1*y2*y3 res = herm.hermval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j->ij', y1, y2) res = herm.hermgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) def test_hermgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y #test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herm.hermgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) #test shape z = np.ones((2, 3)) res = herm.hermgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) class TestIntegral(object): def test_hermint(self): # check exceptions assert_raises(ValueError, herm.hermint, [0], .5) assert_raises(ValueError, herm.hermint, [0], -1) assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) assert_raises(ValueError, herm.hermint, [0], scl=[0]) assert_raises(ValueError, herm.hermint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): k = [0]*(i - 2) + [1] res = herm.hermint([0], m=i, k=k) assert_almost_equal(res, [0, .5]) # check single integration with integration constant for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [1/scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i]) res = herm.herm2poly(hermint) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 pol = [0]*i + [1] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) assert_almost_equal(herm.hermval(-1, hermint), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 pol = [0]*i + [1] tgt = [i] + [0]*i + [2/scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) res = herm.herm2poly(hermint) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1) res = herm.hermint(pol, m=j) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with defined k for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k]) res = herm.hermint(pol, m=j, k=list(range(j))) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with scaling for i in range(5): for j in range(2, 5): pol = [0]*i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], scl=2) res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) assert_almost_equal(trim(res), trim(tgt)) def test_hermint_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T res = 
herm.hermint(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([herm.hermint(c) for c in c2d]) res = herm.hermint(c2d, axis=1) assert_almost_equal(res, tgt) tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) res = herm.hermint(c2d, k=3, axis=1) assert_almost_equal(res, tgt) class TestDerivative(object): def test_hermder(self): # check exceptions assert_raises(ValueError, herm.hermder, [0], .5) assert_raises(ValueError, herm.hermder, [0], -1) # check that zeroth derivative does nothing for i in range(5): tgt = [0]*i + [1] res = herm.hermder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = herm.hermder(herm.hermint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): tgt = [0]*i + [1] res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) def test_hermder_axis(self): # check that axis keyword works c2d = np.random.random((3, 4)) tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T res = herm.hermder(c2d, axis=0) assert_almost_equal(res, tgt) tgt = np.vstack([herm.hermder(c) for c in c2d]) res = herm.hermder(c2d, axis=1) assert_almost_equal(res, tgt) class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 def test_hermvander(self): # check for 1d x x = np.arange(3) v = herm.hermvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = herm.hermvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0]*i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) def test_hermvander2d(self): # also tests hermval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = herm.hermvander2d(x1, x2, [1, 2]) tgt = herm.hermval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = herm.hermvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_hermvander3d(self): # also tests hermval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) tgt = herm.hermval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) class TestFitting(object): def test_hermfit(self): def f(x): return x*(x - 1)*(x - 2) def f2(x): return x**4 + x**2 + 1 # Test exceptions assert_raises(ValueError, herm.hermfit, [1], [1], -1) assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) assert_raises(TypeError, herm.hermfit, [], [1], 0) assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) assert_raises(TypeError, herm.hermfit, [1], [1], []) # Test fit x = np.linspace(0, 2) y = f(x) # coef3 = herm.hermfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(herm.hermval(x, coef3), y) coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) assert_equal(len(coef3), 4) 
        assert_almost_equal(herm.hermval(x, coef3), y)
        #
        coef4 = herm.hermfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        # check things still work if deg is not in strict increasing order
        coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        #
        coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        wcoef3 = herm.hermfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
        assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
        # test fitting only even polynomials
        x = np.linspace(-1, 1)
        y = f2(x)
        coef1 = herm.hermfit(x, y, 4)
        assert_almost_equal(herm.hermval(x, coef1), y)
        coef2 = herm.hermfit(x, y, [0, 2, 4])
        assert_almost_equal(herm.hermval(x, coef2), y)
        assert_almost_equal(coef1, coef2)


class TestCompanion(object):

    def test_raises(self):
        assert_raises(ValueError, herm.hermcompanion, [])
        assert_raises(ValueError, herm.hermcompanion, [1])

    def test_dimensions(self):
        for i in range(1, 5):
            coef = [0]*i + [1]
            assert_(herm.hermcompanion(coef).shape == (i, i))

    def test_linear_root(self):
        assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)


class TestGauss(object):

    def test_100(self):
        x, w = herm.hermgauss(100)

        # test orthogonality. Note that the results need to be normalized,
        # otherwise the huge values that can arise from fast growing
        # functions like Laguerre can be very confusing.
v = herm.hermvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct tgt = np.sqrt(np.pi) assert_almost_equal(w.sum(), tgt) class TestMisc(object): def test_hermfromroots(self): res = herm.hermfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = herm.hermfromroots(roots) res = herm.hermval(roots, pol) tgt = 0 assert_(len(pol) == i + 1) assert_almost_equal(herm.herm2poly(pol)[-1], 1) assert_almost_equal(res, tgt) def test_hermroots(self): assert_almost_equal(herm.hermroots([1]), []) assert_almost_equal(herm.hermroots([1, 1]), [-.5]) for i in range(2, 5): tgt = np.linspace(-1, 1, i) res = herm.hermroots(herm.hermfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) def test_hermtrim(self): coef = [2, -1, 1, 0] # Test exceptions assert_raises(ValueError, herm.hermtrim, coef, -1) # Test results assert_equal(herm.hermtrim(coef), coef[:-1]) assert_equal(herm.hermtrim(coef, 1), coef[:-3]) assert_equal(herm.hermtrim(coef, 2), [0]) def test_hermline(self): assert_equal(herm.hermline(3, 4), [3, 2]) def test_herm2poly(self): for i in range(10): assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) def test_poly2herm(self): for i in range(10): assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) tgt = np.exp(-x**2) res = herm.hermweight(x) assert_almost_equal(res, tgt) if __name__ == "__main__": run_module_suite()
18,459
32.442029
78
py
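Aside (an illustrative sketch, not part of the dumped source file above): TestGauss in that file checks that the hermgauss weights sum to sqrt(pi), which is the weighted integral of f(x) = 1. The degree and test function below are arbitrary illustrations; the same rule integrates any low-degree polynomial against the weight exp(-x**2), e.g. f(x) = x**2 whose weighted integral is sqrt(pi)/2:

import numpy as np
import numpy.polynomial.hermite as herm

# Gauss-Hermite quadrature: sum(w*f(x)) approximates the integral of
# f(x)*exp(-x**2) over the whole real line, and is exact for
# polynomial f of degree up to 2*deg - 1.
x, w = herm.hermgauss(20)
assert np.allclose(np.sum(w * x**2), np.sqrt(np.pi)/2)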
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/npyio.py
from __future__ import division, absolute_import, print_function import io import sys import os import re import itertools import warnings import weakref from operator import itemgetter, index as opindex import numpy as np from . import format from ._datasource import DataSource from numpy.core.multiarray import packbits, unpackbits from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, flatten_dtype, easy_dtype, _decode_line ) from numpy.compat import ( asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode, is_pathlib_path ) if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle from future_builtins import map loads = pickle.loads __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' ] class BagObj(object): """ BagObj(obj) Convert attribute look-ups to getitems on the object passed in. Parameters ---------- obj : class instance Object on which attribute look-up is performed. Examples -------- >>> from numpy.lib.npyio import BagObj as BO >>> class BagDemo(object): ... def __getitem__(self, key): # An instance of BagObj(BagDemo) ... # will call this method when any ... # attribute look-up is required ... result = "Doesn't matter what you want, " ... return result + "you're gonna get this" ... >>> demo_obj = BagDemo() >>> bagobj = BO(demo_obj) >>> bagobj.hello_there "Doesn't matter what you want, you're gonna get this" >>> bagobj.I_can_be_anything "Doesn't matter what you want, you're gonna get this" """ def __init__(self, obj): # Use weakref to make NpzFile objects collectable by refcount self._obj = weakref.proxy(obj) def __getattribute__(self, key): try: return object.__getattribute__(self, '_obj')[key] except KeyError: raise AttributeError(key) def __dir__(self): """ Enables dir(bagobj) to list the files in an NpzFile. This also enables tab-completion in an interpreter or IPython. """ return object.__getattribute__(self, '_obj').keys() def zipfile_factory(file, *args, **kwargs): """ Create a ZipFile. Allows for Zip64, and the `file` argument can accept file, str, or pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile constructor. """ if is_pathlib_path(file): file = str(file) import zipfile kwargs['allowZip64'] = True return zipfile.ZipFile(file, *args, **kwargs) class NpzFile(object): """ NpzFile(fid) A dictionary-like object with lazy-loading of files in the zipped archive provided on construction. `NpzFile` is used to load files in the NumPy ``.npz`` data archive format. It assumes that files in the archive have a ``.npy`` extension, other files are ignored. The arrays and file strings are lazily loaded on either getitem access using ``obj['key']`` or attribute lookup using ``obj.f.key``. A list of all files (without ``.npy`` extensions) can be obtained with ``obj.files`` and the ZipFile object itself using ``obj.zip``. Attributes ---------- files : list of str List of all files in the archive with a ``.npy`` extension. zip : ZipFile instance The ZipFile object initialized with the zipped archive. f : BagObj instance An object on which attribute can be performed as an alternative to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. 
Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Parameters ---------- fid : file or str The zipped archive to open. This is either a file-like object or a string containing the path to the archive. own_fid : bool, optional Whether NpzFile should close the file handle. Requires that `fid` is a file-like object. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True >>> npz.files ['y', 'x'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ def __init__(self, fid, own_fid=False, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) self._files = _zip.namelist() self.files = [] self.allow_pickle = allow_pickle self.pickle_kwargs = pickle_kwargs for x in self._files: if x.endswith('.npy'): self.files.append(x[:-4]) else: self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: self.fid = fid else: self.fid = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): """ Close the file. """ if self.zip is not None: self.zip.close() self.zip = None if self.fid is not None: self.fid.close() self.fid = None self.f = None # break reference cycle def __del__(self): self.close() def __getitem__(self, key): # FIXME: This seems like it will copy strings around # more than is strictly necessary. The zipfile # will read the string and then # the format.read_array will copy the string # to another place in memory. # It would be better if the zipfile could read # (or at least uncompress) the data # directly into the array memory. member = 0 if key in self._files: member = 1 elif key in self.files: member = 1 key += '.npy' if member: bytes = self.zip.open(key) magic = bytes.read(len(format.MAGIC_PREFIX)) bytes.close() if magic == format.MAGIC_PREFIX: bytes = self.zip.open(key) return format.read_array(bytes, allow_pickle=self.allow_pickle, pickle_kwargs=self.pickle_kwargs) else: return self.zip.read(key) else: raise KeyError("%s is not a file in the archive" % key) def __iter__(self): return iter(self.files) def items(self): """ Return a list of tuples, with each tuple (filename, array in file). """ return [(f, self[f]) for f in self.files] def iteritems(self): """Generator that returns tuples (filename, array in file).""" for f in self.files: yield (f, self[f]) def keys(self): """Return files in the archive with a ``.npy`` extension.""" return self.files def iterkeys(self): """Return an iterator over the files in the archive.""" return self.__iter__() def __contains__(self, key): return self.files.__contains__(key) def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII'): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. Parameters ---------- file : file-like object, string, or pathlib.Path The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. 
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap` for a detailed description of the modes). A memory-mapped array is kept on disk. However, it can be accessed and sliced like any ndarray. Memory mapping is especially useful for accessing small fragments of large files without reading the entire file into memory. allow_pickle : bool, optional Allow loading pickled object arrays stored in npy files. Reasons for disallowing pickles include security, as loading pickled data can execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: True fix_imports : bool, optional Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when loading Python 2 generated pickled files in Python 3, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' Returns ------- result : array, tuple, dict, etc. Data stored in the file. For ``.npz`` files, the returned instance of NpzFile class must be closed to avoid leaking file descriptors. Raises ------ IOError If the input file does not exist or cannot be read. ValueError The file contains an object array, but allow_pickle=False given. See Also -------- save, savez, savez_compressed, loadtxt memmap : Create a memory-map to an array stored in a file on disk. lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. Notes ----- - If the file contains pickle data, then whatever object is stored in the pickle is returned. - If the file is a ``.npy`` file, then a single array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is returned, containing ``{filename: array}`` key-value pairs, one for each file in the archive. - If the file is a ``.npz`` file, the returned value supports the context manager protocol in a similar fashion to the open function:: with load('foo.npz') as data: a = data['a'] The underlying file descriptor is closed when exiting the 'with' block. Examples -------- Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') array([[1, 2, 3], [4, 5, 6]]) Store compressed data to disk, and load it again: >>> a=np.array([[1, 2, 3], [4, 5, 6]]) >>> b=np.array([1, 2]) >>> np.savez('/tmp/123.npz', a=a, b=b) >>> data = np.load('/tmp/123.npz') >>> data['a'] array([[1, 2, 3], [4, 5, 6]]) >>> data['b'] array([1, 2]) >>> data.close() Mem-map the stored array, and then access the second row directly from disk: >>> X = np.load('/tmp/123.npy', mmap_mode='r') >>> X[1, :] memmap([4, 5, 6]) """ own_fid = False if isinstance(file, basestring): fid = open(file, "rb") own_fid = True elif is_pathlib_path(file): fid = file.open("rb") own_fid = True else: fid = file if encoding not in ('ASCII', 'latin1', 'bytes'): # The 'encoding' value for pickle also affects what encoding # the serialized binary data of NumPy arrays is loaded # in. Pickle does not pass on the encoding information to # NumPy. The unpickling code in numpy.core.multiarray is # written to assume that unicode data appearing where binary # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. 
# # Other encoding values can corrupt binary data, and we # purposefully disallow them. For the same reason, the errors= # argument is not exposed, as values other than 'strict' # result can similarly silently corrupt numerical data. raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") if sys.version_info[0] >= 3: pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = {} try: # Code to distinguish from NumPy binary files and pickles. _ZIP_PREFIX = b'PK\x03\x04' N = len(format.MAGIC_PREFIX) magic = fid.read(N) # If the file size is less than N, we need to make sure not # to seek past the beginning of the file fid.seek(-min(N, len(magic)), 1) # back-up if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) # Transfer file ownership to NpzFile tmp = own_fid own_fid = False return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) elif magic == format.MAGIC_PREFIX: # .npy file if mmap_mode: return format.open_memmap(file, mode=mmap_mode) else: return format.read_array(fid, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Try a pickle if not allow_pickle: raise ValueError("allow_pickle=False, but file does not contain " "non-pickled data") try: return pickle.load(fid, **pickle_kwargs) except Exception: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: if own_fid: fid.close() def save(file, arr, allow_pickle=True, fix_imports=True): """ Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- file : file, str, or pathlib.Path File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string or Path, a ``.npy`` extension will be appended to the file name if it does not already have one. arr : array_like Array data to be saved. allow_pickle : bool, optional Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is compatible between Python 2 and Python 3). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be pickled in a Python 2 compatible way. If `fix_imports` is True, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. 
See Also -------- savez : Save several arrays into a ``.npz`` archive savetxt, load Notes ----- For a description of the ``.npy`` format, see the module docstring of `numpy.lib.format` or the NumPy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ own_fid = False if isinstance(file, basestring): if not file.endswith('.npy'): file = file + '.npy' fid = open(file, "wb") own_fid = True elif is_pathlib_path(file): if not file.name.endswith('.npy'): file = file.parent / (file.name + '.npy') fid = file.open("wb") own_fid = True else: fid = file if sys.version_info[0] >= 3: pickle_kwargs = dict(fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = None try: arr = np.asanyarray(arr) format.write_array(fid, arr, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) finally: if own_fid: fid.close() def savez(file, *args, **kwds): """ Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names, in the ``.npz`` file will match the keyword names. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- save : Save a single array to a binary file in NumPy format. savetxt : Save an array to a file as plain text. savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see `numpy.lib.format` or the NumPy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files ['arr_1', 'arr_0'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Using `savez` with \\**kwds, the arrays are saved with the keyword names. 
>>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npzfile = np.load(outfile) >>> npzfile.files ['y', 'x'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ _savez(file, args, kwds, False) def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string or a Path, the ``.npz`` extension will be appended to the file name if it is not already there. args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- numpy.save : Save a single array to a binary file in NumPy format. numpy.savetxt : Save an array to a file as plain text. numpy.savez : Save several arrays into an uncompressed ``.npz`` file format numpy.load : Load the files created by savez_compressed. Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is compressed with ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see `numpy.lib.format` or the NumPy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) >>> loaded = np.load('/tmp/123.npz') >>> print(np.array_equal(test_array, loaded['a'])) True >>> print(np.array_equal(test_vector, loaded['b'])) True """ _savez(file, args, kwds, True) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. import zipfile if isinstance(file, basestring): if not file.endswith('.npz'): file = file + '.npz' elif is_pathlib_path(file): if not file.name.endswith('.npz'): file = file.parent / (file.name + '.npz') namedict = kwds for i, val in enumerate(args): key = 'arr_%d' % i if key in namedict.keys(): raise ValueError( "Cannot use un-named variables and keyword %s" % key) namedict[key] = val if compress: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) if sys.version_info >= (3, 6): # Since Python 3.6 it is possible to write directly to a ZIP file. for key, val in namedict.items(): fname = key + '.npy' val = np.asanyarray(val) force_zip64 = val.nbytes >= 2**30 with zipf.open(fname, 'w', force_zip64=force_zip64) as fid: format.write_array(fid, val, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Stage arrays in a temporary file on disk, before writing to zip. 
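        # (zipfile can stream writes into an archive member only from
        # Python 3.6 on; on older versions each array must first be
        # serialized to a real on-disk file and then copied into the
        # archive with zipf.write below.)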
# Import deferred for startup time improvement import tempfile # Since target file might be big enough to exceed capacity of a global # temporary directory, create temp file side-by-side with the target file. file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: format.write_array(fid, np.asanyarray(val), allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) fid.close() fid = None zipf.write(tmpfile, arcname=fname) except IOError as exc: raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) finally: if fid: fid.close() finally: os.remove(tmpfile) zipf.close() def _getconv(dtype): """ Find the correct dtype converter. Adapted from matplotlib """ def floatconv(x): x.lower() if '0x' in x: return float.fromhex(x) return float(x) typ = dtype.type if issubclass(typ, np.bool_): return lambda x: bool(int(x)) if issubclass(typ, np.uint64): return np.uint64 if issubclass(typ, np.int64): return np.int64 if issubclass(typ, np.integer): return lambda x: int(float(x)) elif issubclass(typ, np.longdouble): return np.longdouble elif issubclass(typ, np.floating): return floatconv elif issubclass(typ, complex): return lambda x: complex(asstr(x)) elif issubclass(typ, np.bytes_): return asbytes elif issubclass(typ, np.unicode_): return asunicode else: return asstr # amount of lines loadtxt reads in one chunk, can be overriden for testing _loadtxt_chunksize = 50000 def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0, encoding='bytes'): """ Load data from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : file, str, or pathlib.Path File, filename, or generator to read. If the filename extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note that generators should return byte strings for Python 3k. dtype : data-type, optional Data-type of the resulting array; default: float. If this is a structured data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. comments : str or sequence of str, optional The characters or list of characters used to indicate the start of a comment. For backwards compatibility, byte strings will be decoded as 'latin1'. The default is '#'. delimiter : str, optional The string used to separate values. For backwards compatibility, byte strings will be decoded as 'latin1'. The default is whitespace. converters : dict, optional A dictionary mapping column number to a function that will convert that column to a float. E.g., if column 0 is a date string: ``converters = {0: datestr2num}``. Converters can also be used to provide a default value for missing data (but see also `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. usecols : int or sequence, optional Which columns to read, with 0 being the first. For example, usecols = (1,4,5) will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. .. versionchanged:: 1.11.0 When a single column has to be read it is possible to use an integer instead of a tuple. 
            E.g. ``usecols = 3`` reads the fourth column the same way as
            ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensures you receive byte arrays as results if
        possible and passes latin1 encoded strings to converters. Override
        this value to receive unicode arrays and pass strings as input to
        converters. If set to None the system default is used. The default
        value is 'bytes'.

        .. versionadded:: 1.14.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO  # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

    """
    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [comments]
        comments = [_decode_line(x) for x in comments]
        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile('|'.join(comments))

    if delimiter is not None:
        delimiter = _decode_line(delimiter)

    user_converters = converters

    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list

    fown = False
    try:
        if is_pathlib_path(fname):
            fname = str(fname)
        if _is_string_like(fname):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            fencoding = getattr(fh, 'encoding', 'latin1')
            fh = iter(fh)
            fown = True
        else:
            fh = iter(fname)
            fencoding = getattr(fname, 'encoding', 'latin1')
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')

    # input may be a python2 io stream
    if encoding is not None:
        fencoding = encoding
    # we must assume local encoding
    # TODO: emit portability warning?
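    # locale.getpreferredencoding() below is consulted only when the input
    # stream did not report an encoding of its own.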
elif fencoding is None: import locale fencoding = locale.getpreferredencoding() # not to be confused with the flatten_dtype we import... def flatten_dtype_internal(dt): """Unpack a structured data-type, and produce re-packing info.""" if dt.names is None: # If the dtype is flattened, return. # If the dtype has a shape, the dtype occurs # in the list more than once. shape = dt.shape if len(shape) == 0: return ([dt.base], None) else: packing = [(shape[-1], list)] if len(shape) > 1: for dim in dt.shape[-2::-1]: packing = [(dim*packing[0][0], packing*dim)] return ([dt.base] * int(np.prod(dt.shape)), packing) else: types = [] packing = [] for field in dt.names: tp, bytes = dt.fields[field] flat_dt, flat_packing = flatten_dtype_internal(tp) types.extend(flat_dt) # Avoid extra nesting for subarrays if tp.ndim > 0: packing.extend(flat_packing) else: packing.append((len(flat_dt), flat_packing)) return (types, packing) def pack_items(items, packing): """Pack items into nested lists based on re-packing info.""" if packing is None: return items[0] elif packing is tuple: return tuple(items) elif packing is list: return list(items) else: start = 0 ret = [] for length, subpacking in packing: ret.append(pack_items(items[start:start+length], subpacking)) start += length return tuple(ret) def split_line(line): """Chop off comments, strip, and split at delimiter. """ line = _decode_line(line, encoding=encoding) if comments is not None: line = regex_comments.split(line, maxsplit=1)[0] line = line.strip('\r\n') if line: return line.split(delimiter) else: return [] def read_data(chunk_size): """Parse each line, including the first. The file read, `fh`, is a global defined above. Parameters ---------- chunk_size : int At most `chunk_size` lines are read at a time, with iteration until all lines are read. """ X = [] for i, line in enumerate(itertools.chain([first_line], fh)): vals = split_line(line) if len(vals) == 0: continue if usecols: vals = [vals[j] for j in usecols] if len(vals) != N: line_num = i + skiprows + 1 raise ValueError("Wrong number of columns at line %d" % line_num) # Convert each value according to its column and store items = [conv(val) for (conv, val) in zip(converters, vals)] # Then pack it according to the dtype's nesting items = pack_items(items, packing) X.append(items) if len(X) > chunk_size: yield X X = [] if X: yield X try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) defconv = _getconv(dtype) # Skip the first `skiprows` lines for i in range(skiprows): next(fh) # Read until we find a line with some values, and use # it to estimate the number of columns, N. 
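    # The `first_line` found here is re-fed to read_data() via
    # itertools.chain, so the probe line used to estimate N is parsed
    # exactly once and never lost.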
first_vals = None try: while not first_vals: first_line = next(fh) first_vals = split_line(first_line) except StopIteration: # End of lines reached first_line = '' first_vals = [] warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2) N = len(usecols or first_vals) dtype_types, packing = flatten_dtype_internal(dtype) if len(dtype_types) > 1: # We're dealing with a structured array, each field of # the dtype matches a column converters = [_getconv(dt) for dt in dtype_types] else: # All fields have the same dtype converters = [defconv for i in range(N)] if N > 1: packing = [(N, tuple)] # By preference, use the converters specified by the user for i, conv in (user_converters or {}).items(): if usecols: try: i = usecols.index(i) except ValueError: # Unused converter specified continue if byte_converters: # converters may use decode to workaround numpy's old behaviour, # so encode the string again before passing to the user converter def tobytes_first(x, conv): if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) import functools converters[i] = functools.partial(tobytes_first, conv=conv) else: converters[i] = conv converters = [conv if conv is not bytes else lambda x: x.encode(fencoding) for conv in converters] # read data in chunks and fill it into an array via resize # over-allocating and shrinking the array later may be faster but is # probably not relevant compared to the cost of actually reading and # converting the data X = None for x in read_data(_loadtxt_chunksize): if X is None: X = np.array(x, dtype) else: nshape = list(X.shape) pos = nshape[0] nshape[0] += len(x) X.resize(nshape) X[pos:, ...] = x finally: if fown: fh.close() # recursive closures have a cyclic reference to themselves, which # requires gc to collect (gh-10620). To avoid this problem, for # performance and PyPy friendliness, we break the cycle: flatten_dtype_internal = None pack_items = None if X is None: X = np.array([], dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) # Verify that the array has at least dimensions `ndmin`. # Check correctness of the values of `ndmin` if ndmin not in [0, 1, 2]: raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) # Tweak the size and shape of the arrays - remove extraneous dimensions if X.ndim > ndmin: X = np.squeeze(X) # and ensure we have the minimum number of dimensions asked for # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 if X.ndim < ndmin: if ndmin == 1: X = np.atleast_1d(X) elif ndmin == 2: X = np.atleast_2d(X).T if unpack: if len(dtype_types) > 1: # For structured arrays, return an array for each field. return [X[field] for field in dtype.names] else: return X.T else: return X def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# ', encoding=None): """ Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : 1D or 2D array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. 
For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns c) a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 encoding : {None, str}, optional Encoding used to encode the outputfile. Does not apply to output streams. If the encoding is something other than 'bytes' or 'latin1' you will not be able to load the file in NumPy versions < 1.14. Default is 'latin1'. .. versionadded:: 1.14.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) class WriteWrap(object): """Convert to unicode in py2 or to bytes on bytestream inputs. 
""" def __init__(self, fh, encoding): self.fh = fh self.encoding = encoding self.do_write = self.first_write def close(self): self.fh.close() def write(self, v): self.do_write(v) def write_bytes(self, v): if isinstance(v, bytes): self.fh.write(v) else: self.fh.write(v.encode(self.encoding)) def write_normal(self, v): self.fh.write(asunicode(v)) def first_write(self, v): try: self.write_normal(v) self.write = self.write_normal except TypeError: # input is probably a bytestream self.write_bytes(v) self.write = self.write_bytes own_fh = False if is_pathlib_path(fname): fname = str(fname) if _is_string_like(fname): # datasource doesn't support creating a new file ... open(fname, 'wt').close() fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) own_fh = True # need to convert str to unicode for text io output if sys.version_info[0] == 2: fh = WriteWrap(fh, encoding or 'latin1') elif hasattr(fname, 'write'): # wrap to handle byte output streams fh = WriteWrap(fname, encoding or 'latin1') else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 0 or X.ndim > 2: raise ValueError( "Expected 1D or 2D array, got %dD array instead" % X.ndim) elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. %s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt,)) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(comments + header + newline) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) fh.write(format % tuple(row2) + newline) else: for row in X: try: v = format % tuple(row) + newline except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) fh.write(v) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(comments + footer + newline) finally: if own_fh: fh.close() def fromregex(file, regexp, dtype, encoding=None): """ Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : str or file File name or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. .. 
versionadded:: 1.14.0 Returns ------- output : ndarray The output array, containing the part of the content of `file` that was matched by `regexp`. `output` is always a structured array. Raises ------ TypeError When `dtype` is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see `doc.structured_arrays`. Examples -------- >>> f = open('test.dat', 'w') >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', '<i8'), ('key', '|S3')]) >>> output['num'] array([1312, 1534, 444], dtype=int64) """ own_fh = False if not hasattr(file, "read"): file = np.lib._datasource.open(file, 'rt', encoding=encoding) own_fh = True try: if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) content = file.read() if isinstance(content, bytes) and not isinstance(regexp, bytes): regexp = asbytes(regexp) elif not isinstance(content, bytes) and isinstance(regexp, bytes): regexp = asstr(regexp) if not hasattr(regexp, 'match'): regexp = re.compile(regexp) seq = regexp.findall(content) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. # Create the new array as a single data-type and then # re-interpret as a single-field structured array. newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) output.dtype = dtype else: output = np.array(seq, dtype=dtype) return output finally: if own_fh: file.close() #####-------------------------------------------------------------------------- #---- --- ASCII functions --- #####-------------------------------------------------------------------------- def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None, encoding='bytes'): """ Load data from a text file, with missing values handled as specified. Each line past the first `skip_header` lines is split at the `delimiter` character, and characters following the `comments` character are discarded. Parameters ---------- fname : file, str, pathlib.Path, list of str, generator File, filename, list, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. The strings in a list or produced by a generator are treated as lines. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers can also be provided as width(s) of each field. skiprows : int, optional `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. 
skip_header : int, optional The number of lines to skip at the beginning of the file. skip_footer : int, optional The number of lines to skip at the end of the file. converters : variable, optional The set of functions that convert the data of a column to a value. The converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. missing : variable, optional `missing` was removed in numpy 1.10. Please use `missing_values` instead. missing_values : variable, optional The set of strings corresponding to missing data. filling_values : variable, optional The set of values to be used as default when the data are missing. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first line after the first `skip_header` lines. This line can optionally be proceeded by a comment delimeter. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. defaultfmt : str, optional A format used to define default field names, such as "f%i" or "f_%02i". autostrip : bool, optional Whether to automatically strip white spaces from the variables. replace_space : char, optional Character(s) used in replacement of white spaces in the variables names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` usemask : bool, optional If True, return a masked array. If False, return a regular array. loose : bool, optional If True, do not raise errors for invalid values. invalid_raise : bool, optional If True, an exception is raised if an inconsistency is detected in the number of columns. If False, a warning is emitted and the offending lines are skipped. max_rows : int, optional The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` is a file object. The special value 'bytes' enables backward compatibility workarounds that ensure that you receive byte arrays when possible and passes latin1 encoded strings to converters. Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. .. versionadded:: 1.14.0 Returns ------- out : ndarray Data read from the text file. If `usemask` is True, this is a masked array. See Also -------- numpy.loadtxt : equivalent function when no data is missing. 
Notes ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variables are named (either by a flexible dtype or with `names`, there must not be any header in the file (else a ValueError exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. References ---------- .. [1] NumPy User Guide, section `I/O with NumPy <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. Examples --------- >>> from io import StringIO >>> import numpy as np Comma delimited file with mixed dtype >>> s = StringIO("1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Using dtype = None >>> s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Specifying dtype and names >>> s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) An example with fixed-width columns >>> s = StringIO("11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... delimiter=[1,3,5]) >>> data array((1, 1.3, 'abcde'), dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) """ if max_rows is not None: if skip_footer: raise ValueError( "The keywords 'skip_footer' and 'max_rows' can not be " "specified at the same time.") if max_rows < 1: raise ValueError("'max_rows' must be at least 1.") if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters user_converters = converters or {} if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) if encoding == 'bytes': encoding = None byte_converters = True else: byte_converters = False # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if is_pathlib_path(fname): fname = str(fname) if isinstance(fname, basestring): fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding)) own_fhd = True else: fhd = iter(fname) except TypeError: raise TypeError( "fname must be a string, filehandle, list of strings, " "or generator. Got %s instead." 
% type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip, encoding=encoding) validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Skip the first `skip_header` rows for i in range(skip_header): next(fhd) # Keep on until we find the first valid values first_values = None try: while not first_values: first_line = _decode_line(next(fhd), encoding) if names is True: if comments in first_line: first_line = ( ''.join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty first_line = '' first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) # Should we take the first values as names ? if names is True: fval = first_values[0].strip() if fval in comments: del first_values[0] # Check the columns to use: make sure `usecols` is a list if usecols is not None: try: usecols = [_.strip() for _ in usecols.split(",")] except AttributeError: try: usecols = list(usecols) except TypeError: usecols = [usecols, ] nbcols = len(usecols or first_values) # Check the names and overwrite the dtype.names if needed if names is True: names = validate_names([str(_.strip()) for _ in first_values]) first_line = '' elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: names = validate_names(names) # Get the dtype if dtype is not None: dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Make sure the names is a list (for 2.5) if names is not None: names = list(names) if usecols: for (i, current) in enumerate(usecols): # if usecols is a list of names, convert to a list of indices if _is_string_like(current): usecols[i] = names.index(current) elif current < 0: usecols[i] = current + len(first_values) # If the dtype is not None, make sure we update it if (dtype is not None) and (len(dtype) > nbcols): descr = dtype.descr dtype = np.dtype([descr[_] for _ in usecols]) names = list(dtype.names) # If `names` is not None, update the names elif (names is not None) and (len(names) > nbcols): names = [names[_] for _ in usecols] elif (names is not None) and (dtype is not None): names = list(dtype.names) # Process the missing values ............................... # Rename missing_values for convenience user_missing_values = missing_values or () if isinstance(user_missing_values, bytes): user_missing_values = user_missing_values.decode('latin1') # Define the list of missing_values (one column: one list) missing_values = [list(['']) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): # Loop on the items for (key, val) in user_missing_values.items(): # Is the key a string ? 
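            # Keys of the dict may be column names or column indices; names
            # are translated to indices here (and remapped through usecols
            # just below) before the missing values are recorded.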
if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped continue # Redefine the key as needed if it's a column number if usecols: try: key = usecols.index(key) except ValueError: pass # Transform the value as a list of string if isinstance(val, (list, tuple)): val = [str(_) for _ in val] else: val = [str(val), ] # Add the value(s) to the current list of missing if key is None: # None acts as default for miss in missing_values: miss.extend(val) else: missing_values[key].extend(val) # We have a sequence : each item matches a column elif isinstance(user_missing_values, (list, tuple)): for (value, entry) in zip(user_missing_values, missing_values): value = str(value) if value not in entry: entry.append(value) # We have a string : apply it to all entries elif isinstance(user_missing_values, basestring): user_value = user_missing_values.split(",") for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries else: for entry in missing_values: entry.extend([str(user_missing_values)]) # Process the filling_values ............................... # Rename the input for convenience user_filling_values = filling_values if user_filling_values is None: user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually if isinstance(user_filling_values, dict): for (key, val) in user_filling_values.items(): if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped, continue # Redefine the key if it's a column number and usecols is defined if usecols: try: key = usecols.index(key) except ValueError: pass # Add the value to the list filling_values[key] = val # We have a sequence : update on a one-to-one basis elif isinstance(user_filling_values, (list, tuple)): n = len(user_filling_values) if (n <= nbcols): filling_values[:n] = user_filling_values else: filling_values = user_filling_values[:nbcols] # We have something else : use it for all entries else: filling_values = [user_filling_values] * nbcols # Initialize the converters ................................ if dtype is None: # Note: we can't use a [...]*nbcols, as we would have 3 times the same # ... converter, instead of 3 different converters. 
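        # Each column therefore gets its own StringConverter instance, so
        # per-column missing_values and filling_values can be attached
        # independently.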
converters = [StringConverter(None, missing_values=miss, default=fill) for (miss, fill) in zip(missing_values, filling_values)] else: dtype_flat = flatten_dtype(dtype, flatten_base=True) # Initialize the converters if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) converters = [StringConverter(dt, locked=True, missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) converters = [StringConverter(dtype, locked=True, missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead if _is_string_like(j): try: j = names.index(j) i = j except ValueError: continue elif usecols: try: i = usecols.index(j) except ValueError: # Unused converter specified continue else: i = j # Find the value to test - first_line is not filtered by usecols: if len(first_line): testing_value = first_values[j] else: testing_value = None if conv is bytes: user_conv = asbytes elif byte_converters: # converters may use decode to workaround numpy's old behaviour, # so encode the string again before passing to the user converter def tobytes_first(x, conv): if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) import functools user_conv = functools.partial(tobytes_first, conv=conv) else: user_conv = conv converters[i].update(user_conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) uc_update.append((i, user_conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. # miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... rows rows = [] append_to_rows = rows.append # ... masks if usemask: masks = [] append_to_masks = masks.append # ... 
invalid invalid = [] append_to_invalid = invalid.append # Parse each line for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): values = split_line(line) nbvalues = len(values) # Skip an empty line if nbvalues == 0: continue if usecols: # Select only the columns we need try: values = [values[_] for _ in usecols] except IndexError: append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) if usemask: append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) if len(rows) == max_rows: break if own_fhd: fhd.close() # Upgrade the converters (if needed) if dtype is None: for (i, converter) in enumerate(converters): current_column = [itemgetter(i)(_m) for _m in rows] try: converter.iterupgrade(current_column) except ConverterLockError: errmsg = "Converter #%i is locked and cannot be upgraded: " % i current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): errmsg += "(occurred line #%i for value '%s')" errmsg %= (j + 1 + skip_header, value) raise ConverterError(errmsg) # Check that we don't have invalid values nbinvalid = len(invalid) if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) invalid = invalid[:nbinvalid - nbinvalid_skipped] skip_footer -= nbinvalid_skipped # # nbrows -= skip_footer # errmsg = [template % (i, nb) # for (i, nb) in invalid if i < nbrows] # else: errmsg = [template % (i, nb) for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) # Raise an exception ? if invalid_raise: raise ValueError(errmsg) # Issue a warning ? else: warnings.warn(errmsg, ConversionWarning, stacklevel=2) # Strip the last skip_footer data if skip_footer > 0: rows = rows[:-skip_footer] if usemask: masks = masks[:-skip_footer] # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... if loose: rows = list( zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) else: rows = list( zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) # Reset the dtype data = rows if dtype is None: # Get the dtypes from the types of the converters column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) if v == np.unicode_] if byte_converters and strcolidx: # convert strings back to bytes for backward compatibility warnings.warn( "Reading unicode strings without specifying the encoding " "argument is deprecated. 
Set the encoding, use None for the " "system default.", np.VisibleDeprecationWarning, stacklevel=2) def encode_unicode_cols(row_tup): row = list(row_tup) for i in strcolidx: row[i] = row[i].encode('latin1') return tuple(row) try: data = [encode_unicode_cols(r) for r in data] except UnicodeEncodeError: pass else: for i in strcolidx: column_types[i] = np.bytes_ # Update string types to be the right length sized_column_types = column_types[:] for i, col_type in enumerate(column_types): if np.issubdtype(col_type, np.character): n_chars = max(len(row[i]) for row in data) sized_column_types[i] = (col_type, n_chars) if names is None: # If the dtype is uniform (before sizing strings) base = set([ c_type for c, c_type in zip(converters, column_types) if c._checked]) if len(base) == 1: uniform_type, = base (ddtype, mdtype) = (uniform_type, bool) else: ddtype = [(defaultfmt % i, dt) for (i, dt) in enumerate(sized_column_types)] if usemask: mdtype = [(defaultfmt % i, bool) for (i, dt) in enumerate(sized_column_types)] else: ddtype = list(zip(names, sized_column_types)) mdtype = list(zip(names, [bool] * len(sized_column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed if names and dtype.names: dtype.names = names # Case 1. We have a structured type if len(dtype_flat) > 1: # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. if 'O' in (_.char for _ in dtype_flat): if has_nested_fields(dtype): raise NotImplementedError( "Nested fields involving objects are not supported...") else: output = np.array(data, dtype=dtype) else: rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) output = rows.view(dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array( masks, dtype=np.dtype([('', bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) # Case #2. We have a basic dtype else: # We used some user-defined converters if user_converters: ishomogeneous = True descr = [] for i, ttype in enumerate([conv.type for conv in converters]): # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) if np.issubdtype(ttype, np.character): ttype = (ttype, max(len(row[i]) for row in data)) descr.append(('', ttype)) else: descr.append(('', dtype)) # So we changed the dtype ? if not ishomogeneous: # We have more than one field if len(descr) > 1: dtype = np.dtype(descr) # We have only one field: drop the name if not needed. else: dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: if dtype.names: mdtype = [(_, bool) for _ in dtype.names] else: mdtype = bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: for (name, conv) in zip(names, converters): missing_values = [conv(_) for _ in conv.missing_values if _ != ''] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array if usemask: output = output.view(MaskedArray) output._mask = outputmask if unpack: return output.squeeze().T return output.squeeze() def ndfromtxt(fname, **kwargs): """ Load ASCII data stored in a file and return it as a single array. 
Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function. """ kwargs['usemask'] = False return genfromtxt(fname, **kwargs) def mafromtxt(fname, **kwargs): """ Load ASCII data stored in a text file and return a masked array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. """ kwargs['usemask'] = True return genfromtxt(fname, **kwargs) def recfromtxt(fname, **kwargs): """ Load ASCII data from a file and return it in a record array. If ``usemask=False`` a standard `recarray` is returned, if ``usemask=True`` a MaskedRecords array is returned. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ kwargs.setdefault("dtype", None) usemask = kwargs.get('usemask', False) output = genfromtxt(fname, **kwargs) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output def recfromcsv(fname, **kwargs): """ Load ASCII data stored in a comma-separated file. The returned array is a record array (if ``usemask=False``, see `recarray`) or a masked record array (if ``usemask=True``, see `ma.mrecords.MaskedRecords`). Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ # Set default kwargs for genfromtxt as relevant to csv import. kwargs.setdefault("case_sensitive", "lower") kwargs.setdefault("names", True) kwargs.setdefault("delimiter", ",") kwargs.setdefault("dtype", None) output = genfromtxt(fname, **kwargs) usemask = kwargs.get("usemask", False) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output
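A quick, hedged round-trip sketch of the I/O routines collected above (np.save/np.load, savez_compressed, savetxt/loadtxt, genfromtxt); the file names are illustrative only and not part of the module:

import numpy as np

a = np.arange(6).reshape(2, 3)

np.save('demo.npy', a)                        # binary, single array
assert (np.load('demo.npy') == a).all()

np.savez_compressed('demo.npz', a=a)          # zipped archive of arrays
with np.load('demo.npz') as npz:              # NpzFile is a context manager
    assert (npz['a'] == a).all()

np.savetxt('demo.txt', a, fmt='%d')           # plain text
assert (np.loadtxt('demo.txt', dtype=int) == a).all()
assert (np.genfromtxt('demo.txt', dtype=int) == a).all()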
83,172
35.55956
95
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/user_array.py
""" Standard container-class for easy multiple-inheritance. Try to inherit from the ndarray instead of using this class as this is not complete. """ from __future__ import division, absolute_import, print_function from numpy.core import ( array, asarray, absolute, add, subtract, multiply, divide, remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, bitwise_xor, invert, less, less_equal, not_equal, equal, greater, greater_equal, shape, reshape, arange, sin, sqrt, transpose ) from numpy.compat import long class container(object): """ container(data, dtype=None, copy=True) Standard container-class for easy multiple-inheritance. Methods ------- copy tostring byteswap astype """ def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) def __repr__(self): if self.ndim > 0: return self.__class__.__name__ + repr(self.array)[len("array"):] else: return self.__class__.__name__ + "(" + repr(self.array) + ")" def __array__(self, t=None): if t: return self.array.astype(t) return self.array # Array as sequence def __len__(self): return len(self.array) def __getitem__(self, index): return self._rc(self.array[index]) def __setitem__(self, index, value): self.array[index] = asarray(value, self.dtype) def __abs__(self): return self._rc(absolute(self.array)) def __neg__(self): return self._rc(-self.array) def __add__(self, other): return self._rc(self.array + asarray(other)) __radd__ = __add__ def __iadd__(self, other): add(self.array, other, self.array) return self def __sub__(self, other): return self._rc(self.array - asarray(other)) def __rsub__(self, other): return self._rc(asarray(other) - self.array) def __isub__(self, other): subtract(self.array, other, self.array) return self def __mul__(self, other): return self._rc(multiply(self.array, asarray(other))) __rmul__ = __mul__ def __imul__(self, other): multiply(self.array, other, self.array) return self def __div__(self, other): return self._rc(divide(self.array, asarray(other))) def __rdiv__(self, other): return self._rc(divide(asarray(other), self.array)) def __idiv__(self, other): divide(self.array, other, self.array) return self def __mod__(self, other): return self._rc(remainder(self.array, other)) def __rmod__(self, other): return self._rc(remainder(other, self.array)) def __imod__(self, other): remainder(self.array, other, self.array) return self def __divmod__(self, other): return (self._rc(divide(self.array, other)), self._rc(remainder(self.array, other))) def __rdivmod__(self, other): return (self._rc(divide(other, self.array)), self._rc(remainder(other, self.array))) def __pow__(self, other): return self._rc(power(self.array, asarray(other))) def __rpow__(self, other): return self._rc(power(asarray(other), self.array)) def __ipow__(self, other): power(self.array, other, self.array) return self def __lshift__(self, other): return self._rc(left_shift(self.array, other)) def __rshift__(self, other): return self._rc(right_shift(self.array, other)) def __rlshift__(self, other): return self._rc(left_shift(other, self.array)) def __rrshift__(self, other): return self._rc(right_shift(other, self.array)) def __ilshift__(self, other): left_shift(self.array, other, self.array) return self def __irshift__(self, other): right_shift(self.array, other, self.array) return self def __and__(self, other): return self._rc(bitwise_and(self.array, other)) def __rand__(self, other): return self._rc(bitwise_and(other, self.array)) def __iand__(self, other): bitwise_and(self.array, other, self.array) return self def 
__xor__(self, other): return self._rc(bitwise_xor(self.array, other)) def __rxor__(self, other): return self._rc(bitwise_xor(other, self.array)) def __ixor__(self, other): bitwise_xor(self.array, other, self.array) return self def __or__(self, other): return self._rc(bitwise_or(self.array, other)) def __ror__(self, other): return self._rc(bitwise_or(other, self.array)) def __ior__(self, other): bitwise_or(self.array, other, self.array) return self def __pos__(self): return self._rc(self.array) def __invert__(self): return self._rc(invert(self.array)) def _scalarfunc(self, func): if self.ndim == 0: return func(self[0]) else: raise TypeError( "only rank-0 arrays can be converted to Python scalars.") def __complex__(self): return self._scalarfunc(complex) def __float__(self): return self._scalarfunc(float) def __int__(self): return self._scalarfunc(int) def __long__(self): return self._scalarfunc(long) def __hex__(self): return self._scalarfunc(hex) def __oct__(self): return self._scalarfunc(oct) def __lt__(self, other): return self._rc(less(self.array, other)) def __le__(self, other): return self._rc(less_equal(self.array, other)) def __eq__(self, other): return self._rc(equal(self.array, other)) def __ne__(self, other): return self._rc(not_equal(self.array, other)) def __gt__(self, other): return self._rc(greater(self.array, other)) def __ge__(self, other): return self._rc(greater_equal(self.array, other)) def copy(self): "" return self._rc(self.array.copy()) def tostring(self): "" return self.array.tostring() def byteswap(self): "" return self._rc(self.array.byteswap()) def astype(self, typecode): "" return self._rc(self.array.astype(typecode)) def _rc(self, a): if len(shape(a)) == 0: return a else: return self.__class__(a) def __array_wrap__(self, *args): return self.__class__(args[0]) def __setattr__(self, attr, value): if attr == 'array': object.__setattr__(self, attr, value) return try: self.array.__setattr__(attr, value) except AttributeError: object.__setattr__(self, attr, value) # Only called after other approaches fail. def __getattr__(self, attr): if (attr == 'array'): return object.__getattribute__(self, attr) return self.array.__getattribute__(attr) ############################################################# # Test of class container ############################################################# if __name__ == '__main__': temp = reshape(arange(10000), (100, 100)) ua = container(temp) # new object created begin test print(dir(ua)) print(shape(ua), ua.shape) # I have changed Numeric.py ua_small = ua[:3, :5] print(ua_small) # this did not change ua[0,0], which is not normal behavior ua_small[0, 0] = 10 print(ua_small[0, 0], ua[0, 0]) print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2)) print(less(ua_small, 103), type(less(ua_small, 103))) print(type(ua_small * reshape(arange(15), shape(ua_small)))) print(reshape(ua_small, (5, 3))) print(transpose(ua_small))
7,817
26.051903
76
py
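A short, hedged sketch of how the `container` class from the user_array.py file above is meant to be subclassed; the `LoggingArray` name and its printout are illustrative inventions, not part of NumPy.

import numpy as np
from numpy.lib.user_array import container

class LoggingArray(container):
    """Toy subclass: report each time an operation result is re-wrapped."""
    def _rc(self, a):
        # _rc() is the hook every operator uses to wrap results;
        # rank-0 results come back as plain scalars.
        wrapped = container._rc(self, a)
        print("wrapped:", type(wrapped).__name__)
        return wrapped

a = LoggingArray(np.arange(6).reshape(2, 3))
b = a + 1            # __add__ -> _rc, prints "wrapped: LoggingArray"
print(b.shape)       # unknown attributes are forwarded to the ndarray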
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/type_check.py
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py """ from __future__ import division, absolute_import, print_function __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', 'typename', 'asfarray', 'mintypecode', 'asscalar', 'common_type'] import numpy.core.numeric as _nx from numpy.core.numeric import asarray, asanyarray, array, isnan, zeros from .ufunclike import isneginf, isposinf _typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' def mintypecode(typechars,typeset='GDFgdf',default='d'): """ Return the character for the minimum-size type to which given types can be safely cast. The returned type character must represent the smallest size dtype such that an array of the returned type can handle the data from an array of all types in `typechars` (or if `typechars` is an array, then its dtype.char). Parameters ---------- typechars : list of str or array_like If a list of strings, each string should represent a dtype. If array_like, the character representation of the array dtype is used. typeset : str or list of str, optional The set of characters that the returned character is chosen from. The default set is 'GDFgdf'. default : str, optional The default character, this is returned if none of the characters in `typechars` matches a character in `typeset`. Returns ------- typechar : str The character representing the minimum-size type that was found. See Also -------- dtype, sctype2char, maximum_sctype Examples -------- >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> x = np.array([1.1, 2-3.j]) >>> np.mintypecode(x) 'D' >>> np.mintypecode('abceh', default='G') 'G' """ typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char for t in typechars] intersection = [t for t in typecodes if t in typeset] if not intersection: return default if 'F' in intersection and 'd' in intersection: return 'D' l = [] for t in intersection: i = _typecodes_by_elsize.index(t) l.append((i, t)) l.sort() return l[0][1] def asfarray(a, dtype=_nx.float_): """ Return an array converted to a float type. Parameters ---------- a : array_like The input array. dtype : str or dtype object, optional Float type code to coerce input array `a`. If `dtype` is one of the 'int' dtypes, it is replaced with float64. Returns ------- out : ndarray The input `a` as a float ndarray. Examples -------- >>> np.asfarray([2, 3]) array([ 2., 3.]) >>> np.asfarray([2, 3], dtype='float') array([ 2., 3.]) >>> np.asfarray([2, 3], dtype='int8') array([ 2., 3.]) """ if not _nx.issubdtype(dtype, _nx.inexact): dtype = _nx.float_ return asarray(a, dtype=dtype) def real(val): """ Return the real part of the complex argument. Parameters ---------- val : array_like Input array. Returns ------- out : ndarray or scalar The real component of the complex argument. If `val` is real, the type of `val` is used for the output. If `val` has complex elements, the returned type is float. See Also -------- real_if_close, imag, angle Examples -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([ 1., 3., 5.]) >>> a.real = 9 >>> a array([ 9.+2.j, 9.+4.j, 9.+6.j]) >>> a.real = np.array([9, 8, 7]) >>> a array([ 9.+2.j, 8.+4.j, 7.+6.j]) >>> np.real(1 + 1j) 1.0 """ try: return val.real except AttributeError: return asanyarray(val).real def imag(val): """ Return the imaginary part of the complex argument. Parameters ---------- val : array_like Input array. Returns ------- out : ndarray or scalar The imaginary component of the complex argument. If `val` is real, the type of `val` is used for the output. 
If `val` has complex elements, the returned type is float. See Also -------- real, angle, real_if_close Examples -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([ 2., 4., 6.]) >>> a.imag = np.array([8, 10, 12]) >>> a array([ 1. +8.j, 3.+10.j, 5.+12.j]) >>> np.imag(1 + 1j) 1.0 """ try: return val.imag except AttributeError: return asanyarray(val).imag def iscomplex(x): """ Returns a bool array, with True where the input element is complex. What is tested is whether the input has a non-zero imaginary part, not if the input type is complex. Parameters ---------- x : array_like Input array. Returns ------- out : ndarray of bools Output array. See Also -------- isreal iscomplexobj : Return True if x is a complex type or an array of complex numbers. Examples -------- >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True]) """ ax = asanyarray(x) if issubclass(ax.dtype.type, _nx.complexfloating): return ax.imag != 0 res = zeros(ax.shape, bool) return +res # convert to array-scalar if needed def isreal(x): """ Returns a bool array, with True where the input element is real. If an element has a complex type with zero imaginary part, the return value for that element is True. Parameters ---------- x : array_like Input array. Returns ------- out : ndarray, bool Boolean array of same shape as `x`. See Also -------- iscomplex isrealobj : Return True if x is not a complex type. Examples -------- >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([False, True, True, True, True, False]) """ return imag(x) == 0 def iscomplexobj(x): """ Check for a complex type or an array of complex numbers. The type of the input is checked, not the value. Even if the input has an imaginary part equal to zero, `iscomplexobj` evaluates to True. Parameters ---------- x : any The input can be of any type and shape. Returns ------- iscomplexobj : bool The return value, True if `x` is of a complex type or has at least one complex element. See Also -------- isrealobj, iscomplex Examples -------- >>> np.iscomplexobj(1) False >>> np.iscomplexobj(1+0j) True >>> np.iscomplexobj([3, 1+0j, True]) True """ try: dtype = x.dtype type_ = dtype.type except AttributeError: type_ = asarray(x).dtype.type return issubclass(type_, _nx.complexfloating) def isrealobj(x): """ Return True if x is not a complex type nor an array of complex numbers. The type of the input is checked, not the value. So even if the input has an imaginary part equal to zero, `isrealobj` evaluates to False if the data type is complex. Parameters ---------- x : any The input can be of any type and shape. Returns ------- y : bool The return value, False if `x` is of a complex type. See Also -------- iscomplexobj, isreal Examples -------- >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) False >>> np.isrealobj([3, 1+0j, True]) False """ return not iscomplexobj(x) #----------------------------------------------------------------------------- def _getmaxmin(t): from numpy.core import getlimits f = getlimits.finfo(t) return f.max, f.min def nan_to_num(x, copy=True): """ Replace nan with zero and inf with large finite numbers. If `x` is inexact, NaN is replaced by zero, and infinity and -infinity are replaced by the largest and most negative finite floating point values, respectively, representable by ``x.dtype``. For complex dtypes, the above is applied to each of the real and imaginary components of `x` separately. If `x` is not inexact, then no replacements are made. Parameters ---------- x : array_like Input data.
copy : bool, optional Whether to create a copy of `x` (True) or to replace values in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. .. versionadded:: 1.13 Returns ------- out : ndarray `x`, with the non-finite values replaced. If `copy` is False, this may be `x` itself. See Also -------- isinf : Shows which elements are positive or negative infinity. isneginf : Shows which elements are negative infinity. isposinf : Shows which elements are positive infinity. isnan : Shows which elements are Not a Number (NaN). isfinite : Shows which elements are finite (not NaN, not infinity) Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Examples -------- >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, -1.28000000e+002, 1.28000000e+002]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) >>> np.nan_to_num(y) array([ 1.79769313e+308 +0.00000000e+000j, 0.00000000e+000 +0.00000000e+000j, 0.00000000e+000 +1.79769313e+308j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type if not issubclass(xtype, _nx.inexact): return x iscomplex = issubclass(xtype, _nx.complexfloating) isscalar = (x.ndim == 0) x = x[None] if isscalar else x dest = (x.real, x.imag) if iscomplex else (x,) maxf, minf = _getmaxmin(x.real.dtype) for d in dest: _nx.copyto(d, 0.0, where=isnan(d)) _nx.copyto(d, maxf, where=isposinf(d)) _nx.copyto(d, minf, where=isneginf(d)) return x[0] if isscalar else x #----------------------------------------------------------------------------- def real_if_close(a,tol=100): """ If complex input returns a real array if complex parts are close to zero. "Close to zero" is defined as `tol` * (machine epsilon of the type for `a`). Parameters ---------- a : array_like Input array. tol : float Tolerance in machine epsilons for the complex part of the elements in the array. Returns ------- out : ndarray If `a` is real, the type of `a` is used for the output. If `a` has complex elements, the returned type is float. See Also -------- real, imag, angle Notes ----- Machine epsilon varies from machine to machine and between data types but Python floats on most platforms have a machine epsilon equal to 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print out the machine epsilon for floats. Examples -------- >>> np.finfo(float).eps 2.2204460492503131e-16 >>> np.real_if_close([2.1 + 4e-14j], tol=1000) array([ 2.1]) >>> np.real_if_close([2.1 + 4e-13j], tol=1000) array([ 2.1 +4.00000000e-13j]) """ a = asanyarray(a) if not issubclass(a.dtype.type, _nx.complexfloating): return a if tol > 1: from numpy.core import getlimits f = getlimits.finfo(a.dtype.type) tol = f.eps * tol if _nx.all(_nx.absolute(a.imag) < tol): a = a.real return a def asscalar(a): """ Convert an array of size 1 to its scalar equivalent. Parameters ---------- a : ndarray Input array of size 1. Returns ------- out : scalar Scalar representation of `a`. The output data type is the same type returned by the input's `item` method. 
Examples -------- >>> np.asscalar(np.array([24])) 24 """ return a.item() #----------------------------------------------------------------------------- _namefromtype = {'S1': 'character', '?': 'bool', 'b': 'signed char', 'B': 'unsigned char', 'h': 'short', 'H': 'unsigned short', 'i': 'integer', 'I': 'unsigned integer', 'l': 'long integer', 'L': 'unsigned long integer', 'q': 'long long integer', 'Q': 'unsigned long long integer', 'f': 'single precision', 'd': 'double precision', 'g': 'long precision', 'F': 'complex single precision', 'D': 'complex double precision', 'G': 'complex long double precision', 'S': 'string', 'U': 'unicode', 'V': 'void', 'O': 'object' } def typename(char): """ Return a description for the given data type code. Parameters ---------- char : str Data type code. Returns ------- out : str Description of the input data type code. See Also -------- dtype, typecodes Examples -------- >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: ... print(typechar, ' : ', np.typename(typechar)) ... S1 : character ? : bool B : unsigned char D : complex double precision G : complex long double precision F : complex single precision I : unsigned integer H : unsigned short L : unsigned long integer O : object Q : unsigned long long integer S : string U : unicode V : void b : signed char d : double precision g : long precision f : single precision i : integer h : short l : long integer q : long long integer """ return _namefromtype[char] #----------------------------------------------------------------------------- #determine the "minimum common type" for a group of arrays. array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] array_precision = {_nx.half: 0, _nx.single: 1, _nx.double: 2, _nx.longdouble: 3, _nx.csingle: 1, _nx.cdouble: 2, _nx.clongdouble: 3} def common_type(*arrays): """ Return a scalar type which is common to the input arrays. The return type will always be an inexact (i.e. floating point) scalar type, even if all the arrays are integer arrays. If one of the inputs is an integer array, the minimum precision type that is returned is a 64-bit floating point dtype. All input arrays except int64 and uint64 can be safely cast to the returned dtype without loss of information. Parameters ---------- array1, array2, ... : ndarrays Input arrays. Returns ------- out : data type code Data type code. See Also -------- dtype, mintypecode Examples -------- >>> np.common_type(np.arange(2, dtype=np.float32)) <type 'numpy.float32'> >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) <type 'numpy.float64'> >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) <type 'numpy.complex128'> """ is_complex = False precision = 0 for a in arrays: t = a.dtype.type if iscomplexobj(a): is_complex = True if issubclass(t, _nx.integer): p = 2 # array_precision[_nx.double] else: p = array_precision.get(t, None) if p is None: raise TypeError("can't get common type for non-numeric array") precision = max(precision, p) if is_complex: return array_type[1][precision] else: return array_type[0][precision]
16,500
25.359425
80
py
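A brief, hedged demonstration of the helpers defined in type_check.py above; the outputs follow the docstrings, though exact float formatting can vary by platform.

import numpy as np

x = np.array([np.inf, -np.inf, np.nan, 1.5])
print(np.nan_to_num(x))                 # inf -> huge, -inf -> -huge, nan -> 0.0

z = np.array([2.1 + 4e-14j, 3.0 + 0j])
print(np.real_if_close(z, tol=1000))    # tiny imaginary parts are dropped

print(np.common_type(np.arange(3), np.array([1.0j])))  # numpy.complex128
print(np.mintypecode('fF'))             # 'F': smallest kind covering both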
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/shape_base.py
from __future__ import division, absolute_import, print_function import warnings import numpy.core.numeric as _nx from numpy.core.numeric import ( asarray, zeros, outer, concatenate, array, asanyarray ) from numpy.core.fromnumeric import product, reshape, transpose from numpy.core.multiarray import normalize_axis_index from numpy.core import vstack, atleast_3d from numpy.lib.index_tricks import ndindex from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells __all__ = [ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', 'apply_along_axis', 'kron', 'tile', 'get_array_wrap' ] def apply_along_axis(func1d, axis, arr, *args, **kwargs): """ Apply a function to 1-D slices along the given axis. Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` is a 1-D slice of `arr` along `axis`. This is equivalent to (but faster than) the following use of `ndindex` and `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): for kk in ndindex(Nk): f = func1d(arr[ii + s_[:,] + kk]) Nj = f.shape for jj in ndindex(Nj): out[ii + jj + kk] = f[jj] Equivalently, eliminating the inner loop, this can be expressed as:: Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): for kk in ndindex(Nk): out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) Parameters ---------- func1d : function (M,) -> (Nj...) This function should accept 1-D arrays. It is applied to 1-D slices of `arr` along the specified axis. axis : integer Axis along which `arr` is sliced. arr : ndarray (Ni..., M, Nk...) Input array. args : any Additional arguments to `func1d`. kwargs : any Additional named arguments to `func1d`. .. versionadded:: 1.9.0 Returns ------- out : ndarray (Ni..., Nj..., Nk...) The output array. The shape of `out` is identical to the shape of `arr`, except along the `axis` dimension. This axis is removed, and replaced with new dimensions equal to the shape of the return value of `func1d`. So if `func1d` returns a scalar `out` will have one fewer dimensions than `arr`. See Also -------- apply_over_axes : Apply a function repeatedly over multiple axes. Examples -------- >>> def my_func(a): ... \"\"\"Average first and last element of a 1-D array\"\"\" ... return (a[0] + a[-1]) * 0.5 >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) >>> np.apply_along_axis(my_func, 0, b) array([ 4., 5., 6.]) >>> np.apply_along_axis(my_func, 1, b) array([ 2., 5., 8.]) For a function that returns a 1D array, the number of dimensions in `outarr` is the same as `arr`. >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) >>> np.apply_along_axis(sorted, 1, b) array([[1, 7, 8], [3, 4, 9], [2, 5, 6]]) For a function that returns a higher dimensional array, those dimensions are inserted in place of the `axis` dimension. 
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) >>> np.apply_along_axis(np.diag, -1, b) array([[[1, 0, 0], [0, 2, 0], [0, 0, 3]], [[4, 0, 0], [0, 5, 0], [0, 0, 6]], [[7, 0, 0], [0, 8, 0], [0, 0, 9]]]) """ # handle negative axes arr = asanyarray(arr) nd = arr.ndim axis = normalize_axis_index(axis, nd) # arr, with the iteration axis at the end in_dims = list(range(nd)) inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) # compute indices for the iteration axes, and append a trailing ellipsis to # prevent 0d arrays decaying to scalars, which fixes gh-8642 inds = ndindex(inarr_view.shape[:-1]) inds = (ind + (Ellipsis,) for ind in inds) # invoke the function on the first item try: ind0 = next(inds) except StopIteration: raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0') res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) # build a buffer for storing evaluations of func1d. # remove the requested axis, and add the new ones on the end. # laid out so that each write is contiguous. # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype) # permutation of axes such that out = buff.transpose(buff_permute) buff_dims = list(range(buff.ndim)) buff_permute = ( buff_dims[0 : axis] + buff_dims[buff.ndim-res.ndim : buff.ndim] + buff_dims[axis : buff.ndim-res.ndim] ) # matrices have a nasty __array_prepare__ and __array_wrap__ if not isinstance(res, matrix): buff = res.__array_prepare__(buff) # save the first result, then compute and save all remaining results buff[ind0] = res for ind in inds: buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) if not isinstance(res, matrix): # wrap the array, to preserve subclasses buff = res.__array_wrap__(buff) # finally, rotate the inserted axes back to where they belong return transpose(buff, buff_permute) else: # matrices have to be transposed first, because they collapse dimensions! out_arr = transpose(buff, buff_permute) return res.__array_wrap__(out_arr) def apply_over_axes(func, a, axes): """ Apply a function repeatedly over multiple axes. `func` is called as `res = func(a, axis)`, where `axis` is the first element of `axes`. The result `res` of the function call must have either the same dimensions as `a` or one less dimension. If `res` has one less dimension than `a`, a dimension is inserted before `axis`. The call to `func` is then repeated for each axis in `axes`, with `res` as the first argument. Parameters ---------- func : function This function must take two arguments, `func(a, axis)`. a : array_like Input array. axes : array_like Axes over which `func` is applied; the elements must be integers. Returns ------- apply_over_axis : ndarray The output array. The number of dimensions is the same as `a`, but the shape can be different. This depends on whether `func` changes the shape of its output with respect to its input. See Also -------- apply_along_axis : Apply a function to 1-D slices of an array along the given axis. Notes ------ This function is equivalent to tuple axis arguments to reorderable ufuncs with keepdims=True. Tuple axis arguments to ufuncs have been available since version 1.7.0. Examples -------- >>> a = np.arange(24).reshape(2,3,4) >>> a array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]) Sum over axes 0 and 2. 
The result has same number of dimensions as the original array: >>> np.apply_over_axes(np.sum, a, [0,2]) array([[[ 60], [ 92], [124]]]) Tuple axis arguments to ufuncs are equivalent: >>> np.sum(a, axis=(0,2), keepdims=True) array([[[ 60], [ 92], [124]]]) """ val = asarray(a) N = a.ndim if array(axes).ndim == 0: axes = (axes,) for axis in axes: if axis < 0: axis = N + axis args = (val, axis) res = func(*args) if res.ndim == val.ndim: val = res else: res = expand_dims(res, axis) if res.ndim == val.ndim: val = res else: raise ValueError("function is not returning " "an array of the correct shape") return val def expand_dims(a, axis): """ Expand the shape of an array. Insert a new axis that will appear at the `axis` position in the expanded array shape. .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor ``axis > a.ndim`` raised errors or put the new axis where documented. Those axis values are now deprecated and will raise an AxisError in the future. Parameters ---------- a : array_like Input array. axis : int Position in the expanded axes where the new axis is placed. Returns ------- res : ndarray Output array. The number of dimensions is one greater than that of the input array. See Also -------- squeeze : The inverse operation, removing singleton dimensions reshape : Insert, remove, and combine dimensions, and resize existing ones doc.indexing, atleast_1d, atleast_2d, atleast_3d Examples -------- >>> x = np.array([1,2]) >>> x.shape (2,) The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: >>> y = np.expand_dims(x, axis=0) >>> y array([[1, 2]]) >>> y.shape (1, 2) >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis] >>> y array([[1], [2]]) >>> y.shape (2, 1) Note that some examples may use ``None`` instead of ``np.newaxis``. These are the same objects: >>> np.newaxis is None True """ a = asarray(a) shape = a.shape if axis > a.ndim or axis < -a.ndim - 1: # 2017-05-17, 1.13.0 warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are " "deprecated and will raise an AxisError in the future.", DeprecationWarning, stacklevel=2) # When the deprecation period expires, delete this if block, if axis < 0: axis = axis + a.ndim + 1 # and uncomment the following line. # axis = normalize_axis_index(axis, a.ndim + 1) return a.reshape(shape[:axis] + (1,) + shape[axis:]) row_stack = vstack def column_stack(tup): """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with `hstack`. 1-D arrays are turned into 2-D columns first. Parameters ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. Returns ------- stacked : 2-D array The array formed by stacking the given arrays. See Also -------- stack, hstack, vstack, concatenate Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], [2, 3], [3, 4]]) """ arrays = [] for v in tup: arr = array(v, copy=False, subok=True) if arr.ndim < 2: arr = array(arr, copy=False, subok=True, ndmin=2).T arrays.append(arr) return _nx.concatenate(arrays, 1) def dstack(tup): """ Stack arrays in sequence depth wise (along third axis). This is equivalent to concatenation along the third axis after 2-D arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by `dsplit`. 
This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of arrays The arrays must have the same shape along all but the third axis. 1-D or 2-D arrays must have the same shape. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 3-D. See Also -------- stack : Join a sequence of arrays along a new axis. vstack : Stack along first axis. hstack : Stack along second axis. concatenate : Join a sequence of arrays along an existing axis. dsplit : Split array along third axis. Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.dstack((a,b)) array([[[1, 2], [2, 3], [3, 4]]]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[2],[3],[4]]) >>> np.dstack((a,b)) array([[[1, 2]], [[2, 3]], [[3, 4]]]) """ return _nx.concatenate([atleast_3d(_m) for _m in tup], 2) def _replace_zero_by_x_arrays(sub_arys): for i in range(len(sub_arys)): if _nx.ndim(sub_arys[i]) == 0: sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) return sub_arys def array_split(ary, indices_or_sections, axis=0): """ Split an array into multiple sub-arrays. Please refer to the ``split`` documentation. The only difference between these functions is that ``array_split`` allows `indices_or_sections` to be an integer that does *not* equally divide the axis. For an array of length l that should be split into n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n. See Also -------- split : Split array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] >>> x = np.arange(7.0) >>> np.array_split(x, 3) [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])] """ try: Ntotal = ary.shape[axis] except AttributeError: Ntotal = len(ary) try: # handle scalar case. Nsections = len(indices_or_sections) + 1 div_points = [0] + list(indices_or_sections) + [Ntotal] except TypeError: # indices_or_sections is a scalar, not an array. Nsections = int(indices_or_sections) if Nsections <= 0: raise ValueError('number sections must be larger than 0.') Neach_section, extras = divmod(Ntotal, Nsections) section_sizes = ([0] + extras * [Neach_section+1] + (Nsections-extras) * [Neach_section]) div_points = _nx.array(section_sizes).cumsum() sub_arys = [] sary = _nx.swapaxes(ary, axis, 0) for i in range(Nsections): st = div_points[i] end = div_points[i + 1] sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) return sub_arys def split(ary,indices_or_sections,axis=0): """ Split an array into multiple sub-arrays. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int or 1-D array If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. 
axis : int, optional The axis along which to split, default is 0. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. Raises ------ ValueError If `indices_or_sections` is given as an integer, but a split does not result in equal division. See Also -------- array_split : Split an array into multiple sub-arrays of equal or near-equal size. Does not raise an exception if an equal division cannot be made. hsplit : Split array into multiple sub-arrays horizontally (column-wise). vsplit : Split array into multiple sub-arrays vertically (row wise). dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. hstack : Stack arrays in sequence horizontally (column wise). vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third dimension). Examples -------- >>> x = np.arange(9.0) >>> np.split(x, 3) [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] >>> x = np.arange(8.0) >>> np.split(x, [3, 5, 6, 10]) [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5.]), array([ 6., 7.]), array([], dtype=float64)] """ try: len(indices_or_sections) except TypeError: sections = indices_or_sections N = ary.shape[axis] if N % sections: raise ValueError( 'array split does not result in an equal division') res = array_split(ary, indices_or_sections, axis) return res def hsplit(ary, indices_or_sections): """ Split an array into multiple sub-arrays horizontally (column-wise). Please refer to the `split` documentation. `hsplit` is equivalent to `split` with ``axis=1``, the array is always split along the second axis regardless of the array dimension. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [ 12., 13.]]), array([[ 2., 3.], [ 6., 7.], [ 10., 11.], [ 14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [ 12., 13., 14.]]), array([[ 3.], [ 7.], [ 11.], [ 15.]]), array([], dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[ 0., 1.], [ 2., 3.]], [[ 4., 5.], [ 6., 7.]]]) >>> np.hsplit(x, 2) [array([[[ 0., 1.]], [[ 4., 5.]]]), array([[[ 2., 3.]], [[ 6., 7.]]])] """ if _nx.ndim(ary) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if ary.ndim > 1: return split(ary, indices_or_sections, 1) else: return split(ary, indices_or_sections, 0) def vsplit(ary, indices_or_sections): """ Split an array into multiple sub-arrays vertically (row-wise). Please refer to the ``split`` documentation. ``vsplit`` is equivalent to ``split`` with `axis=0` (default), the array is always split along the first axis regardless of the array dimension. See Also -------- split : Split an array into multiple sub-arrays of equal size. 
Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) >>> np.vsplit(x, 2) [array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], [ 12., 13., 14., 15.]])] >>> np.vsplit(x, np.array([3, 6])) [array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]), array([[ 12., 13., 14., 15.]]), array([], dtype=float64)] With a higher dimensional array the split is still along the first axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[ 0., 1.], [ 2., 3.]], [[ 4., 5.], [ 6., 7.]]]) >>> np.vsplit(x, 2) [array([[[ 0., 1.], [ 2., 3.]]]), array([[[ 4., 5.], [ 6., 7.]]])] """ if _nx.ndim(ary) < 2: raise ValueError('vsplit only works on arrays of 2 or more dimensions') return split(ary, indices_or_sections, 0) def dsplit(ary, indices_or_sections): """ Split array into multiple sub-arrays along the 3rd axis (depth). Please refer to the `split` documentation. `dsplit` is equivalent to `split` with ``axis=2``, the array is always split along the third axis provided the array dimension is greater than or equal to 3. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x array([[[ 0., 1., 2., 3.], [ 4., 5., 6., 7.]], [[ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]]) >>> np.dsplit(x, 2) [array([[[ 0., 1.], [ 4., 5.]], [[ 8., 9.], [ 12., 13.]]]), array([[[ 2., 3.], [ 6., 7.]], [[ 10., 11.], [ 14., 15.]]])] >>> np.dsplit(x, np.array([3, 6])) [array([[[ 0., 1., 2.], [ 4., 5., 6.]], [[ 8., 9., 10.], [ 12., 13., 14.]]]), array([[[ 3.], [ 7.]], [[ 11.], [ 15.]]]), array([], dtype=float64)] """ if _nx.ndim(ary) < 3: raise ValueError('dsplit only works on arrays of 3 or more dimensions') return split(ary, indices_or_sections, 2) def get_array_prepare(*args): """Find the wrapper for the array with the highest priority. In case of ties, leftmost wins. If no wrapper is found, return None """ wrappers = sorted((getattr(x, '__array_priority__', 0), -i, x.__array_prepare__) for i, x in enumerate(args) if hasattr(x, '__array_prepare__')) if wrappers: return wrappers[-1][-1] return None def get_array_wrap(*args): """Find the wrapper for the array with the highest priority. In case of ties, leftmost wins. If no wrapper is found, return None """ wrappers = sorted((getattr(x, '__array_priority__', 0), -i, x.__array_wrap__) for i, x in enumerate(args) if hasattr(x, '__array_wrap__')) if wrappers: return wrappers[-1][-1] return None def kron(a, b): """ Kronecker product of two arrays. Computes the Kronecker product, a composite array made of blocks of the second array scaled by the first. Parameters ---------- a, b : array_like Returns ------- out : ndarray See Also -------- outer : The outer product Notes ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. The elements are products of elements from `a` and `b`, organized explicitly by:: kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] where:: kt = it * st + jt, t = 0,...,N In the common 2-D case (N=1), the block structure can be visualized:: [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], [ ... ... ], [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]] Examples -------- >>> np.kron([1,10,100], [5,6,7]) array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) >>> np.kron(np.eye(2), np.ones((2,2))) array([[ 1., 1., 0., 0.], [ 1., 1., 0., 0.], [ 0., 0., 1., 1.], [ 0., 0., 1., 1.]]) >>> a = np.arange(100).reshape((2,5,2,5)) >>> b = np.arange(24).reshape((2,3,4)) >>> c = np.kron(a,b) >>> c.shape (2, 10, 6, 20) >>> I = (1,3,0,2) >>> J = (0,2,1) >>> J1 = (0,) + J # extend to ndim=4 >>> S1 = (1,) + b.shape >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) >>> c[K] == a[I]*b[J] True """ b = asanyarray(b) a = array(a, copy=False, subok=True, ndmin=b.ndim) ndb, nda = b.ndim, a.ndim if (nda == 0 or ndb == 0): return _nx.multiply(a, b) as_ = a.shape bs = b.shape if not a.flags.contiguous: a = reshape(a, as_) if not b.flags.contiguous: b = reshape(b, bs) nd = ndb if (ndb != nda): if (ndb > nda): as_ = (1,)*(ndb-nda) + as_ else: bs = (1,)*(nda-ndb) + bs nd = nda result = outer(a, b).reshape(as_+bs) axis = nd-1 for _ in range(nd): result = concatenate(result, axis=axis) wrapper = get_array_prepare(a, b) if wrapper is not None: result = wrapper(result) wrapper = get_array_wrap(a, b) if wrapper is not None: result = wrapper(result) return result def tile(A, reps): """ Construct an array by repeating A the number of times given by reps. If `reps` has length ``d``, the result will have dimension of ``max(d, A.ndim)``. If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, or shape (1, 1, 3) for 3-D replication. If this is not the desired behavior, promote `A` to d-dimensions manually before calling this function. If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2). Note : Although tile may be used for broadcasting, it is strongly recommended to use numpy's broadcasting operations and functions. Parameters ---------- A : array_like The input array. reps : array_like The number of repetitions of `A` along each axis. Returns ------- c : ndarray The tiled output array. See Also -------- repeat : Repeat elements of an array. broadcast_to : Broadcast an array to a new shape Examples -------- >>> a = np.array([0, 1, 2]) >>> np.tile(a, 2) array([0, 1, 2, 0, 1, 2]) >>> np.tile(a, (2, 2)) array([[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) >>> np.tile(a, (2, 1, 2)) array([[[0, 1, 2, 0, 1, 2]], [[0, 1, 2, 0, 1, 2]]]) >>> b = np.array([[1, 2], [3, 4]]) >>> np.tile(b, 2) array([[1, 2, 1, 2], [3, 4, 3, 4]]) >>> np.tile(b, (2, 1)) array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> c = np.array([1,2,3,4]) >>> np.tile(c,(4,1)) array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) """ try: tup = tuple(reps) except TypeError: tup = (reps,) d = len(tup) if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): # Fixes the problem that the function does not make a copy if A is a # numpy array and the repetitions are 1 in all dimensions return _nx.array(A, copy=True, subok=True, ndmin=d) else: # Note that no copy of zero-sized arrays is made. However since they # have no data there is no risk of an inadvertent overwrite. 
c = _nx.array(A, copy=False, subok=True, ndmin=d) if (d < c.ndim): tup = (1,)*(c.ndim-d) + tup shape_out = tuple(s*t for s, t in zip(c.shape, tup)) n = c.size if n > 0: for dim_in, nrep in zip(c.shape, tup): if nrep != 1: c = c.reshape(-1, n).repeat(nrep, 0) n //= dim_in return c.reshape(shape_out)
28,668
29.72776
87
py
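A small, hedged exercise of `apply_along_axis` and `kron` from shape_base.py above, consistent with their docstrings; nothing here is specific to this repository.

import numpy as np

b = np.array([[8, 1, 7], [4, 3, 9], [5, 2, 6]])
# func1d returning a scalar: the sliced axis is removed.
print(np.apply_along_axis(np.ptp, 1, b))      # [7 6 4]
# func1d returning a 1-D array: the axis is replaced by the result shape.
print(np.apply_along_axis(sorted, 1, b))      # each row sorted

# kron: every element of the first array scales a full copy of the second.
print(np.kron(np.eye(2, dtype=int), np.full((2, 2), 9, dtype=int)))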
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/_datasource.py
"""A file interface for handling local and remote data files. The goal of datasource is to abstract some of the file system operations when dealing with data files so the researcher doesn't have to know all the low-level details. Through datasource, a researcher can obtain and use a file with one function call, regardless of location of the file. DataSource is meant to augment standard python libraries, not replace them. It should work seamlessly with standard file IO operations and the os module. DataSource files can originate locally or remotely: - local files : '/home/guido/src/local/data.txt' - URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' DataSource files can also be compressed or uncompressed. Currently only gzip, bz2 and xz are supported. Example:: >>> # Create a DataSource, use os.curdir (default) for local storage. >>> ds = datasource.DataSource() >>> >>> # Open a remote file. >>> # DataSource downloads the file, stores it locally in: >>> # './www.google.com/index.html' >>> # opens the file and returns a file object. >>> fp = ds.open('http://www.google.com/index.html') >>> >>> # Use the file as you normally would >>> fp.read() >>> fp.close() """ from __future__ import division, absolute_import, print_function import os import sys import shutil import io _open = open def _check_mode(mode, encoding, newline): """Check mode and that encoding and newline are compatible. Parameters ---------- mode : str File open mode. encoding : str File encoding. newline : str Newline for text files. """ if "t" in mode: if "b" in mode: raise ValueError("Invalid mode: %r" % (mode,)) else: if encoding is not None: raise ValueError("Argument 'encoding' not supported in binary mode") if newline is not None: raise ValueError("Argument 'newline' not supported in binary mode") def _python2_bz2open(fn, mode, encoding, newline): """Wrapper to open bz2 in text mode. Parameters ---------- fn : str File name mode : {'r', 'w'} File mode. Note that bz2 Text files are not supported. encoding : str Ignored, text bz2 files not supported in Python2. newline : str Ignored, text bz2 files not supported in Python2. """ import bz2 _check_mode(mode, encoding, newline) if "t" in mode: # BZ2File is missing necessary functions for TextIOWrapper raise ValueError("bz2 text files not supported in python2") else: return bz2.BZ2File(fn, mode) def _python2_gzipopen(fn, mode, encoding, newline): """ Wrapper to open gzip in text mode. Parameters ---------- fn : str, bytes, file File path or opened file. mode : str File mode. The actual files are opened as binary, but will decoded using the specified `encoding` and `newline`. encoding : str Encoding to be used when reading/writing as text. newline : str Newline to be used when reading/writing as text. """ import gzip # gzip is lacking read1 needed for TextIOWrapper class GzipWrap(gzip.GzipFile): def read1(self, n): return self.read(n) _check_mode(mode, encoding, newline) gz_mode = mode.replace("t", "") if isinstance(fn, (str, bytes)): binary_file = GzipWrap(fn, gz_mode) elif hasattr(fn, "read") or hasattr(fn, "write"): binary_file = GzipWrap(None, gz_mode, fileobj=fn) else: raise TypeError("filename must be a str or bytes object, or a file") if "t" in mode: return io.TextIOWrapper(binary_file, encoding, newline=newline) else: return binary_file # Using a class instead of a module-level dictionary # to reduce the initial 'import numpy' overhead by # deferring the import of lzma, bz2 and gzip until needed # TODO: .zip support, .tar support? 
class _FileOpeners(object): """ Container for different methods to open (un-)compressed files. `_FileOpeners` contains a dictionary that holds one method for each supported file format. Attribute lookup is implemented in such a way that an instance of `_FileOpeners` itself can be indexed with the keys of that dictionary. Currently uncompressed files as well as files compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. Notes ----- `_file_openers`, an instance of `_FileOpeners`, is made available for use in the `_datasource` module. Examples -------- >>> np.lib._datasource._file_openers.keys() [None, '.bz2', '.gz', '.xz', '.lzma'] >>> np.lib._datasource._file_openers['.gz'] is gzip.open True """ def __init__(self): self._loaded = False self._file_openers = {None: io.open} def _load(self): if self._loaded: return try: import bz2 if sys.version_info[0] >= 3: self._file_openers[".bz2"] = bz2.open else: self._file_openers[".bz2"] = _python2_bz2open except ImportError: pass try: import gzip if sys.version_info[0] >= 3: self._file_openers[".gz"] = gzip.open else: self._file_openers[".gz"] = _python2_gzipopen except ImportError: pass try: import lzma self._file_openers[".xz"] = lzma.open self._file_openers[".lzma"] = lzma.open except (ImportError, AttributeError): # There are incompatible backports of lzma that do not have the # lzma.open attribute, so catch that as well as ImportError. pass self._loaded = True def keys(self): """ Return the keys of currently supported file openers. Parameters ---------- None Returns ------- keys : list The keys are None for uncompressed files and the file extension strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression methods. """ self._load() return list(self._file_openers.keys()) def __getitem__(self, key): self._load() return self._file_openers[key] _file_openers = _FileOpeners() def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): """ Open `path` with `mode` and return the file object. If ``path`` is an URL, it will be downloaded, stored in the `DataSource` `destpath` directory and opened from there. Parameters ---------- path : str Local file path or URL to open. mode : str, optional Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by path. Default is 'r'. destpath : str, optional Path to the directory where the source file gets downloaded to for use. If `destpath` is None, a temporary directory will be created. The default path is the current directory. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what `io.open` uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object The opened file. Notes ----- This is a convenience function that instantiates a `DataSource` and returns the file object from ``DataSource.open(path)``. """ ds = DataSource(destpath) return ds.open(path, mode, encoding=encoding, newline=newline) class DataSource (object): """ DataSource(destpath='.') A generic data source file (file, http, ftp, ...). DataSources can be local files or remote files/URLs. The files may also be compressed or uncompressed. DataSource hides some of the low-level details of downloading the file, allowing you to simply pass in a valid file path (or URL) and obtain a file object. Parameters ---------- destpath : str or None, optional Path to the directory where the source file gets downloaded to for use. 
If `destpath` is None, a temporary directory will be created. The default path is the current directory. Notes ----- URLs require a scheme string (``http://``) to be used, without it they will fail:: >>> repos = DataSource() >>> repos.exists('www.google.com/index.html') False >>> repos.exists('http://www.google.com/index.html') True Temporary directories are deleted when the DataSource is deleted. Examples -------- :: >>> ds = DataSource('/home/guido') >>> urlname = 'http://www.google.com/index.html' >>> gfile = ds.open('http://www.google.com/index.html') # remote file >>> ds.abspath(urlname) '/home/guido/www.google.com/site/index.html' >>> ds = DataSource(None) # use with temporary file >>> ds.open('/home/guido/foobar.txt') <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430> >>> ds.abspath('/home/guido/foobar.txt') '/tmp/tmpy4pgsP/home/guido/foobar.txt' """ def __init__(self, destpath=os.curdir): """Create a DataSource with a local path at destpath.""" if destpath: self._destpath = os.path.abspath(destpath) self._istmpdest = False else: import tempfile # deferring import to improve startup time self._destpath = tempfile.mkdtemp() self._istmpdest = True def __del__(self): # Remove temp directories if self._istmpdest: shutil.rmtree(self._destpath) def _iszip(self, filename): """Test if the filename is a zip file by looking at the file extension. """ fname, ext = os.path.splitext(filename) return ext in _file_openers.keys() def _iswritemode(self, mode): """Test if the given mode will open a file for writing.""" # Currently only used to test the bz2 files. _writemodes = ("w", "+") for c in mode: if c in _writemodes: return True return False def _splitzipext(self, filename): """Split zip extension from filename and return filename. *Returns*: base, zip_ext : {tuple} """ if self._iszip(filename): return os.path.splitext(filename) else: return filename, None def _possible_names(self, filename): """Return a tuple containing compressed filename variations.""" names = [filename] if not self._iszip(filename): for zipext in _file_openers.keys(): if zipext: names.append(filename+zipext) return names def _isurl(self, path): """Test if path is a net location. Tests the scheme and netloc.""" # We do this here to reduce the 'import numpy' initial import time. if sys.version_info[0] >= 3: from urllib.parse import urlparse else: from urlparse import urlparse # BUG : URLs require a scheme string ('http://') to be used. # www.google.com will fail. # Should we prepend the scheme for those that don't have it and # test that also? Similar to the way we append .gz and test for # for compressed versions of files. scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) return bool(scheme and netloc) def _cache(self, path): """Cache the file specified by path. Creates a copy of the file in the datasource cache. """ # We import these here because importing urllib2 is slow and # a significant fraction of numpy's total import time. if sys.version_info[0] >= 3: from urllib.request import urlopen from urllib.error import URLError else: from urllib2 import urlopen from urllib2 import URLError upath = self.abspath(path) # ensure directory exists if not os.path.exists(os.path.dirname(upath)): os.makedirs(os.path.dirname(upath)) # TODO: Doesn't handle compressed files! 
if self._isurl(path): try: openedurl = urlopen(path) f = _open(upath, 'wb') try: shutil.copyfileobj(openedurl, f) finally: f.close() openedurl.close() except URLError: raise URLError("URL not found: %s" % path) else: shutil.copyfile(path, upath) return upath def _findfile(self, path): """Searches for ``path`` and returns full path if found. If path is an URL, _findfile will cache a local copy and return the path to the cached file. If path is a local file, _findfile will return a path to that local file. The search will include possible compressed versions of the file and return the first occurrence found. """ # Build list of possible local file paths if not self._isurl(path): # Valid local paths filelist = self._possible_names(path) # Paths in self._destpath filelist += self._possible_names(self.abspath(path)) else: # Cached URLs in self._destpath filelist = self._possible_names(self.abspath(path)) # Remote URLs filelist = filelist + self._possible_names(path) for name in filelist: if self.exists(name): if self._isurl(name): name = self._cache(name) return name return None def abspath(self, path): """ Return absolute path of file in the DataSource directory. If `path` is an URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. Parameters ---------- path : str Can be a local file or a remote URL. Returns ------- out : str Complete path, including the `DataSource` destination directory. Notes ----- The functionality is based on `os.path.abspath`. """ # We do this here to reduce the 'import numpy' initial import time. if sys.version_info[0] >= 3: from urllib.parse import urlparse else: from urlparse import urlparse # TODO: This should be more robust. Handles case where path includes # the destpath, but not other sub-paths. Failing case: # path = /home/guido/datafile.txt # destpath = /home/alex/ # upath = self.abspath(path) # upath == '/home/alex/home/guido/datafile.txt' # handle case where path includes self._destpath splitpath = path.split(self._destpath, 2) if len(splitpath) > 1: path = splitpath[1] scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) netloc = self._sanitize_relative_path(netloc) upath = self._sanitize_relative_path(upath) return os.path.join(self._destpath, netloc, upath) def _sanitize_relative_path(self, path): """Return a sanitised relative path for which os.path.abspath(os.path.join(base, path)).startswith(base) """ last = None path = os.path.normpath(path) while path != last: last = path # Note: os.path.join treats '/' as os.sep on Windows path = path.lstrip(os.sep).lstrip('/') path = path.lstrip(os.pardir).lstrip('..') drive, path = os.path.splitdrive(path) # for Windows return path def exists(self, path): """ Test if path exists. Test if `path` exists as (and in this order): - a local file. - a remote URL that has been downloaded and stored locally in the `DataSource` directory. - a remote URL that has not been downloaded, but is valid and accessible. Parameters ---------- path : str Can be a local file or a remote URL. Returns ------- out : bool True if `path` exists. Notes ----- When `path` is an URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. """ # We import this here because importing urllib2 is slow and # a significant fraction of numpy's total import time. 
if sys.version_info[0] >= 3: from urllib.request import urlopen from urllib.error import URLError else: from urllib2 import urlopen from urllib2 import URLError # Test local path if os.path.exists(path): return True # Test cached url upath = self.abspath(path) if os.path.exists(upath): return True # Test remote url if self._isurl(path): try: netfile = urlopen(path) netfile.close() del netfile return True except URLError: return False return False def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. If `path` is an URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters ---------- path : str Local file path or URL to open. mode : {'r', 'w', 'a'}, optional Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by `path`. Default is 'r'. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what `io.open` uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object File object. """ # TODO: There is no support for opening a file for writing which # doesn't exist yet (creating a file). Should there be? # TODO: Add a ``subdir`` parameter for specifying the subdirectory # used to store URLs in self._destpath. if self._isurl(path) and self._iswritemode(mode): raise ValueError("URLs are not writeable") # NOTE: _findfile will fail on a new file opened for writing. found = self._findfile(path) if found: _fname, ext = self._splitzipext(found) if ext == 'bz2': # bz2 does not support modes containing '+'; strip it mode = mode.replace("+", "") return _file_openers[ext](found, mode=mode, encoding=encoding, newline=newline) else: raise IOError("%s not found." % path) class Repository (DataSource): """ Repository(baseurl, destpath='.') A data repository where multiple DataSources share a base URL/directory. `Repository` extends `DataSource` by prepending a base URL (or directory) to all the files it handles. Use `Repository` when you will be working with multiple files from one base URL. Initialize `Repository` with the base URL, then refer to each file by its filename only. Parameters ---------- baseurl : str Path to the local directory or remote location that contains the data files. destpath : str or None, optional Path to the directory where the source file gets downloaded to for use. If `destpath` is None, a temporary directory will be created. The default path is the current directory. Examples -------- To analyze all files in the repository, do something like this (note: this is not self-contained code):: >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') >>> for filename in filelist: ... fp = repos.open(filename) ... fp.analyze() ... fp.close() Similarly you could use a URL for a repository:: >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') """ def __init__(self, baseurl, destpath=os.curdir): """Create a Repository with a shared url or directory of baseurl.""" DataSource.__init__(self, destpath=destpath) self._baseurl = baseurl def __del__(self): DataSource.__del__(self) def _fullpath(self, path): """Return complete path for path.
        Prepends baseurl if necessary."""
        splitpath = path.split(self._baseurl, 2)
        if len(splitpath) == 1:
            result = os.path.join(self._baseurl, path)
        else:
            result = path    # path contains baseurl already
        return result

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open. This may, but does not have to,
            include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `io.open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode,
                               encoding=encoding, newline=newline)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                  "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
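
# Hedged usage sketch (illustrative only, not part of the library API):
# exercises the DataSource workflow end to end with a throwaway local file,
# so nothing here requires network access.  The file and directory names
# are hypothetical.  Note the module-level ``open`` is shadowed, so the
# demo writes its fixture through ``io.open``.
if __name__ == '__main__':
    import io
    import tempfile

    demo_dir = tempfile.mkdtemp()
    demo_file = os.path.join(demo_dir, 'example.txt')
    with io.open(demo_file, 'w') as f:
        f.write(u'hello datasource\n')

    ds = DataSource(destpath=demo_dir)
    assert ds.exists(demo_file)      # found as a plain local file
    fp = ds.open(demo_file)          # opener dispatch keyed on extension
    try:
        print(fp.readline())         # -> hello datasource
    finally:
        fp.close()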
25,311
31.121827
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/twodim_base.py
""" Basic functions for manipulating 2d arrays """ from __future__ import division, absolute_import, print_function from numpy.core.numeric import ( absolute, asanyarray, arange, zeros, greater_equal, multiply, ones, asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal, nonzero ) from numpy.core import iinfo, transpose __all__ = [ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] i1 = iinfo(int8) i2 = iinfo(int16) i4 = iinfo(int32) def _min_int(low, high): """ get small int that fits the range """ if high <= i1.max and low >= i1.min: return int8 if high <= i2.max and low >= i2.min: return int16 if high <= i4.max and low >= i4.min: return int32 return int64 def fliplr(m): """ Flip array in the left/right direction. Flip the entries in each row in the left/right direction. Columns are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array, must be at least 2-D. Returns ------- f : ndarray A view of `m` with the columns reversed. Since a view is returned, this operation is :math:`\\mathcal O(1)`. See Also -------- flipud : Flip array in the up/down direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to m[:,::-1]. Requires the array to be at least 2-D. Examples -------- >>> A = np.diag([1.,2.,3.]) >>> A array([[ 1., 0., 0.], [ 0., 2., 0.], [ 0., 0., 3.]]) >>> np.fliplr(A) array([[ 0., 0., 1.], [ 0., 2., 0.], [ 3., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.fliplr(A) == A[:,::-1,...]) True """ m = asanyarray(m) if m.ndim < 2: raise ValueError("Input must be >= 2-d.") return m[:, ::-1] def flipud(m): """ Flip array in the up/down direction. Flip the entries in each column in the up/down direction. Rows are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array. Returns ------- out : array_like A view of `m` with the rows reversed. Since a view is returned, this operation is :math:`\\mathcal O(1)`. See Also -------- fliplr : Flip array in the left/right direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to ``m[::-1,...]``. Does not require the array to be two-dimensional. Examples -------- >>> A = np.diag([1.0, 2, 3]) >>> A array([[ 1., 0., 0.], [ 0., 2., 0.], [ 0., 0., 3.]]) >>> np.flipud(A) array([[ 0., 0., 3.], [ 0., 2., 0.], [ 1., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.flipud(A) == A[::-1,...]) True >>> np.flipud([1,2]) array([2, 1]) """ m = asanyarray(m) if m.ndim < 1: raise ValueError("Input must be >= 1-d.") return m[::-1, ...] def eye(N, M=None, k=0, dtype=float, order='C'): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. .. versionadded:: 1.14.0 Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. 
See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[ 0., 1., 0.], [ 0., 0., 1.], [ 0., 0., 0.]]) """ if M is None: M = N m = zeros((N, M), dtype=dtype, order=order) if k >= M: return m if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m def diag(v, k=0): """ Extract a diagonal or construct a diagonal array. See the more detailed documentation for ``numpy.diagonal`` if you use this function to extract a diagonal and wish to write to the resulting array; whether it returns a copy or a view depends on what version of numpy you are using. Parameters ---------- v : array_like If `v` is a 2-D array, return a copy of its `k`-th diagonal. If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th diagonal. k : int, optional Diagonal in question. The default is 0. Use `k>0` for diagonals above the main diagonal, and `k<0` for diagonals below the main diagonal. Returns ------- out : ndarray The extracted diagonal or constructed diagonal array. See Also -------- diagonal : Return specified diagonals. diagflat : Create a 2-D array with the flattened input as a diagonal. trace : Sum along diagonals. triu : Upper triangle of an array. tril : Lower triangle of an array. Examples -------- >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> np.diag(x) array([0, 4, 8]) >>> np.diag(x, k=1) array([1, 5]) >>> np.diag(x, k=-1) array([3, 7]) >>> np.diag(np.diag(x)) array([[0, 0, 0], [0, 4, 0], [0, 0, 8]]) """ v = asanyarray(v) s = v.shape if len(s) == 1: n = s[0]+abs(k) res = zeros((n, n), v.dtype) if k >= 0: i = k else: i = (-k) * n res[:n-k].flat[i::n+1] = v return res elif len(s) == 2: return diagonal(v, k) else: raise ValueError("Input must be 1- or 2-d.") def diagflat(v, k=0): """ Create a two-dimensional array with the flattened input as a diagonal. Parameters ---------- v : array_like Input data, which is flattened and set as the `k`-th diagonal of the output. k : int, optional Diagonal to set; 0, the default, corresponds to the "main" diagonal, a positive (negative) `k` giving the number of the diagonal above (below) the main. Returns ------- out : ndarray The 2-D output array. See Also -------- diag : MATLAB work-alike for 1-D and 2-D arrays. diagonal : Return specified diagonals. trace : Sum along diagonals. Examples -------- >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) >>> np.diagflat([1,2], 1) array([[0, 1, 0], [0, 0, 2], [0, 0, 0]]) """ try: wrap = v.__array_wrap__ except AttributeError: wrap = None v = asarray(v).ravel() s = len(v) n = s + abs(k) res = zeros((n, n), v.dtype) if (k >= 0): i = arange(0, n-k) fi = i+k+i*n else: i = arange(0, n+k) fi = i+(i-k)*n res.flat[fi] = v if not wrap: return res return wrap(res) def tri(N, M=None, k=0, dtype=float): """ An array with ones at and below the given diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the array. M : int, optional Number of columns in the array. By default, `M` is taken equal to `N`. k : int, optional The sub-diagonal at and below which the array is filled. `k` = 0 is the main diagonal, while `k` < 0 is below it, and `k` > 0 is above. The default is 0. dtype : dtype, optional Data type of the returned array. The default is float. 
    Returns
    -------
    tri : ndarray of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.

    Examples
    --------
    >>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])

    >>> np.tri(3, 5, -1)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 1.,  0.,  0.,  0.,  0.],
           [ 1.,  1.,  0.,  0.,  0.]])

    """
    if M is None:
        M = N

    m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
                            arange(-k, M-k, dtype=_min_int(-k, M - k)))

    # Avoid making a copy if the requested type is already bool
    m = m.astype(dtype, copy=False)

    return m


def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal
    zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements.  `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 0,  0,  0],
           [ 4,  0,  0],
           [ 7,  8,  0],
           [10, 11, 12]])

    """
    m = asanyarray(m)
    mask = tri(*m.shape[-2:], k=k, dtype=bool)

    return where(mask, m, zeros(1, m.dtype))


def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array

    Examples
    --------
    >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]])

    """
    m = asanyarray(m)
    mask = tri(*m.shape[-2:], k=k-1, dtype=bool)

    return where(mask, zeros(1, m.dtype), m)


# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are powers of the input vector. The
    order of the powers is determined by the `increasing` boolean argument.
    Specifically, when `increasing` is False, the `i`-th output column is
    the input vector raised element-wise to the power of ``N - i - 1``. Such
    a matrix with a geometric progression in each row is named for
    Alexandre-Theophile Vandermonde.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output.  If `N` is not specified, a square
        array is returned (``N = len(x)``).
    increasing : bool, optional
        Order of the powers of the columns.  If True, the powers increase
        from left to right, if False (the default) they are reversed.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        Vandermonde matrix.  If `increasing` is False, the first column is
        ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
        True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also -------- polynomial.polynomial.polyvander Examples -------- >>> x = np.array([1, 2, 3, 5]) >>> N = 3 >>> np.vander(x, N) array([[ 1, 1, 1], [ 4, 2, 1], [ 9, 3, 1], [25, 5, 1]]) >>> np.column_stack([x**(N-1-i) for i in range(N)]) array([[ 1, 1, 1], [ 4, 2, 1], [ 9, 3, 1], [25, 5, 1]]) >>> x = np.array([1, 2, 3, 5]) >>> np.vander(x) array([[ 1, 1, 1, 1], [ 8, 4, 2, 1], [ 27, 9, 3, 1], [125, 25, 5, 1]]) >>> np.vander(x, increasing=True) array([[ 1, 1, 1, 1], [ 1, 2, 4, 8], [ 1, 3, 9, 27], [ 1, 5, 25, 125]]) The determinant of a square Vandermonde matrix is the product of the differences between the values of the input vector: >>> np.linalg.det(np.vander(x)) 48.000000000000043 >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) 48 """ x = asarray(x) if x.ndim != 1: raise ValueError("x must be a one-dimensional array or sequence.") if N is None: N = len(x) v = empty((len(x), N), dtype=promote_types(x.dtype, int)) tmp = v[:, ::-1] if not increasing else v if N > 0: tmp[:, 0] = 1 if N > 1: tmp[:, 1:] = x[:, None] multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) return v def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): """ Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape (N,) An array containing the x coordinates of the points to be histogrammed. y : array_like, shape (N,) An array containing the y coordinates of the points to be histogrammed. bins : int or array_like or [int, int] or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_area``. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `normed` is True. If `normed` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx+1,) The bin edges along the first dimension. yedges : ndarray, shape(ny+1,) The bin edges along the second dimension. See Also -------- histogram : 1D histogram histogramdd : Multidimensional histogram Notes ----- When `normed` is True, then the returned histogram is the sample density, defined such that the sum over bins of the product ``bin_value * bin_area`` is 1. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abscissa and `y` values on the ordinate axis. 
    Rather, `x` is histogrammed along the first dimension of the array
    (vertical), and `y` along the second dimension of the array
    (horizontal).  This ensures compatibility with `histogramdd`.

    Examples
    --------
    >>> import matplotlib as mpl
    >>> import matplotlib.pyplot as plt

    Construct a 2-D histogram with variable bin width. First define the bin
    edges:

    >>> xedges = [0, 1, 3, 5]
    >>> yedges = [0, 2, 3, 4, 6]

    Next we create a histogram H with random bin content:

    >>> x = np.random.normal(2, 1, 100)
    >>> y = np.random.normal(1, 1, 100)
    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
    >>> H = H.T  # Let each row list bins with common y range.

    :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:

    >>> fig = plt.figure(figsize=(7, 3))
    >>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
    ...         extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])

    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual
    edges:

    >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
    ...         aspect='equal')
    >>> X, Y = np.meshgrid(xedges, yedges)
    >>> ax.pcolormesh(X, Y, H)

    :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be
    used to display actual bin edges with interpolation:

    >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
    ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
    >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
    >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
    >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
    >>> im.set_data(xcenters, ycenters, H)
    >>> ax.images.append(im)
    >>> plt.show()

    """
    from numpy import histogramdd

    try:
        N = len(bins)
    except TypeError:
        N = 1

    if N != 1 and N != 2:
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]


def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    Assume `mask_func` is a function that, for a square array a of size
    ``(n, n)`` with a possible offset argument `k`, when called as
    ``mask_func(a, k)`` returns a new array with zeros in certain locations
    (functions like `triu` or `tril` do precisely this). Then this function
    returns the indices where the non-zero values would be located.

    Parameters
    ----------
    n : int
        The returned indices will be valid to access arrays of shape (n, n).
    mask_func : callable
        A function whose call signature is similar to that of `triu`,
        `tril`. That is, ``mask_func(x, k)`` returns a boolean array, shaped
        like `x`.  `k` is an optional argument to the function.
    k : scalar
        An optional argument which is passed through to `mask_func`.
        Functions like `triu`, `tril` take a second argument that is
        interpreted as an offset.

    Returns
    -------
    indices : tuple of arrays.
        The `n` arrays of indices corresponding to the locations where
        ``mask_func(np.ones((n, n)), k)`` is True.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    These are the indices that would allow you to access the upper
    triangular part of any 3x3 array:

    >>> iu = np.mask_indices(3, np.triu)

    For example, if `a` is a 3x3 array:

    >>> a = np.arange(9).reshape(3, 3)
    >>> a
    array([[0, 1, 2],
           [3, 4, 5],
           [6, 7, 8]])
    >>> a[iu]
    array([0, 1, 2, 4, 5, 8])

    An offset can be passed also to the masking function.
This gets us the indices starting on the first diagonal right of the main one: >>> iu1 = np.mask_indices(3, np.triu, 1) with which we now extract only three elements: >>> a[iu1] array([1, 2, 5]) """ m = ones((n, n), int) a = mask_func(m, k) return nonzero(a != 0) def tril_indices(n, k=0, m=None): """ Return the indices for the lower-triangle of an (n, m) array. Parameters ---------- n : int The row dimension of the arrays for which the returned indices will be valid. k : int, optional Diagonal offset (see `tril` for details). m : int, optional .. versionadded:: 1.9.0 The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. Returns ------- inds : tuple of arrays The indices for the triangle. The returned tuple contains two arrays, each with the indices along one dimension of the array. See also -------- triu_indices : similar function, for upper-triangular. mask_indices : generic function accepting an arbitrary mask function. tril, triu Notes ----- .. versionadded:: 1.4.0 Examples -------- Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: >>> il1 = np.tril_indices(4) >>> il2 = np.tril_indices(4, 2) Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Both for indexing: >>> a[il1] array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) And for assigning values: >>> a[il1] = -1 >>> a array([[-1, 1, 2, 3], [-1, -1, 6, 7], [-1, -1, -1, 11], [-1, -1, -1, -1]]) These cover almost the whole array (two diagonals right of the main one): >>> a[il2] = -10 >>> a array([[-10, -10, -10, 3], [-10, -10, -10, -10], [-10, -10, -10, -10], [-10, -10, -10, -10]]) """ return nonzero(tri(n, m, k=k, dtype=bool)) def tril_indices_from(arr, k=0): """ Return the indices for the lower-triangle of arr. See `tril_indices` for full details. Parameters ---------- arr : array_like The indices will be valid for square arrays whose dimensions are the same as arr. k : int, optional Diagonal offset (see `tril` for details). See Also -------- tril_indices, tril Notes ----- .. versionadded:: 1.4.0 """ if arr.ndim != 2: raise ValueError("input array must be 2-d") return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) def triu_indices(n, k=0, m=None): """ Return the indices for the upper-triangle of an (n, m) array. Parameters ---------- n : int The size of the arrays for which the returned indices will be valid. k : int, optional Diagonal offset (see `triu` for details). m : int, optional .. versionadded:: 1.9.0 The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. Returns ------- inds : tuple, shape(2) of ndarrays, shape(`n`) The indices for the triangle. The returned tuple contains two arrays, each with the indices along one dimension of the array. Can be used to slice a ndarray of shape(`n`, `n`). See also -------- tril_indices : similar function, for lower-triangular. mask_indices : generic function accepting an arbitrary mask function. triu, tril Notes ----- .. 
versionadded:: 1.4.0 Examples -------- Compute two different sets of indices to access 4x4 arrays, one for the upper triangular part starting at the main diagonal, and one starting two diagonals further right: >>> iu1 = np.triu_indices(4) >>> iu2 = np.triu_indices(4, 2) Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) Both for indexing: >>> a[iu1] array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) And for assigning values: >>> a[iu1] = -1 >>> a array([[-1, -1, -1, -1], [ 4, -1, -1, -1], [ 8, 9, -1, -1], [12, 13, 14, -1]]) These cover only a small part of the whole array (two diagonals right of the main one): >>> a[iu2] = -10 >>> a array([[ -1, -1, -10, -10], [ 4, -1, -1, -10], [ 8, 9, -1, -1], [ 12, 13, 14, -1]]) """ return nonzero(~tri(n, m, k=k-1, dtype=bool)) def triu_indices_from(arr, k=0): """ Return the indices for the upper-triangle of arr. See `triu_indices` for full details. Parameters ---------- arr : ndarray, shape(N, N) The indices will be valid for square arrays. k : int, optional Diagonal offset (see `triu` for details). Returns ------- triu_indices_from : tuple, shape(2) of ndarray, shape(N) Indices for the upper-triangle of `arr`. See Also -------- triu_indices, triu Notes ----- .. versionadded:: 1.4.0 """ if arr.ndim != 2: raise ValueError("input array must be 2-d") return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
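
# Hedged sketch (illustrative only) tying the triangle helpers together:
# ``tril`` keeps exactly the entries addressed by ``tril_indices``, the
# strict upper triangle from ``triu(..., k=1)`` is disjoint from them, and
# ``tri`` supplies the boolean mask underlying both.
if __name__ == '__main__':
    import numpy as np

    a = np.arange(16).reshape(4, 4)
    il = np.tril_indices(4)

    assert np.array_equal(np.tril(a)[il], a[il])   # kept entries intact
    assert np.triu(a, k=1)[il].sum() == 0          # complement is zeroed
    assert np.array_equal(np.tri(4, dtype=bool),
                          np.tril(np.ones((4, 4), bool)))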
25,817
26.205479
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/format.py
""" Define a simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for persisting a *single* arbitrary NumPy array on disk. The format stores all of the shape and dtype information necessary to reconstruct the array correctly even on another machine with a different architecture. The format is designed to be as simple as possible while achieving its limited goals. The ``.npz`` format is the standard format for persisting *multiple* NumPy arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` files, one for each array. Capabilities ------------ - Can represent all NumPy arrays including nested record arrays and object arrays. - Represents the data in its native binary form. - Supports Fortran-contiguous arrays directly. - Stores all of the necessary information to reconstruct the array including shape and dtype on a machine of a different architecture. Both little-endian and big-endian arrays are supported, and a file with little-endian numbers will yield a little-endian array on any machine reading the file. The types are described in terms of their actual sizes. For example, if a machine with a 64-bit C "long int" writes out an array with "long ints", a reading machine with 32-bit C "long ints" will yield an array with 64-bit integers. - Is straightforward to reverse engineer. Datasets often live longer than the programs that created them. A competent developer should be able to create a solution in their preferred programming language to read most ``.npy`` files that he has been given without much documentation. - Allows memory-mapping of the data. See `open_memmep`. - Can be read from a filelike stream object instead of an actual file. - Stores object arrays, i.e. arrays containing elements that are arbitrary Python objects. Files with object arrays are not to be mmapable, but can be read and written to disk. Limitations ----------- - Arbitrary subclasses of numpy.ndarray are not completely preserved. Subclasses will be accepted for writing, but only the array data will be written out. A regular numpy.ndarray object will be created upon reading the file. .. warning:: Due to limitations in the interpretation of structured dtypes, dtypes with fields with empty names will have the names replaced by 'f0', 'f1', etc. Such arrays will not round-trip through the format entirely accurately. The data is intact; only the field names will differ. We are working on a fix for this. This fix will not require a change in the file format. The arrays with such structures can still be saved and restored, and the correct dtype may be restored by using the ``loadedarray.view(correct_dtype)`` method. File extensions --------------- We recommend using the ``.npy`` and ``.npz`` extensions for files saved in this format. This is by no means a requirement; applications may wish to use these file formats but use an extension specific to the application. In the absence of an obvious alternative, however, we suggest using ``.npy`` and ``.npz``. Version numbering ----------------- The version numbering of these formats is independent of NumPy version numbering. If the format is upgraded, the code in `numpy.io` will still be able to read and write Version 1.0 files. Format Version 1.0 ------------------ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. ``\\x01``. 
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. ``\\x00``. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to make the total of ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible by 64 for alignment purposes. The dictionary contains three keys: "descr" : dtype.descr An object that can be passed as an argument to the `numpy.dtype` constructor to create the array's dtype. "fortran_order" : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. "shape" : tuple of int The shape of the array. For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. ``dtype.hasobject is True``), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on ``fortran_order``) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that ``shape=()`` means there is 1 element) by ``dtype.itemsize``. Format Version 2.0 ------------------ The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. `numpy.save` will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: "The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN." Notes ----- The ``.npy`` format, including reasons for creating it and a comparison of alternatives, is described fully in the "npy-format" NEP. """ from __future__ import division, absolute_import, print_function import numpy import sys import io import warnings from numpy.lib.utils import safe_eval from numpy.compat import asbytes, asstr, isfileobj, long, basestring if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length # instead of 2 bytes (H) allowing storage of large structured arrays def _check_version(version): if version not in [(1, 0), (2, 0), None]: msg = "we only support format version (1,0) and (2, 0), not %s" raise ValueError(msg % (version,)) def magic(major, minor): """ Return the magic string for the given file format version. Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted. 
""" if major < 0 or major > 255: raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") if sys.version_info[0] < 3: return MAGIC_PREFIX + chr(major) + chr(minor) else: return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int """ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) if sys.version_info[0] < 3: major, minor = map(ord, magic_str[-2:]) else: major, minor = magic_str[-2:] return major, minor def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str def header_data_from_array_1_0(array): """ Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file. """ d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: d['fortran_order'] = True else: # Totally non-contiguous data. We will have to make it C-contiguous # before writing. Note that we need to test for C_CONTIGUOUS first # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. d['fortran_order'] = False d['descr'] = dtype_to_descr(array.dtype) return d def _write_array_header(fp, d, version=None): """ Write the header for an array and returns the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. version: tuple or None None means use oldest that works explicit version will raise a ValueError if the format does not allow saving this data. 
Default: None Returns ------- version : tuple of int the file version which needs to be used to store the data """ import struct header = ["{"] for key, value in sorted(d.items()): # Need to use repr here, since we eval these when reading header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) header = asbytes(_filter_header(header)) hlen = len(header) + 1 # 1 for newline padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN) padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN) # Which version(s) we write depends on the total header size; v1 has a max of 65535 if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)): version = (1, 0) header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1) topad = padlen_v1 elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)): version = (2, 0) header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2) topad = padlen_v2 else: msg = "Header length %s too big for version=%s" msg %= (hlen, version) raise ValueError(msg) # Pad the header with spaces and a final newline such that the magic # string, the header-length short and the header are aligned on a # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes # aligned up to ARRAY_ALIGN on systems like Linux where mmap() # offset must be page-aligned (i.e. the beginning of the file). header = header + b' '*topad + b'\n' fp.write(header_prefix) fp.write(header) return version def write_array_header_1_0(fp, d): """ Write the header for an array using the 1.0 format. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (1, 0)) def write_array_header_2_0(fp, d): """ Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (2, 0)) def read_array_header_1_0(fp): """ Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(1, 0)) def read_array_header_2_0(fp): """ Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. 
""" return _read_array_header(fp, version=(2, 0)) def _filter_header(s): """Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : byte string Npy file header. Returns ------- header : str Cleaned up header. """ import tokenize if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO tokens = [] last_token_was_number = False # adding newline as python 2.7.5 workaround string = asstr(s) + "\n" for token in tokenize.generate_tokens(StringIO(string).readline): token_type = token[0] token_string = token[1] if (last_token_was_number and token_type == tokenize.NAME and token_string == "L"): continue else: tokens.append(token) last_token_was_number = (token_type == tokenize.NUMBER) # removing newline (see above) as python 2.7.5 workaround return tokenize.untokenize(tokens)[:-1] def _read_array_header(fp, version): """ see read_array_header_1_0 """ # Read an unsigned, little-endian short int which has the length of the # header. import struct if version == (1, 0): hlength_type = '<H' elif version == (2, 0): hlength_type = '<I' else: raise ValueError("Invalid version %r" % version) hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") header_length = struct.unpack(hlength_type, hlength_str)[0] header = _read_bytes(fp, header_length, "array header") # The header is a pretty-printed string representation of a literal # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool # "descr" : dtype.descr header = _filter_header(header) try: d = safe_eval(header) except SyntaxError as e: msg = "Cannot parse header: %r\nException: %r" raise ValueError(msg % (header, e)) if not isinstance(d, dict): msg = "Header is not a dictionary: %r" raise ValueError(msg % d) keys = sorted(d.keys()) if keys != ['descr', 'fortran_order', 'shape']: msg = "Header does not contain the correct keys: %r" raise ValueError(msg % (keys,)) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or not numpy.all([isinstance(x, (int, long)) for x in d['shape']])): msg = "shape is not valid: %r" raise ValueError(msg % (d['shape'],)) if not isinstance(d['fortran_order'], bool): msg = "fortran_order is not a valid bool: %r" raise ValueError(msg % (d['fortran_order'],)) try: dtype = numpy.dtype(d['descr']) except TypeError as e: msg = "descr is not a valid dtype descriptor: %r" raise ValueError(msg % (d['descr'],)) return d['shape'], d['fortran_order'], dtype def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): """ Write an array to an NPY file, including a header. If the array is neither C-contiguous nor Fortran-contiguous AND the file_like object is not a real file object, this function will have to copy data in memory. Parameters ---------- fp : file_like object An open, writable file object, or similar object with a ``.write()`` method. array : ndarray The array to write to disk. version : (int, int) or None, optional The version number of the format. None means use the oldest supported version that is able to store the data. Default: None allow_pickle : bool, optional Whether to allow writing pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. 
        These are only useful when pickling objects in object arrays on
        Python 3 to Python 2 compatible format.

    Raises
    ------
    ValueError
        If the array cannot be persisted.  This includes the case of
        allow_pickle=False and array being an object array.
    Various other errors
        If the array contains Python objects as part of its dtype, the
        process of pickling them may raise various errors if the objects
        are not picklable.

    """
    _check_version(version)
    used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
                                   version)
    # this warning can be removed when 1.9 has aged enough
    if version != (2, 0) and used_ver == (2, 0):
        warnings.warn("Stored array in format 2.0. It can only be "
                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)

    if array.itemsize == 0:
        buffersize = 0
    else:
        # Set buffer size to 16 MiB to hide the Python loop overhead.
        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)

    if array.dtype.hasobject:
        # We contain Python objects so we cannot write out the data
        # directly.  Instead, we will pickle it out with version 2 of the
        # pickle protocol.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be saved when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        pickle.dump(array, fp, protocol=2, **pickle_kwargs)
    elif array.flags.f_contiguous and not array.flags.c_contiguous:
        if isfileobj(fp):
            array.T.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered',
                                  'zerosize_ok'],
                    buffersize=buffersize, order='F'):
                fp.write(chunk.tobytes('C'))
    else:
        if isfileobj(fp):
            array.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered',
                                  'zerosize_ok'],
                    buffersize=buffersize, order='C'):
                fp.write(chunk.tobytes('C'))


def read_array(fp, allow_pickle=True, pickle_kwargs=None):
    """
    Read an array from an NPY file.

    Parameters
    ----------
    fp : file_like object
        If this is not a real file object, then this may take extra memory
        and time.
    allow_pickle : bool, optional
        Whether to allow reading pickled data.  Default: True
    pickle_kwargs : dict
        Additional keyword arguments to pass to pickle.load.  These are
        only useful when loading object arrays saved on Python 2 when
        using Python 3.

    Returns
    -------
    array : ndarray
        The array from the data on disk.

    Raises
    ------
    ValueError
        If the data is invalid, or allow_pickle=False and the file contains
        an object array.

    """
    version = read_magic(fp)
    _check_version(version)
    shape, fortran_order, dtype = _read_array_header(fp, version)
    if len(shape) == 0:
        count = 1
    else:
        count = numpy.multiply.reduce(shape, dtype=numpy.int64)

    # Now read the actual data.
    if dtype.hasobject:
        # The array contained Python objects. We need to unpickle the data.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be loaded when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        try:
            array = pickle.load(fp, **pickle_kwargs)
        except UnicodeError as err:
            if sys.version_info[0] >= 3:
                # Friendlier error message
                raise UnicodeError("Unpickling a python object failed: %r\n"
                                   "You may need to pass the encoding= option "
                                   "to numpy.load" % (err,))
            raise
    else:
        if isfileobj(fp):
            # We can use the fast fromfile() function.
            array = numpy.fromfile(fp, dtype=dtype, count=count)
        else:
            # This is not a real file. We have to read it the
            # memory-intensive way.
            # crc32 module fails on reads greater than 2 ** 32 bytes,
            # breaking large reads from gzip streams. Chunk reads to
            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
            # of the read. In non-chunked case count < max_read_count, so
            # only one read is performed.
            # Use np.ndarray instead of np.empty since the latter does
            # not correctly instantiate zero-width string dtypes; see
            # https://github.com/numpy/numpy/pull/6430
            array = numpy.ndarray(count, dtype=dtype)

            if dtype.itemsize > 0:
                # If dtype.itemsize == 0 then there's nothing more to read
                max_read_count = BUFFER_SIZE // min(BUFFER_SIZE,
                                                    dtype.itemsize)

                for i in range(0, count, max_read_count):
                    read_count = min(max_read_count, count - i)
                    read_size = int(read_count * dtype.itemsize)
                    data = _read_bytes(fp, read_size, "array data")
                    array[i:i+read_count] = numpy.frombuffer(
                        data, dtype=dtype, count=read_count)

        if fortran_order:
            array.shape = shape[::-1]
            array = array.transpose()
        else:
            array.shape = shape

    return array


def open_memmap(filename, mode='r+', dtype=None, shape=None,
                fortran_order=False, version=None):
    """
    Open a .npy file as a memory-mapped array.

    This may be used to read an existing file or create a new one.

    Parameters
    ----------
    filename : str
        The name of the file on disk.  This may *not* be a file-like
        object.
    mode : str, optional
        The mode in which to open the file; the default is 'r+'.  In
        addition to the standard file modes, 'c' is also accepted to mean
        "copy on write."  See `memmap` for the available mode strings.
    dtype : data-type, optional
        The data type of the array if we are creating a new file in "write"
        mode, if not, `dtype` is ignored.  The default value is None, which
        results in a data-type of `float64`.
    shape : tuple of int
        The shape of the array if we are creating a new file in "write"
        mode, in which case this parameter is required.  Otherwise, this
        parameter is ignored and is thus optional.
    fortran_order : bool, optional
        Whether the array should be Fortran-contiguous (True) or
        C-contiguous (False, the default) if we are creating a new file in
        "write" mode.
    version : tuple of int (major, minor) or None
        If the mode is a "write" mode, then this is the version of the file
        format used to create the file.  None means use the oldest
        supported version that is able to store the data.  Default: None

    Returns
    -------
    marray : memmap
        The memory-mapped array.

    Raises
    ------
    ValueError
        If the data or the mode is invalid.
    IOError
        If the file is not found or cannot be opened correctly.

    See Also
    --------
    memmap

    """
    if not isinstance(filename, basestring):
        raise ValueError("Filename must be a string.  Memmap cannot use"
                         " existing file handles.")

    if 'w' in mode:
        # We are creating the file, not reading it.
        # Check if we ought to create the file.
        _check_version(version)
        # Ensure that the given dtype is an authentic dtype object rather
        # than just something that can be interpreted as a dtype object.
        dtype = numpy.dtype(dtype)
        if dtype.hasobject:
            msg = "Array can't be memory-mapped: Python objects in dtype."
            raise ValueError(msg)
        d = dict(
            descr=dtype_to_descr(dtype),
            fortran_order=fortran_order,
            shape=shape,
        )
        # If we got here, then it should be safe to create the file.
        fp = open(filename, mode+'b')
        try:
            used_ver = _write_array_header(fp, d, version)
            # this warning can be removed when 1.9 has aged enough
            if version != (2, 0) and used_ver == (2, 0):
                warnings.warn("Stored array in format 2.0. It can only be "
                              "read by NumPy >= 1.9", UserWarning,
                              stacklevel=2)
            offset = fp.tell()
        finally:
            fp.close()
    else:
        # Read the header of the file first.
        fp = open(filename, 'rb')
        try:
            version = read_magic(fp)
            _check_version(version)

            shape, fortran_order, dtype = _read_array_header(fp, version)
            if dtype.hasobject:
                msg = "Array can't be memory-mapped: Python objects in dtype."
                raise ValueError(msg)
            offset = fp.tell()
        finally:
            fp.close()

    if fortran_order:
        order = 'F'
    else:
        order = 'C'

    # We need to change a write-only mode to a read-write mode since we've
    # already written data to the file.
    if mode == 'w+':
        mode = 'r+'

    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
                          mode=mode, offset=offset)

    return marray


def _read_bytes(fp, size, error_template="ran out of data"):
    """
    Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
    Non-blocking objects only supported if they derive from io objects.

    Required as e.g. ZipExtFile in python 2.6 can return less data than
    requested.
    """
    data = bytes()
    while True:
        # io files (default in python3) return None or raise on
        # would-block, python2 file will truncate, probably nothing can be
        # done about that.  note that regular files can't be non-blocking
        try:
            r = fp.read(size - len(data))
            data += r
            if len(r) == 0 or len(data) == size:
                break
        except io.BlockingIOError:
            pass
    if len(data) != size:
        msg = "EOF: reading %s, expected %d bytes got %d"
        raise ValueError(msg % (error_template, size, len(data)))
    else:
        return data
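
# Hedged round-trip sketch (illustrative only): writes an array to an
# in-memory buffer with this module's writer, then re-parses the magic
# string, header and data.  No file on disk is assumed.
if __name__ == '__main__':
    buf = io.BytesIO()
    arr = numpy.arange(6, dtype=numpy.float64).reshape(2, 3)

    write_array(buf, arr)                  # magic + header + raw data
    buf.seek(0)
    assert read_magic(buf) == (1, 0)       # small header -> version 1.0
    shape, fortran_order, dtype = _read_array_header(buf, (1, 0))
    assert shape == (2, 3) and not fortran_order and dtype == numpy.float64

    buf.seek(0)
    assert numpy.array_equal(read_array(buf), arr)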
29,156
34.002401
88
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/arrayterator.py
""" A buffered iterator for big arrays. This module solves the problem of iterating over a big file-based array without having to read it into memory. The `Arrayterator` class wraps an array object, and when iterated it will return sub-arrays with at most a user-specified number of elements. """ from __future__ import division, absolute_import, print_function from operator import mul from functools import reduce from numpy.compat import long __all__ = ['Arrayterator'] class Arrayterator(object): """ Buffered iterator for big arrays. `Arrayterator` creates a buffered iterator for reading big arrays in small contiguous blocks. The class is useful for objects stored in the file system. It allows iteration over the object *without* reading everything in memory; instead, small blocks are read and iterated over. `Arrayterator` can be used with any object that supports multidimensional slices. This includes NumPy arrays, but also variables from Scientific.IO.NetCDF or pynetcdf for example. Parameters ---------- var : array_like The object to iterate over. buf_size : int, optional The buffer size. If `buf_size` is supplied, the maximum amount of data that will be read into memory is `buf_size` elements. Default is None, which will read as many element as possible into memory. Attributes ---------- var buf_size start stop step shape flat See Also -------- ndenumerate : Multidimensional array iterator. flatiter : Flat array iterator. memmap : Create a memory-map to an array stored in a binary file on disk. Notes ----- The algorithm works by first finding a "running dimension", along which the blocks will be extracted. Given an array of dimensions ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the first dimension will be used. If, on the other hand, ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. Blocks are extracted along this dimension, and when the last block is returned the process continues from the next dimension, until all elements have been read. Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape (3, 4, 5, 6) Now we can iterate over ``a_itor``, and it will return arrays of size two. Since `buf_size` was smaller than any dimension, the first dimension will be iterated over first: >>> for subarr in a_itor: ... if not subarr.all(): ... print(subarr, subarr.shape) ... [[[[0 1]]]] (1, 1, 1, 2) """ def __init__(self, var, buf_size=None): self.var = var self.buf_size = buf_size self.start = [0 for dim in var.shape] self.stop = [dim for dim in var.shape] self.step = [1 for dim in var.shape] def __getattr__(self, attr): return getattr(self.var, attr) def __getitem__(self, index): """ Return a new arrayterator. """ # Fix index, handling ellipsis and incomplete slices. if not isinstance(index, tuple): index = (index,) fixed = [] length, dims = len(index), self.ndim for slice_ in index: if slice_ is Ellipsis: fixed.extend([slice(None)] * (dims-length+1)) length = len(fixed) elif isinstance(slice_, (int, long)): fixed.append(slice(slice_, slice_+1, 1)) else: fixed.append(slice_) index = tuple(fixed) if len(index) < dims: index += (slice(None),) * (dims-len(index)) # Return a new arrayterator object. 
        out = self.__class__(self.var, self.buf_size)
        for i, (start, stop, step, slice_) in enumerate(
                zip(self.start, self.stop, self.step, index)):
            out.start[i] = start + (slice_.start or 0)
            out.step[i] = step * (slice_.step or 1)
            out.stop[i] = start + (slice_.stop or stop-start)
            out.stop[i] = min(stop, out.stop[i])
        return out

    def __array__(self):
        """
        Return corresponding data.

        """
        slice_ = tuple(slice(*t) for t in zip(
                self.start, self.stop, self.step))
        return self.var[slice_]

    @property
    def flat(self):
        """
        A 1-D flat iterator for Arrayterator objects.

        This iterator returns elements of the array to be iterated over in
        `Arrayterator` one by one. It is similar to `flatiter`.

        See Also
        --------
        Arrayterator
        flatiter

        Examples
        --------
        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        >>> a_itor = np.lib.Arrayterator(a, 2)

        >>> for subarr in a_itor.flat:
        ...     if not subarr:
        ...         print(subarr, type(subarr))
        ...
        0 <type 'numpy.int32'>

        """
        for block in self:
            for value in block.flat:
                yield value

    @property
    def shape(self):
        """
        The shape of the array to be iterated over.

        For an example, see `Arrayterator`.

        """
        return tuple(((stop-start-1)//step+1) for start, stop, step in
                     zip(self.start, self.stop, self.step))

    def __iter__(self):
        # Skip arrays with degenerate dimensions
        if [dim for dim in self.shape if dim <= 0]:
            return

        start = self.start[:]
        stop = self.stop[:]
        step = self.step[:]
        ndims = self.var.ndim

        while True:
            count = self.buf_size or reduce(mul, self.shape)

            # iterate over each dimension, looking for the running
            # dimension (i.e., the dimension from which the blocks
            # will be built)
            rundim = 0
            for i in range(ndims-1, -1, -1):
                # if count is zero we ran out of elements to read
                # along higher dimensions, so we read only a single position
                if count == 0:
                    stop[i] = start[i]+1
                elif count <= self.shape[i]:
                    # limit along this dimension
                    stop[i] = start[i] + count*step[i]
                    rundim = i
                else:
                    # read everything along this dimension
                    stop[i] = self.stop[i]
                stop[i] = min(self.stop[i], stop[i])
                count = count//self.shape[i]

            # yield a block
            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
            yield self.var[slice_]

            # Update start position, taking care of overflow to
            # other dimensions
            start[rundim] = stop[rundim]    # start where we stopped
            for i in range(ndims-1, 0, -1):
                if start[i] >= self.stop[i]:
                    start[i] = self.start[i]
                    start[i-1] += self.step[i-1]
            if start[0] >= self.stop[0]:
                return
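
# Hedged sketch (illustrative only): stream a 4-D array in blocks of at
# most 24 elements and check that the flattened blocks, in order,
# reproduce the original data.
if __name__ == '__main__':
    import numpy as np

    a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    blocks = [block.ravel() for block in Arrayterator(a, buf_size=24)]

    assert max(len(b) for b in blocks) <= 24
    assert np.array_equal(np.concatenate(blocks), a.ravel())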
7,191
30.823009
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/_iotools.py
"""A collection of functions designed to help I/O with ascii files. """ from __future__ import division, absolute_import, print_function __docformat__ = "restructuredtext en" import sys import numpy as np import numpy.core.numeric as nx from numpy.compat import asbytes, asunicode, bytes, asbytes_nested, basestring if sys.version_info[0] >= 3: from builtins import bool, int, float, complex, object, str unicode = str else: from __builtin__ import bool, int, float, complex, object, unicode, str def _decode_line(line, encoding=None): """Decode bytes from binary input streams. Defaults to decoding from 'latin1'. That differs from the behavior of np.compat.asunicode that decodes from 'ascii'. Parameters ---------- line : str or bytes Line to be decoded. Returns ------- decoded_line : unicode Unicode in Python 2, a str (unicode) in Python 3. """ if type(line) is bytes: if encoding is None: line = line.decode('latin1') else: line = line.decode(encoding) return line def _is_string_like(obj): """ Check whether obj behaves like a string. """ try: obj + '' except (TypeError, ValueError): return False return True def _is_bytes_like(obj): """ Check whether obj behaves like a bytes object. """ try: obj + b'' except (TypeError, ValueError): return False return True def _to_filehandle(fname, flag='r', return_opened=False): """ Returns the filehandle corresponding to a string or a file. If the string ends in '.gz', the file is automatically unzipped. Parameters ---------- fname : string, filehandle Name of the file whose filehandle must be returned. flag : string, optional Flag indicating the status of the file ('r' for read, 'w' for write). return_opened : boolean, optional Whether to return the opening status of the file. """ if _is_string_like(fname): if fname.endswith('.gz'): import gzip fhd = gzip.open(fname, flag) elif fname.endswith('.bz2'): import bz2 fhd = bz2.BZ2File(fname) else: fhd = file(fname, flag) opened = True elif hasattr(fname, 'seek'): fhd = fname opened = False else: raise ValueError('fname must be a string or file handle') if return_opened: return fhd, opened return fhd def has_nested_fields(ndtype): """ Returns whether one or several fields of a dtype are nested. Parameters ---------- ndtype : dtype Data-type of a structured array. Raises ------ AttributeError If `ndtype` does not have a `names` attribute. Examples -------- >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) >>> np.lib._iotools.has_nested_fields(dt) False """ for name in ndtype.names or (): if ndtype[name].names: return True return False def flatten_dtype(ndtype, flatten_base=False): """ Unpack a structured data-type by collapsing nested fields and/or fields with a shape. Note that the field names are lost. Parameters ---------- ndtype : dtype The datatype to collapse flatten_base : bool, optional If True, transform a field with a shape into several fields. Default is False. Examples -------- >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... 
('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32')] """ names = ndtype.names if names is None: if flatten_base: return [ndtype.base] * int(np.prod(ndtype.shape)) return [ndtype.base] else: types = [] for field in names: info = ndtype.fields[field] flat_dt = flatten_dtype(info[0], flatten_base) types.extend(flat_dt) return types class LineSplitter(object): """ Object to split a string at a given delimiter or at given places. Parameters ---------- delimiter : str, int, or sequence of ints, optional If a string, character used to delimit consecutive fields. If an integer or a sequence of integers, width(s) of each field. comments : str, optional Character used to mark the beginning of a comment. Default is '#'. autostrip : bool, optional Whether to strip each individual field. Default is True. """ def autostrip(self, method): """ Wrapper to strip each member of the output of `method`. Parameters ---------- method : function Function that takes a single argument and returns a sequence of strings. Returns ------- wrapped : function The result of wrapping `method`. `wrapped` takes a single input argument and returns a list of strings that are stripped of white-space. """ return lambda input: [_.strip() for _ in method(input)] # def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None): delimiter = _decode_line(delimiter) comments = _decode_line(comments) self.comments = comments # Delimiter is a character if (delimiter is None) or isinstance(delimiter, basestring): delimiter = delimiter or None _handyman = self._delimited_splitter # Delimiter is a list of field widths elif hasattr(delimiter, '__iter__'): _handyman = self._variablewidth_splitter idx = np.cumsum([0] + list(delimiter)) delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] # Delimiter is a single integer elif int(delimiter): (_handyman, delimiter) = ( self._fixedwidth_splitter, int(delimiter)) else: (_handyman, delimiter) = (self._delimited_splitter, None) self.delimiter = delimiter if autostrip: self._handyman = self.autostrip(_handyman) else: self._handyman = _handyman self.encoding = encoding # def _delimited_splitter(self, line): """Chop off comments, strip, and split at delimiter. """ if self.comments is not None: line = line.split(self.comments)[0] line = line.strip(" \r\n") if not line: return [] return line.split(self.delimiter) # def _fixedwidth_splitter(self, line): if self.comments is not None: line = line.split(self.comments)[0] line = line.strip("\r\n") if not line: return [] fixed = self.delimiter slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] return [line[s] for s in slices] # def _variablewidth_splitter(self, line): if self.comments is not None: line = line.split(self.comments)[0] if not line: return [] slices = self.delimiter return [line[s] for s in slices] # def __call__(self, line): return self._handyman(_decode_line(line, self.encoding)) class NameValidator(object): """ Object to validate a list of strings to use as field names. The strings are stripped of any non alphanumeric character, and spaces are replaced by '_'. During instantiation, the user can define a list of names to exclude, as well as a list of invalid characters. Names in the exclusion list are appended a '_' character. 
    Once an instance has been created, it can be called with a list of
    names, and a tuple of valid names will be created.  The `__call__`
    method accepts an optional keyword, `defaultfmt`, that sets the format
    string used for default names in case of ambiguity. By default this is
    ``"f%i"``, so that names will default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ('file_', 'field2', 'with_space', 'CaSe')

    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive='lower')
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ('excl_', 'field2', 'no_', 'with_space', 'case')

    """
    #
    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
            raise ValueError(msg)
        #
        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : tuple of str
            The tuple of validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.

        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        if isinstance(names, basestring):
            names = [names, ]
        if nbfields is not None:
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
deletechars = self.deletechars excludelist = self.excludelist case_converter = self.case_converter replace_space = self.replace_space # Initializes some variables ... validatednames = [] seen = dict() nbempty = 0 # for item in names: item = case_converter(item).strip() if replace_space: item = item.replace(' ', replace_space) item = ''.join([c for c in item if c not in deletechars]) if item == '': item = defaultfmt % nbempty while item in names: nbempty += 1 item = defaultfmt % nbempty nbempty += 1 elif item in excludelist: item += '_' cnt = seen.get(item, 0) if cnt > 0: validatednames.append(item + '_%d' % cnt) else: validatednames.append(item) seen[item] = cnt + 1 return tuple(validatednames) # def __call__(self, names, defaultfmt="f%i", nbfields=None): return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) def str2bool(value): """ Tries to transform a string supposed to represent a boolean to a boolean. Parameters ---------- value : str The string that is transformed to a boolean. Returns ------- boolval : bool The boolean representation of `value`. Raises ------ ValueError If the string is not 'True' or 'False' (case independent) Examples -------- >>> np.lib._iotools.str2bool('TRUE') True >>> np.lib._iotools.str2bool('false') False """ value = value.upper() if value == 'TRUE': return True elif value == 'FALSE': return False else: raise ValueError("Invalid boolean") class ConverterError(Exception): """ Exception raised when an error occurs in a converter for string values. """ pass class ConverterLockError(ConverterError): """ Exception raised when an attempt is made to upgrade a locked converter. """ pass class ConversionWarning(UserWarning): """ Warning issued when a string converter has a problem. Notes ----- In `genfromtxt` a `ConversionWarning` is issued if raising exceptions is explicitly suppressed with the "invalid_raise" keyword. """ pass class StringConverter(object): """ Factory class for function transforming a string into another object (int, float). After initialization, an instance can be called to transform a string into another object. If the string is recognized as representing a missing value, a default value is returned. Attributes ---------- func : function Function used for the conversion. default : any Default value to return when the input corresponds to a missing value. type : type Type of the output. _status : int Integer representing the order of the conversion. _mapper : sequence of tuples Sequence of tuples (dtype, function, default value) to evaluate in order. _locked : bool Holds `locked` parameter. Parameters ---------- dtype_or_func : {None, dtype, function}, optional If a `dtype`, specifies the input data type, used to define a basic function and a default value for missing data. For example, when `dtype` is float, the `func` attribute is set to `float` and the default value to `np.nan`. If a function, this function is used to convert a string to another object. In this case, it is recommended to give an associated default value as input. default : any, optional Value to return by default, that is, when the string to be converted is flagged as missing. If not given, `StringConverter` tries to supply a reasonable default value. missing_values : {None, sequence of str}, optional ``None`` or sequence of strings indicating a missing value. If ``None`` then missing values are indicated by empty entries. The default is ``None``. locked : bool, optional Whether the StringConverter should be locked to prevent automatic upgrade or not. 
    Default is False.

    """
    #
    _mapper = [(nx.bool_, str2bool, False),
               (nx.integer, int, -1)]

    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.integer is nx.int32.
    if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))

    _mapper.extend([(nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    (nx.unicode_, asunicode, '???'),
                    (nx.string_, asbytes, '???')])

    (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)

    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype
    #

    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type
    #

    # This is a bit annoying. We want to return the "general" type in most
    # cases (ie. "string" rather than "S10"), but we want to return the
    # specific type for datetime64 (ie. "datetime64[us]" rather than
    # "datetime64").
    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type
    #

    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
    Upgrade the mapper of a StringConverter by adding a new function and
    its corresponding default.

    The input function (or sequence of functions) and its associated
    default value (if any) is inserted in penultimate position of the
    mapper.  The corresponding type is estimated from the dtype of the
    default value.

    Parameters
    ----------
    func : var
        Function, or sequence of functions

    Examples
    --------
    >>> import dateutil.parser
    >>> import datetime
    >>> dateparser = dateutil.parser.parse
    >>> defaultdate = datetime.date(2000, 1, 1)
    >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # func is a single function
        if hasattr(func, '__call__'):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                # Pad with None entries; extend (not append) so the padding
                # values stay scalars rather than one nested list.
                default.extend([None] * (len(func) - len(default)))
            for (fct, dft) in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
    #

    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
try: self.func = None dtype = np.dtype(dtype_or_func) except TypeError: # dtype_or_func must be a function, then if not hasattr(dtype_or_func, '__call__'): errmsg = ("The input argument `dtype` is neither a" " function nor a dtype (got '%s' instead)") raise TypeError(errmsg % type(dtype_or_func)) # Set the function self.func = dtype_or_func # If we don't have a default, try to guess it or set it to # None if default is None: try: default = self.func('0') except ValueError: default = None dtype = self._getdtype(default) # Set the status according to the dtype _status = -1 for (i, (deftype, func, default_def)) in enumerate(self._mapper): if np.issubdtype(dtype.type, deftype): _status = i if default is None: self.default = default_def else: self.default = default break # if a converter for the specific dtype is available use that last_func = func for (i, (deftype, func, default_def)) in enumerate(self._mapper): if dtype.type == deftype: _status = i last_func = func if default is None: self.default = default_def else: self.default = default break func = last_func if _status == -1: # We never found a match in the _mapper... _status = 0 self.default = default self._status = _status # If the input was a dtype, set the function to the last we saw if self.func is None: self.func = func # If the status is 1 (int), change the function to # something more robust. if self.func == self._mapper[1][1]: if issubclass(dtype.type, np.uint64): self.func = np.uint64 elif issubclass(dtype.type, np.int64): self.func = np.int64 else: self.func = lambda x: int(float(x)) # Store the list of strings corresponding to missing values. if missing_values is None: self.missing_values = set(['']) else: if isinstance(missing_values, basestring): missing_values = missing_values.split(",") self.missing_values = set(list(missing_values) + ['']) # self._callingfunction = self._strict_call self.type = self._dtypeortype(dtype) self._checked = False self._initial_default = default # def _loose_call(self, value): try: return self.func(value) except ValueError: return self.default # def _strict_call(self, value): try: # We check if we can convert the value using the current function new_value = self.func(value) # In addition to having to check whether func can convert the # value, we also have to make sure that we don't get overflow # errors for integers. if self.func is int: try: np.array(value, dtype=self.type) except OverflowError: raise ValueError # We're still here so we can now return the new value return new_value except ValueError: if value.strip() in self.missing_values: if not self._status: self._checked = False return self.default raise ValueError("Cannot convert string '%s'" % value) # def __call__(self, value): return self._callingfunction(value) # def upgrade(self, value): """ Find the best converter for a given string, and return the result. The supplied string `value` is converted by testing different converters in order. First the `func` method of the `StringConverter` instance is tried, if this fails other available converters are tried. The order in which these other converters are tried is determined by the `_status` attribute of the instance. Parameters ---------- value : str The string to convert. Returns ------- out : any The result of converting `value` with the appropriate converter. """ self._checked = True try: return self._strict_call(value) except ValueError: # Raise an exception if we locked the converter... 
if self._locked: errmsg = "Converter is locked and cannot be upgraded" raise ConverterLockError(errmsg) _statusmax = len(self._mapper) # Complains if we try to upgrade by the maximum _status = self._status if _status == _statusmax: errmsg = "Could not find a valid conversion function" raise ConverterError(errmsg) elif _status < _statusmax - 1: _status += 1 (self.type, self.func, default) = self._mapper[_status] self._status = _status if self._initial_default is not None: self.default = self._initial_default else: self.default = default return self.upgrade(value) def iterupgrade(self, value): self._checked = True if not hasattr(value, '__iter__'): value = (value,) _strict_call = self._strict_call try: for _m in value: _strict_call(_m) except ValueError: # Raise an exception if we locked the converter... if self._locked: errmsg = "Converter is locked and cannot be upgraded" raise ConverterLockError(errmsg) _statusmax = len(self._mapper) # Complains if we try to upgrade by the maximum _status = self._status if _status == _statusmax: raise ConverterError( "Could not find a valid conversion function" ) elif _status < _statusmax - 1: _status += 1 (self.type, self.func, default) = self._mapper[_status] if self._initial_default is not None: self.default = self._initial_default else: self.default = default self._status = _status self.iterupgrade(value) def update(self, func, default=None, testing_value=None, missing_values='', locked=False): """ Set StringConverter attributes directly. Parameters ---------- func : function Conversion function. default : any, optional Value to return by default, that is, when the string to be converted is flagged as missing. If not given, `StringConverter` tries to supply a reasonable default value. testing_value : str, optional A string representing a standard input value of the converter. This string is used to help defining a reasonable default value. missing_values : {sequence of str, None}, optional Sequence of strings indicating a missing value. If ``None``, then the existing `missing_values` are cleared. The default is `''`. locked : bool, optional Whether the StringConverter should be locked to prevent automatic upgrade or not. Default is False. Notes ----- `update` takes the same parameters as the constructor of `StringConverter`, except that `func` does not accept a `dtype` whereas `dtype_or_func` in the constructor does. """ self.func = func self._locked = locked # Don't reset the default to None if we can avoid it if default is not None: self.default = default self.type = self._dtypeortype(self._getdtype(default)) else: try: tester = func(testing_value or '1') except (TypeError, ValueError): tester = None self.type = self._dtypeortype(self._getdtype(tester)) # Add the missing values to the existing set or clear it. if missing_values is None: # Clear all missing values even though the ctor initializes it to # set(['']) when the argument is None. self.missing_values = set() else: if not np.iterable(missing_values): missing_values = [missing_values] if not all(isinstance(v, basestring) for v in missing_values): raise TypeError("missing_values must be strings or unicode") self.missing_values.update(missing_values) def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): """ Convenience function to create a `np.dtype` object. The function processes the input `dtype` and matches it with the given names. Parameters ---------- ndtype : var Definition of the dtype. 
Can be any string or dictionary recognized by the `np.dtype` function, or a sequence of types. names : str or sequence, optional Sequence of strings to use as field names for a structured dtype. For convenience, `names` can be a string of a comma-separated list of names. defaultfmt : str, optional Format string used to define missing names, such as ``"f%i"`` (default) or ``"fields_%02i"``. validationargs : optional A series of optional arguments used to initialize a `NameValidator`. Examples -------- >>> np.lib._iotools.easy_dtype(float) dtype('float64') >>> np.lib._iotools.easy_dtype("i4, f8") dtype([('f0', '<i4'), ('f1', '<f8')]) >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") dtype([('field_000', '<i4'), ('field_001', '<f8')]) >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')]) >>> np.lib._iotools.easy_dtype(float, names="a,b,c") dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')]) """ try: ndtype = np.dtype(ndtype) except TypeError: validate = NameValidator(**validationargs) nbfields = len(ndtype) if names is None: names = [''] * len(ndtype) elif isinstance(names, basestring): names = names.split(",") names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt) ndtype = np.dtype(dict(formats=ndtype, names=names)) else: nbtypes = len(ndtype) # Explicit names if names is not None: validate = NameValidator(**validationargs) if isinstance(names, basestring): names = names.split(",") # Simple dtype: repeat to match the nb of names if nbtypes == 0: formats = tuple([ndtype.type] * len(names)) names = validate(names, defaultfmt=defaultfmt) ndtype = np.dtype(list(zip(names, formats))) # Structured dtype: just validate the names as needed else: ndtype.names = validate(names, nbfields=nbtypes, defaultfmt=defaultfmt) # No implicit names elif (nbtypes > 0): validate = NameValidator(**validationargs) # Default initial names : should we change the format ? if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and (defaultfmt != "f%i")): ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt) # Explicit initial names : just validate else: ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt) return ndtype
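
# Hedged usage sketch (editorial addition, not part of the upstream module):
# ties the helpers above together on a tiny CSV-like line. All names are
# defined in this module; the sample values are made up for illustration.
if __name__ == '__main__':
    splitter = LineSplitter(delimiter=',')
    print(splitter(" 1, 2.5, abc  # trailing comment"))
    # -> ['1', '2.5', 'abc']  (comment chopped, fields stripped)

    validator = NameValidator(case_sensitive='lower')
    print(validator(['A', 'A', 'with space']))
    # -> ('a', 'a_1', 'with_space')  (duplicates get a numeric suffix)

    convert = StringConverter(int, default=-1)
    print(convert('42'), convert(''))
    # -> 42 -1  (the empty string counts as a missing value)

    print(easy_dtype("i4, f8", names="x,y"))
    # -> dtype([('x', '<i4'), ('y', '<f8')])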
32,704
33.281971
84
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/financial.py
"""Some simple financial calculations patterned after spreadsheet computations. There is some complexity in each function so that the functions behave like ufuncs with broadcasting and being able to be called with scalars or arrays (or other sequences). Functions support the :class:`decimal.Decimal` type unless otherwise stated. """ from __future__ import division, absolute_import, print_function from decimal import Decimal import numpy as np __all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', 'irr', 'npv', 'mirr'] _when_to_num = {'end':0, 'begin':1, 'e':0, 'b':1, 0:0, 1:1, 'beginning':1, 'start':1, 'finish':0} def _convert_when(when): #Test to see if when has already been converted to ndarray #This will happen if one function calls another, for example ppmt if isinstance(when, np.ndarray): return when try: return _when_to_num[when] except (KeyError, TypeError): return [_when_to_num[x] for x in when] def fv(rate, nper, pmt, pv, when='end'): """ Compute the future value. Given: * a present value, `pv` * an interest `rate` compounded once per period, of which there are * `nper` total * a (fixed) payment, `pmt`, paid either * at the beginning (`when` = {'begin', 1}) or the end (`when` = {'end', 0}) of each period Return: the value at the end of the `nper` periods Parameters ---------- rate : scalar or array_like of shape(M, ) Rate of interest as decimal (not per cent) per period nper : scalar or array_like of shape(M, ) Number of compounding periods pmt : scalar or array_like of shape(M, ) Payment pv : scalar or array_like of shape(M, ) Present value when : {{'begin', 1}, {'end', 0}}, {string, int}, optional When payments are due ('begin' (1) or 'end' (0)). Defaults to {'end', 0}. Returns ------- out : ndarray Future values. If all input is scalar, returns a scalar float. If any input is array_like, returns future values for each input element. If multiple inputs are array_like, they all must have the same shape. Notes ----- The future value is computed by solving the equation:: fv + pv*(1+rate)**nper + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 or, when ``rate == 0``:: fv + pv + pmt * nper == 0 References ---------- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information Standards (OASIS). Billerica, MA, USA. [ODT Document]. Available: http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula OpenDocument-formula-20090508.odt Examples -------- What is the future value after 10 years of saving $100 now, with an additional monthly savings of $100. Assume the interest rate is 5% (annually) compounded monthly? >>> np.fv(0.05/12, 10*12, -100, -100) 15692.928894335748 By convention, the negative sign represents cash flow out (i.e. money not available today). Thus, saving $100 a month at 5% annual interest leads to $15,692.93 available to spend in 10 years. If any input is array_like, returns an array of equal shape. Let's compare different interest rates from the example above. 
>>> a = np.array((0.05, 0.06, 0.07))/12 >>> np.fv(a, 10*12, -100, -100) array([ 15692.92889434, 16569.87435405, 17509.44688102]) """ when = _convert_when(when) (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) temp = (1+rate)**nper fact = np.where(rate == 0, nper, (1 + rate*when)*(temp - 1)/rate) return -(pv*temp + pmt*fact) def pmt(rate, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal plus interest. Given: * a present value, `pv` (e.g., an amount borrowed) * a future value, `fv` (e.g., 0) * an interest `rate` compounded once per period, of which there are * `nper` total * and (optional) specification of whether payment is made at the beginning (`when` = {'begin', 1}) or the end (`when` = {'end', 0}) of each period Return: the (fixed) periodic payment. Parameters ---------- rate : array_like Rate of interest (per period) nper : array_like Number of compounding periods pv : array_like Present value fv : array_like, optional Future value (default = 0) when : {{'begin', 1}, {'end', 0}}, {string, int} When payments are due ('begin' (1) or 'end' (0)) Returns ------- out : ndarray Payment against loan plus interest. If all input is scalar, returns a scalar float. If any input is array_like, returns payment for each input element. If multiple inputs are array_like, they all must have the same shape. Notes ----- The payment is computed by solving the equation:: fv + pv*(1 + rate)**nper + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 or, when ``rate == 0``:: fv + pv + pmt * nper == 0 for ``pmt``. Note that computing a monthly mortgage payment is only one use for this function. For example, pmt returns the periodic deposit one must make to achieve a specified future balance given an initial deposit, a fixed, periodically compounded interest rate, and the total number of periods. References ---------- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information Standards (OASIS). Billerica, MA, USA. [ODT Document]. Available: http://www.oasis-open.org/committees/documents.php ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt Examples -------- What is the monthly payment needed to pay off a $200,000 loan in 15 years at an annual interest rate of 7.5%? >>> np.pmt(0.075/12, 12*15, 200000) -1854.0247200054619 In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained today, a monthly payment of $1,854.02 would be required. Note that this example illustrates usage of `fv` having a default value of 0. """ when = _convert_when(when) (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when]) temp = (1 + rate)**nper mask = (rate == 0) masked_rate = np.where(mask, 1, rate) fact = np.where(mask != 0, nper, (1 + masked_rate*when)*(temp - 1)/masked_rate) return -(fv + pv*temp) / fact def nper(rate, pmt, pv, fv=0, when='end'): """ Compute the number of periodic payments. :class:`decimal.Decimal` type is not supported. 
    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    pmt : array_like
        Payment
    pv : array_like
        Present value
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Notes
    -----
    The number of periods ``nper`` is computed by solving the equation::

     fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0

    but if ``rate = 0`` then::

     fv + pv + pmt*nper = 0

    Examples
    --------
    If you only had $150/month to pay towards the loan, how long would it take
    to pay-off a loan of $8,000 at 7% annual interest?

    >>> print(round(np.nper(0.07/12, -150, 8000), 5))
    64.07335

    So, over 64 months would be required to pay off the loan.

    The same analysis could be done with several different interest rates
    and/or payments and/or total amounts to produce an entire table.

    >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12,
    ...                    -150   : -99     : 50    ,
    ...                    8000   : 9001    : 1000]))
    array([[[  64.07334877,   74.06368256],
            [ 108.07548412,  127.99022654]],
           [[  66.12443902,   76.87897353],
            [ 114.70165583,  137.90124779]]])

    """
    when = _convert_when(when)
    (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])

    use_zero_rate = False
    with np.errstate(divide="raise"):
        try:
            z = pmt*(1+rate*when)/rate
        except FloatingPointError:
            use_zero_rate = True

    if use_zero_rate:
        # rate == 0, so solve fv + pv + pmt*nper = 0 for nper directly,
        # matching the zero-rate branch (A) below.
        return -(fv + pv) / pmt
    else:
        A = -(fv + pv)/(pmt+0)
        B = np.log((-fv+z) / (pv+z))/np.log(1+rate)
        return np.where(rate == 0, A, B)


def ipmt(rate, per, nper, pv, fv=0, when='end'):
    """
    Compute the interest portion of a payment.

    Parameters
    ----------
    rate : scalar or array_like of shape(M, )
        Rate of interest as decimal (not per cent) per period
    per : scalar or array_like of shape(M, )
        Interest paid against the loan changes during the life of the loan.
        The `per` is the payment period to calculate the interest amount.
    nper : scalar or array_like of shape(M, )
        Number of compounding periods
    pv : scalar or array_like of shape(M, )
        Present value
    fv : scalar or array_like of shape(M, ), optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0)).
        Defaults to {'end', 0}.

    Returns
    -------
    out : ndarray
        Interest portion of payment.  If all input is scalar, returns a scalar
        float.  If any input is array_like, returns interest payment for each
        input element. If multiple inputs are array_like, they all must have
        the same shape.

    See Also
    --------
    ppmt, pmt, pv

    Notes
    -----
    The total payment is made up of payment against principal plus interest.

    ``pmt = ppmt + ipmt``

    Examples
    --------
    What is the amortization schedule for a 1 year loan of $2500 at
    8.24% interest per year compounded monthly?

    >>> principal = 2500.00

    The 'per' variable represents the periods of the loan.  Remember that
    financial equations start the period count at 1!

    >>> per = np.arange(1*12) + 1
    >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal)
    >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal)

    Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal
    'pmt'.

    >>> pmt = np.pmt(0.0824/12, 1*12, principal)
    >>> np.allclose(ipmt + ppmt, pmt)
    True

    >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}'
    >>> for payment in per:
    ...     index = payment - 1
    ...     principal = principal + ppmt[index]
    ...     
print(fmt.format(payment, ppmt[index], ipmt[index], principal))
     1  -200.58   -17.17  2299.42
     2  -201.96   -15.79  2097.46
     3  -203.35   -14.40  1894.11
     4  -204.74   -13.01  1689.37
     5  -206.15   -11.60  1483.22
     6  -207.56   -10.18  1275.66
     7  -208.99    -8.76  1066.67
     8  -210.42    -7.32   856.25
     9  -211.87    -5.88   644.38
    10  -213.32    -4.42   431.05
    11  -214.79    -2.96   216.26
    12  -216.26    -1.49    -0.00

    >>> interestpd = np.sum(ipmt)
    >>> np.round(interestpd, 2)
    -112.98

    """
    when = _convert_when(when)
    rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
                                                        pv, fv, when)
    total_pmt = pmt(rate, nper, pv, fv, when)
    ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
    try:
        ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt)
        ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt)
    except IndexError:
        pass
    return ipmt


def _rbl(rate, per, pmt, pv, when):
    """
    This function is here simply to have a different name for the 'fv'
    function so as not to interfere with the 'fv' keyword argument within
    the 'ipmt' function.  It is the 'remaining balance on loan', which
    might be useful as its own function, but is easily calculated with
    the 'fv' function.
    """
    return fv(rate, (per - 1), pmt, pv, when)


def ppmt(rate, per, nper, pv, fv=0, when='end'):
    """
    Compute the payment against loan principal.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    per : array_like, int
        Amount paid against the loan changes.  The `per` is the period of
        interest.
    nper : array_like
        Number of compounding periods
    pv : array_like
        Present value
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}
        When payments are due ('begin' (1) or 'end' (0))

    See Also
    --------
    pmt, pv, ipmt

    """
    total = pmt(rate, nper, pv, fv, when)
    return total - ipmt(rate, per, nper, pv, fv, when)


def pv(rate, nper, pmt, fv=0, when='end'):
    """
    Compute the present value.

    Given:
     * a future value, `fv`
     * an interest `rate` compounded once per period, of which there are
     * `nper` total
     * a (fixed) payment, `pmt`, paid either
     * at the beginning (`when` = {'begin', 1}) or the end
       (`when` = {'end', 0}) of each period

    Return:
       the value now

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    nper : array_like
        Number of compounding periods
    pmt : array_like
        Payment
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Returns
    -------
    out : ndarray, float
        Present value of a series of payments or investments.

    Notes
    -----
    The present value is computed by solving the equation::

     fv + pv*(1 + rate)**nper + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0

    or, when ``rate = 0``::

     fv + pv + pmt * nper = 0

    for `pv`, which is then returned.

    References
    ----------
    .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
       Open Document Format for Office Applications (OpenDocument)v1.2,
       Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
       Pre-Draft 12. Organization for the Advancement of Structured Information
       Standards (OASIS). Billerica, MA, USA. [ODT Document].
       Available:
       http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
       OpenDocument-formula-20090508.odt

    Examples
    --------
    What is the present value (e.g., the initial investment)
    of an investment that needs to total $15692.93
    after 10 years of saving $100 every month?  Assume the
    interest rate is 5% (annually) compounded monthly.
>>> np.pv(0.05/12, 10*12, -100, 15692.93) -100.00067131625819 By convention, the negative sign represents cash flow out (i.e., money not available today). Thus, to end up with $15,692.93 in 10 years saving $100 a month at 5% annual interest, one's initial deposit should also be $100. If any input is array_like, ``pv`` returns an array of equal shape. Let's compare different interest rates in the example above: >>> a = np.array((0.05, 0.04, 0.03))/12 >>> np.pv(a, 10*12, -100, 15692.93) array([ -100.00067132, -649.26771385, -1273.78633713]) So, to end up with the same $15692.93 under the same $100 per month "savings plan," for annual interest rates of 4% and 3%, one would need initial investments of $649.27 and $1273.79, respectively. """ when = _convert_when(when) (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) temp = (1+rate)**nper fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate) return -(fv + pmt*fact)/temp # Computed with Sage # (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - # p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + # p*((r + 1)^n - 1)*w/r) def _g_div_gp(r, n, p, x, y, w): t1 = (r+1)**n t2 = (r+1)**(n-1) return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r)) # Use Newton's iteration until the change is less than 1e-6 # for all values or a maximum of 100 iterations is reached. # Newton's rule is # r_{n+1} = r_{n} - g(r_n)/g'(r_n) # where # g(r) is the formula # g'(r) is the derivative with respect to r. def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): """ Compute the rate of interest per period. Parameters ---------- nper : array_like Number of compounding periods pmt : array_like Payment pv : array_like Present value fv : array_like Future value when : {{'begin', 1}, {'end', 0}}, {string, int}, optional When payments are due ('begin' (1) or 'end' (0)) guess : Number, optional Starting guess for solving the rate of interest, default 0.1 tol : Number, optional Required tolerance for the solution, default 1e-6 maxiter : int, optional Maximum iterations in finding the solution Notes ----- The rate of interest is computed by iteratively solving the (non-linear) equation:: fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 for ``rate``. References ---------- Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. Organization for the Advancement of Structured Information Standards (OASIS). Billerica, MA, USA. [ODT Document]. 
Available: http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula OpenDocument-formula-20090508.odt """ when = _convert_when(when) default_type = Decimal if isinstance(pmt, Decimal) else float # Handle casting defaults to Decimal if/when pmt is a Decimal and # guess and/or tol are not given default values if guess is None: guess = default_type('0.1') if tol is None: tol = default_type('1e-6') (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) rn = guess iterator = 0 close = False while (iterator < maxiter) and not close: rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) diff = abs(rnp1-rn) close = np.all(diff < tol) iterator += 1 rn = rnp1 if not close: # Return nan's in array of the same shape as rn return np.nan + rn else: return rn def irr(values): """ Return the Internal Rate of Return (IRR). This is the "average" periodically compounded rate of return that gives a net present value of 0.0; for a more complete explanation, see Notes below. :class:`decimal.Decimal` type is not supported. Parameters ---------- values : array_like, shape(N,) Input cash flows per time period. By convention, net "deposits" are negative and net "withdrawals" are positive. Thus, for example, at least the first element of `values`, which represents the initial investment, will typically be negative. Returns ------- out : float Internal Rate of Return for periodic input values. Notes ----- The IRR is perhaps best understood through an example (illustrated using np.irr in the Examples section below). Suppose one invests 100 units and then makes the following withdrawals at regular (fixed) intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 unit investment yields 173 units; however, due to the combination of compounding and the periodic withdrawals, the "average" rate of return is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution (for :math:`r`) of the equation: .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 In general, for `values` :math:`= [v_0, v_1, ... v_M]`, irr is the solution of the equation: [G]_ .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 References ---------- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., Addison-Wesley, 2003, pg. 348. Examples -------- >>> round(irr([-100, 39, 59, 55, 20]), 5) 0.28095 >>> round(irr([-100, 0, 0, 74]), 5) -0.0955 >>> round(irr([-100, 100, 0, -7]), 5) -0.0833 >>> round(irr([-100, 100, 0, 7]), 5) 0.06206 >>> round(irr([-5, 10.5, 1, -8, 1]), 5) 0.0886 (Compare with the Example given for numpy.lib.financial.npv) """ # `np.roots` call is why this function does not support Decimal type. # # Ultimately Decimal support needs to be added to np.roots, which has # greater implications on the entire linear algebra module and how it does # eigenvalue computations. res = np.roots(values[::-1]) mask = (res.imag == 0) & (res.real > 0) if not mask.any(): return np.nan res = res[mask].real # NPV(rate) = 0 can have more than one solution so we return # only the solution closest to zero. rate = 1/res - 1 rate = rate.item(np.argmin(np.abs(rate))) return rate def npv(rate, values): """ Returns the NPV (Net Present Value) of a cash flow series. Parameters ---------- rate : scalar The discount rate. values : array_like, shape(M, ) The values of the time series of cash flows. 
The (fixed) time interval between cash flow "events" must be the same as that for which `rate` is given (i.e., if `rate` is per year, then precisely a year is understood to elapse between each cash flow event). By convention, investments or "deposits" are negative, income or "withdrawals" are positive; `values` must begin with the initial investment, thus `values[0]` will typically be negative. Returns ------- out : float The NPV of the input cash flow series `values` at the discount `rate`. Notes ----- Returns the result of: [G]_ .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} References ---------- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., Addison-Wesley, 2003, pg. 346. Examples -------- >>> np.npv(0.281,[-100, 39, 59, 55, 20]) -0.0084785916384548798 (Compare with the Example given for numpy.lib.financial.irr) """ values = np.asarray(values) return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) def mirr(values, finance_rate, reinvest_rate): """ Modified internal rate of return. Parameters ---------- values : array_like Cash flows (must contain at least one positive and one negative value) or nan is returned. The first value is considered a sunk cost at time zero. finance_rate : scalar Interest rate paid on the cash flows reinvest_rate : scalar Interest rate received on the cash flows upon reinvestment Returns ------- out : float Modified internal rate of return """ values = np.asarray(values) n = values.size # Without this explicit cast the 1/(n - 1) computation below # becomes a float, which causes TypeError when using Decimal # values. if isinstance(finance_rate, Decimal): n = Decimal(n) pos = values > 0 neg = values < 0 if not (pos.any() and neg.any()): return np.nan numer = np.abs(npv(reinvest_rate, values*pos)) denom = np.abs(npv(finance_rate, values*neg)) return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1
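
# Hedged usage sketch (editorial addition, not part of the upstream module):
# a worked loan example tying pmt/ipmt/ppmt together. The 7.5%/15-year
# figures mirror the pmt docstring above; everything else is illustrative.
if __name__ == '__main__':
    monthly_rate = 0.075 / 12
    n_periods = 15 * 12
    principal = 200000

    payment = pmt(monthly_rate, n_periods, principal)
    periods = np.arange(1, n_periods + 1)
    interest_part = ipmt(monthly_rate, periods, n_periods, principal)
    principal_part = ppmt(monthly_rate, periods, n_periods, principal)

    # The interest and principal portions must recombine into the fixed
    # payment in every period (pmt = ppmt + ipmt).
    assert np.allclose(interest_part + principal_part, payment)
    print('monthly payment: %.2f' % payment)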
24,495
31.146982
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/setup.py
from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('lib', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration)
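
# Hedged note (editorial addition): this per-subpackage setup.py is normally
# invoked indirectly via the top-level numpy build, but the configuration can
# also be inspected on its own, e.g. (assuming it is run from this directory):
#
#     python -c "from setup import configuration; print(configuration().todict())"
#
# `todict()` is a standard numpy.distutils Configuration method.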
379
28.230769
59
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/recfunctions.py
""" Collection of utilities to manipulate structured arrays. Most of these functions were initially implemented by John Hunter for matplotlib. They have been rewritten and extended for convenience. """ from __future__ import division, absolute_import, print_function import sys import itertools import numpy as np import numpy.ma as ma from numpy import ndarray, recarray from numpy.ma import MaskedArray from numpy.ma.mrecords import MaskedRecords from numpy.lib._iotools import _is_string_like from numpy.compat import basestring if sys.version_info[0] < 3: from future_builtins import zip _check_fill_value = np.ma.core._check_fill_value __all__ = [ 'append_fields', 'drop_fields', 'find_duplicates', 'get_fieldstructure', 'join_by', 'merge_arrays', 'rec_append_fields', 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', 'rename_fields', 'stack_arrays', ] def recursive_fill_fields(input, output): """ Fills fields from output with fields from input, with support for nested structures. Parameters ---------- input : ndarray Input array. output : ndarray Output array. Notes ----- * `output` should be at least the same size as `input` Examples -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) >>> b = np.zeros((3,), dtype=a.dtype) >>> rfn.recursive_fill_fields(a, b) array([(1, 10.0), (2, 20.0), (0, 0.0)], dtype=[('A', '<i4'), ('B', '<f8')]) """ newdtype = output.dtype for field in newdtype.names: try: current = input[field] except ValueError: continue if current.dtype.names: recursive_fill_fields(current, output[field]) else: output[field][:len(current)] = current return output def get_fieldspec(dtype): """ Produce a list of name/dtype pairs corresponding to the dtype fields Similar to dtype.descr, but the second item of each tuple is a dtype, not a string. As a result, this handles subarray dtypes Can be passed to the dtype constructor to reconstruct the dtype, noting that this (deliberately) discards field offsets. Examples -------- >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)]) >>> dt.descr [(('a', 'A'), '<i4'), ('b', '<f8', (3,))] >>> get_fieldspec(dt) [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))] """ if dtype.names is None: # .descr returns a nameless field, so we should too return [('', dtype)] else: fields = ((name, dtype.fields[name]) for name in dtype.names) # keep any titles, if present return [ (name if len(f) == 2 else (f[2], name), f[0]) for name, f in fields ] def get_names(adtype): """ Returns the field names of the input datatype as a tuple. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=int)) is None True >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) """ listnames = [] names = adtype.names for name in names: current = adtype[name] if current.names: listnames.append((name, tuple(get_names(current)))) else: listnames.append(name) return tuple(listnames) or None def get_names_flat(adtype): """ Returns the field names of the input datatype as a tuple. Nested structure are flattend beforehand. 
    Parameters
    ----------
    adtype : dtype
        Input datatype

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
    True
    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
    ('A', 'B')
    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
    >>> rfn.get_names_flat(adtype)
    ('a', 'b', 'ba', 'bb')

    """
    listnames = []
    names = adtype.names
    for name in names:
        listnames.append(name)
        current = adtype[name]
        if current.names:
            listnames.extend(get_names_flat(current))
    return tuple(listnames) or None


def flatten_descr(ndtype):
    """
    Flatten a structured data-type description.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
    >>> rfn.flatten_descr(ndtype)
    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))

    """
    names = ndtype.names
    if names is None:
        return (('', ndtype),)
    else:
        descr = []
        for field in names:
            (typ, _) = ndtype.fields[field]
            if typ.names:
                descr.extend(flatten_descr(typ))
            else:
                descr.append((field, typ))
        return tuple(descr)


def zip_dtype(seqarrays, flatten=False):
    newdtype = []
    if flatten:
        for a in seqarrays:
            newdtype.extend(flatten_descr(a.dtype))
    else:
        for a in seqarrays:
            current = a.dtype
            if current.names and len(current.names) <= 1:
                # special case - dtypes of 0 or 1 field are flattened
                newdtype.extend(get_fieldspec(current))
            else:
                newdtype.append(('', current))
    return np.dtype(newdtype)


def zip_descr(seqarrays, flatten=False):
    """
    Combine the dtype description of a series of arrays.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays
    flatten : {boolean}, optional
        Whether to collapse nested descriptions.
    """
    return zip_dtype(seqarrays, flatten=flatten).descr


def get_fieldstructure(adtype, lastname=None, parents=None,):
    """
    Returns a dictionary with fields indexing lists of their parent fields.

    This function is used to simplify access to fields nested in other fields.

    Parameters
    ----------
    adtype : np.dtype
        Input datatype
    lastname : optional
        Last processed field name (used internally during recursion).
    parents : dictionary
        Dictionary of parent fields (used internally during recursion).

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> ndtype = np.dtype([('A', int),
    ...                    ('B', [('BA', int),
    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
    >>> rfn.get_fieldstructure(ndtype)
    ... # XXX: possible regression, order of BBA and BBB is swapped
    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}

    """
    if parents is None:
        parents = {}
    names = adtype.names
    for name in names:
        current = adtype[name]
        if current.names:
            if lastname:
                parents[name] = [lastname, ]
            else:
                parents[name] = []
            parents.update(get_fieldstructure(current, name, parents))
        else:
            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
            if lastparent:
                lastparent.append(lastname)
            elif lastname:
                lastparent = [lastname, ]
            parents[name] = lastparent or []
    return parents or None


def _izip_fields_flat(iterable):
    """
    Returns an iterator of concatenated fields from a sequence of arrays,
    collapsing any nested structure.

    """
    for element in iterable:
        if isinstance(element, np.void):
            for f in _izip_fields_flat(tuple(element)):
                yield f
        else:
            yield element


def _izip_fields(iterable):
    """
    Returns an iterator of concatenated fields from a sequence of arrays.
""" for element in iterable: if (hasattr(element, '__iter__') and not isinstance(element, basestring)): for f in _izip_fields(element): yield f elif isinstance(element, np.void) and len(tuple(element)) == 1: for f in _izip_fields(element): yield f else: yield element def izip_records(seqarrays, fill_value=None, flatten=True): """ Returns an iterator of concatenated items from a sequence of arrays. Parameters ---------- seqarrays : sequence of arrays Sequence of arrays. fill_value : {None, integer} Value used to pad shorter iterables. flatten : {True, False}, Whether to """ # Should we flatten the items, or just use a nested approach if flatten: zipfunc = _izip_fields_flat else: zipfunc = _izip_fields if sys.version_info[0] >= 3: zip_longest = itertools.zip_longest else: zip_longest = itertools.izip_longest for tup in zip_longest(*seqarrays, fillvalue=fill_value): yield tuple(zipfunc(tup)) def _fix_output(output, usemask=True, asrecarray=False): """ Private function: return a recarray, a ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ if not isinstance(output, MaskedArray): usemask = False if usemask: if asrecarray: output = output.view(MaskedRecords) else: output = ma.filled(output) if asrecarray: output = output.view(recarray) return output def _fix_defaults(output, defaults=None): """ Update the fill_value and masked data of `output` from the default given in a dictionary defaults. """ names = output.dtype.names (data, mask, fill_value) = (output.data, output.mask, output.fill_value) for (k, v) in (defaults or {}).items(): if k in names: fill_value[k] = v data[k][mask[k]] = v return output def merge_arrays(seqarrays, fill_value=-1, flatten=False, usemask=False, asrecarray=False): """ Merge arrays field by field. Parameters ---------- seqarrays : sequence of ndarrays Sequence of arrays fill_value : {float}, optional Filling value used to pad missing data on the shorter arrays. flatten : {False, True}, optional Whether to collapse nested fields. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : {False, True}, optional Whether to return a recarray (MaskedRecords) or not. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], mask = [(False, False) (False, False) (True, False)], fill_value = (999999, 1e+20), dtype = [('f0', '<i4'), ('f1', '<f8')]) >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), ... usemask=False) array([(1, 10.0), (2, 20.0), (-1, 30.0)], dtype=[('f0', '<i4'), ('f1', '<f8')]) >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), ... np.array([10., 20., 30.])), ... usemask=False, asrecarray=True) rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], dtype=[('a', '<i4'), ('f1', '<f8')]) Notes ----- * Without a mask, the missing value will be filled with something, * depending on what its corresponding type: -1 for integers -1.0 for floating point numbers '-' for characters '-1' for strings True for boolean values * XXX: I just obtained these values empirically """ # Only one item in the input sequence ? if (len(seqarrays) == 1): seqarrays = np.asanyarray(seqarrays[0]) # Do we have a single ndarray as input ? 
    if isinstance(seqarrays, (ndarray, np.void)):
        seqdtype = seqarrays.dtype
        # Make sure we have named fields
        if not seqdtype.names:
            seqdtype = np.dtype([('', seqdtype)])
        if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
            # Minimal processing needed: just make sure everything's a-ok
            seqarrays = seqarrays.ravel()
            # Find what type of array we must return
            if usemask:
                if asrecarray:
                    seqtype = MaskedRecords
                else:
                    seqtype = MaskedArray
            elif asrecarray:
                seqtype = recarray
            else:
                seqtype = ndarray
            return seqarrays.view(dtype=seqdtype, type=seqtype)
        else:
            seqarrays = (seqarrays,)
    else:
        # Make sure we have arrays in the input sequence
        seqarrays = [np.asanyarray(_m) for _m in seqarrays]
    # Find the sizes of the inputs and their maximum
    sizes = tuple(a.size for a in seqarrays)
    maxlength = max(sizes)
    # Get the dtype of the output (flattening if needed)
    newdtype = zip_dtype(seqarrays, flatten=flatten)
    # Initialize the sequences for data and mask
    seqdata = []
    seqmask = []
    # If we expect some kind of MaskedArray, make a special loop.
    if usemask:
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            # Get the data and mask
            data = a.ravel().__array__()
            mask = ma.getmaskarray(a).ravel()
            # Get the filling value (if needed)
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                        fmsk = True
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
                        fmsk = np.ones((1,), dtype=mask.dtype)
            else:
                fval = None
                fmsk = True
            # Store an iterator padding the input to the expected length
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
            seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
        # Create an iterator for the data
        data = tuple(izip_records(seqdata, flatten=flatten))
        output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
                          mask=list(izip_records(seqmask, flatten=flatten)))
        if asrecarray:
            output = output.view(MaskedRecords)
    else:
        # Same as before, without the mask we don't need...
        for (a, n) in zip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            data = a.ravel().__array__()
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
            else:
                fval = None
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
        output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
                             dtype=newdtype, count=maxlength)
        if asrecarray:
            output = output.view(recarray)
    # And we're done...
    return output


def drop_fields(base, drop_names, usemask=True, asrecarray=False):
    """
    Return a new array with fields in `drop_names` dropped.

    Nested fields are supported.

    Parameters
    ----------
    base : array
        Input array
    drop_names : string or sequence
        String or sequence of strings corresponding to the names of the
        fields to drop.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : bool, optional
        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
        a plain ndarray or masked array with flexible dtype. The default
        is False.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
    ...              
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) >>> rfn.drop_fields(a, 'a') array([((2.0, 3),), ((5.0, 6),)], dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])]) >>> rfn.drop_fields(a, 'ba') array([(1, (3,)), (4, (6,))], dtype=[('a', '<i4'), ('b', [('bb', '<i4')])]) >>> rfn.drop_fields(a, ['ba', 'bb']) array([(1,), (4,)], dtype=[('a', '<i4')]) """ if _is_string_like(drop_names): drop_names = [drop_names] else: drop_names = set(drop_names) def _drop_descr(ndtype, drop_names): names = ndtype.names newdtype = [] for name in names: current = ndtype[name] if name in drop_names: continue if current.names: descr = _drop_descr(current, drop_names) if descr: newdtype.append((name, descr)) else: newdtype.append((name, current)) return newdtype newdtype = _drop_descr(base.dtype, drop_names) if not newdtype: return None output = np.empty(base.shape, dtype=newdtype) output = recursive_fill_fields(base, output) return _fix_output(output, usemask=usemask, asrecarray=asrecarray) def _keep_fields(base, keep_names, usemask=True, asrecarray=False): """ Return a new array keeping only the fields in `keep_names`, and preserving the order of those fields. Parameters ---------- base : array Input array keep_names : string or sequence String or sequence of strings corresponding to the names of the fields to keep. Order of the names will be preserved. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : bool, optional Whether to return a recarray or a mrecarray (`asrecarray=True`) or a plain ndarray or masked array with flexible dtype. The default is False. """ newdtype = [(n, base.dtype[n]) for n in keep_names] output = np.empty(base.shape, dtype=newdtype) output = recursive_fill_fields(base, output) return _fix_output(output, usemask=usemask, asrecarray=asrecarray) def rec_drop_fields(base, drop_names): """ Returns a new numpy.recarray with fields in `drop_names` dropped. """ return drop_fields(base, drop_names, usemask=False, asrecarray=True) def rename_fields(base, namemapper): """ Rename the fields from a flexible-datatype ndarray or recarray. Nested fields are supported. Parameters ---------- base : ndarray Input array whose fields must be modified. namemapper : dictionary Dictionary mapping old field names to their new version. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])]) """ def _recursive_rename_fields(ndtype, namemapper): newdtype = [] for name in ndtype.names: newname = namemapper.get(name, name) current = ndtype[name] if current.names: newdtype.append( (newname, _recursive_rename_fields(current, namemapper)) ) else: newdtype.append((newname, current)) return newdtype newdtype = _recursive_rename_fields(base.dtype, namemapper) return base.view(newdtype) def append_fields(base, names, data, dtypes=None, fill_value=-1, usemask=True, asrecarray=False): """ Add new fields to an existing array. The names of the fields are given with the `names` arguments, the corresponding values with the `data` arguments. If a single field is appended, `names`, `data` and `dtypes` do not have to be lists but just values. Parameters ---------- base : array Input array to extend. 
names : string, sequence String or sequence of strings corresponding to the names of the new fields. data : array or sequence of arrays Array or sequence of arrays storing the fields to add to the base. dtypes : sequence of datatypes, optional Datatype or sequence of datatypes. If None, the datatypes are estimated from the `data`. fill_value : {float}, optional Filling value used to pad missing data on the shorter arrays. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : {False, True}, optional Whether to return a recarray (MaskedRecords) or not. """ # Check the names if isinstance(names, (tuple, list)): if len(names) != len(data): msg = "The number of arrays does not match the number of names" raise ValueError(msg) elif isinstance(names, basestring): names = [names, ] data = [data, ] # if dtypes is None: data = [np.array(a, copy=False, subok=True) for a in data] data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)] else: if not isinstance(dtypes, (tuple, list)): dtypes = [dtypes, ] if len(data) != len(dtypes): if len(dtypes) == 1: dtypes = dtypes * len(data) else: msg = "The dtypes argument must be None, a dtype, or a list." raise ValueError(msg) data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)]) for (a, n, d) in zip(data, names, dtypes)] # base = merge_arrays(base, usemask=usemask, fill_value=fill_value) if len(data) > 1: data = merge_arrays(data, flatten=True, usemask=usemask, fill_value=fill_value) else: data = data.pop() # output = ma.masked_all( max(len(base), len(data)), dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype)) output = recursive_fill_fields(base, output) output = recursive_fill_fields(data, output) # return _fix_output(output, usemask=usemask, asrecarray=asrecarray) def rec_append_fields(base, names, data, dtypes=None): """ Add new fields to an existing array. The names of the fields are given with the `names` arguments, the corresponding values with the `data` arguments. If a single field is appended, `names`, `data` and `dtypes` do not have to be lists but just values. Parameters ---------- base : array Input array to extend. names : string, sequence String or sequence of strings corresponding to the names of the new fields. data : array or sequence of arrays Array or sequence of arrays storing the fields to add to the base. dtypes : sequence of datatypes, optional Datatype or sequence of datatypes. If None, the datatypes are estimated from the `data`. See Also -------- append_fields Returns ------- appended_array : np.recarray """ return append_fields(base, names, data=data, dtypes=dtypes, asrecarray=True, usemask=False) def repack_fields(a, align=False, recurse=False): """ Re-pack the fields of a structured array or dtype in memory. The memory layout of structured datatypes allows fields at arbitrary byte offsets. This means the fields can be separated by padding bytes, their offsets can be non-monotonically increasing, and they can overlap. This method removes any overlaps and reorders the fields in memory so they have increasing byte offsets, and adds or removes padding bytes depending on the `align` option, which behaves like the `align` option to `np.dtype`. If `align=False`, this method produces a "packed" memory layout in which each field starts at the byte the previous field ended, and any padding bytes are removed. 
If `align=True`, this method produces an "aligned" memory layout in which each field's offset is a multiple of its alignment, and the total itemsize is a multiple of the largest alignment, by adding padding bytes as needed. Parameters ---------- a : ndarray or dtype Structured array or dtype for which to repack the fields. align : boolean If true, use an "aligned" memory layout, otherwise use a "packed" layout. recurse : boolean If True, also repack nested structures. Returns ------- repacked : ndarray or dtype Copy of `a` with fields repacked, or `a` itself if no repacking was needed. Examples -------- >>> def print_offsets(d): ... print("offsets:", [d.fields[name][1] for name in d.names]) ... print("itemsize:", d.itemsize) ... >>> dt = np.dtype('u1,i4,f8', align=True) >>> dt dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True) >>> print_offsets(dt) offsets: [0, 4, 8] itemsize: 16 >>> packed_dt = repack_fields(dt) >>> packed_dt dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')]) >>> print_offsets(packed_dt) offsets: [0, 1, 5] itemsize: 13 """ if not isinstance(a, np.dtype): dt = repack_fields(a.dtype, align=align, recurse=recurse) return a.astype(dt, copy=False) if a.names is None: raise ValueError("a must be or have a structured dtype") fieldinfo = [] for name in a.names: tup = a.fields[name] if recurse: fmt = repack_fields(tup[0], align=align, recurse=True) else: fmt = tup[0] if len(tup) == 3: name = (tup[2], name) fieldinfo.append((name, fmt)) dt = np.dtype(fieldinfo, align=align) return np.dtype((a.type, dt)) def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, autoconvert=False): """ Superposes arrays field by field Parameters ---------- arrays : array or sequence Sequence of input arrays. defaults : dictionary, optional Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`) or a ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. autoconvert : {False, True}, optional Whether to automatically cast the type of the field to the maximum. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> x = np.array([1, 2,]) >>> rfn.stack_arrays(x) is x True >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], ... 
dtype=[('A', '|S3'), ('B', float), ('C', float)]) >>> test = rfn.stack_arrays((z,zz)) >>> test masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) ('c', 30.0, 300.0)], mask = [(False, False, True) (False, False, True) (False, False, False) (False, False, False) (False, False, False)], fill_value = ('N/A', 1e+20, 1e+20), dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')]) """ if isinstance(arrays, ndarray): return arrays elif len(arrays) == 1: return arrays[0] seqarrays = [np.asanyarray(a).ravel() for a in arrays] nrecords = [len(a) for a in seqarrays] ndtype = [a.dtype for a in seqarrays] fldnames = [d.names for d in ndtype] # dtype_l = ndtype[0] newdescr = get_fieldspec(dtype_l) names = [n for n, d in newdescr] for dtype_n in ndtype[1:]: for fname, fdtype in get_fieldspec(dtype_n): if fname not in names: newdescr.append((fname, fdtype)) names.append(fname) else: nameidx = names.index(fname) _, cdtype = newdescr[nameidx] if autoconvert: newdescr[nameidx] = (fname, max(fdtype, cdtype)) elif fdtype != cdtype: raise TypeError("Incompatible type '%s' <> '%s'" % (cdtype, fdtype)) # Only one field: use concatenate if len(newdescr) == 1: output = ma.concatenate(seqarrays) else: # output = ma.masked_all((np.sum(nrecords),), newdescr) offset = np.cumsum(np.r_[0, nrecords]) seen = [] for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): names = a.dtype.names if names is None: output['f%i' % len(seen)][i:j] = a else: for name in n: output[name][i:j] = a[name] if name not in seen: seen.append(name) # return _fix_output(_fix_defaults(output, defaults), usemask=usemask, asrecarray=asrecarray) def find_duplicates(a, key=None, ignoremask=True, return_index=False): """ Find the duplicates in a structured array along a given key Parameters ---------- a : array-like Input array key : {string, None}, optional Name of the fields along which to check the duplicates. If None, the search is performed by records ignoremask : {True, False}, optional Whether masked data should be discarded or considered as duplicates. return_index : {False, True}, optional Whether to return the indices of the duplicated values. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) ... # XXX: judging by the output, the ignoremask flag has no effect """ a = np.asanyarray(a).ravel() # Get a dictionary of fields fields = get_fieldstructure(a.dtype) # Get the sorting data (by selecting the corresponding field) base = a if key: for f in fields[key]: base = base[f] base = base[key] # Get the sorting indices and the sorted data sortidx = base.argsort() sortedbase = base[sortidx] sorteddata = sortedbase.filled() # Compare the sorting data flag = (sorteddata[:-1] == sorteddata[1:]) # If masked data must be ignored, set the flag to false where needed if ignoremask: sortedmask = sortedbase.recordmask flag[sortedmask[1:]] = False flag = np.concatenate(([False], flag)) # We need to take the point on the left as well (else we're missing it) flag[:-1] = flag[:-1] + flag[1:] duplicates = a[sortidx][flag] if return_index: return (duplicates, sortidx[flag]) else: return duplicates def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None, usemask=True, asrecarray=False): """ Join arrays `r1` and `r2` on key `key`. 
The key should be either a string or a sequence of strings corresponding to the fields used to join the arrays. An exception is raised if the `key` field cannot be found in the two input arrays. Neither `r1` nor `r2` should have any duplicates along `key`: the presence of duplicates will make the output quite unreliable. Note that duplicates are not looked for by the algorithm. Parameters ---------- key : {string, sequence} A string or a sequence of strings corresponding to the fields used for comparison. r1, r2 : arrays Structured arrays. jointype : {'inner', 'outer', 'leftouter'}, optional If 'inner', returns the elements common to both r1 and r2. If 'outer', returns the common elements as well as the elements of r1 not in r2 and the elements of r2 not in r1. If 'leftouter', returns the common elements and the elements of r1 not in r2. r1postfix : string, optional String appended to the names of the fields of r1 that are present in r2 but absent from the key. r2postfix : string, optional String appended to the names of the fields of r2 that are present in r1 but absent from the key. defaults : {dictionary}, optional Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`) or a ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. Notes ----- * The output is sorted along the key. * A temporary array is formed by dropping the fields not in the key for the two arrays and concatenating the result. This array is then sorted, and the common entries selected. The output is constructed by filling the fields with the selected entries. Matching is not preserved if there are some duplicates... """ # Check jointype if jointype not in ('inner', 'outer', 'leftouter'): raise ValueError( "The 'jointype' argument should be in 'inner', " "'outer' or 'leftouter' (got '%s' instead)" % jointype ) # If we have a single key, put it in a tuple if isinstance(key, basestring): key = (key,) # Check the keys if len(set(key)) != len(key): dup = next(x for n,x in enumerate(key) if x in key[n+1:]) raise ValueError("duplicate join key %r" % dup) for name in key: if name not in r1.dtype.names: raise ValueError('r1 does not have key field %r' % name) if name not in r2.dtype.names: raise ValueError('r2 does not have key field %r' % name) # Make sure we work with ravelled arrays r1 = r1.ravel() r2 = r2.ravel() # Fixme: nb2 below is never used. Commenting out for pyflakes. 
# (nb1, nb2) = (len(r1), len(r2)) nb1 = len(r1) (r1names, r2names) = (r1.dtype.names, r2.dtype.names) # Check the names for collision collisions = (set(r1names) & set(r2names)) - set(key) if collisions and not (r1postfix or r2postfix): msg = "r1 and r2 contain common names, r1postfix and r2postfix " msg += "can't both be empty" raise ValueError(msg) # Make temporary arrays of just the keys # (use order of keys in `r1` for back-compatibility) key1 = [ n for n in r1names if n in key ] r1k = _keep_fields(r1, key1) r2k = _keep_fields(r2, key1) # Concatenate the two arrays for comparison aux = ma.concatenate((r1k, r2k)) idx_sort = aux.argsort(order=key) aux = aux[idx_sort] # # Get the common keys flag_in = ma.concatenate(([False], aux[1:] == aux[:-1])) flag_in[:-1] = flag_in[1:] + flag_in[:-1] idx_in = idx_sort[flag_in] idx_1 = idx_in[(idx_in < nb1)] idx_2 = idx_in[(idx_in >= nb1)] - nb1 (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) if jointype == 'inner': (r1spc, r2spc) = (0, 0) elif jointype == 'outer': idx_out = idx_sort[~flag_in] idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) elif jointype == 'leftouter': idx_out = idx_sort[~flag_in] idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) # Select the entries from each input (s1, s2) = (r1[idx_1], r2[idx_2]) # # Build the new description of the output array ....... # Start with the key fields ndtype = get_fieldspec(r1k.dtype) # Add the fields from r1 for fname, fdtype in get_fieldspec(r1.dtype): if fname not in key: ndtype.append((fname, fdtype)) # Add the fields from r2 for fname, fdtype in get_fieldspec(r2.dtype): # Have we seen the current name already ? # we need to rebuild this list every time names = list(name for name, dtype in ndtype) try: nameidx = names.index(fname) except ValueError: #... we haven't: just add the description to the current list ndtype.append((fname, fdtype)) else: # collision _, cdtype = ndtype[nameidx] if fname in key: # The current field is part of the key: take the largest dtype ndtype[nameidx] = (fname, max(fdtype, cdtype)) else: # The current field is not part of the key: add the suffixes, # and place the new field adjacent to the old one ndtype[nameidx:nameidx + 1] = [ (fname + r1postfix, cdtype), (fname + r2postfix, fdtype) ] # Rebuild a dtype from the new fields ndtype = np.dtype(ndtype) # Find the largest nb of common fields : # r1cmn and r2cmn should be equal, but... cmn = max(r1cmn, r2cmn) # Construct an empty array output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) names = output.dtype.names for f in r1names: selected = s1[f] if f not in names or (f in r2names and not r2postfix and f not in key): f += r1postfix current = output[f] current[:r1cmn] = selected[:r1cmn] if jointype in ('outer', 'leftouter'): current[cmn:cmn + r1spc] = selected[r1cmn:] for f in r2names: selected = s2[f] if f not in names or (f in r1names and not r1postfix and f not in key): f += r2postfix current = output[f] current[:r2cmn] = selected[:r2cmn] if (jointype == 'outer') and r2spc: current[-r2spc:] = selected[r2cmn:] # Sort and finalize the output output.sort(order=key) kwargs = dict(usemask=usemask, asrecarray=asrecarray) return _fix_output(_fix_defaults(output, defaults), **kwargs) def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None): """ Join arrays `r1` and `r2` on keys. 
Alternative to join_by that always returns a np.recarray. See Also -------- join_by : equivalent function """ kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, defaults=defaults, usemask=False, asrecarray=True) return join_by(key, r1, r2, **kwargs)
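# Illustrative sketch (added for clarity; the arrays and field names below
# are hypothetical, not taken from this module). join_by matches the
# records of two structured arrays on a shared key:
#
# >>> import numpy as np
# >>> from numpy.lib import recfunctions as rfn
# >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('a', float)])
# >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('b', float)])
# >>> rfn.join_by('key', r1, r2, jointype='inner', usemask=False)
# array([(2, 20., 200.)],
#       dtype=[('key', '<i8'), ('a', '<f8'), ('b', '<f8')])
# (integer field widths in the printed dtype are platform-dependent)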
39,674
33.711286
113
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/stride_tricks.py
""" Utilities that manipulate strides to achieve desirable effects. An explanation of strides can be found in the "ndarray.rst" file in the NumPy reference guide. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['broadcast_to', 'broadcast_arrays'] class DummyArray(object): """Dummy object that just exists to hang __array_interface__ dictionaries and possibly keep alive a reference to a base array. """ def __init__(self, interface, base=None): self.__array_interface__ = interface self.base = base def _maybe_view_as_subclass(original_array, new_array): if type(original_array) is not type(new_array): # if input was an ndarray subclass and subclasses were OK, # then view the result as that subclass. new_array = new_array.view(type=type(original_array)) # Since we have done something akin to a view from original_array, we # should let the subclass finalize (if it has it implemented, i.e., is # not None). if new_array.__array_finalize__: new_array.__array_finalize__(original_array) return new_array def as_strided(x, shape=None, strides=None, subok=False, writeable=True): """ Create a view into the array with the given shape and strides. .. warning:: This function has to be used with extreme care, see notes. Parameters ---------- x : ndarray Array to create a new. shape : sequence of int, optional The shape of the new array. Defaults to ``x.shape``. strides : sequence of int, optional The strides of the new array. Defaults to ``x.strides``. subok : bool, optional .. versionadded:: 1.10 If True, subclasses are preserved. writeable : bool, optional .. versionadded:: 1.12 If set to False, the returned array will always be readonly. Otherwise it will be writable if the original array was. It is advisable to set this to False if possible (see Notes). Returns ------- view : ndarray See also -------- broadcast_to: broadcast an array to a given shape. reshape : reshape an array. Notes ----- ``as_strided`` creates a view into the array given the exact strides and shape. This means it manipulates the internal data structure of ndarray and, if done incorrectly, the array elements can point to invalid memory and can corrupt results or crash your program. It is advisable to always use the original ``x.strides`` when calculating new strides to avoid reliance on a contiguous memory layout. Furthermore, arrays created with this function often contain self overlapping memory, so that two elements are identical. Vectorized write operations on such arrays will typically be unpredictable. They may even give different results for small, large, or transposed arrays. Since writing to these arrays has to be tested and done with great care, you may want to use ``writeable=False`` to avoid accidental write operations. For these reasons it is advisable to avoid ``as_strided`` when possible. """ # first convert input to array, possibly keeping subclass x = np.array(x, copy=False, subok=subok) interface = dict(x.__array_interface__) if shape is not None: interface['shape'] = tuple(shape) if strides is not None: interface['strides'] = tuple(strides) array = np.asarray(DummyArray(interface, base=x)) # The route via `__interface__` does not preserve structured # dtypes. Since dtype should remain unchanged, we set it explicitly. 
array.dtype = x.dtype view = _maybe_view_as_subclass(x, array) if view.flags.writeable and not writeable: view.flags.writeable = False return view def _broadcast_to(array, shape, subok, readonly): shape = tuple(shape) if np.iterable(shape) else (shape,) array = np.array(array, copy=False, subok=subok) if not shape and array.shape: raise ValueError('cannot broadcast a non-scalar to a scalar array') if any(size < 0 for size in shape): raise ValueError('all elements of broadcast shape must be non-' 'negative') needs_writeable = not readonly and array.flags.writeable extras = ['reduce_ok'] if needs_writeable else [] op_flag = 'readwrite' if needs_writeable else 'readonly' broadcast = np.nditer( (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, op_flags=[op_flag], itershape=shape, order='C').itviews[0] result = _maybe_view_as_subclass(array, broadcast) if needs_writeable and not result.flags.writeable: result.flags.writeable = True return result def broadcast_to(array, shape, subok=False): """Broadcast an array to a new shape. Parameters ---------- array : array_like The array to broadcast. shape : tuple The shape of the desired array. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). Returns ------- broadcast : array A readonly view on the original array with the given shape. It is typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. Raises ------ ValueError If the array is not compatible with the new shape according to NumPy's broadcasting rules. Notes ----- .. versionadded:: 1.10.0 Examples -------- >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) """ return _broadcast_to(array, shape, subok=subok, readonly=True) def _broadcast_shape(*args): """Returns the shape of the arrays that would result from broadcasting the supplied arrays against each other. """ if not args: return () # use the old-iterator because np.nditer does not handle size 0 arrays # consistently b = np.broadcast(*args[:32]) # unfortunately, it cannot handle 32 or more arguments directly for pos in range(32, len(args), 31): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) b = np.broadcast(b, *args[pos:(pos + 31)]) return b.shape def broadcast_arrays(*args, **kwargs): """ Broadcast any number of arrays against each other. Parameters ---------- `*args` : array_likes The arrays to broadcast. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned arrays will be forced to be a base-class array (default). Returns ------- broadcasted : list of arrays These arrays are views on the original arrays. They are typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. If you need to write to the arrays, make copies first. Examples -------- >>> x = np.array([[1,2,3]]) >>> y = np.array([[1],[2],[3]]) >>> np.broadcast_arrays(x, y) [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])] Here is a useful idiom for getting contiguous copies instead of non-contiguous views. 
>>> [np.array(a) for a in np.broadcast_arrays(x, y)] [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])] """ # nditer is not used here to avoid the limit of 32 arrays. # Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews subok = kwargs.pop('subok', False) if kwargs: raise TypeError('broadcast_arrays() got an unexpected keyword ' 'argument {!r}'.format(list(kwargs.keys())[0])) args = [np.array(_m, copy=False, subok=subok) for _m in args] shape = _broadcast_shape(*args) if all(array.shape == shape for array in args): # Common case where nothing needs to be broadcasted. return args # TODO: consider making the results of broadcast_arrays readonly to match # broadcast_to. This will require a deprecation cycle. return [_broadcast_to(array, shape, subok=subok, readonly=False) for array in args]
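# Illustrative sketch (added; the values are hypothetical): a sliding-window
# view built with as_strided, reusing x.strides as the Notes above advise,
# and marked read-only to guard against writes into overlapping memory:
#
# >>> x = np.arange(6)
# >>> as_strided(x, shape=(4, 3), strides=(x.strides[0], x.strides[0]),
# ...            writeable=False)
# array([[0, 1, 2],
#        [1, 2, 3],
#        [2, 3, 4],
#        [3, 4, 5]])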
8,785
32.92278
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/ufunclike.py
""" Module of functions that are like ufuncs in acting on arrays and optionally storing results in an output array. """ from __future__ import division, absolute_import, print_function __all__ = ['fix', 'isneginf', 'isposinf'] import numpy.core.numeric as nx import warnings import functools def _deprecate_out_named_y(f): """ Allow the out argument to be passed as the name `y` (deprecated) In future, this decorator should be removed. """ @functools.wraps(f) def func(x, out=None, **kwargs): if 'y' in kwargs: if 'out' in kwargs: raise TypeError( "{} got multiple values for argument 'out'/'y'" .format(f.__name__) ) out = kwargs.pop('y') # NumPy 1.13.0, 2017-04-26 warnings.warn( "The name of the out argument to {} has changed from `y` to " "`out`, to match other ufuncs.".format(f.__name__), DeprecationWarning, stacklevel=3) return f(x, out=out, **kwargs) return func @_deprecate_out_named_y def fix(x, out=None): """ Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. The rounded values are returned as floats. Parameters ---------- x : array_like An array of floats to be rounded y : ndarray, optional Output array Returns ------- out : ndarray of floats The array of rounded numbers See Also -------- trunc, floor, ceil around : Round to given number of decimals Examples -------- >>> np.fix(3.14) 3.0 >>> np.fix(3) 3.0 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) """ # promote back to an array if flattened res = nx.asanyarray(nx.ceil(x, out=out)) res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) # when no out argument is passed and no subclasses are involved, flatten # scalars if out is None and type(res) is nx.ndarray: res = res[()] return res @_deprecate_out_named_y def isposinf(x, out=None): """ Test element-wise for positive infinity, return result as bool array. Parameters ---------- x : array_like The input array. y : array_like, optional A boolean array with the same shape as `x` to store the result. Returns ------- out : ndarray A boolean array with the same dimensions as the input. If second argument is not supplied then a boolean array is returned with values True where the corresponding element of the input is positive infinity and values False where the element of the input is not positive infinity. If a second argument is supplied the result is stored there. If the type of that array is a numeric type the result is represented as zeros and ones, if the type is boolean then as False and True. The return value `out` is then a reference to that array. See Also -------- isinf, isneginf, isfinite, isnan Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Errors result if the second argument is also supplied when `x` is a scalar input, or if first and second arguments have different shapes. Examples -------- >>> np.isposinf(np.PINF) array(True, dtype=bool) >>> np.isposinf(np.inf) array(True, dtype=bool) >>> np.isposinf(np.NINF) array(False, dtype=bool) >>> np.isposinf([-np.inf, 0., np.inf]) array([False, False, True]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) >>> np.isposinf(x, y) array([0, 0, 1]) >>> y array([0, 0, 1]) """ return nx.logical_and(nx.isinf(x), ~nx.signbit(x), out) @_deprecate_out_named_y def isneginf(x, out=None): """ Test element-wise for negative infinity, return result as bool array. Parameters ---------- x : array_like The input array. 
out : array_like, optional A boolean array with the same shape and type as `x` to store the result. Returns ------- out : ndarray A boolean array with the same dimensions as the input. If second argument is not supplied then a numpy boolean array is returned with values True where the corresponding element of the input is negative infinity and values False where the element of the input is not negative infinity. If a second argument is supplied the result is stored there. If the type of that array is a numeric type the result is represented as zeros and ones, if the type is boolean then as False and True. The return value `out` is then a reference to that array. See Also -------- isinf, isposinf, isnan, isfinite Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Errors result if the second argument is also supplied when x is a scalar input, or if first and second arguments have different shapes. Examples -------- >>> np.isneginf(np.NINF) array(True, dtype=bool) >>> np.isneginf(np.inf) array(False, dtype=bool) >>> np.isneginf(np.PINF) array(False, dtype=bool) >>> np.isneginf([-np.inf, 0., np.inf]) array([ True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) >>> np.isneginf(x, y) array([1, 0, 0]) >>> y array([1, 0, 0]) """ return nx.logical_and(nx.isinf(x), nx.signbit(x), out)
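# Illustrative sketch (added; hypothetical session) of the `y` -> `out`
# renaming handled by _deprecate_out_named_y above: passing the old keyword
# still works but emits a DeprecationWarning before delegating to `out`.
#
# >>> out = np.zeros(3)
# >>> np.fix([2.1, 2.9, -2.1], y=out)  # doctest: +SKIP
# array([ 2.,  2., -2.])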
5,714
27.152709
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/nanfunctions.py
""" Functions that ignore NaN. Functions --------- - `nanmin` -- minimum non-NaN value - `nanmax` -- maximum non-NaN value - `nanargmin` -- index of minimum non-NaN value - `nanargmax` -- index of maximum non-NaN value - `nansum` -- sum of non-NaN values - `nanprod` -- product of non-NaN values - `nancumsum` -- cumulative sum of non-NaN values - `nancumprod` -- cumulative product of non-NaN values - `nanmean` -- mean of non-NaN values - `nanvar` -- variance of non-NaN values - `nanstd` -- standard deviation of non-NaN values - `nanmedian` -- median of non-NaN values - `nanpercentile` -- qth percentile of non-NaN values """ from __future__ import division, absolute_import, print_function import warnings import numpy as np from numpy.lib.function_base import _ureduce as _ureduce __all__ = [ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', 'nancumsum', 'nancumprod' ] def _replace_nan(a, val): """ If `a` is of inexact type, make a copy of `a`, replace NaNs with the `val` value, and return the copy together with a boolean mask marking the locations where NaNs were present. If `a` is not of inexact type, do nothing and return `a` together with a mask of None. Note that scalars will end up as array scalars, which is important for using the result as the value of the out argument in some operations. Parameters ---------- a : array-like Input array. val : float NaN values are set to val before doing the operation. Returns ------- y : ndarray If `a` is of inexact type, return a copy of `a` with the NaNs replaced by the fill value, otherwise return `a`. mask: {bool, None} If `a` is of inexact type, return a boolean mask marking locations of NaNs, otherwise return None. """ a = np.array(a, subok=True, copy=True) if a.dtype == np.object_: # object arrays do not support `isnan` (gh-9009), so make a guess mask = a != a elif issubclass(a.dtype.type, np.inexact): mask = np.isnan(a) else: mask = None if mask is not None: np.copyto(a, val, where=mask) return a, mask def _copyto(a, val, mask): """ Replace values in `a` with NaN where `mask` is True. This differs from copyto in that it will deal with the case where `a` is a numpy scalar. Parameters ---------- a : ndarray or numpy scalar Array or numpy scalar some of whose values are to be replaced by val. val : numpy scalar Value used a replacement. mask : ndarray, scalar Boolean array. Where True the corresponding element of `a` is replaced by `val`. Broadcasts. Returns ------- res : ndarray, scalar Array with elements replaced or scalar `val`. 
""" if isinstance(a, np.ndarray): np.copyto(a, val, where=mask, casting='unsafe') else: a = a.dtype.type(val) return a def _remove_nan_1d(arr1d, overwrite_input=False): """ Equivalent to arr1d[~arr1d.isnan()], but in a different order Presumably faster as it incurs fewer copies Parameters ---------- arr1d : ndarray Array to remove nans from overwrite_input : bool True if `arr1d` can be modified in place Returns ------- res : ndarray Array with nan elements removed overwrite_input : bool True if `res` can be modified in place, given the constraint on the input """ c = np.isnan(arr1d) s = np.nonzero(c)[0] if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4) return arr1d[:0], True elif s.size == 0: return arr1d, overwrite_input else: if not overwrite_input: arr1d = arr1d.copy() # select non-nans at end of array enonan = arr1d[-s.size:][~c[-s.size:]] # fill nans in beginning of array with non-nans of end arr1d[s[:enonan.size]] = enonan return arr1d[:-s.size], True def _divide_by_count(a, b, out=None): """ Compute a/b ignoring invalid results. If `a` is an array the division is done in place. If `a` is a scalar, then its type is preserved in the output. If out is None, then then a is used instead so that the division is in place. Note that this is only called with `a` an inexact type. Parameters ---------- a : {ndarray, numpy scalar} Numerator. Expected to be of inexact type but not checked. b : {ndarray, numpy scalar} Denominator. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. Returns ------- ret : {ndarray, numpy scalar} The return value is a/b. If `a` was an ndarray the division is done in place. If `a` is a numpy scalar, the division preserves its type. """ with np.errstate(invalid='ignore', divide='ignore'): if isinstance(a, np.ndarray): if out is None: return np.divide(a, b, out=a, casting='unsafe') else: return np.divide(a, b, out=out, casting='unsafe') else: if out is None: return a.dtype.type(a / b) else: # This is questionable, but currently a numpy scalar can # be output to a zero dimensional array. return np.divide(a, b, out=out, casting='unsafe') def nanmin(a, axis=None, out=None, keepdims=np._NoValue): """ Return minimum of an array or minimum along an axis, ignoring any NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is raised and Nan is returned for that slice. Parameters ---------- a : array_like Array containing numbers whose minimum is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the minimum is computed. The default is to compute the minimum of the flattened array. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If the value is anything but the default, then `keepdims` will be passed through to the `min` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. .. 
versionadded:: 1.8.0 Returns ------- nanmin : ndarray An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d array, or if axis is None, an ndarray scalar is returned. The same dtype as `a` is returned. See Also -------- nanmax : The maximum value of an array along a given axis, ignoring any NaNs. amin : The minimum value of an array along a given axis, propagating any NaNs. fmin : Element-wise minimum of two arrays, ignoring any NaNs. minimum : Element-wise minimum of two arrays, propagating any NaNs. isnan : Shows which elements are Not a Number (NaN). isfinite: Shows which elements are neither NaN nor infinity. amax, fmax, maximum Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Positive infinity is treated as a very large number and negative infinity is treated as a very small (i.e. negative) number. If the input has a integer type the function is equivalent to np.min. Examples -------- >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmin(a) 1.0 >>> np.nanmin(a, axis=0) array([ 1., 2.]) >>> np.nanmin(a, axis=1) array([ 1., 3.]) When positive infinity and negative infinity are present: >>> np.nanmin([1, 2, np.nan, np.inf]) 1.0 >>> np.nanmin([1, 2, np.nan, np.NINF]) -inf """ kwargs = {} if keepdims is not np._NoValue: kwargs['keepdims'] = keepdims if type(a) is np.ndarray and a.dtype != np.object_: # Fast, but not safe for subclasses of ndarray, or object arrays, # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) if np.isnan(res).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) else: # Slow, but safe for subclasses of ndarray a, mask = _replace_nan(a, +np.inf) res = np.amin(a, axis=axis, out=out, **kwargs) if mask is None: return res # Check for all-NaN axis mask = np.all(mask, axis=axis, **kwargs) if np.any(mask): res = _copyto(res, np.nan, mask) warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2) return res def nanmax(a, axis=None, out=None, keepdims=np._NoValue): """ Return the maximum of an array or maximum along an axis, ignoring any NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is raised and NaN is returned for that slice. Parameters ---------- a : array_like Array containing numbers whose maximum is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the maximum is computed. The default is to compute the maximum of the flattened array. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If the value is anything but the default, then `keepdims` will be passed through to the `max` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. .. versionadded:: 1.8.0 Returns ------- nanmax : ndarray An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d array, or if axis is None, an ndarray scalar is returned. The same dtype as `a` is returned. 
See Also -------- nanmin : The minimum value of an array along a given axis, ignoring any NaNs. amax : The maximum value of an array along a given axis, propagating any NaNs. fmax : Element-wise maximum of two arrays, ignoring any NaNs. maximum : Element-wise maximum of two arrays, propagating any NaNs. isnan : Shows which elements are Not a Number (NaN). isfinite: Shows which elements are neither NaN nor infinity. amin, fmin, minimum Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Positive infinity is treated as a very large number and negative infinity is treated as a very small (i.e. negative) number. If the input has a integer type the function is equivalent to np.max. Examples -------- >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmax(a) 3.0 >>> np.nanmax(a, axis=0) array([ 3., 2.]) >>> np.nanmax(a, axis=1) array([ 2., 3.]) When positive infinity and negative infinity are present: >>> np.nanmax([1, 2, np.nan, np.NINF]) 2.0 >>> np.nanmax([1, 2, np.nan, np.inf]) inf """ kwargs = {} if keepdims is not np._NoValue: kwargs['keepdims'] = keepdims if type(a) is np.ndarray and a.dtype != np.object_: # Fast, but not safe for subclasses of ndarray, or object arrays, # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) if np.isnan(res).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) else: # Slow, but safe for subclasses of ndarray a, mask = _replace_nan(a, -np.inf) res = np.amax(a, axis=axis, out=out, **kwargs) if mask is None: return res # Check for all-NaN axis mask = np.all(mask, axis=axis, **kwargs) if np.any(mask): res = _copyto(res, np.nan, mask) warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2) return res def nanargmin(a, axis=None): """ Return the indices of the minimum values in the specified axis ignoring NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results cannot be trusted if a slice contains only NaNs and Infs. Parameters ---------- a : array_like Input data. axis : int, optional Axis along which to operate. By default flattened input is used. Returns ------- index_array : ndarray An array of indices or a single index value. See Also -------- argmin, nanargmax Examples -------- >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmin(a) 0 >>> np.nanargmin(a) 2 >>> np.nanargmin(a, axis=0) array([1, 1]) >>> np.nanargmin(a, axis=1) array([1, 0]) """ a, mask = _replace_nan(a, np.inf) res = np.argmin(a, axis=axis) if mask is not None: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") return res def nanargmax(a, axis=None): """ Return the indices of the maximum values in the specified axis ignoring NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results cannot be trusted if a slice contains only NaNs and -Infs. Parameters ---------- a : array_like Input data. axis : int, optional Axis along which to operate. By default flattened input is used. Returns ------- index_array : ndarray An array of indices or a single index value. 
See Also -------- argmax, nanargmin Examples -------- >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmax(a) 0 >>> np.nanargmax(a) 1 >>> np.nanargmax(a, axis=0) array([1, 0]) >>> np.nanargmax(a, axis=1) array([1, 1]) """ a, mask = _replace_nan(a, -np.inf) res = np.argmax(a, axis=axis) if mask is not None: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") return res def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. In NumPy versions <= 1.8.0 Nan is returned for slices that are all-NaN or empty. In later versions zero is returned. Parameters ---------- a : array_like Array containing numbers whose sum is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the sum is computed. The default is to compute the sum of the flattened array. dtype : data-type, optional The type of the returned array and of the accumulator in which the elements are summed. By default, the dtype of `a` is used. An exception is when `a` has an integer type with less precision than the platform (u)intp. In that case, the default will be either (u)int32 or (u)int64 depending on whether the platform is 32 or 64 bits. For inexact inputs, dtype must be inexact. .. versionadded:: 1.8.0 out : ndarray, optional Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. The casting of NaN to integer can yield unexpected results. .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If the value is anything but the default, then `keepdims` will be passed through to the `mean` or `sum` methods of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. .. versionadded:: 1.8.0 Returns ------- nansum : ndarray. A new array holding the result is returned unless `out` is specified, in which it is returned. The result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array. See Also -------- numpy.sum : Sum across array propagating NaNs. isnan : Show which elements are NaN. isfinite: Show which elements are not NaN or +/-inf. Notes ----- If both positive and negative infinity are present, the sum will be Not A Number (NaN). Examples -------- >>> np.nansum(1) 1 >>> np.nansum([1]) 1 >>> np.nansum([1, np.nan]) 1.0 >>> a = np.array([[1, 1], [1, np.nan]]) >>> np.nansum(a) 3.0 >>> np.nansum(a, axis=0) array([ 2., 1.]) >>> np.nansum([1, np.nan, np.inf]) inf >>> np.nansum([1, np.nan, np.NINF]) -inf >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present nan """ a, mask = _replace_nan(a, 0) return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of array elements over a given axis treating Not a Numbers (NaNs) as ones. One is returned for slices that are all-NaN or empty. .. versionadded:: 1.10.0 Parameters ---------- a : array_like Array containing numbers whose product is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the product is computed. 
The default is to compute the product of the flattened array. dtype : data-type, optional The type of the returned array and of the accumulator in which the elements are summed. By default, the dtype of `a` is used. An exception is when `a` has an integer type with less precision than the platform (u)intp. In that case, the default will be either (u)int32 or (u)int64 depending on whether the platform is 32 or 64 bits. For inexact inputs, dtype must be inexact. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. The casting of NaN to integer can yield unexpected results. keepdims : bool, optional If True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- nanprod : ndarray A new array holding the result is returned unless `out` is specified, in which case it is returned. See Also -------- numpy.prod : Product across array propagating NaNs. isnan : Show which elements are NaN. Examples -------- >>> np.nanprod(1) 1 >>> np.nanprod([1]) 1 >>> np.nanprod([1, np.nan]) 1.0 >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanprod(a) 6.0 >>> np.nanprod(a, axis=0) array([ 3., 2.]) """ a, mask = _replace_nan(a, 1) return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nancumsum(a, axis=None, dtype=None, out=None): """ Return the cumulative sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are encountered and leading NaNs are replaced by zeros. Zeros are returned for slices that are all-NaN or empty. .. versionadded:: 1.12.0 Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. See `doc.ufuncs` (Section "Output arguments") for more details. Returns ------- nancumsum : ndarray. A new array holding the result is returned unless `out` is specified, in which it is returned. The result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array. See Also -------- numpy.cumsum : Cumulative sum across array propagating NaNs. isnan : Show which elements are NaN. Examples -------- >>> np.nancumsum(1) array([1]) >>> np.nancumsum([1]) array([1]) >>> np.nancumsum([1, np.nan]) array([ 1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumsum(a) array([ 1., 3., 6., 6.]) >>> np.nancumsum(a, axis=0) array([[ 1., 2.], [ 4., 2.]]) >>> np.nancumsum(a, axis=1) array([[ 1., 3.], [ 3., 3.]]) """ a, mask = _replace_nan(a, 0) return np.cumsum(a, axis=axis, dtype=dtype, out=out) def nancumprod(a, axis=None, dtype=None, out=None): """ Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one. 
The cumulative product does not change when NaNs are encountered and leading NaNs are replaced by ones. Ones are returned for slices that are all-NaN or empty. .. versionadded:: 1.12.0 Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- nancumprod : ndarray A new array holding the result is returned unless `out` is specified, in which case it is returned. See Also -------- numpy.cumprod : Cumulative product across array propagating NaNs. isnan : Show which elements are NaN. Examples -------- >>> np.nancumprod(1) array([1]) >>> np.nancumprod([1]) array([1]) >>> np.nancumprod([1, np.nan]) array([ 1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumprod(a) array([ 1., 2., 6., 6.]) >>> np.nancumprod(a, axis=0) array([[ 1., 2.], [ 3., 2.]]) >>> np.nancumprod(a, axis=1) array([[ 1., 2.], [ 3., 3.]]) """ a, mask = _replace_nan(a, 1) return np.cumprod(a, axis=axis, dtype=dtype, out=out) def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Compute the arithmetic mean along the specified axis, ignoring NaNs. Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. `float64` intermediate and return values are used for integer inputs. For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. .. versionadded:: 1.8.0 Parameters ---------- a : array_like Array containing numbers whose mean is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the means are computed. The default is to compute the mean of the flattened array. dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for inexact inputs, it is the same as the input dtype. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If the value is anything but the default, then `keepdims` will be passed through to the `mean` or `sum` methods of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. Returns ------- m : ndarray, see dtype parameter above If `out=None`, returns a new array containing the mean values, otherwise a reference to the output array is returned. Nan is returned for slices that contain only NaNs. 
See Also -------- average : Weighted average mean : Arithmetic mean taken while not ignoring NaNs var, nanvar Notes ----- The arithmetic mean is the sum of the non-NaN elements along the axis divided by the number of non-NaN elements. Note that for floating-point input, the mean is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for `float32`. Specifying a higher-precision accumulator using the `dtype` keyword can alleviate this issue. Examples -------- >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanmean(a) 2.6666666666666665 >>> np.nanmean(a, axis=0) array([ 2., 4.]) >>> np.nanmean(a, axis=1) array([ 1., 3.5]) """ arr, mask = _replace_nan(a, 0) if mask is None: return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) if dtype is not None: dtype = np.dtype(dtype) if dtype is not None and not issubclass(dtype.type, np.inexact): raise TypeError("If a is inexact, then dtype must be inexact") if out is not None and not issubclass(out.dtype.type, np.inexact): raise TypeError("If a is inexact, then out must be inexact") cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) avg = _divide_by_count(tot, cnt, out=out) isbad = (cnt == 0) if isbad.any(): warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # NaN is the only possible bad value, so no further # action is needed to handle bad results. return avg def _nanmedian1d(arr1d, overwrite_input=False): """ Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ arr1d, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input) if arr1d.size == 0: return np.nan return np.median(arr1d, overwrite_input=overwrite_input) def _nanmedian(a, axis=None, out=None, overwrite_input=False): """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce See nanmedian for parameter usage """ if axis is None or a.ndim == 1: part = a.ravel() if out is None: return _nanmedian1d(part, overwrite_input) else: out[...] = _nanmedian1d(part, overwrite_input) return out else: # for small medians use sort + indexing which is still faster than # apply_along_axis # benchmarked with shuffled (50, 50, x) containing a few NaN if a.shape[axis] < 600: return _nanmedian_small(a, axis, out, overwrite_input) result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) if out is not None: out[...] = result return result def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): """ sort + indexing median, faster for small medians along multiple dimensions due to the high overhead of apply_along_axis see nanmedian for parameter usage """ a = np.ma.masked_array(a, np.isnan(a)) m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) for i in range(np.count_nonzero(m.mask.ravel())): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3) if out is not None: out[...] = m.filled(np.nan) return out return m.filled(np.nan) def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): """ Compute the median along the specified axis, while ignoring NaNs. Returns the median of the array elements. .. versionadded:: 1.9.0 Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. 
The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If this is anything but the default value it will be passed through (in the special case of an empty array) to the `mean` function of the underlying array. If the array is a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, median, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a array([[ 10., nan, 4.], [ 3., 2., 1.]]) >>> np.median(a) nan >>> np.nanmedian(a) 3.0 >>> np.nanmedian(a, axis=0) array([ 6.5, 2., 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> b = a.copy() >>> np.nanmedian(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.nanmedian(b, axis=None, overwrite_input=True) 3.0 >>> assert not np.all(a==b) """ a = np.asanyarray(a) # apply_along_axis in _nanmedian doesn't handle empty arrays well, # so deal them upfront if a.size == 0: return np.nanmean(a, axis, out=out, keepdims=keepdims) r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims and keepdims is not np._NoValue: return r.reshape(k) else: return r def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=np._NoValue): """ Compute the qth percentile of the data along the specified axis, while ignoring nan values. Returns the qth percentile(s) of the array elements. .. versionadded:: 1.9.0 Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. axis : {int, sequence of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. 
It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `percentile`. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the contents of the input `a` after this function completes -- treat it as undefined. Default is False. If `a` is not already an array, this parameter will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. If this is anything but the default value it will be passed through (in the special case of an empty array) to the `mean` function of the underlying array. If the array is a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. Returns ------- percentile : scalar or ndarray If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- nanmean, nanmedian, percentile, median, mean Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. Examples -------- >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a array([[ 10., nan, 4.], [ 3., 2., 1.]]) >>> np.percentile(a, 50) nan >>> np.nanpercentile(a, 50) 3.5 >>> np.nanpercentile(a, 50, axis=0) array([ 6.5, 2., 2.5]) >>> np.nanpercentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.nanpercentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.nanpercentile(a, 50, axis=0, out=out) array([ 6.5, 2., 2.5]) >>> m array([ 6.5, 2. 
, 2.5]) >>> b = a.copy() >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) """ a = np.asanyarray(a) q = np.asanyarray(q) # apply_along_axis in _nanpercentile doesn't handle empty arrays well, # so deal them upfront if a.size == 0: return np.nanmean(a, axis, out=out, keepdims=keepdims) r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims and keepdims is not np._NoValue: return r.reshape(q.shape + k) else: return r def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear'): """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce See nanpercentile for parameter usage """ if axis is None or a.ndim == 1: part = a.ravel() result = _nanpercentile1d(part, q, overwrite_input, interpolation) else: result = np.apply_along_axis(_nanpercentile1d, axis, a, q, overwrite_input, interpolation) # apply_along_axis fills in collapsed axis with results. # Move that axis to the beginning to match percentile's # convention. if q.ndim != 0: result = np.moveaxis(result, axis, 0) if out is not None: out[...] = result return result def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'): """ Private function for rank 1 arrays. Compute percentile ignoring NaNs. See nanpercentile for parameter usage """ arr1d, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input) if arr1d.size == 0: return np.full(q.shape, np.nan)[()] # convert to scalar return np.percentile(arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Compute the variance along the specified axis, while ignoring NaNs. Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. .. versionadded:: 1.8.0 Parameters ---------- a : array_like Array containing numbers whose variance is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the variance is computed. The default is to compute the variance of the flattened array. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type the default is `float32`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output, but the type is cast if necessary. ddof : int, optional "Delta Degrees of Freedom": the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of non-NaN elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. Returns ------- variance : ndarray, see dtype parameter above If `out` is None, return a new array containing the variance, otherwise return a reference to the output array. If ddof is >= the number of non-NaN elements in a slice or the slice contains only NaNs, then the result for that slice is NaN. 
See Also -------- std : Standard deviation mean : Average var : Variance while not ignoring NaNs nanstd, nanmean numpy.doc.ufuncs : Section "Output arguments" Notes ----- The variance is the average of the squared deviations from the mean, i.e., ``var = mean(abs(x - x.mean())**2)``. The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` provides an unbiased estimator of the variance of a hypothetical infinite population. ``ddof=0`` provides a maximum likelihood estimate of the variance for normally distributed variables. Note that for complex numbers, the absolute value is taken before squaring, so that the result is always real and nonnegative. For floating-point input, the variance is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for `float32` (see example below). Specifying a higher-accuracy accumulator using the ``dtype`` keyword can alleviate this issue. For this function to work on sub-classes of ndarray, they must define `sum` with the kwarg `keepdims` Examples -------- >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.var(a) 1.5555555555555554 >>> np.nanvar(a, axis=0) array([ 1., 0.]) >>> np.nanvar(a, axis=1) array([ 0., 0.25]) """ arr, mask = _replace_nan(a, 0) if mask is None: return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) if dtype is not None: dtype = np.dtype(dtype) if dtype is not None and not issubclass(dtype.type, np.inexact): raise TypeError("If a is inexact, then dtype must be inexact") if out is not None and not issubclass(out.dtype.type, np.inexact): raise TypeError("If a is inexact, then out must be inexact") # Compute mean if type(arr) is np.matrix: _keepdims = np._NoValue else: _keepdims = True # we need to special case matrix for reverse compatibility # in order for this to work, these sums need to be called with # keepdims=True, however matrix now raises an error in this case, but # the reason that it drops the keepdims kwarg is to force keepdims=True # so this used to work by serendipity. cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims) avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims) avg = _divide_by_count(avg, cnt) # Compute squared deviation from mean. np.subtract(arr, avg, out=arr, casting='unsafe') arr = _copyto(arr, 0, mask) if issubclass(arr.dtype.type, np.complexfloating): sqr = np.multiply(arr, arr.conj(), out=arr).real else: sqr = np.multiply(arr, arr, out=arr) # Compute variance. var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) if var.ndim < cnt.ndim: # Subclasses of ndarray may ignore keepdims, so check here. cnt = cnt.squeeze(axis) dof = cnt - ddof var = _divide_by_count(var, dof) isbad = (dof <= 0) if np.any(isbad): warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2) # NaN, inf, or negative numbers are all possible bad # values, so explicitly replace them with NaN. var = _copyto(var, np.nan, isbad) return var def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Compute the standard deviation along the specified axis, while ignoring NaNs. Returns the standard deviation, a measure of the spread of a distribution, of the non-NaN array elements. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. 
For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. .. versionadded:: 1.8.0 Parameters ---------- a : array_like Calculate the standard deviation of the non-NaN values. axis : int, optional Axis along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. dtype : dtype, optional Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is the same as the array type. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated values) will be cast if necessary. ddof : int, optional Means Delta Degrees of Freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of non-NaN elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. If this value is anything but the default it is passed through as-is to the relevant functions of the sub-classes. If these functions do not have a `keepdims` kwarg, a RuntimeError will be raised. Returns ------- standard_deviation : ndarray, see dtype parameter above. If `out` is None, return a new array containing the standard deviation, otherwise return a reference to the output array. If ddof is >= the number of non-NaN elements in a slice or the slice contains only NaNs, then the result for that slice is NaN. See Also -------- var, mean, std nanvar, nanmean numpy.doc.ufuncs : Section "Output arguments" Notes ----- The standard deviation is the square root of the average of the squared deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. The average squared deviation is normally calculated as ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` provides an unbiased estimator of the variance of the infinite population. ``ddof=0`` provides a maximum likelihood estimate of the variance for normally distributed variables. The standard deviation computed in this function is the square root of the estimated variance, so even with ``ddof=1``, it will not be an unbiased estimate of the standard deviation per se. Note that, for complex numbers, `std` takes the absolute value before squaring, so that the result is always real and nonnegative. For floating-point input, the *std* is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for float32 (see example below). Specifying a higher-accuracy accumulator using the `dtype` keyword can alleviate this issue. Examples -------- >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanstd(a) 1.247219128924647 >>> np.nanstd(a, axis=0) array([ 1., 0.]) >>> np.nanstd(a, axis=1) array([ 0., 0.5]) """ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) if isinstance(var, np.ndarray): std = np.sqrt(var, out=var) else: std = var.dtype.type(np.sqrt(var)) return std
file_length: 50854
avg_line_length: 34.315972
max_line_length: 89
extension_type: py
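A minimal sketch (1-D input only, public numpy calls only — not the library implementation in the file above) of the counting trick the nanfunctions rely on: zero out the NaNs so they drop out of the sums, count the surviving elements, and divide by ``count - ddof``.

import numpy as np

def nanvar_sketch(a, ddof=0):
    # Illustrative 1-D re-derivation of the nanvar recipe, not the real code.
    a = np.asarray(a, dtype=float)
    mask = np.isnan(a)
    arr = np.where(mask, 0.0, a)        # zeroed NaNs contribute nothing to sums
    cnt = (~mask).sum()                 # number of valid (non-NaN) entries
    avg = arr.sum() / cnt               # mean over the valid entries only
    sqr = np.where(mask, 0.0, (arr - avg) ** 2)
    return sqr.sum() / (cnt - ddof)     # N - ddof degrees of freedom

a = np.array([1.0, np.nan, 3.0, 4.0])
print(nanvar_sketch(a))                 # 1.5555..., matches np.nanvar(a)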
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/scimath.py
""" Wrapper functions to more user-friendly calling of certain math functions whose output data-type is different than the input data-type in certain domains of the input. For example, for functions like `log` with branch cuts, the versions in this module provide the mathematically valid answers in the complex plane:: >>> import math >>> from numpy.lib import scimath >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) True Similarly, `sqrt`, other base logarithms, `power` and trig functions are correctly handled. See their respective docstrings for specific examples. """ from __future__ import division, absolute_import, print_function import numpy.core.numeric as nx import numpy.core.numerictypes as nt from numpy.core.numeric import asarray, any from numpy.lib.type_check import isreal __all__ = [ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', 'arctanh' ] _ln2 = nx.log(2.0) def _tocomplex(arr): """Convert its input `arr` to a complex array. The input is returned as a complex array of the smallest type that will fit the original data: types like single, byte, short, etc. become csingle, while others become cdouble. A copy of the input is always made. Parameters ---------- arr : array Returns ------- array An array with the same input data as the input but in complex form. Examples -------- First, consider an input of type short: >>> a = np.array([1,2,3],np.short) >>> ac = np.lib.scimath._tocomplex(a); ac array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> ac.dtype dtype('complex64') If the input is of type double, the output is correspondingly of the complex double type as well: >>> b = np.array([1,2,3],np.double) >>> bc = np.lib.scimath._tocomplex(b); bc array([ 1.+0.j, 2.+0.j, 3.+0.j]) >>> bc.dtype dtype('complex128') Note that even if the input was complex to begin with, a copy is still made, since the astype() method always copies: >>> c = np.array([1,2,3],np.csingle) >>> cc = np.lib.scimath._tocomplex(c); cc array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> c *= 2; c array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) >>> cc array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) """ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, nt.ushort, nt.csingle)): return arr.astype(nt.csingle) else: return arr.astype(nt.cdouble) def _fix_real_lt_zero(x): """Convert `x` to complex if it has real, negative components. Otherwise, output is just the array version of the input (via asarray). Parameters ---------- x : array_like Returns ------- array Examples -------- >>> np.lib.scimath._fix_real_lt_zero([1,2]) array([1, 2]) >>> np.lib.scimath._fix_real_lt_zero([-1,2]) array([-1.+0.j, 2.+0.j]) """ x = asarray(x) if any(isreal(x) & (x < 0)): x = _tocomplex(x) return x def _fix_int_lt_zero(x): """Convert `x` to double if it has real, negative components. Otherwise, output is just the array version of the input (via asarray). Parameters ---------- x : array_like Returns ------- array Examples -------- >>> np.lib.scimath._fix_int_lt_zero([1,2]) array([1, 2]) >>> np.lib.scimath._fix_int_lt_zero([-1,2]) array([-1., 2.]) """ x = asarray(x) if any(isreal(x) & (x < 0)): x = x * 1.0 return x def _fix_real_abs_gt_1(x): """Convert `x` to complex if it has real components x_i with abs(x_i)>1. Otherwise, output is just the array version of the input (via asarray). 
Parameters ---------- x : array_like Returns ------- array Examples -------- >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) array([0, 1]) >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) array([ 0.+0.j, 2.+0.j]) """ x = asarray(x) if any(isreal(x) & (abs(x) > 1)): x = _tocomplex(x) return x def sqrt(x): """ Compute the square root of x. For negative input elements, a complex value is returned (unlike `numpy.sqrt` which returns NaN). Parameters ---------- x : array_like The input value(s). Returns ------- out : ndarray or scalar The square root of `x`. If `x` was a scalar, so is `out`, otherwise an array is returned. See Also -------- numpy.sqrt Examples -------- For real, non-negative inputs this works just like `numpy.sqrt`: >>> np.lib.scimath.sqrt(1) 1.0 >>> np.lib.scimath.sqrt([1, 4]) array([ 1., 2.]) But it automatically handles negative inputs: >>> np.lib.scimath.sqrt(-1) (0.0+1.0j) >>> np.lib.scimath.sqrt([-1,4]) array([ 0.+1.j, 2.+0.j]) """ x = _fix_real_lt_zero(x) return nx.sqrt(x) def log(x): """ Compute the natural logarithm of `x`. Return the "principal value" (for a description of this, see `numpy.log`) of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the complex principle value is returned. Parameters ---------- x : array_like The value(s) whose log is (are) required. Returns ------- out : ndarray or scalar The log of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array is returned. See Also -------- numpy.log Notes ----- For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` (note, however, that otherwise `numpy.log` and this `log` are identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, notably, the complex principle value if ``x.imag != 0``). Examples -------- >>> np.emath.log(np.exp(1)) 1.0 Negative arguments are handled "correctly" (recall that ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) True """ x = _fix_real_lt_zero(x) return nx.log(x) def log10(x): """ Compute the logarithm base 10 of `x`. Return the "principal value" (for a description of this, see `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` returns ``inf``). Otherwise, the complex principle value is returned. Parameters ---------- x : array_like or scalar The value(s) whose log base 10 is (are) required. Returns ------- out : ndarray or scalar The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array object is returned. See Also -------- numpy.log10 Notes ----- For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` (note, however, that otherwise `numpy.log10` and this `log10` are identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, notably, the complex principle value if ``x.imag != 0``). Examples -------- (We set the printing precision so the example can be auto-tested) >>> np.set_printoptions(precision=4) >>> np.emath.log10(10**1) 1.0 >>> np.emath.log10([-10**1, -10**2, 10**2]) array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) """ x = _fix_real_lt_zero(x) return nx.log10(x) def logn(n, x): """ Take log base n of x. If `x` contains negative inputs, the answer is computed and returned in the complex domain. Parameters ---------- n : int The base in which the log is taken. x : array_like The value(s) whose log base `n` is (are) required. 
Returns ------- out : ndarray or scalar The log base `n` of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array is returned. Examples -------- >>> np.set_printoptions(precision=4) >>> np.lib.scimath.logn(2, [4, 8]) array([ 2., 3.]) >>> np.lib.scimath.logn(2, [-4, -8, 8]) array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) n = _fix_real_lt_zero(n) return nx.log(x)/nx.log(n) def log2(x): """ Compute the logarithm base 2 of `x`. Return the "principal value" (for a description of this, see `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns ``inf``). Otherwise, the complex principle value is returned. Parameters ---------- x : array_like The value(s) whose log base 2 is (are) required. Returns ------- out : ndarray or scalar The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array is returned. See Also -------- numpy.log2 Notes ----- For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` (note, however, that otherwise `numpy.log2` and this `log2` are identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, notably, the complex principle value if ``x.imag != 0``). Examples -------- We set the printing precision so the example can be auto-tested: >>> np.set_printoptions(precision=4) >>> np.emath.log2(8) 3.0 >>> np.emath.log2([-4, -8, 8]) array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) return nx.log2(x) def power(x, p): """ Return x to the power p, (x**p). If `x` contains negative values, the output is converted to the complex domain. Parameters ---------- x : array_like The input value(s). p : array_like of ints The power(s) to which `x` is raised. If `x` contains multiple values, `p` has to either be a scalar, or contain the same number of values as `x`. In the latter case, the result is ``x[0]**p[0], x[1]**p[1], ...``. Returns ------- out : ndarray or scalar The result of ``x**p``. If `x` and `p` are scalars, so is `out`, otherwise an array is returned. See Also -------- numpy.power Examples -------- >>> np.set_printoptions(precision=4) >>> np.lib.scimath.power([2, 4], 2) array([ 4, 16]) >>> np.lib.scimath.power([2, 4], -2) array([ 0.25 , 0.0625]) >>> np.lib.scimath.power([-2, 4], 2) array([ 4.+0.j, 16.+0.j]) """ x = _fix_real_lt_zero(x) p = _fix_int_lt_zero(p) return nx.power(x, p) def arccos(x): """ Compute the inverse cosine of x. Return the "principal value" (for a description of this, see `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that `abs(x) <= 1`, this is a real number in the closed interval :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. Parameters ---------- x : array_like or scalar The value(s) whose arccos is (are) required. Returns ------- out : ndarray or scalar The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array object is returned. See Also -------- numpy.arccos Notes ----- For an arccos() that returns ``NAN`` when real `x` is not in the interval ``[-1,1]``, use `numpy.arccos`. Examples -------- >>> np.set_printoptions(precision=4) >>> np.emath.arccos(1) # a scalar is returned 0.0 >>> np.emath.arccos([1,2]) array([ 0.-0.j , 0.+1.317j]) """ x = _fix_real_abs_gt_1(x) return nx.arccos(x) def arcsin(x): """ Compute the inverse sine of x. Return the "principal value" (for a description of this, see `numpy.arcsin`) of the inverse sine of `x`. 
For real `x` such that `abs(x) <= 1`, this is a real number in the closed interval :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is returned. Parameters ---------- x : array_like or scalar The value(s) whose arcsin is (are) required. Returns ------- out : ndarray or scalar The inverse sine(s) of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array object is returned. See Also -------- numpy.arcsin Notes ----- For an arcsin() that returns ``NAN`` when real `x` is not in the interval ``[-1,1]``, use `numpy.arcsin`. Examples -------- >>> np.set_printoptions(precision=4) >>> np.emath.arcsin(0) 0.0 >>> np.emath.arcsin([0,1]) array([ 0. , 1.5708]) """ x = _fix_real_abs_gt_1(x) return nx.arcsin(x) def arctanh(x): """ Compute the inverse hyperbolic tangent of `x`. Return the "principal value" (for a description of this, see `numpy.arctanh`) of `arctanh(x)`. For real `x` such that `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is complex, the result is complex. Finally, `x = 1` returns``inf`` and `x=-1` returns ``-inf``. Parameters ---------- x : array_like The value(s) whose arctanh is (are) required. Returns ------- out : ndarray or scalar The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was a scalar so is `out`, otherwise an array is returned. See Also -------- numpy.arctanh Notes ----- For an arctanh() that returns ``NAN`` when real `x` is not in the interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does return +/-inf for `x = +/-1`). Examples -------- >>> np.set_printoptions(precision=4) >>> np.emath.arctanh(np.matrix(np.eye(2))) array([[ Inf, 0.], [ 0., Inf]]) >>> np.emath.arctanh([1j]) array([ 0.+0.7854j]) """ x = _fix_real_abs_gt_1(x) return nx.arctanh(x)
file_length: 14085
avg_line_length: 23.843034
max_line_length: 79
extension_type: py
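A short usage contrast showing the promotion rule the scimath wrappers above implement: real inputs with negative components are upcast to complex before the underlying ufunc runs. np.emath is the documented alias for np.lib.scimath.

import numpy as np

print(np.sqrt(-1.0))                          # nan (plus a RuntimeWarning)
print(np.emath.sqrt(-1.0))                    # 1j -- promoted to complex first
print(np.emath.log(-np.e) == 1 + np.pi * 1j)  # True, as in the log docstring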
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/utils.py
from __future__ import division, absolute_import, print_function import os import sys import types import re import warnings from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype from numpy.core import ndarray, ufunc, asarray import numpy as np # getargspec and formatargspec were removed in Python 3.6 from numpy.compat import getargspec, formatargspec __all__ = [ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds', 'safe_eval' ] def get_include(): """ Return the directory that contains the NumPy \\*.h header files. Extension modules that need to compile against NumPy should use this function to locate the appropriate include directory. Notes ----- When using ``distutils``, for example in ``setup.py``. :: import numpy as np ... Extension('extension_name', ... include_dirs=[np.get_include()]) ... """ import numpy if numpy.show_config is None: # running from numpy source directory d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') else: # using installed numpy core headers import numpy.core as core d = os.path.join(os.path.dirname(core.__file__), 'include') return d def _set_function_name(func, name): func.__name__ = name return func class _Deprecate(object): """ Decorator class to deprecate old functions. Refer to `deprecate` for details. See Also -------- deprecate """ def __init__(self, old_name=None, new_name=None, message=None): self.old_name = old_name self.new_name = new_name self.message = message def __call__(self, func, *args, **kwargs): """ Decorator call. Refer to ``decorate``. """ old_name = self.old_name new_name = self.new_name message = self.message import warnings if old_name is None: try: old_name = func.__name__ except AttributeError: old_name = func.__name__ if new_name is None: depdoc = "`%s` is deprecated!" % old_name else: depdoc = "`%s` is deprecated, use `%s` instead!" % \ (old_name, new_name) if message is not None: depdoc += "\n" + message def newfunc(*args,**kwds): """`arrayrange` is deprecated, use `arange` instead!""" warnings.warn(depdoc, DeprecationWarning, stacklevel=2) return func(*args, **kwds) newfunc = _set_function_name(newfunc, old_name) doc = func.__doc__ if doc is None: doc = depdoc else: doc = '\n\n'.join([depdoc, doc]) newfunc.__doc__ = doc try: d = func.__dict__ except AttributeError: pass else: newfunc.__dict__.update(d) return newfunc def deprecate(*args, **kwargs): """ Issues a DeprecationWarning, adds warning to `old_name`'s docstring, rebinds ``old_name.__name__`` and returns the new function object. This function may also be used as a decorator. Parameters ---------- func : function The function to be deprecated. old_name : str, optional The name of the function to be deprecated. Default is None, in which case the name of `func` is used. new_name : str, optional The new name for the function. Default is None, in which case the deprecation message is that `old_name` is deprecated. If given, the deprecation message is that `old_name` is deprecated and `new_name` should be used instead. message : str, optional Additional explanation of the deprecation. Displayed in the docstring after the warning. Returns ------- old_func : function The deprecated function. 
Examples -------- Note that ``olduint`` returns a value after printing Deprecation Warning: >>> olduint = np.deprecate(np.uint) >>> olduint(6) /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: DeprecationWarning: uint32 is deprecated warnings.warn(str1, DeprecationWarning, stacklevel=2) 6 """ # Deprecate may be run as a function or as a decorator # If run as a function, we initialise the decorator class # and execute its __call__ method. if args: fn = args[0] args = args[1:] # backward compatibility -- can be removed # after next release if 'newname' in kwargs: kwargs['new_name'] = kwargs.pop('newname') if 'oldname' in kwargs: kwargs['old_name'] = kwargs.pop('oldname') return _Deprecate(*args, **kwargs)(fn) else: return _Deprecate(*args, **kwargs) deprecate_with_doc = lambda msg: _Deprecate(message=msg) #-------------------------------------------- # Determine if two arrays can share memory #-------------------------------------------- def byte_bounds(a): """ Returns pointers to the end-points of an array. Parameters ---------- a : ndarray Input array. It must conform to the Python-side of the array interface. Returns ------- (low, high) : tuple of 2 integers The first integer is the first byte of the array, the second integer is just past the last byte of the array. If `a` is not contiguous it will not use every byte between the (`low`, `high`) values. Examples -------- >>> I = np.eye(2, dtype='f'); I.dtype dtype('float32') >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True >>> I = np.eye(2, dtype='G'); I.dtype dtype('complex192') >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True """ ai = a.__array_interface__ a_data = ai['data'][0] astrides = ai['strides'] ashape = ai['shape'] bytes_a = asarray(a).dtype.itemsize a_low = a_high = a_data if astrides is None: # contiguous case a_high += a.size * bytes_a else: for shape, stride in zip(ashape, astrides): if stride < 0: a_low += (shape-1)*stride else: a_high += (shape-1)*stride a_high += bytes_a return a_low, a_high #----------------------------------------------------------------------------- # Function for output and information on the variables used. #----------------------------------------------------------------------------- def who(vardict=None): """ Print the NumPy arrays in the given dictionary. If there is no dictionary passed in or `vardict` is None then returns NumPy arrays in the globals() dictionary (all NumPy arrays in the namespace). Parameters ---------- vardict : dict, optional A dictionary possibly containing ndarrays. Default is globals(). Returns ------- out : None Returns 'None'. Notes ----- Prints out the name, shape, bytes and type of all of the ndarrays present in `vardict`. Examples -------- >>> a = np.arange(10) >>> b = np.ones(20) >>> np.who() Name Shape Bytes Type =========================================================== a 10 40 int32 b 20 160 float64 Upper bound on total bytes = 200 >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', ... 
'idx':5} >>> np.who(d) Name Shape Bytes Type =========================================================== y 3 24 float64 x 2 16 float64 Upper bound on total bytes = 40 """ if vardict is None: frame = sys._getframe().f_back vardict = frame.f_globals sta = [] cache = {} for name in vardict.keys(): if isinstance(vardict[name], ndarray): var = vardict[name] idv = id(var) if idv in cache.keys(): namestr = name + " (%s)" % cache[idv] original = 0 else: cache[idv] = name namestr = name original = 1 shapestr = " x ".join(map(str, var.shape)) bytestr = str(var.nbytes) sta.append([namestr, shapestr, bytestr, var.dtype.name, original]) maxname = 0 maxshape = 0 maxbyte = 0 totalbytes = 0 for k in range(len(sta)): val = sta[k] if maxname < len(val[0]): maxname = len(val[0]) if maxshape < len(val[1]): maxshape = len(val[1]) if maxbyte < len(val[2]): maxbyte = len(val[2]) if val[4]: totalbytes += int(val[2]) if len(sta) > 0: sp1 = max(10, maxname) sp2 = max(10, maxshape) sp3 = max(10, maxbyte) prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') print(prval + "\n" + "="*(len(prval)+5) + "\n") for k in range(len(sta)): val = sta[k] print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), val[1], ' '*(sp2-len(val[1])+5), val[2], ' '*(sp3-len(val[2])+5), val[3])) print("\nUpper bound on total bytes = %d" % totalbytes) return #----------------------------------------------------------------------------- # NOTE: pydoc defines a help function which works similarly to this # except it uses a pager to take over the screen. # combine name and arguments and split to multiple lines of width # characters. End lines on a comma and begin argument list indented with # the rest of the arguments. def _split_line(name, arguments, width): firstwidth = len(name) k = firstwidth newstr = name sepstr = ", " arglist = arguments.split(sepstr) for argument in arglist: if k == firstwidth: addstr = "" else: addstr = sepstr k = k + len(argument) + len(addstr) if k > width: k = firstwidth + 1 + len(argument) newstr = newstr + ",\n" + " "*(firstwidth+2) + argument else: newstr = newstr + addstr + argument return newstr _namedict = None _dictlist = None # Traverse all module directories underneath globals # to see if something is defined def _makenamedict(module='numpy'): module = __import__(module, globals(), locals(), []) thedict = {module.__name__:module.__dict__} dictlist = [module.__name__] totraverse = [module.__dict__] while True: if len(totraverse) == 0: break thisdict = totraverse.pop(0) for x in thisdict.keys(): if isinstance(thisdict[x], types.ModuleType): modname = thisdict[x].__name__ if modname not in dictlist: moddict = thisdict[x].__dict__ dictlist.append(modname) totraverse.append(moddict) thedict[modname] = moddict return thedict, dictlist def _info(obj, output=sys.stdout): """Provide information about ndarray obj. Parameters ---------- obj : ndarray Must be ndarray, not checked. output Where printed output goes. Notes ----- Copied over from the numarray module prior to its removal. Adapted somewhat as only numpy is an option now. Called by info. 
""" extra = "" tic = "" bp = lambda x: x cls = getattr(obj, '__class__', type(obj)) nm = getattr(cls, '__name__', cls) strides = obj.strides endian = obj.dtype.byteorder print("class: ", nm, file=output) print("shape: ", obj.shape, file=output) print("strides: ", strides, file=output) print("itemsize: ", obj.itemsize, file=output) print("aligned: ", bp(obj.flags.aligned), file=output) print("contiguous: ", bp(obj.flags.contiguous), file=output) print("fortran: ", obj.flags.fortran, file=output) print( "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output ) print("byteorder: ", end=' ', file=output) if endian in ['|', '=']: print("%s%s%s" % (tic, sys.byteorder, tic), file=output) byteswap = False elif endian == '>': print("%sbig%s" % (tic, tic), file=output) byteswap = sys.byteorder != "big" else: print("%slittle%s" % (tic, tic), file=output) byteswap = sys.byteorder != "little" print("byteswap: ", bp(byteswap), file=output) print("type: %s" % obj.dtype, file=output) def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): """ Get help information for a function, class, or module. Parameters ---------- object : object or str, optional Input object or name to get information about. If `object` is a numpy object, its docstring is given. If it is a string, available modules are searched for matching objects. If None, information about `info` itself is returned. maxwidth : int, optional Printing width. output : file like object, optional File like object that the output is written to, default is ``stdout``. The object has to be opened in 'w' or 'a' mode. toplevel : str, optional Start search at this level. See Also -------- source, lookfor Notes ----- When used interactively with an object, ``np.info(obj)`` is equivalent to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt. Examples -------- >>> np.info(np.polyval) # doctest: +SKIP polyval(p, x) Evaluate the polynomial p at x. ... When using a string for `object` it is possible to get multiple results. >>> np.info('fft') # doctest: +SKIP *** Found in numpy *** Core FFT routines ... *** Found in numpy.fft *** fft(a, n=None, axis=-1) ... *** Repeat reference found in numpy.fft.fftpack *** *** Total of 3 references found. *** """ global _namedict, _dictlist # Local import to speed up numpy's import time. import pydoc import inspect if (hasattr(object, '_ppimport_importer') or hasattr(object, '_ppimport_module')): object = object._ppimport_module elif hasattr(object, '_ppimport_attr'): object = object._ppimport_attr if object is None: info(info) elif isinstance(object, ndarray): _info(object, output=output) elif isinstance(object, str): if _namedict is None: _namedict, _dictlist = _makenamedict(toplevel) numfound = 0 objlist = [] for namestr in _dictlist: try: obj = _namedict[namestr][object] if id(obj) in objlist: print("\n " "*** Repeat reference found in %s *** " % namestr, file=output ) else: objlist.append(id(obj)) print(" *** Found in %s ***" % namestr, file=output) info(obj) print("-"*maxwidth, file=output) numfound += 1 except KeyError: pass if numfound == 0: print("Help for %s not found." % object, file=output) else: print("\n " "*** Total of %d references found. 
***" % numfound, file=output ) elif inspect.isfunction(object): name = object.__name__ arguments = formatargspec(*getargspec(object)) if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments print(" " + argstr + "\n", file=output) print(inspect.getdoc(object), file=output) elif inspect.isclass(object): name = object.__name__ arguments = "()" try: if hasattr(object, '__init__'): arguments = formatargspec( *getargspec(object.__init__.__func__) ) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) except Exception: pass if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments print(" " + argstr + "\n", file=output) doc1 = inspect.getdoc(object) if doc1 is None: if hasattr(object, '__init__'): print(inspect.getdoc(object.__init__), file=output) else: print(inspect.getdoc(object), file=output) methods = pydoc.allmethods(object) if methods != []: print("\n\nMethods:\n", file=output) for meth in methods: if meth[0] == '_': continue thisobj = getattr(object, meth, None) if thisobj is not None: methstr, other = pydoc.splitdoc( inspect.getdoc(thisobj) or "None" ) print(" %s -- %s" % (meth, methstr), file=output) elif (sys.version_info[0] < 3 and isinstance(object, types.InstanceType)): # check for __call__ method # types.InstanceType is the type of the instances of oldstyle classes print("Instance of class: ", object.__class__.__name__, file=output) print(file=output) if hasattr(object, '__call__'): arguments = formatargspec( *getargspec(object.__call__.__func__) ) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" if hasattr(object, 'name'): name = "%s" % object.name else: name = "<name>" if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments print(" " + argstr + "\n", file=output) doc = inspect.getdoc(object.__call__) if doc is not None: print(inspect.getdoc(object.__call__), file=output) print(inspect.getdoc(object), file=output) else: print(inspect.getdoc(object), file=output) elif inspect.ismethod(object): name = object.__name__ arguments = formatargspec( *getargspec(object.__func__) ) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" if len(name+arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments print(" " + argstr + "\n", file=output) print(inspect.getdoc(object), file=output) elif hasattr(object, '__doc__'): print(inspect.getdoc(object), file=output) def source(object, output=sys.stdout): """ Print or write to a file the source code for a NumPy object. The source code is only returned for objects written in Python. Many functions and classes are defined in C and will therefore not return useful information. Parameters ---------- object : numpy object Input object. This can be any object (function, class, module, ...). output : file object, optional If `output` not supplied then source code is printed to screen (sys.stdout). File object must be created with either write 'w' or append 'a' modes. See Also -------- lookfor, info Examples -------- >>> np.source(np.interp) #doctest: +SKIP In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py def interp(x, xp, fp, left=None, right=None): \"\"\".... 
(full docstring printed)\"\"\" if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() else: return compiled_interp(x, xp, fp, left, right) The source code is only returned for objects written in Python. >>> np.source(np.array) #doctest: +SKIP Not available for this object. """ # Local import to speed up numpy's import time. import inspect try: print("In file: %s\n" % inspect.getsourcefile(object), file=output) print(inspect.getsource(object), file=output) except Exception: print("Not available for this object.", file=output) # Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} # where kind: "func", "class", "module", "object" # and index: index in breadth-first namespace traversal _lookfor_caches = {} # regexp whose match indicates that the string may contain a function # signature _function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) def lookfor(what, module=None, import_modules=True, regenerate=False, output=None): """ Do a keyword search on docstrings. A list of of objects that matched the search is displayed, sorted by relevance. All given keywords need to be found in the docstring for it to be returned as a result, but the order does not matter. Parameters ---------- what : str String containing words to look for. module : str or list, optional Name of module(s) whose docstrings to go through. import_modules : bool, optional Whether to import sub-modules in packages. Default is True. regenerate : bool, optional Whether to re-generate the docstring cache. Default is False. output : file-like, optional File-like object to write the output to. If omitted, use a pager. See Also -------- source, info Notes ----- Relevance is determined only roughly, by checking if the keywords occur in the function name, at the start of a docstring, etc. Examples -------- >>> np.lookfor('binary representation') Search results for 'binary representation' ------------------------------------------ numpy.binary_repr Return the binary representation of the input number as a string. numpy.core.setup_common.long_double_representation Given a binary dump as given by GNU od -b, look for long double numpy.base_repr Return a string representation of a number in the given base system. ... """ import pydoc # Cache cache = _lookfor_generate_cache(module, import_modules, regenerate) # Search # XXX: maybe using a real stemming search engine would be better? found = [] whats = str(what).lower().split() if not whats: return for name, (docstring, kind, index) in cache.items(): if kind in ('module', 'object'): # don't show modules or objects continue ok = True doc = docstring.lower() for w in whats: if w not in doc: ok = False break if ok: found.append(name) # Relevance sort # XXX: this is full Harrison-Stetson heuristics now, # XXX: it probably could be improved kind_relevance = {'func': 1000, 'class': 1000, 'module': -1000, 'object': -1000} def relevance(name, docstr, kind, index): r = 0 # do the keywords occur within the start of the docstring? first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) r += sum([200 for w in whats if w in first_doc]) # do the keywords occur in the function name? r += sum([30 for w in whats if w in name]) # is the full name long? r += -len(name) * 5 # is the object of bad type? r += kind_relevance.get(kind, -1000) # is the object deep in namespace hierarchy? 
r += -name.count('.') * 10 r += max(-index / 100, -100) return r def relevance_value(a): return relevance(a, *cache[a]) found.sort(key=relevance_value) # Pretty-print s = "Search results for '%s'" % (' '.join(whats)) help_text = [s, "-"*len(s)] for name in found[::-1]: doc, kind, ix = cache[name] doclines = [line.strip() for line in doc.strip().split("\n") if line.strip()] # find a suitable short description try: first_doc = doclines[0].strip() if _function_signature_re.search(first_doc): first_doc = doclines[1].strip() except IndexError: first_doc = "" help_text.append("%s\n %s" % (name, first_doc)) if not found: help_text.append("Nothing found.") # Output if output is not None: output.write("\n".join(help_text)) elif len(help_text) > 10: pager = pydoc.getpager() pager("\n".join(help_text)) else: print("\n".join(help_text)) def _lookfor_generate_cache(module, import_modules, regenerate): """ Generate docstring cache for given module. Parameters ---------- module : str, None, module Module for which to generate docstring cache import_modules : bool Whether to import sub-modules in packages. regenerate : bool Re-generate the docstring cache Returns ------- cache : dict {obj_full_name: (docstring, kind, index), ...} Docstring cache for the module, either cached one (regenerate=False) or newly generated. """ global _lookfor_caches # Local import to speed up numpy's import time. import inspect if sys.version_info[0] >= 3: # In Python3 stderr, stdout are text files. from io import StringIO else: from StringIO import StringIO if module is None: module = "numpy" if isinstance(module, str): try: __import__(module) except ImportError: return {} module = sys.modules[module] elif isinstance(module, list) or isinstance(module, tuple): cache = {} for mod in module: cache.update(_lookfor_generate_cache(mod, import_modules, regenerate)) return cache if id(module) in _lookfor_caches and not regenerate: return _lookfor_caches[id(module)] # walk items and collect docstrings cache = {} _lookfor_caches[id(module)] = cache seen = {} index = 0 stack = [(module.__name__, module)] while stack: name, item = stack.pop(0) if id(item) in seen: continue seen[id(item)] = True index += 1 kind = "object" if inspect.ismodule(item): kind = "module" try: _all = item.__all__ except AttributeError: _all = None # import sub-packages if import_modules and hasattr(item, '__path__'): for pth in item.__path__: for mod_path in os.listdir(pth): this_py = os.path.join(pth, mod_path) init_py = os.path.join(pth, mod_path, '__init__.py') if (os.path.isfile(this_py) and mod_path.endswith('.py')): to_import = mod_path[:-3] elif os.path.isfile(init_py): to_import = mod_path else: continue if to_import == '__init__': continue try: old_stdout = sys.stdout old_stderr = sys.stderr try: sys.stdout = StringIO() sys.stderr = StringIO() __import__("%s.%s" % (name, to_import)) finally: sys.stdout = old_stdout sys.stderr = old_stderr # Catch SystemExit, too except BaseException: continue for n, v in _getmembers(item): try: item_name = getattr(v, '__name__', "%s.%s" % (name, n)) mod_name = getattr(v, '__module__', None) except NameError: # ref. SWIG's global cvars # NameError: Unknown C global variable item_name = "%s.%s" % (name, n) mod_name = None if '.' not in item_name and mod_name: item_name = "%s.%s" % (mod_name, item_name) if not item_name.startswith(name + '.'): # don't crawl "foreign" objects if isinstance(v, ufunc): # ... 
unless they are ufuncs pass else: continue elif not (inspect.ismodule(v) or _all is None or n in _all): continue stack.append(("%s.%s" % (name, n), v)) elif inspect.isclass(item): kind = "class" for n, v in _getmembers(item): stack.append(("%s.%s" % (name, n), v)) elif hasattr(item, "__call__"): kind = "func" try: doc = inspect.getdoc(item) except NameError: # ref SWIG's NameError: Unknown C global variable doc = None if doc is not None: cache[name] = (doc, kind, index) return cache def _getmembers(item): import inspect try: members = inspect.getmembers(item) except Exception: members = [(x, getattr(item, x)) for x in dir(item) if hasattr(item, x)] return members #----------------------------------------------------------------------------- # The following SafeEval class and company are adapted from Michael Spencer's # ASPN Python Cookbook recipe: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469 # Accordingly it is mostly Copyright 2006 by Michael Spencer. # The recipe, like most of the other ASPN Python Cookbook recipes was made # available under the Python license. # http://www.python.org/license # It has been modified to: # * handle unary -/+ # * support True/False/None # * raise SyntaxError instead of a custom exception. class SafeEval(object): """ Object to evaluate constant string expressions. This includes strings with lists, dicts and tuples using the abstract syntax tree created by ``compiler.parse``. .. deprecated:: 1.10.0 See Also -------- safe_eval """ def __init__(self): # 2014-10-15, 1.10 warnings.warn("SafeEval is deprecated in 1.10 and will be removed.", DeprecationWarning, stacklevel=2) def visit(self, node): cls = node.__class__ meth = getattr(self, 'visit' + cls.__name__, self.default) return meth(node) def default(self, node): raise SyntaxError("Unsupported source construct: %s" % node.__class__) def visitExpression(self, node): return self.visit(node.body) def visitNum(self, node): return node.n def visitStr(self, node): return node.s def visitBytes(self, node): return node.s def visitDict(self, node,**kw): return dict([(self.visit(k), self.visit(v)) for k, v in zip(node.keys, node.values)]) def visitTuple(self, node): return tuple([self.visit(i) for i in node.elts]) def visitList(self, node): return [self.visit(i) for i in node.elts] def visitUnaryOp(self, node): import ast if isinstance(node.op, ast.UAdd): return +self.visit(node.operand) elif isinstance(node.op, ast.USub): return -self.visit(node.operand) else: raise SyntaxError("Unknown unary op: %r" % node.op) def visitName(self, node): if node.id == 'False': return False elif node.id == 'True': return True elif node.id == 'None': return None else: raise SyntaxError("Unknown name: %s" % node.id) def visitNameConstant(self, node): return node.value def safe_eval(source): """ Protected string evaluation. Evaluate a string containing a Python literal expression without allowing the execution of arbitrary non-literal code. Parameters ---------- source : str The string to evaluate. Returns ------- obj : object The result of evaluating `source`. Raises ------ SyntaxError If the code has invalid Python syntax, or if it contains non-literal code. Examples -------- >>> np.safe_eval('1') 1 >>> np.safe_eval('[1, 2, 3]') [1, 2, 3] >>> np.safe_eval('{"foo": ("bar", 10.0)}') {'foo': ('bar', 10.0)} >>> np.safe_eval('import os') Traceback (most recent call last): ... SyntaxError: invalid syntax >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') Traceback (most recent call last): ... 
SyntaxError: Unsupported source construct: compiler.ast.CallFunc """ # Local import to speed up numpy's import time. import ast return ast.literal_eval(source) def _median_nancheck(data, result, axis, out): """ Utility function to check median result from data for NaN values at the end and return NaN in that case. Input result can also be a MaskedArray. Parameters ---------- data : array Input data to median function result : Array or MaskedArray Result of median function axis : {int, sequence of int, None}, optional Axis or axes along which the median was computed. out : ndarray, optional Output array in which to place the result. Returns ------- median : scalar or ndarray Median or NaN in axes which contained NaN in the input. """ if data.size == 0: return result data = np.moveaxis(data, axis, -1) n = np.isnan(data[..., -1]) # masked NaN values are ok if np.ma.isMaskedArray(n): n = n.filled(False) if result.ndim == 0: if n == True: warnings.warn("Invalid value encountered in median", RuntimeWarning, stacklevel=3) if out is not None: out[...] = data.dtype.type(np.nan) result = out else: result = data.dtype.type(np.nan) elif np.count_nonzero(n.ravel()) > 0: warnings.warn("Invalid value encountered in median for" + " %d results" % np.count_nonzero(n.ravel()), RuntimeWarning, stacklevel=3) result[n] = np.nan return result #-----------------------------------------------------------------------------
36,340
30.247635
79
py
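A quick way to see what the `safe_eval` helper above accepts: as the source shows, it simply delegates to `ast.literal_eval`, so Python literals round-trip while anything executable raises instead of running. A minimal sketch; the sample strings are illustrative only.

# Sketch of the literal-only evaluation that safe_eval delegates to.
import ast

for source in ("1", "[1, 2, 3]", "{'foo': ('bar', 10.0)}", "-2.5"):
    print(source, "->", ast.literal_eval(source))

# Non-literal (executable) input is rejected with an exception.
try:
    ast.literal_eval("__import__('os').getcwd()")
except (ValueError, SyntaxError) as exc:
    print("rejected:", type(exc).__name__)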
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/polynomial.py
""" Functions to operate on polynomials. """ from __future__ import division, absolute_import, print_function __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit', 'RankWarning'] import re import warnings import numpy.core.numeric as NX from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, ones) from numpy.lib.twodim_base import diag, vander from numpy.lib.function_base import trim_zeros from numpy.lib.type_check import iscomplex, real, imag, mintypecode from numpy.linalg import eigvals, lstsq, inv class RankWarning(UserWarning): """ Issued by `polyfit` when the Vandermonde matrix is rank deficient. For more information, a way to suppress the warning, and an example of `RankWarning` being issued, see `polyfit`. """ pass def poly(seq_of_zeros): """ Find the coefficients of a polynomial with the given sequence of roots. Returns the coefficients of the polynomial whose leading coefficient is one for the given sequence of zeros (multiple roots must be included in the sequence as many times as their multiplicity; see Examples). A square matrix (or array, which will be treated as a matrix) can also be given, in which case the coefficients of the characteristic polynomial of the matrix are returned. Parameters ---------- seq_of_zeros : array_like, shape (N,) or (N, N) A sequence of polynomial roots, or a square array or matrix object. Returns ------- c : ndarray 1D array of polynomial coefficients from highest to lowest degree: ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` where c[0] always equals 1. Raises ------ ValueError If input is the wrong shape (the input must be a 1-D or square 2-D array). See Also -------- polyval : Compute polynomial values. roots : Return the roots of a polynomial. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- Specifying the roots of a polynomial still leaves one degree of freedom, typically represented by an undetermined leading coefficient. [1]_ In the case of this function, that coefficient - the first one in the returned array - is always taken as one. (If for some reason you have one other point, the only automatic way presently to leverage that information is to use ``polyfit``.) The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` matrix **A** is given by :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, where **I** is the `n`-by-`n` identity matrix. [2]_ References ---------- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," Academic Press, pg. 182, 1980. Examples -------- Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example array([1, 0, 0, 0]) The line above represents z**3 + 0*z**2 + 0*z + 0. >>> np.poly((-1./2, 0, 1./2)) array([ 1. , 0. , -0.25, 0. ]) The line above represents z**3 - z/4 >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) array([ 1. , -0.77086955, 0.08618131, 0. ]) #random Given a square array object: >>> P = np.array([[0, 1./3], [-1./2, 0]]) >>> np.poly(P) array([ 1. , 0. , 0.16666667]) Or a square matrix object: >>> np.poly(np.matrix(P)) array([ 1. , 0. , 0.16666667]) Note how in all cases the leading coefficient is always 1. 
""" seq_of_zeros = atleast_1d(seq_of_zeros) sh = seq_of_zeros.shape if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype # Let object arrays slip through, e.g. for arbitrary precision if dt != object: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") if len(seq_of_zeros) == 0: return 1.0 dt = seq_of_zeros.dtype a = ones((1,), dtype=dt) for k in range(len(seq_of_zeros)): a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), mode='full') if issubclass(a.dtype.type, NX.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = NX.asarray(seq_of_zeros, complex) if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): a = a.real.copy() return a def roots(p): """ Return the roots of a polynomial with coefficients given in p. The values in the rank-1 array `p` are coefficients of a polynomial. If the length of `p` is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the roots of the polynomial. Raises ------ ValueError When `p` cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Compute polynomial values. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) """ # If input is scalar, this makes it an array p = atleast_1d(p) if p.ndim != 1: raise ValueError("Input must be a rank-1 array.") # find non-zero array entries non_zero = NX.nonzero(NX.ravel(p))[0] # Return an empty array if polynomial is all zeros if len(non_zero) == 0: return NX.array([]) # find the number of trailing zeros -- this is the number of roots at 0. trailing_zeros = len(p) - non_zero[-1] - 1 # strip leading and trailing zeros p = p[int(non_zero[0]):int(non_zero[-1])+1] # casting: if incoming array isn't floating point, make it floating point. if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): p = p.astype(float) N = len(p) if N > 1: # build companion matrix and find its eigenvalues (the roots) A = diag(NX.ones((N-2,), p.dtype), -1) A[0,:] = -p[1:] / p[0] roots = eigvals(A) else: roots = NX.array([]) # tack any zeros onto the back of the array roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) return roots def polyint(p, m=1, k=None): """ Return an antiderivative (indefinite integral) of a polynomial. The returned order `m` antiderivative `P` of polynomial `p` satisfies :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` integration constants `k`. The constants determine the low-order polynomial part .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. Parameters ---------- p : array_like or poly1d Polynomial to differentiate. A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of the antiderivative. 
(Default: 1) k : list of `m` scalars or scalar, optional Integration constants. They are given in the order of integration: those corresponding to highest-order terms come first. If ``None`` (default), all constants are assumed to be zero. If `m = 1`, a single scalar can be given instead of a list. See Also -------- polyder : derivative of a polynomial poly1d.integ : equivalent method Examples -------- The defining property of the antiderivative: >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P poly1d([ 0.33333333, 0.5 , 1. , 0. ]) >>> np.polyder(P) == p True The integration constants default to zero, but can be specified: >>> P = np.polyint(p, 3) >>> P(0) 0.0 >>> np.polyder(P)(0) 0.0 >>> np.polyder(P, 2)(0) 0.0 >>> P = np.polyint(p, 3, k=[6,5,3]) >>> P poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) Note that 3 = 6 / 2!, and that the constants are given in the order of integrations. Constant of the highest-order polynomial term comes first: >>> np.polyder(P, 2)(0) 6.0 >>> np.polyder(P, 1)(0) 5.0 >>> P(0) 3.0 """ m = int(m) if m < 0: raise ValueError("Order of integral must be positive (see polyder)") if k is None: k = NX.zeros(m, float) k = atleast_1d(k) if len(k) == 1 and m > 1: k = k[0]*NX.ones(m, float) if len(k) < m: raise ValueError( "k must be a scalar or a rank-1 array of length 1 or >m.") truepoly = isinstance(p, poly1d) p = NX.asarray(p) if m == 0: if truepoly: return poly1d(p) return p else: # Note: this must work also with object and integer arrays y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) val = polyint(y, m - 1, k=k[1:]) if truepoly: return poly1d(val) return val def polyder(p, m=1): """ Return the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate. A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of differentiation (default: 1) Returns ------- der : poly1d A new polynomial representing the derivative. See Also -------- polyint : Anti-derivative of a polynomial. poly1d : Class for one-dimensional polynomials. Examples -------- The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: >>> p = np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 poly1d([3, 2, 1]) which evaluates to: >>> p2(2.) 17.0 We can verify this, approximating the derivative with ``(f(x + h) - f(x))/h``: >>> (p(2. + 0.001) - p(2.)) / 0.001 17.007000999997857 The fourth-order derivative of a 3rd-order polynomial is zero: >>> np.polyder(p, 2) poly1d([6, 2]) >>> np.polyder(p, 3) poly1d([6]) >>> np.polyder(p, 4) poly1d([ 0.]) """ m = int(m) if m < 0: raise ValueError("Order of derivative must be positive (see polyint)") truepoly = isinstance(p, poly1d) p = NX.asarray(p) n = len(p) - 1 y = p[:-1] * NX.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = poly1d(val) return val def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): """ Least squares polynomial fit. Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. 
deg : int Degree of the fitting polynomial rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (M,), optional Weights to apply to the y-coordinates of the sample points. For gaussian uncertainties, use 1/sigma (not 1/sigma**2). cov : bool, optional Return the estimate and the covariance matrix of the estimate If full is True, then cov is not returned. Returns ------- p : ndarray, shape (deg + 1,) or (deg + 1, K) Polynomial coefficients, highest power first. If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``. residuals, rank, singular_values, rcond Present only if `full` = True. Residuals of the least-squares fit, the effective rank of the scaled Vandermonde coefficient matrix, its singular values, and the specified value of `rcond`. For more details, see `linalg.lstsq`. V : ndarray, shape (M,M) or (M,M,K) Present only if `full` = False and `cov`=True. The covariance matrix of the polynomial coefficient estimates. The diagonal of this matrix are the variance estimates for each coefficient. If y is a 2-D array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]`` Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- polyval : Compute polynomial values. linalg.lstsq : Computes a least-squares fit. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution minimizes the squared error .. math :: E = \\sum_{j=0}^k |p(x_j) - y_j|^2 in the equations:: x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] ... x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] The coefficient matrix of the coefficients `p` is a Vandermonde matrix. `polyfit` issues a `RankWarning` when the least-squares fit is badly conditioned. This implies that the best fit is not well-defined due to numerical error. The results may be improved by lowering the polynomial degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious: including contributions from the small singular values can add numerical noise to the result. Note that fitting polynomial coefficients is inherently badly conditioned when the degree of the polynomial is large or the interval of sample points is badly centered. The quality of the fit should always be checked in these cases. When polynomial fits are not satisfactory, splines may be a good alternative. References ---------- .. [1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting .. 
[2] Wikipedia, "Polynomial interpolation", http://en.wikipedia.org/wiki/Polynomial_interpolation Examples -------- >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) >>> z = np.polyfit(x, y, 3) >>> z array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) It is convenient to use `poly1d` objects for dealing with polynomials: >>> p = np.poly1d(z) >>> p(0.5) 0.6143849206349179 >>> p(3.5) -0.34732142857143039 >>> p(10) 22.579365079365115 High-order polynomials may oscillate wildly: >>> p30 = np.poly1d(np.polyfit(x, y, 30)) /... RankWarning: Polyfit may be poorly conditioned... >>> p30(4) -0.80000000000000204 >>> p30(5) -0.99999999999999445 >>> p30(4.5) -0.10547061179440398 Illustration: >>> import matplotlib.pyplot as plt >>> xp = np.linspace(-2, 6, 100) >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') >>> plt.ylim(-2,2) (-2, 2) >>> plt.show() """ order = int(deg) + 1 x = NX.asarray(x) + 0.0 y = NX.asarray(y) + 0.0 # check arguments. if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if x.shape[0] != y.shape[0]: raise TypeError("expected x and y to have same length") # set rcond if rcond is None: rcond = len(x)*finfo(x.dtype).eps # set up least squares equation for powers of x lhs = vander(x, order) rhs = y # apply weighting if w is not None: w = NX.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected a 1-d array for weights") if w.shape[0] != y.shape[0]: raise TypeError("expected w and y to have the same length") lhs *= w[:, NX.newaxis] if rhs.ndim == 2: rhs *= w[:, NX.newaxis] else: rhs *= w # scale lhs to improve condition number and solve scale = NX.sqrt((lhs*lhs).sum(axis=0)) lhs /= scale c, resids, rank, s = lstsq(lhs, rhs, rcond) c = (c.T/scale).T # broadcast scale coefficients # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: msg = "Polyfit may be poorly conditioned" warnings.warn(msg, RankWarning, stacklevel=2) if full: return c, resids, rank, s, rcond elif cov: Vbase = inv(dot(lhs.T, lhs)) Vbase /= NX.outer(scale, scale) # Some literature ignores the extra -2.0 factor in the denominator, but # it is included here because the covariance of Multivariate Student-T # (which is implied by a Bayesian uncertainty analysis) includes it. # Plus, it gives a slightly more conservative estimate of uncertainty. if len(x) <= order + 2: raise ValueError("the number of data points must exceed order + 2 " "for Bayesian estimate the covariance matrix") fac = resids / (len(x) - order - 2.0) if y.ndim == 1: return c, Vbase * fac else: return c, Vbase[:,:, NX.newaxis] * fac else: return c def polyval(p, x): """ Evaluate a polynomial at specific values. If `p` is of length N, this function returns the value: ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` If `x` is a sequence, then `p(x)` is returned for each element of `x`. If `x` is another polynomial then the composite polynomial `p(x(t))` is returned. Parameters ---------- p : array_like or poly1d object 1D array of polynomial coefficients (including coefficients equal to zero) from highest degree to the constant term, or an instance of poly1d. x : array_like or poly1d object A number, an array of numbers, or an instance of poly1d, at which to evaluate `p`. 
Returns ------- values : ndarray or poly1d If `x` is a poly1d instance, the result is the composition of the two polynomials, i.e., `x` is "substituted" in `p` and the simplified result is returned. In addition, the type of `x` - array_like or poly1d - governs the type of the output: `x` array_like => `values` array_like, `x` a poly1d object => `values` is also. See Also -------- poly1d: A polynomial class. Notes ----- Horner's scheme [1]_ is used to evaluate the polynomial. Even so, for polynomials of high degree the values may be inaccurate due to rounding errors. Use carefully. References ---------- .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand Reinhold Co., 1985, pg. 720. Examples -------- >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) poly1d([ 76.]) >>> np.polyval(np.poly1d([3,0,1]), 5) 76 >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) poly1d([ 76.]) """ p = NX.asarray(p) if isinstance(x, poly1d): y = 0 else: x = NX.asarray(x) y = NX.zeros_like(x) for i in range(len(p)): y = y * x + p[i] return y def polyadd(a1, a2): """ Find the sum of two polynomials. Returns the polynomial resulting from the sum of two input polynomials. Each input must be either a poly1d object or a 1D sequence of polynomial coefficients, from highest to lowest degree. Parameters ---------- a1, a2 : array_like or poly1d object Input polynomials. Returns ------- out : ndarray or poly1d object The sum of the inputs. If either input is a poly1d object, then the output is also a poly1d object. Otherwise, it is a 1D array of polynomial coefficients from highest to lowest degree. See Also -------- poly1d : A one-dimensional polynomial class. poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval Examples -------- >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) Using poly1d objects: >>> p1 = np.poly1d([1, 2]) >>> p2 = np.poly1d([9, 5, 4]) >>> print(p1) 1 x + 2 >>> print(p2) 2 9 x + 5 x + 4 >>> print(np.polyadd(p1, p2)) 2 9 x + 6 x + 6 """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 + a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) + a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 + NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polysub(a1, a2): """ Difference (subtraction) of two polynomials. Given two polynomials `a1` and `a2`, returns ``a1 - a2``. `a1` and `a2` can be either array_like sequences of the polynomials' coefficients (including coefficients equal to zero), or `poly1d` objects. Parameters ---------- a1, a2 : array_like or poly1d Minuend and subtrahend polynomials, respectively. Returns ------- out : ndarray or poly1d Array or `poly1d` object of the difference polynomial's coefficients. See Also -------- polyval, polydiv, polymul, polyadd Examples -------- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 - a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) - a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 - NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polymul(a1, a2): """ Find the product of two polynomials. 
    Finds the polynomial resulting from the multiplication of the two input
    polynomials. Each input must be either a poly1d object or a 1D sequence
    of polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The polynomial resulting from the multiplication of the inputs. If
        either input is a poly1d object, then the output is also a poly1d
        object. Otherwise, it is a 1D array of polynomial coefficients from
        highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
    convolve : Array convolution. Same output as polymul, but has parameter
               for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])

    Using poly1d objects:

    >>> p1 = np.poly1d([1, 2, 3])
    >>> p2 = np.poly1d([9, 5, 1])
    >>> print(p1)
       2
    1 x + 2 x + 3
    >>> print(p2)
       2
    9 x + 5 x + 1
    >>> print(np.polymul(p1, p2))
       4      3      2
    9 x + 23 x + 38 x + 17 x + 3

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1, a2 = poly1d(a1), poly1d(a2)
    val = NX.convolve(a1, a2)
    if truepoly:
        val = poly1d(val)
    return val

def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
    polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))

    """
    # Both operands must be checked, so that a poly1d divisor with an
    # array dividend also produces poly1d output.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r

_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    n = 0
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    return output + astr[n:]


class poly1d(object):
    """
    A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on polynomials so that said operations may take on their customary form in code (see Examples). Parameters ---------- c_or_r : array_like The polynomial's coefficients, in decreasing powers, or if the value of the second parameter is True, the polynomial's roots (values where the polynomial evaluates to 0). For example, ``poly1d([1, 2, 3])`` returns an object that represents :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. r : bool, optional If True, `c_or_r` specifies the polynomial's roots; the default is False. variable : str, optional Changes the variable used when printing `p` from `x` to `variable` (see Examples). Examples -------- Construct the polynomial :math:`x^2 + 2x + 3`: >>> p = np.poly1d([1, 2, 3]) >>> print(np.poly1d(p)) 2 1 x + 2 x + 3 Evaluate the polynomial at :math:`x = 0.5`: >>> p(0.5) 4.25 Find the roots: >>> p.r array([-1.+1.41421356j, -1.-1.41421356j]) >>> p(p.r) array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) These numbers in the previous line represent (0, 0) to machine precision Show the coefficients: >>> p.c array([1, 2, 3]) Display the order (the leading zero-coefficients are removed): >>> p.order 2 Show the coefficient of the k-th power in the polynomial (which is equivalent to ``p.c[-(i+1)]``): >>> p[1] 2 Polynomials can be added, subtracted, multiplied, and divided (returns quotient and remainder): >>> p * p poly1d([ 1, 4, 10, 12, 9]) >>> (p**3 + 4) / p (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) ``asarray(p)`` gives the coefficient array, so polynomials can be used in all functions that accept arrays: >>> p**2 # square of polynomial poly1d([ 1, 4, 10, 12, 9]) >>> np.square(p) # square of individual coefficients array([1, 4, 9]) The variable used in the string representation of `p` can be modified, using the `variable` parameter: >>> p = np.poly1d([1,2,3], variable='z') >>> print(p) 2 1 z + 2 z + 3 Construct a polynomial from its roots: >>> np.poly1d([1, 2], True) poly1d([ 1, -3, 2]) This is the same polynomial as obtained by: >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) poly1d([ 1, -3, 2]) """ __hash__ = None @property def coeffs(self): """ A copy of the polynomial coefficients """ return self._coeffs.copy() @property def variable(self): """ The name of the polynomial variable """ return self._variable # calculated attributes @property def order(self): """ The order or degree of the polynomial """ return len(self._coeffs) - 1 @property def roots(self): """ The roots of the polynomial, where self(x) == 0 """ return roots(self._coeffs) # our internal _coeffs property need to be backed by __dict__['coeffs'] for # scipy to work correctly. 
@property def _coeffs(self): return self.__dict__['coeffs'] @_coeffs.setter def _coeffs(self, coeffs): self.__dict__['coeffs'] = coeffs # alias attributes r = roots c = coef = coefficients = coeffs o = order def __init__(self, c_or_r, r=False, variable=None): if isinstance(c_or_r, poly1d): self._variable = c_or_r._variable self._coeffs = c_or_r._coeffs if set(c_or_r.__dict__) - set(self.__dict__): msg = ("In the future extra properties will not be copied " "across when constructing one poly1d from another") warnings.warn(msg, FutureWarning, stacklevel=2) self.__dict__.update(c_or_r.__dict__) if variable is not None: self._variable = variable return if r: c_or_r = poly(c_or_r) c_or_r = atleast_1d(c_or_r) if c_or_r.ndim > 1: raise ValueError("Polynomial must be 1d only.") c_or_r = trim_zeros(c_or_r, trim='f') if len(c_or_r) == 0: c_or_r = NX.array([0.]) self._coeffs = c_or_r if variable is None: variable = 'x' self._variable = variable def __array__(self, t=None): if t: return NX.asarray(self.coeffs, t) else: return NX.asarray(self.coeffs) def __repr__(self): vals = repr(self.coeffs) vals = vals[6:-1] return "poly1d(%s)" % vals def __len__(self): return self.order def __str__(self): thestr = "0" var = self.variable # Remove leading zeros coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] N = len(coeffs)-1 def fmt_float(q): s = '%.4g' % q if s.endswith('.0000'): s = s[:-5] return s for k in range(len(coeffs)): if not iscomplex(coeffs[k]): coefstr = fmt_float(real(coeffs[k])) elif real(coeffs[k]) == 0: coefstr = '%sj' % fmt_float(imag(coeffs[k])) else: coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), fmt_float(imag(coeffs[k]))) power = (N-k) if power == 0: if coefstr != '0': newstr = '%s' % (coefstr,) else: if k == 0: newstr = '0' else: newstr = '' elif power == 1: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = var else: newstr = '%s %s' % (coefstr, var) else: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = '%s**%d' % (var, power,) else: newstr = '%s %s**%d' % (coefstr, var, power) if k > 0: if newstr != '': if newstr.startswith('-'): thestr = "%s - %s" % (thestr, newstr[1:]) else: thestr = "%s + %s" % (thestr, newstr) else: thestr = newstr return _raise_power(thestr) def __call__(self, val): return polyval(self.coeffs, val) def __neg__(self): return poly1d(-self.coeffs) def __pos__(self): return self def __mul__(self, other): if isscalar(other): return poly1d(self.coeffs * other) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __rmul__(self, other): if isscalar(other): return poly1d(other * self.coeffs) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __add__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __radd__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __pow__(self, val): if not isscalar(val) or int(val) != val or val < 0: raise ValueError("Power to non-negative integers only.") res = [1] for _ in range(val): res = polymul(self.coeffs, res) return poly1d(res) def __sub__(self, other): other = poly1d(other) return poly1d(polysub(self.coeffs, other.coeffs)) def __rsub__(self, other): other = poly1d(other) return poly1d(polysub(other.coeffs, self.coeffs)) def __div__(self, other): if isscalar(other): return poly1d(self.coeffs/other) else: other = poly1d(other) return polydiv(self, other) __truediv__ = __div__ def __rdiv__(self, other): if isscalar(other): return poly1d(other/self.coeffs) 
else: other = poly1d(other) return polydiv(other, self) __rtruediv__ = __rdiv__ def __eq__(self, other): if not isinstance(other, poly1d): return NotImplemented if self.coeffs.shape != other.coeffs.shape: return False return (self.coeffs == other.coeffs).all() def __ne__(self, other): if not isinstance(other, poly1d): return NotImplemented return not self.__eq__(other) def __getitem__(self, val): ind = self.order - val if val > self.order: return 0 if val < 0: return 0 return self.coeffs[ind] def __setitem__(self, key, val): ind = self.order - key if key < 0: raise ValueError("Does not support negative powers.") if key > self.order: zr = NX.zeros(key-self.order, self.coeffs.dtype) self._coeffs = NX.concatenate((zr, self.coeffs)) ind = 0 self._coeffs[ind] = val return def __iter__(self): return iter(self.coeffs) def integ(self, m=1, k=0): """ Return an antiderivative (indefinite integral) of this polynomial. Refer to `polyint` for full documentation. See Also -------- polyint : equivalent function """ return poly1d(polyint(self.coeffs, m=m, k=k)) def deriv(self, m=1): """ Return a derivative of this polynomial. Refer to `polyder` for full documentation. See Also -------- polyder : equivalent function """ return poly1d(polyder(self.coeffs, m=m)) # Stuff to do on module import warnings.simplefilter('always', RankWarning)
38,572
28.603223
79
py
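To make the `polydiv` contract from the file above concrete: for ``q, r = polydiv(u, v)`` the identity ``u == polymul(q, v) + r`` holds up to floating-point rounding, and `poly`/`roots` are likewise inverse up to ordering. A minimal self-check, with arbitrarily chosen coefficients:

# Sanity check of the polydiv/polymul/polyadd contract.
import numpy as np

u = np.array([3.0, 5.0, 2.0])   # 3x**2 + 5x + 2
v = np.array([2.0, 1.0])        # 2x + 1
q, r = np.polydiv(u, v)         # quotient 1.5x + 1.75, remainder 0.25
assert np.allclose(np.polyadd(np.polymul(q, v), r), u)

# poly builds coefficients from roots; roots recovers them.
c = np.poly([-1.0, 2.0, 5.0])
assert np.allclose(np.sort(np.roots(c)), [-1.0, 2.0, 5.0])
print("polynomial identities hold")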
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/_version.py
"""Utility to compare (NumPy) version strings. The NumpyVersion class allows properly comparing numpy version strings. The LooseVersion and StrictVersion classes that distutils provides don't work; they don't recognize anything like alpha/beta/rc/dev versions. """ from __future__ import division, absolute_import, print_function import re from numpy.compat import basestring __all__ = ['NumpyVersion'] class NumpyVersion(): """Parse and compare numpy version strings. NumPy has the following versioning scheme (numbers given are examples; they can be > 9) in principle): - Released version: '1.8.0', '1.8.1', etc. - Alpha: '1.8.0a1', '1.8.0a2', etc. - Beta: '1.8.0b1', '1.8.0b2', etc. - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other `NumpyVersion` instance. Note that all development versions of the same (pre-)release compare equal. .. versionadded:: 1.9.0 Parameters ---------- vstring : str NumPy version string (``np.__version__``). Examples -------- >>> from numpy.lib import NumpyVersion >>> if NumpyVersion(np.__version__) < '1.7.0'): ... print('skip') skip >>> NumpyVersion('1.7') # raises ValueError, add ".0" """ def __init__(self, vstring): self.vstring = vstring ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) if not ver_main: raise ValueError("Not a valid numpy version string") self.version = ver_main.group() self.major, self.minor, self.bugfix = [int(x) for x in self.version.split('.')] if len(vstring) == ver_main.end(): self.pre_release = 'final' else: alpha = re.match(r'a\d', vstring[ver_main.end():]) beta = re.match(r'b\d', vstring[ver_main.end():]) rc = re.match(r'rc\d', vstring[ver_main.end():]) pre_rel = [m for m in [alpha, beta, rc] if m is not None] if pre_rel: self.pre_release = pre_rel[0].group() else: self.pre_release = '' self.is_devversion = bool(re.search(r'.dev', vstring)) def _compare_version(self, other): """Compare major.minor.bugfix""" if self.major == other.major: if self.minor == other.minor: if self.bugfix == other.bugfix: vercmp = 0 elif self.bugfix > other.bugfix: vercmp = 1 else: vercmp = -1 elif self.minor > other.minor: vercmp = 1 else: vercmp = -1 elif self.major > other.major: vercmp = 1 else: vercmp = -1 return vercmp def _compare_pre_release(self, other): """Compare alpha/beta/rc/final.""" if self.pre_release == other.pre_release: vercmp = 0 elif self.pre_release == 'final': vercmp = 1 elif other.pre_release == 'final': vercmp = -1 elif self.pre_release > other.pre_release: vercmp = 1 else: vercmp = -1 return vercmp def _compare(self, other): if not isinstance(other, (basestring, NumpyVersion)): raise ValueError("Invalid object to compare with NumpyVersion.") if isinstance(other, basestring): other = NumpyVersion(other) vercmp = self._compare_version(other) if vercmp == 0: # Same x.y.z version, check for alpha/beta/rc vercmp = self._compare_pre_release(other) if vercmp == 0: # Same version and same pre-release, check if dev version if self.is_devversion is other.is_devversion: vercmp = 0 elif self.is_devversion: vercmp = -1 else: vercmp = 1 return vercmp def __lt__(self, other): return self._compare(other) < 0 def __le__(self, other): return self._compare(other) <= 0 def __eq__(self, other): return self._compare(other) == 0 def __ne__(self, other): 
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return "NumpyVersion(%s)" % self.vstring
4,867
30.006369
79
py
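The ordering that `NumpyVersion` implements can be spot-checked directly. The version strings below are made up for the example, but they follow the scheme described in the class docstring:

# Spot-checks of the NumpyVersion ordering defined above.
from numpy.lib import NumpyVersion

assert NumpyVersion('1.8.0') < '1.9.0'                # plain releases
assert NumpyVersion('1.8.0a2') < '1.8.0b1'            # alpha before beta
assert NumpyVersion('1.8.0rc1') < '1.8.0'             # rc before final
assert NumpyVersion('1.8.0.dev-f1234afa') < '1.8.0'   # dev before release
print("all comparisons hold")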
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/index_tricks.py
from __future__ import division, absolute_import, print_function import sys import math import numpy.core.numeric as _nx from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, arange ) from numpy.core.numerictypes import find_common_type, issubdtype from . import function_base import numpy.matrixlib as matrixlib from .function_base import diff from numpy.core.multiarray import ravel_multi_index, unravel_index from numpy.lib.stride_tricks import as_strided __all__ = [ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', 'diag_indices', 'diag_indices_from' ] def ix_(*args): """ Construct an open mesh from multiple sequences. This function takes N 1-D sequences and returns N outputs with N dimensions each, such that the shape is 1 in all but one dimension and the dimension with the non-unit shape value cycles through all N dimensions. Using `ix_` one can quickly construct index arrays that will index the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. Parameters ---------- args : 1-D sequences Each sequence should be of integer or boolean type. Boolean sequences will be interpreted as boolean masks for the corresponding dimension (equivalent to passing in ``np.nonzero(boolean_sequence)``). Returns ------- out : tuple of ndarrays N arrays with N dimensions each, with N the number of input sequences. Together these arrays form an open mesh. See Also -------- ogrid, mgrid, meshgrid Examples -------- >>> a = np.arange(10).reshape(2, 5) >>> a array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> ixgrid = np.ix_([0, 1], [2, 4]) >>> ixgrid (array([[0], [1]]), array([[2, 4]])) >>> ixgrid[0].shape, ixgrid[1].shape ((2, 1), (1, 2)) >>> a[ixgrid] array([[2, 4], [7, 9]]) >>> ixgrid = np.ix_([True, True], [2, 4]) >>> a[ixgrid] array([[2, 4], [7, 9]]) >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) >>> a[ixgrid] array([[2, 4], [7, 9]]) """ out = [] nd = len(args) for k, new in enumerate(args): new = asarray(new) if new.ndim != 1: raise ValueError("Cross index must be 1 dimensional") if new.size == 0: # Explicitly type empty arrays to avoid float default new = new.astype(_nx.intp) if issubdtype(new.dtype, _nx.bool_): new, = new.nonzero() new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) out.append(new) return tuple(out) class nd_grid(object): """ Construct a multi-dimensional "meshgrid". ``grid = nd_grid()`` creates an instance which will return a mesh-grid when indexed. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. If instantiated with an argument of ``sparse=True``, the mesh-grid is open (or not fleshed out) so that only one-dimension of each returned argument is greater than 1. Parameters ---------- sparse : bool, optional Whether the grid is sparse or not. Default is False. Notes ----- Two instances of `nd_grid` are made available in the NumPy namespace, `mgrid` and `ogrid`:: mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) Users should use these pre-defined instances instead of using `nd_grid` directly. 
Examples -------- >>> mgrid = np.lib.index_tricks.nd_grid() >>> mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])] """ def __init__(self, sparse=False): self.sparse = sparse def __getitem__(self, key): try: size = [] typ = int for k in range(len(key)): step = key[k].step start = key[k].start if start is None: start = 0 if step is None: step = 1 if isinstance(step, complex): size.append(int(abs(step))) typ = float else: size.append( int(math.ceil((key[k].stop - start)/(step*1.0)))) if (isinstance(step, float) or isinstance(start, float) or isinstance(key[k].stop, float)): typ = float if self.sparse: nn = [_nx.arange(_x, dtype=_t) for _x, _t in zip(size, (typ,)*len(size))] else: nn = _nx.indices(size, typ) for k in range(len(size)): step = key[k].step start = key[k].start if start is None: start = 0 if step is None: step = 1 if isinstance(step, complex): step = int(abs(step)) if step != 1: step = (key[k].stop - start)/float(step-1) nn[k] = (nn[k]*step+start) if self.sparse: slobj = [_nx.newaxis]*len(size) for k in range(len(size)): slobj[k] = slice(None, None) nn[k] = nn[k][slobj] slobj[k] = _nx.newaxis return nn except (IndexError, TypeError): step = key.step stop = key.stop start = key.start if start is None: start = 0 if isinstance(step, complex): step = abs(step) length = int(step) if step != 1: step = (key.stop-start)/float(step-1) stop = key.stop + step return _nx.arange(0, length, 1, float)*step + start else: return _nx.arange(start, stop, step) def __len__(self): return 0 mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) mgrid.__doc__ = None # set in numpy.add_newdocs ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """ Translates slice objects to concatenation along an axis. For detailed documentation on usage, see `r_`. 
""" # allow ma.mr_ to override this concatenate = staticmethod(_nx.concatenate) makemat = staticmethod(matrixlib.matrix) def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): self.axis = axis self.matrix = matrix self.trans1d = trans1d self.ndmin = ndmin def __getitem__(self, key): # handle matrix builder syntax if isinstance(key, str): frame = sys._getframe().f_back mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) return mymat if not isinstance(key, tuple): key = (key,) # copy attributes, since they can be overridden in the first argument trans1d = self.trans1d ndmin = self.ndmin matrix = self.matrix axis = self.axis objs = [] scalars = [] arraytypes = [] scalartypes = [] for k, item in enumerate(key): scalar = False if isinstance(item, slice): step = item.step start = item.start stop = item.stop if start is None: start = 0 if step is None: step = 1 if isinstance(step, complex): size = int(abs(step)) newobj = function_base.linspace(start, stop, num=size) else: newobj = _nx.arange(start, stop, step) if ndmin > 1: newobj = array(newobj, copy=False, ndmin=ndmin) if trans1d != -1: newobj = newobj.swapaxes(-1, trans1d) elif isinstance(item, str): if k != 0: raise ValueError("special directives must be the " "first entry.") if item in ('r', 'c'): matrix = True col = (item == 'c') continue if ',' in item: vec = item.split(',') try: axis, ndmin = [int(x) for x in vec[:2]] if len(vec) == 3: trans1d = int(vec[2]) continue except Exception: raise ValueError("unknown special directive") try: axis = int(item) continue except (ValueError, TypeError): raise ValueError("unknown special directive") elif type(item) in ScalarType: newobj = array(item, ndmin=ndmin) scalars.append(len(objs)) scalar = True scalartypes.append(newobj.dtype) else: newobj = item if ndmin > 1: tempobj = array(newobj, copy=False, subok=True) newobj = array(newobj, copy=False, subok=True, ndmin=ndmin) if trans1d != -1 and tempobj.ndim < ndmin: k2 = ndmin-tempobj.ndim if (trans1d < 0): trans1d += k2 + 1 defaxes = list(range(ndmin)) k1 = trans1d axes = defaxes[:k1] + defaxes[k2:] + \ defaxes[k1:k2] newobj = newobj.transpose(axes) del tempobj objs.append(newobj) if not scalar and isinstance(newobj, _nx.ndarray): arraytypes.append(newobj.dtype) # Ensure that scalars won't up-cast unless warranted final_dtype = find_common_type(arraytypes, scalartypes) if final_dtype is not None: for k in scalars: objs[k] = objs[k].astype(final_dtype) res = self.concatenate(tuple(objs), axis=axis) if matrix: oldndim = res.ndim res = self.makemat(res) if oldndim == 1 and col: res = res.T return res def __len__(self): return 0 # separate classes are used here instead of just making r_ = concatentor(0), # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) class RClass(AxisConcatenator): """ Translates slice objects to concatenation along the first axis. This is a simple way to build up arrays quickly. There are two use cases. 1. If the index expression contains comma separated arrays, then stack them along their first axis. 2. If the index expression contains slice notation or scalars then create a 1-D array with a range indicated by the slice notation. If slice notation is used, the syntax ``start:stop:step`` is equivalent to ``np.arange(start, stop, step)`` inside of the brackets. However, if ``step`` is an imaginary number (i.e. 100j) then its integer portion is interpreted as a number-of-points desired and the start and stop are inclusive. 
In other words ``start:stop:stepj`` is interpreted as ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. After expansion of slice notation, all comma separated sequences are concatenated together. Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 (column) matrix is produced. If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication of the minimum number of dimensions to force each entry into as the second integer (the axis to concatenate along is still the first integer). A string with three comma-separated integers allows specification of the axis to concatenate along, the minimum number of dimensions to force the entries to, and which axis should contain the start of the arrays which are less than the specified number of dimensions. In other words the third integer allows you to specify where the 1's should be placed in the shape of the arrays that have their shapes upgraded. By default, they are placed in the front of the shape tuple. The third argument allows you to specify where the start of the array should be instead. Thus, a third argument of '0' would place the 1's at the end of the array shape. Negative integers specify where in the new shape tuple the last dimension of upgraded arrays should be placed, so the default is '-1'. Parameters ---------- Not a function, so takes no parameters Returns ------- A concatenated ndarray or matrix. See Also -------- concatenate : Join a sequence of arrays along an existing axis. c_ : Translates slice objects to concatenation along the second axis. Examples -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) String integers specify the axis to concatenate along or the minimum number of dimensions to force entries into. >>> a = np.array([[0, 1, 2], [3, 4, 5]]) >>> np.r_['-1', a, a] # concatenate along last axis array([[0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5]]) >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 array([[1, 2, 3], [4, 5, 6]]) >>> np.r_['0,2,0', [1,2,3], [4,5,6]] array([[1], [2], [3], [4], [5], [6]]) >>> np.r_['1,2,0', [1,2,3], [4,5,6]] array([[1, 4], [2, 5], [3, 6]]) Using 'r' or 'c' as a first string argument creates a matrix. >>> np.r_['r',[1,2,3], [4,5,6]] matrix([[1, 2, 3, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, 0) r_ = RClass() class CClass(AxisConcatenator): """ Translates slice objects to concatenation along the second axis. This is short-hand for ``np.r_['-1,2,0', index expression]``, which is useful because of its common occurrence. In particular, arrays will be stacked along their last axis after being upgraded to at least 2-D with 1's post-pended to the shape (column vectors made out of 1-D arrays). See Also -------- column_stack : Stack 1-D arrays as columns into a 2-D array. r_ : For more detailed documentation. 
Examples -------- >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] array([[1, 4], [2, 5], [3, 6]]) >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] array([[1, 2, 3, 0, 0, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) c_ = CClass() class ndenumerate(object): """ Multidimensional index iterator. Return an iterator yielding pairs of array coordinates and values. Parameters ---------- arr : ndarray Input array. See Also -------- ndindex, flatiter Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print(index, x) (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4 """ def __init__(self, arr): self.iter = asarray(arr).flat def __next__(self): """ Standard iterator method, returns the index tuple and array value. Returns ------- coords : tuple of ints The indices of the current iteration. val : scalar The array element of the current iteration. """ return self.iter.coords, next(self.iter) def __iter__(self): return self next = __next__ class ndindex(object): """ An N-dimensional iterator object to index arrays. Given the shape of an array, an `ndindex` instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. Parameters ---------- `*args` : ints The size of each dimension of the array. See Also -------- ndenumerate, flatiter Examples -------- >>> for index in np.ndindex(3, 2, 1): ... print(index) (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0) """ def __init__(self, *shape): if len(shape) == 1 and isinstance(shape[0], tuple): shape = shape[0] x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape)) self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], order='C') def __iter__(self): return self def ndincr(self): """ Increment the multi-dimensional index by one. This method is for backward compatibility only: do not use. """ next(self) def __next__(self): """ Standard iterator method, updates the index and returns the index tuple. Returns ------- val : tuple of ints Returns a tuple containing the indices of the current iteration. """ next(self._it) return self._it.multi_index next = __next__ # You can do all this with slice() plus a few special objects, # but there's a lot to remember. This version is simpler because # it uses the standard array indexing syntax. # # Written by Konrad Hinsen <[email protected]> # last revision: 1999-7-23 # # Cosmetic changes by T. Oliphant 2001 # # class IndexExpression(object): """ A nicer way to build up index tuples for arrays. .. note:: Use one of the two predefined instances `index_exp` or `s_` rather than directly using `IndexExpression`. For any index combination, including slicing and axis insertion, ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any array `a`. However, ``np.index_exp[indices]`` can be used anywhere in Python code and returns a tuple of slice objects that can be used in the construction of complex index expressions. Parameters ---------- maketuple : bool If True, always returns a tuple. See Also -------- index_exp : Predefined instance that always returns a tuple: `index_exp = IndexExpression(maketuple=True)`. s_ : Predefined instance without tuple conversion: `s_ = IndexExpression(maketuple=False)`. Notes ----- You can do all this with `slice()` plus a few special objects, but there's a lot to remember and this version is simpler because it uses the standard array indexing syntax. 
Examples -------- >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] (slice(2, None, 2),) >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] array([2, 4]) """ def __init__(self, maketuple): self.maketuple = maketuple def __getitem__(self, item): if self.maketuple and not isinstance(item, tuple): return (item,) else: return item index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) # End contribution from Konrad. # The following functions complement those in twodim_base, but are # applicable to N-dimensions. def fill_diagonal(a, val, wrap=False): """Fill the main diagonal of the given array of any dimensionality. For an array `a` with ``a.ndim >= 2``, the diagonal is the list of locations with indices ``a[i, ..., i]`` all identical. This function modifies the input array in-place, it does not return a value. Parameters ---------- a : array, at least 2-D. Array whose diagonal is to be filled, it gets modified in-place. val : scalar Value to be written on the diagonal, its type must be compatible with that of the array a. wrap : bool For tall matrices in NumPy version up to 1.6.2, the diagonal "wrapped" after N columns. You can have this behavior with this option. This affects only tall matrices. See also -------- diag_indices, diag_indices_from Notes ----- .. versionadded:: 1.4.0 This functionality can be obtained via `diag_indices`, but internally this version uses a much faster implementation that never constructs the indices and uses simple slicing. Examples -------- >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a array([[5, 0, 0], [0, 5, 0], [0, 0, 5]]) The same function can operate on a 4-D array: >>> a = np.zeros((3, 3, 3, 3), int) >>> np.fill_diagonal(a, 4) We only show a few blocks for clarity: >>> a[0, 0] array([[4, 0, 0], [0, 0, 0], [0, 0, 0]]) >>> a[1, 1] array([[0, 0, 0], [0, 4, 0], [0, 0, 0]]) >>> a[2, 2] array([[0, 0, 0], [0, 0, 0], [0, 0, 4]]) The wrap option affects only tall matrices: >>> # tall matrices no wrap >>> a = np.zeros((5, 3),int) >>> fill_diagonal(a, 4) >>> a array([[4, 0, 0], [0, 4, 0], [0, 0, 4], [0, 0, 0], [0, 0, 0]]) >>> # tall matrices wrap >>> a = np.zeros((5, 3),int) >>> fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0], [0, 4, 0], [0, 0, 4], [0, 0, 0], [4, 0, 0]]) >>> # wide matrices >>> a = np.zeros((3, 5),int) >>> fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0, 0, 0], [0, 4, 0, 0, 0], [0, 0, 4, 0, 0]]) """ if a.ndim < 2: raise ValueError("array must be at least 2-d") end = None if a.ndim == 2: # Explicit, fast formula for the common case. For 2-d arrays, we # accept rectangular ones. step = a.shape[1] + 1 #This is needed to don't have tall matrix have the diagonal wrap. if not wrap: end = a.shape[1] * a.shape[1] else: # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(a.shape) == 0): raise ValueError("All dimensions of input must be of equal length") step = 1 + (cumprod(a.shape[:-1])).sum() # Write the value out into the diagonal. a.flat[:end:step] = val def diag_indices(n, ndim=2): """ Return the indices to access the main diagonal of an array. This returns a tuple of indices that can be used to access the main diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``. 
Parameters ---------- n : int The size, along each dimension, of the arrays for which the returned indices can be used. ndim : int, optional The number of dimensions. See also -------- diag_indices_from Notes ----- .. versionadded:: 1.4.0 Examples -------- Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> a[di] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) Now, we create indices to manipulate a 3-D array: >>> d3 = np.diag_indices(2, 3) >>> d3 (array([0, 1]), array([0, 1]), array([0, 1])) And use it to set the diagonal of an array of zeros to 1: >>> a = np.zeros((2, 2, 2), dtype=int) >>> a[d3] = 1 >>> a array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]]) """ idx = arange(n) return (idx,) * ndim def diag_indices_from(arr): """ Return the indices to access the main diagonal of an n-dimensional array. See `diag_indices` for full details. Parameters ---------- arr : array, at least 2-D See Also -------- diag_indices Notes ----- .. versionadded:: 1.4.0 """ if not arr.ndim >= 2: raise ValueError("input array must be at least 2-d") # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(arr.shape) == 0): raise ValueError("All dimensions of input must be of equal length") return diag_indices(arr.shape[0], arr.ndim)
26680
29.113995
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/arraypad.py
""" The arraypad module contains a group of functions to pad values onto the edges of an n-dimensional array. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['pad'] ############################################################################### # Private utility functions. def _arange_ndarray(arr, shape, axis, reverse=False): """ Create an ndarray of `shape` with increments along specified `axis` Parameters ---------- arr : ndarray Input array of arbitrary shape. shape : tuple of ints Shape of desired array. Should be equivalent to `arr.shape` except `shape[axis]` which may have any positive value. axis : int Axis to increment along. reverse : bool If False, increment in a positive fashion from 1 to `shape[axis]`, inclusive. If True, the bounds are the same but the order reversed. Returns ------- padarr : ndarray Output array sized to pad `arr` along `axis`, with linear range from 1 to `shape[axis]` along specified `axis`. Notes ----- The range is deliberately 1-indexed for this specific use case. Think of this algorithm as broadcasting `np.arange` to a single `axis` of an arbitrarily shaped ndarray. """ initshape = tuple(1 if i != axis else shape[axis] for (i, x) in enumerate(arr.shape)) if not reverse: padarr = np.arange(1, shape[axis] + 1) else: padarr = np.arange(shape[axis], 0, -1) padarr = padarr.reshape(initshape) for i, dim in enumerate(shape): if padarr.shape[i] != dim: padarr = padarr.repeat(dim, axis=i) return padarr def _round_ifneeded(arr, dtype): """ Rounds arr inplace if destination dtype is integer. Parameters ---------- arr : ndarray Input array. dtype : dtype The dtype of the destination array. """ if np.issubdtype(dtype, np.integer): arr.round(out=arr) def _prepend_const(arr, pad_amt, val, axis=-1): """ Prepend constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` prepended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), axis=axis) else: return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), arr), axis=axis) def _append_const(arr, pad_amt, val, axis=-1): """ Append constant `val` along `axis` of `arr`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. val : scalar Constant value to use. For best results should be of type `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` constant `val` appended along `axis`. """ if pad_amt == 0: return arr padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) if val == 0: return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), axis=axis) else: return np.concatenate( (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) def _prepend_edge(arr, pad_amt, axis=-1): """ Prepend `pad_amt` to `arr` along `axis` by extending edge values. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. 
axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, extended by `pad_amt` edge values prepended along
        `axis`.

    """
    if pad_amt == 0:
        return arr

    edge_slice = tuple(slice(None) if i != axis else 0
                       for (i, x) in enumerate(arr.shape))

    # Shape to restore singleton dimension after slicing
    pad_singleton = tuple(x if i != axis else 1
                          for (i, x) in enumerate(arr.shape))
    edge_arr = arr[edge_slice].reshape(pad_singleton)
    return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
                          axis=axis)


def _append_edge(arr, pad_amt, axis=-1):
    """
    Append `pad_amt` to `arr` along `axis` by extending edge values.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, extended by `pad_amt` edge values appended along
        `axis`.

    """
    if pad_amt == 0:
        return arr

    edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
                       for (i, x) in enumerate(arr.shape))

    # Shape to restore singleton dimension after slicing
    pad_singleton = tuple(x if i != axis else 1
                          for (i, x) in enumerate(arr.shape))
    edge_arr = arr[edge_slice].reshape(pad_singleton)
    return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
                          axis=axis)


def _prepend_ramp(arr, pad_amt, end, axis=-1):
    """
    Prepend linear ramp along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to prepend.
    end : scalar
        Constant value to use. For best results should be of type
        `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt` values prepended along `axis`. The
        prepended region ramps linearly from the edge value to `end`.

    """
    if pad_amt == 0:
        return arr

    # Generate shape for final concatenated array
    padshape = tuple(x if i != axis else pad_amt
                     for (i, x) in enumerate(arr.shape))

    # Generate an n-dimensional array incrementing along `axis`
    ramp_arr = _arange_ndarray(arr, padshape, axis,
                               reverse=True).astype(np.float64)

    # Appropriate slicing to extract n-dimensional edge along `axis`
    edge_slice = tuple(slice(None) if i != axis else 0
                       for (i, x) in enumerate(arr.shape))

    # Shape to restore singleton dimension after slicing
    pad_singleton = tuple(x if i != axis else 1
                          for (i, x) in enumerate(arr.shape))

    # Extract edge, reshape to original rank, and extend along `axis`
    edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)

    # Linear ramp
    slope = (end - edge_pad) / float(pad_amt)
    ramp_arr = ramp_arr * slope
    ramp_arr += edge_pad
    _round_ifneeded(ramp_arr, arr.dtype)

    # Ramp values will most likely be float, cast them to the same type as arr
    return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)


def _append_ramp(arr, pad_amt, end, axis=-1):
    """
    Append linear ramp along `axis`.

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    pad_amt : int
        Amount of padding to append.
    end : scalar
        Constant value to use. For best results should be of type
        `arr.dtype`; if not `arr.dtype` will be cast to `arr.dtype`.
    axis : int
        Axis along which to pad `arr`.

    Returns
    -------
    padarr : ndarray
        Output array, with `pad_amt` values appended along `axis`. The
        appended region ramps linearly from the edge value to `end`.
""" if pad_amt == 0: return arr # Generate shape for final concatenated array padshape = tuple(x if i != axis else pad_amt for (i, x) in enumerate(arr.shape)) # Generate an n-dimensional array incrementing along `axis` ramp_arr = _arange_ndarray(arr, padshape, axis, reverse=False).astype(np.float64) # Slice a chunk from the edge to calculate stats on edge_slice = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract edge, reshape to original rank, and extend along `axis` edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) # Linear ramp slope = (end - edge_pad) / float(pad_amt) ramp_arr = ramp_arr * slope ramp_arr += edge_pad _round_ifneeded(ramp_arr, arr.dtype) # Ramp values will most likely be float, cast them to the same type as arr return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) def _prepend_max(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` maximum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The prepended region is the maximum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on max_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_max(arr, pad_amt, num, axis=-1): """ Pad one `axis` of `arr` with the maximum of the last `num` elements. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate maximum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: max_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: max_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate max, reshape to add singleton dimension back max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _prepend_mean(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the mean of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on mean_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_mean(arr, pad_amt, num, axis=-1): """ Append `pad_amt` mean values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate mean. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the maximum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: mean_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: mean_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate mean, reshape to add singleton dimension back mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) _round_ifneeded(mean_chunk, arr.dtype) # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_med(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the median of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on med_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) def _append_med(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate median. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the median of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: med_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: med_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate median, reshape to add singleton dimension back med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) _round_ifneeded(med_chunk, arr.dtype) # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` return np.concatenate( (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) def _prepend_min(arr, pad_amt, num, axis=-1): """ Prepend `pad_amt` minimum values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to prepend. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values prepended along `axis`. The prepended region is the minimum of the first `num` values along `axis`. """ if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _prepend_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on min_slice = tuple(slice(None) if i != axis else slice(num) for (i, x) in enumerate(arr.shape)) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), axis=axis) def _append_min(arr, pad_amt, num, axis=-1): """ Append `pad_amt` median values along `axis`. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : int Amount of padding to append. num : int Depth into `arr` along `axis` to calculate minimum. Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt` values appended along `axis`. The appended region is the minimum of the final `num` values along `axis`. 
""" if pad_amt == 0: return arr # Equivalent to edge padding for single value, so do that instead if num == 1: return _append_edge(arr, pad_amt, axis) # Use entire array if `num` is too large if num is not None: if num >= arr.shape[axis]: num = None # Slice a chunk from the edge to calculate stats on end = arr.shape[axis] - 1 if num is not None: min_slice = tuple( slice(None) if i != axis else slice(end, end - num, -1) for (i, x) in enumerate(arr.shape)) else: min_slice = tuple(slice(None) for x in arr.shape) # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) # Extract slice, calculate min, reshape to add singleton dimension back min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), axis=axis) def _pad_ref(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by reflection. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of reflection; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with reflected values from the original array. Notes ----- This algorithm does not pad with repetition, i.e. the edges are not repeated in the reflection. For that behavior, use `mode='symmetric'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1) for (i, x) in enumerate(arr.shape)) ref_chunk1 = arr[ref_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: ref_chunk1 = ref_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) ref_chunk1 = 2 * edge_chunk - ref_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] - 1 end = arr.shape[axis] - 1 ref_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) ref_chunk2 = arr[ref_slice][rev_idx] if pad_amt[1] == 1: ref_chunk2 = ref_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) ref_chunk2 = 2 * edge_chunk - ref_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) def _pad_sym(arr, pad_amt, method, axis=-1): """ Pad `axis` of `arr` by symmetry. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. method : str Controls method of symmetry; options are 'even' or 'odd'. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded with symmetric values from the original array. Notes ----- This algorithm DOES pad with repetition, i.e. the edges are repeated. For padding without repeated edges, use `mode='reflect'`. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0]) for (i, x) in enumerate(arr.shape)) rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) for (i, x) in enumerate(arr.shape)) sym_chunk1 = arr[sym_slice][rev_idx] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: sym_chunk1 = sym_chunk1.reshape(pad_singleton) # Memory/computationally more expensive, only do this if `method='odd'` if 'odd' in method and pad_amt[0] > 0: edge_slice1 = tuple(slice(None) if i != axis else 0 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice1].reshape(pad_singleton) sym_chunk1 = 2 * edge_chunk - sym_chunk1 del edge_chunk ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after start = arr.shape[axis] - pad_amt[1] end = arr.shape[axis] sym_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) sym_chunk2 = arr[sym_slice][rev_idx] if pad_amt[1] == 1: sym_chunk2 = sym_chunk2.reshape(pad_singleton) if 'odd' in method: edge_slice2 = tuple(slice(None) if i != axis else -1 for (i, x) in enumerate(arr.shape)) edge_chunk = arr[edge_slice2].reshape(pad_singleton) sym_chunk2 = 2 * edge_chunk - sym_chunk2 del edge_chunk # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) def _pad_wrap(arr, pad_amt, axis=-1): """ Pad `axis` of `arr` via wrapping. Parameters ---------- arr : ndarray Input array of arbitrary shape. pad_amt : tuple of ints, length 2 Padding to (prepend, append) along `axis`. axis : int Axis along which to pad `arr`. Returns ------- padarr : ndarray Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` values appended along `axis`. Both regions are padded wrapped values from the opposite end of `axis`. Notes ----- This method of padding is also known as 'tile' or 'tiling'. The modes 'reflect', 'symmetric', and 'wrap' must be padded with a single function, lest the indexing tricks in non-integer multiples of the original shape would violate repetition in the final iteration. 
""" # Implicit booleanness to test for zero (or None) in any scalar type if pad_amt[0] == 0 and pad_amt[1] == 0: return arr ########################################################################## # Prepended region # Slice off a reverse indexed chunk from near edge to pad `arr` before start = arr.shape[axis] - pad_amt[0] end = arr.shape[axis] wrap_slice = tuple(slice(None) if i != axis else slice(start, end) for (i, x) in enumerate(arr.shape)) wrap_chunk1 = arr[wrap_slice] # Shape to restore singleton dimension after slicing pad_singleton = tuple(x if i != axis else 1 for (i, x) in enumerate(arr.shape)) if pad_amt[0] == 1: wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) ########################################################################## # Appended region # Slice off a reverse indexed chunk from far edge to pad `arr` after wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1]) for (i, x) in enumerate(arr.shape)) wrap_chunk2 = arr[wrap_slice] if pad_amt[1] == 1: wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) # Concatenate `arr` with both chunks, extending along `axis` return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) def _normalize_shape(ndarray, shape, cast_to_int=True): """ Private function which does some checks and normalizes the possibly much simpler representations of 'pad_width', 'stat_length', 'constant_values', 'end_values'. Parameters ---------- narray : ndarray Input ndarray shape : {sequence, array_like, float, int}, optional The width of padding (pad_width), the number of elements on the edge of the narray used for statistics (stat_length), the constant value(s) to use when filling padded regions (constant_values), or the endpoint target(s) for linear ramps (end_values). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis where `N` is rank of `narray`. ((before, after),) yields same before and after constants for each axis. (constant,) or val is a shortcut for before = after = constant for all axes. cast_to_int : bool, optional Controls if values in ``shape`` will be rounded and cast to int before being returned. Returns ------- normalized_shape : tuple of tuples val => ((val, val), (val, val), ...) [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...) ((val1, val2), (val3, val4), ...) => no change [[val1, val2], ] => ((val1, val2), (val1, val2), ...) ((val1, val2), ) => ((val1, val2), (val1, val2), ...) [[val , ], ] => ((val, val), (val, val), ...) ((val , ), ) => ((val, val), (val, val), ...) """ ndims = ndarray.ndim # Shortcut shape=None if shape is None: return ((None, None), ) * ndims # Convert any input `info` to a NumPy array shape_arr = np.asarray(shape) try: shape_arr = np.broadcast_to(shape_arr, (ndims, 2)) except ValueError: fmt = "Unable to create correctly shaped tuple from %s" raise ValueError(fmt % (shape,)) # Cast if necessary if cast_to_int is True: shape_arr = np.round(shape_arr).astype(int) # Convert list of lists to tuple of tuples return tuple(tuple(axis) for axis in shape_arr.tolist()) def _validate_lengths(narray, number_elements): """ Private function which does some checks and reformats pad_width and stat_length using _normalize_shape. Parameters ---------- narray : ndarray Input ndarray number_elements : {sequence, int}, optional The width of padding (pad_width) or the number of elements on the edge of the narray used for statistics (stat_length). ((before_1, after_1), ... (before_N, after_N)) unique number of elements for each axis. 
((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Returns ------- _validate_lengths : tuple of tuples int => ((int, int), (int, int), ...) [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) ((int1, int2), (int3, int4), ...) => no change [[int1, int2], ] => ((int1, int2), (int1, int2), ...) ((int1, int2), ) => ((int1, int2), (int1, int2), ...) [[int , ], ] => ((int, int), (int, int), ...) ((int , ), ) => ((int, int), (int, int), ...) """ normshp = _normalize_shape(narray, number_elements) for i in normshp: chk = [1 if x is None else x for x in i] chk = [1 if x >= 0 else -1 for x in chk] if (chk[0] < 0) or (chk[1] < 0): fmt = "%s cannot contain negative values." raise ValueError(fmt % (number_elements,)) return normshp ############################################################################### # Public functions def pad(array, pad_width, mode, **kwargs): """ Pads an array. Parameters ---------- array : array_like of rank N Input array pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode : str or function One of the following string values or a user supplied function. 'constant' Pads with a constant value. 'edge' Pads with the edge values of array. 'linear_ramp' Pads with the linear ramp between end_value and the array edge value. 'maximum' Pads with the maximum value of all or part of the vector along each axis. 'mean' Pads with the mean value of all or part of the vector along each axis. 'median' Pads with the median value of all or part of the vector along each axis. 'minimum' Pads with the minimum value of all or part of the vector along each axis. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. 'wrap' Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. <function> Padding function, see Notes. stat_length : sequence or int, optional Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. ((before_1, after_1), ... (before_N, after_N)) unique statistic lengths for each axis. ((before, after),) yields same before and after statistic lengths for each axis. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : sequence or int, optional Used in 'constant'. The values to set the padded values for each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad constants for each axis. ((before, after),) yields same before and after constants for each axis. (constant,) or int is a shortcut for before = after = constant for all axes. Default is 0. end_values : sequence or int, optional Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ((before_1, after_1), ... (before_N, after_N)) unique end values for each axis. ((before, after),) yields same before and after end values for each axis. 
(constant,) or int is a shortcut for before = after = end value for
        all axes.
        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'.  The 'even' style is the
        default with an unaltered reflection around the edge value.  For
        the 'odd' style, the extended part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    .. versionadded:: 1.7.0

    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes.  This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should return a rank 1 array equal in
    length to the vector argument with padded values replaced. It has the
    following signature::

        padding_func(vector, iaxis_pad_width, iaxis, kwargs)

    where

        vector : ndarray
            A rank 1 array already padded with zeros.  Padded values are
            vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
        iaxis_pad_width : tuple
            A 2-tuple of ints, iaxis_pad_width[0] represents the number of
            values padded at the beginning of vector where
            iaxis_pad_width[1] represents the number of values padded at
            the end of vector.
        iaxis : int
            The axis currently being calculated.
        kwargs : dict
            Any keyword arguments the function requires.

    Examples
    --------
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])

    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])

    >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])

    >>> np.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])

    >>> np.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> np.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])

    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])

    >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])

    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])

    >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])

    >>> np.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])

    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    ...
return vector >>> a = np.arange(6) >>> a = a.reshape((2, 3)) >>> np.pad(a, 2, pad_with) array([[10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 0, 1, 2, 10, 10], [10, 10, 3, 4, 5, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10]]) >>> np.pad(a, 2, pad_with, padder=100) array([[100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 0, 1, 2, 100, 100], [100, 100, 3, 4, 5, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) """ if not np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') narray = np.array(array) pad_width = _validate_lengths(narray, pad_width) allowedkwargs = { 'constant': ['constant_values'], 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], 'median': ['stat_length'], 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], 'wrap': [], } kwdefaults = { 'stat_length': None, 'constant_values': 0, 'end_values': 0, 'reflect_type': 'even', } if isinstance(mode, np.compat.basestring): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: raise ValueError('%s keyword not in allowed keywords %s' % (key, allowedkwargs[mode])) # Set kwarg defaults for kw in allowedkwargs[mode]: kwargs.setdefault(kw, kwdefaults[kw]) # Need to only normalize particular keywords. for i in kwargs: if i == 'stat_length': kwargs[i] = _validate_lengths(narray, kwargs[i]) if i in ['end_values', 'constant_values']: kwargs[i] = _normalize_shape(narray, kwargs[i], cast_to_int=False) else: # Drop back to old, slower np.apply_along_axis mode for user-supplied # vector function function = mode # Create a new padded array rank = list(range(narray.ndim)) total_dim_increase = [np.sum(pad_width[i]) for i in rank] offset_slices = [slice(pad_width[i][0], pad_width[i][0] + narray.shape[i]) for i in rank] new_shape = np.array(narray.shape) + total_dim_increase newmat = np.zeros(new_shape, narray.dtype) # Insert the original array into the padded array newmat[offset_slices] = narray # This is the core of pad ... for iaxis in rank: np.apply_along_axis(function, iaxis, newmat, pad_width[iaxis], iaxis, kwargs) return newmat # If we get here, use new padding method newmat = narray.copy() # API preserved, but completely new algorithm which pads by building the # entire block to pad before/after `arr` with in one step, for each axis. 
if mode == 'constant': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['constant_values'])): newmat = _prepend_const(newmat, pad_before, before_val, axis) newmat = _append_const(newmat, pad_after, after_val, axis) elif mode == 'edge': for axis, (pad_before, pad_after) in enumerate(pad_width): newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) elif mode == 'linear_ramp': for axis, ((pad_before, pad_after), (before_val, after_val)) \ in enumerate(zip(pad_width, kwargs['end_values'])): newmat = _prepend_ramp(newmat, pad_before, before_val, axis) newmat = _append_ramp(newmat, pad_after, after_val, axis) elif mode == 'maximum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_max(newmat, pad_before, chunk_before, axis) newmat = _append_max(newmat, pad_after, chunk_after, axis) elif mode == 'mean': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) newmat = _append_mean(newmat, pad_after, chunk_after, axis) elif mode == 'median': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_med(newmat, pad_before, chunk_before, axis) newmat = _append_med(newmat, pad_after, chunk_after, axis) elif mode == 'minimum': for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ in enumerate(zip(pad_width, kwargs['stat_length'])): newmat = _prepend_min(newmat, pad_before, chunk_before, axis) newmat = _append_min(newmat, pad_after, chunk_after, axis) elif mode == 'reflect': for axis, (pad_before, pad_after) in enumerate(pad_width): if narray.shape[axis] == 0: # Axes with non-zero padding cannot be empty. if pad_before > 0 or pad_after > 0: raise ValueError("There aren't any elements to reflect" " in axis {} of `array`".format(axis)) # Skip zero padding on empty axes. continue # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. if ((pad_before > 0) or (pad_after > 0)) and newmat.shape[axis] == 1: # Extending singleton dimension for 'reflect' is legacy # behavior; it really should raise an error. newmat = _prepend_edge(newmat, pad_before, axis) newmat = _append_edge(newmat, pad_after, axis) continue method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] - 1 while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_ref(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) elif mode == 'symmetric': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. 
method = kwargs['reflect_type'] safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_sym(newmat, (pad_iter_b, pad_iter_a), method, axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) elif mode == 'wrap': for axis, (pad_before, pad_after) in enumerate(pad_width): # Recursive padding along any axis where `pad_amt` is too large # for indexing tricks. We can only safely pad the original axis # length, to keep the period of the reflections consistent. safe_pad = newmat.shape[axis] while ((pad_before > safe_pad) or (pad_after > safe_pad)): pad_iter_b = min(safe_pad, safe_pad * (pad_before // safe_pad)) pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) pad_before -= pad_iter_b pad_after -= pad_iter_a safe_pad += pad_iter_b + pad_iter_a newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) return newmat
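# Illustrative sketch (added for this write-up, not part of the original
# module): when a requested pad width exceeds the array length, `pad`
# applies 'wrap'/'reflect'/'symmetric' in safe-sized passes (the
# `safe_pad` loops above), which keeps the period of the repetition
# consistent. Guarded so it only runs when the file is executed directly.
if __name__ == "__main__":
    a = np.array([1, 2, 3])
    # 'wrap' with pad width 5 > len(a): the tiling period stays 3.
    assert np.pad(a, (5, 0), 'wrap').tolist() == [2, 3, 1, 2, 3, 1, 2, 3]
    # 'reflect' repeats with period 2 * (len(a) - 1) = 4; edges not doubled.
    assert np.pad(a, (0, 5), 'reflect').tolist() == [1, 2, 3, 2, 1, 2, 3, 2]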
51857
33.897712
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/function_base.py
from __future__ import division, absolute_import, print_function import collections import re import sys import warnings import operator import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d, transpose from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar, absolute, AxisError ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10, not_equal, subtract ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import ( _insert, add_docstring, digitize, bincount, normalize_axis_index, interp as compiled_interp, interp_complex as compiled_interp_complex ) from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring if sys.version_info[0] < 3: # Force range to be a generator, for np.delete's usage. range = xrange import __builtin__ as builtins else: import builtins __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def rot90(m, k=1, axes=(0,1)): """ Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes: (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. .. versionadded:: 1.12.0 Returns ------- y : ndarray A rotated view of `m`. See Also -------- flip : Reverse the order of elements in an array along the given axis. fliplr : Flip an array horizontally. flipud : Flip an array vertically. Notes ----- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) Examples -------- >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]) """ axes = tuple(axes) if len(axes) != 2: raise ValueError("len(axes) must be 2.") m = asanyarray(m) if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: raise ValueError("Axes must be different.") if (axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim): raise ValueError("Axes={} out of range for array of ndim={}." 
.format(axes, m.ndim))

    k %= 4

    if k == 0:
        return m[:]
    if k == 2:
        return flip(flip(m, axes[0]), axes[1])

    axes_list = arange(0, m.ndim)
    (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
                                                axes_list[axes[0]])

    if k == 1:
        return transpose(flip(m, axes[1]), axes_list)
    else:
        # k == 3
        return flip(transpose(m, axes_list), axes[1])


def flip(m, axis):
    """
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    m : array_like
        Input array.
    axis : integer
        Axis in array whose entries are reversed.

    Returns
    -------
    out : array_like
        A view of `m` with the entries of axis reversed.  Since a view is
        returned, this operation is done in constant time.

    See Also
    --------
    flipud : Flip an array vertically (axis=0).
    fliplr : Flip an array horizontally (axis=1).

    Notes
    -----
    flip(m, 0) is equivalent to flipud(m).
    flip(m, 1) is equivalent to fliplr(m).
    flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.

    Examples
    --------
    >>> A = np.arange(8).reshape((2,2,2))
    >>> A
    array([[[0, 1],
            [2, 3]],
           [[4, 5],
            [6, 7]]])

    >>> np.flip(A, 0)
    array([[[4, 5],
            [6, 7]],
           [[0, 1],
            [2, 3]]])

    >>> np.flip(A, 1)
    array([[[2, 3],
            [0, 1]],
           [[6, 7],
            [4, 5]]])

    >>> A = np.random.randn(3, 4, 5)
    >>> np.all(np.flip(A, 2) == A[:, :, ::-1, ...])
    True
    """
    if not hasattr(m, 'ndim'):
        m = asarray(m)
    indexer = [slice(None)] * m.ndim
    try:
        indexer[axis] = slice(None, None, -1)
    except IndexError:
        raise ValueError("axis=%i is invalid for the %i-dimensional input array"
                         % (axis, m.ndim))
    return m[tuple(indexer)]


def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
       Input object.

    Returns
    -------
    b : bool
        Return ``True`` if the object has an iterator method or is a
        sequence and ``False`` otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    True
    >>> np.iterable(2)
    False

    """
    try:
        iter(y)
    except TypeError:
        return False
    return True


def _hist_bin_sqrt(x):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data
    size. Used by many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    return x.ptp() / np.sqrt(x.size)


def _hist_bin_sturges(x):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    return x.ptp() / (np.log2(x.size) + 1.0)


def _hist_bin_rice(x):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    return x.ptp() / (2.0 * x.size ** (1.0 / 3))


def _hist_bin_scott(x):
    """
    Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
    and inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)


def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            #   g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0


def _hist_bin_fd(x):
    """
    The Freedman-Diaconis histogram bin estimator.

    The Freedman-Diaconis rule uses interquartile range (IQR) to
    estimate binwidth. It is considered a variation of the Scott rule
    with more robustness as the IQR is less affected by outliers than
    the standard deviation. However, the IQR depends on fewer points
    than the standard deviation, so it is less accurate, especially for
    long tailed distributions.

    If the IQR is 0, this function returns a bin width of 0, and the
    caller falls back to a single bin. Binwidth is inversely
    proportional to the cube root of data size (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)


def _hist_bin_auto(x):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x`. The Sturges estimator
    is quite good for small (<1000) datasets and is the default in the R
    language. This method gives good off the shelf behaviour.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    # There is no need to check for zero here. If ptp is zero, so is the
    # IQR, and vice versa: either both are zero or neither is.
    return min(_hist_bin_fd(x), _hist_bin_sturges(x))


# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}


def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default).
If `bins` is a sequence, it defines the bin edges, including the
        rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string from the list below, `histogram` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all around performance.

        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.

        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.

        'scott'
            Less robust estimator that takes into account data
            variability and data size.

        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.

        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.

        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.

    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional
        This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
        behavior. It will be removed in NumPy 2.0.0. Use the ``density``
        keyword instead. If ``False``, the result will contain the
        number of samples in each bin. If ``True``, the result is the
        value of the probability *density* function at the bin,
        normalized such that the *integral* over the range is 1. Note
        that this latter behavior is known to be buggy with unequal bin
        widths; use ``density`` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

        Overrides the ``normed`` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open.
In other words, if `bins` is::

        [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which
    *includes* 4.

    .. versionadded:: 1.11.0

    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.

    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD.  Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'Scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'Rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'Sturges'
        .. math:: n_h = \log_{2}n + 1

        The number of bins is the base 2 log of ``a.size``.  This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'Doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})

            g_1 = mean[(\frac{x - \mu}{\sigma})^3]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'Sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    ..
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if weights.shape != a.shape: raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() # Do not modify the original value of range so we can check for `None` if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. first_edge, last_edge = 0.0, 1.0 else: first_edge, last_edge = a.min() + 0.0, a.max() + 0.0 else: first_edge, last_edge = [mi + 0.0 for mi in range] if first_edge > last_edge: raise ValueError( 'max must be larger than min in range parameter.') if not np.all(np.isfinite([first_edge, last_edge])): raise ValueError( 'range parameter must be finite.') if first_edge == last_edge: first_edge -= 0.5 last_edge += 0.5 # density overrides the normed keyword if density is not None: normed = False # parse the overloaded bins argument n_equal_bins = None bin_edges = None if isinstance(bins, basestring): bin_name = bins # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated if bin_name not in _hist_bin_selectors: raise ValueError( "{!r} is not a valid estimator for `bins`".format(bin_name)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") # Make a reference to `a` b = a # Update the reference if the range needs truncation if range is not None: keep = (a >= first_edge) keep &= (a <= last_edge) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: n_equal_bins = 1 else: # Do not call selectors on empty arrays width = _hist_bin_selectors[bin_name](b) if width: n_equal_bins = int(np.ceil((last_edge - first_edge) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. n_equal_bins = 1 elif np.ndim(bins) == 0: try: n_equal_bins = operator.index(bins) except TypeError: raise TypeError( '`bins` must be an integer, a string, or an array') if n_equal_bins < 1: raise ValueError('`bins` must be positive, when an integer') elif np.ndim(bins) == 1: bin_edges = np.asarray(bins) if np.any(bin_edges[:-1] > bin_edges[1:]): raise ValueError( '`bins` must increase monotonically, when an array') else: raise ValueError('`bins` must be 1d, when an array') del bins # compute the bins if only the count was specified if n_equal_bins is not None: bin_edges = linspace( first_edge, last_edge, n_equal_bins + 1, endpoint=True) # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 # The fast path uses bincount, but that only works for certain types # of weight simple_weights = ( weights is None or np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, complex) ) if n_equal_bins is not None and simple_weights: # Fast algorithm for equal bins # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). 
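    # Note on the fast path below: each value v in [first_edge, last_edge]
    # is mapped to the bin index int((v - first_edge) * norm), with
    # norm = n_equal_bins / (last_edge - first_edge); the decrement /
    # increment fix-ups afterwards handle values that the floating-point
    # scaling places within ~1 ULP of the wrong side of a bin edge.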
# Initialize empty histogram n = np.zeros(n_equal_bins, ntype) # Pre-compute histogram scaling factor norm = n_equal_bins / (last_edge - first_edge) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= first_edge) keep &= (tmp_a <= last_edge) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) tmp_a = tmp_a_data - first_edge tmp_a *= norm # Compute the bin indices, and for values that lie exactly on # last_edge we need to subtract one indices = tmp_a.astype(np.intp) indices[indices == n_equal_bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) & (indices != n_equal_bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=n_equal_bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=n_equal_bins) else: n += np.bincount(indices, weights=tmp_w, minlength=n_equal_bins).astype(ntype) else: # Compute via cumulative histogram cum_n = np.zeros(bin_edges.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) cum_n += np.r_[sa.searchsorted(bin_edges[:-1], 'left'), sa.searchsorted(bin_edges[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bin_edges[:-1], 'left'), sa.searchsorted(bin_edges[-1], 'right')] cum_n += cw[bin_index] n = np.diff(cum_n) if density: db = array(np.diff(bin_edges), float) return n/db/n.sum(), bin_edges elif normed: # deprecated, buggy behavior. Remove for NumPy 2.0.0 db = array(np.diff(bin_edges), float) return n/(n*db).sum(), bin_edges else: return n, bin_edges def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. 
Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: if not np.all(np.isfinite(range)): raise ValueError( 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. 
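    # Conceptually, the flattening below is a mixed-radix encoding: with
    # the axes taken in argsorted order ni, a sample whose per-axis bin
    # numbers are (b_0, ..., b_{D-1}) maps to the flat index
    # b_0 * prod(nbin[ni[1:]]) + b_1 * prod(nbin[ni[2:]]) + ... + b_{D-1},
    # so a single bincount over xy fills the whole histogram in one pass.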
ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which to average `a`. The default, axis=None, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
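With ``returted`` set to True the sum of the weights is returned as well; here each row's weights sum to one: >>> np.average(data, axis=1, weights=[1./4, 3./4], returned=True) (array([ 0.75, 2.75, 4.75]), array([ 1., 1., 1.]))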
""" a = np.asanyarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: wgt = np.asanyarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') else: result_dtype = np.result_type(a.dtype, wgt.dtype) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) wgt = wgt.swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=result_dtype) if np.any(scl == 0.0): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl if returned: if scl.shape != avg.shape: scl = np.broadcast_to(scl, avg.shape).copy() return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print('ValueError') ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray or scalar The input domain. condlist : list of bool arrays or bool scalars Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. 
If one extra function is given, i.e. if ``len(funclist) == len(condlist) + 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take a 1d array as input and give a 1d array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is called as ``f(x, alpha=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sign function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. >>> y = -2 >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) array(2) """ x = asanyarray(x) n2 = len(funclist) # undocumented: single condition is promoted to a list of one condition if isscalar(condlist) or ( not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) if n == n2 - 1: # compute the "otherwise" condition. condelse = ~np.any(condlist, axis=0, keepdims=True) condlist = np.concatenate([condlist, condelse], axis=0) n += 1 elif n != n2: raise ValueError( "with {} condition(s), either {} or {} functions are expected" .format(n, n, n+1) ) y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False.
Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning, stacklevel=2) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning, stacklevel=2) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. 
Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences in the interior points and either first or second order accurate one-sides (forward or backwards) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: 1. single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. Default: 1. edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.11.0 Returns ------- gradient : ndarray or list of ndarray A set of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. For instance a uniform spacing: >>> x = np.arange(f.size) >>> np.gradient(f, x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2. 
, 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) array([ 1., 2., 4., 6., 7.]) >>> np.gradient(f, edge_order=2) array([-0., 2., 4., 6., 8.]) The `axis` keyword can be used to specify a subset of axes along which the gradient is calculated >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous derivatives) and letting :math:`h_{*}` be a non-homogeneous step size, the finite difference coefficients are computed by minimising the consistency error :math:`\\eta_{i}`: .. math:: \\eta_{i} = f_{i}^{\\left(1\\right)} - \\left[ \\alpha f\\left(x_{i}\\right) + \\beta f\\left(x_{i} + h_{d}\\right) + \\gamma f\\left(x_{i}-h_{s}\\right) \\right] By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` with their Taylor series expansion, this translates into solving the following linear system: .. math:: \\left\\{ \\begin{array}{r} \\alpha+\\beta+\\gamma=0 \\\\ -\\beta h_{d}+\\gamma h_{s}=1 \\\\ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 \\end{array} \\right. The resulting approximation of :math:`f_{i}^{(1)}` is the following: .. math:: \\hat f_{i}^{(1)} = \\frac{ h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + h_{s}h_{d}^{2}}{h_{d} + h_{s}}\\right) It is worth noting that if :math:`h_{s}=h_{d}` (i.e., data are evenly spaced) we find the standard second order approximation: .. math:: \\hat f_{i}^{(1)}= \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + \\mathcal{O}\\left(h^{2}\\right) With a similar procedure the forward/backward approximations used for boundaries can be derived. References ---------- .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics (Texts in Applied Mathematics). New York: Springer. .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations in Geophysical Fluid Dynamics. New York: Springer. .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on Arbitrarily Spaced Grids, Mathematics of Computation 51, no. 184 : 699-706. `PDF <http://www.ams.org/journals/mcom/1988-51-184/ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
""" f = np.asanyarray(f) N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) else: axes = _nx.normalize_axis_tuple(axes, N) len_axes = len(axes) n = len(varargs) if n == 0: # no spacing argument - use 1 in all axes dx = [1.0] * len_axes elif n == 1 and np.ndim(varargs[0]) == 0: # single scalar for all axes dx = varargs * len_axes elif n == len_axes: # scalar or 1d array for each axis dx = list(varargs) for i, distances in enumerate(dx): if np.ndim(distances) == 0: continue elif np.ndim(distances) != 1: raise ValueError("distances must be either scalars or 1d") if len(distances) != f.shape[axes[i]]: raise ValueError("when 1d, distances must match " "the length of the corresponding dimension") diffx = np.diff(distances) # if distances are constant reduce to the scalar case # since it brings a consistent speedup if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx else: raise TypeError("invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype if otype.type is np.datetime64: # the timedelta dtype with the same unit information otype = np.dtype(otype.name.replace('datetime', 'timedelta')) # view as timedelta to allow addition f = f.view(otype) elif otype.type is np.timedelta64: pass elif np.issubdtype(otype, np.inexact): pass else: # all other types convert to floating point otype = np.double for axis, ax_dx in zip(axes, dx): if f.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation out = np.empty_like(f, dtype=otype) # spacing for the current axis uniform_spacing = np.ndim(ax_dx) == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) if uniform_spacing: out[slice1] = (f[slice4] - f[slice2]) / (2. * ax_dx) else: dx1 = ax_dx[0:-1] dx2 = ax_dx[1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 dx_0 = ax_dx if uniform_spacing else ax_dx[0] # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) out[slice1] = (f[slice2] - f[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = ax_dx if uniform_spacing else ax_dx[-1] # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) out[slice1] = (f[slice2] - f[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: a = -1.5 / ax_dx b = 2. / ax_dx c = -0.5 / ax_dx else: dx1 = ax_dx[0] dx2 = ax_dx[1] a = -(2. 
* dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: a = 0.5 / ax_dx b = -2. / ax_dx c = 1.5 / ax_dx else: dx1 = ax_dx[-2] dx2 = ax_dx[-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len_axes == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along the given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. If zero, the input is returned as-is. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The type of the output is the same as the type of the difference between any two elements of `a`. This is the same as the type of `a` in most cases. A notable exception is `datetime64`, which results in a `timedelta64` output array. See Also -------- gradient, ediff1d, cumsum Notes ----- Type is preserved for boolean arrays, so the result will contain `False` when consecutive elements are the same and `True` when they differ. For unsigned integer arrays, the results will also be unsigned. This should not be surprising, as the result is consistent with calculating the difference directly: >>> u8_arr = np.array([1, 0], dtype=np.uint8) >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] array(255, np.uint8) If this is not desirable, then the array should be cast to a larger integer type first: >>> i16_arr = u8_arr.astype(np.int16) >>> np.diff(i16_arr) array([-1], dtype=int16) Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) >>> np.diff(x) array([1, 1], dtype='timedelta64[D]') """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = a.ndim axis = normalize_axis_index(axis, nd) slice1 = [slice(None)] * nd slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) op = not_equal if a.dtype == np.bool_ else subtract for _ in range(n): a = op(a[slice1], a[slice2]) return a def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. 
xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of float or complex The y-coordinates of the data points, same length as `xp`. left : optional float or complex corresponding to fp Value to return for `x < xp[0]`, default is `fp[0]`. right : optional float or complex corresponding to fp Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. versionadded:: 1.10.0 Returns ------- y : float or complex (corresponding to fp) or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) Complex interpolation >>> x = [1.5, 4.0] >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) array([ 0.+1.j , 1.+1.5j]) """ fp = np.asarray(fp) if np.iscomplexobj(fp): interp_func = compiled_interp_complex input_dtype = np.complex128 else: interp_func = compiled_interp input_dtype = np.float64 if period is None: if isinstance(x, (float, int, number)): return interp_func([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return interp_func([x], xp, fp, left, right).item() else: return interp_func(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=input_dtype) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return interp_func(x, xp, fp, left, right) else: return interp_func(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. 
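Equivalently, the angle is computed as ``np.arctan2(z.imag, z.real)``, scaled by ``180/pi`` when `deg` is true.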
Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. 
>>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. """ try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]]) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N, it will be repeated, and if elements of `a` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ if not isinstance(arr, np.ndarray): raise TypeError("argument 1 must be numpy.ndarray, " "not {name}".format(name=type(arr).__name__)) return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. 
Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return # See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) _ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) _ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) _SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) def _parse_gufunc_signature(signature): """ Parse string signatures for a generalized universal function. Arguments --------- signature : string Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` for ``np.matmul``. Returns ------- Tuple of input and output core dimensions parsed from the signature, each of the form List[Tuple[str, ...]]. """ if not re.match(_SIGNATURE, signature): raise ValueError( 'not a valid gufunc signature: {}'.format(signature)) return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) def _update_dim_sizes(dim_sizes, arg, core_dims): """ Incrementally check and update core dimension sizes for a single argument. Arguments --------- dim_sizes : Dict[str, int] Sizes of existing core dimensions. Will be updated in-place. arg : ndarray Argument to examine. core_dims : Tuple[str, ...] Core dimensions for this argument. """ if not core_dims: return num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( '%d-dimensional argument does not have enough ' 'dimensions for all core dimensions %r' % (arg.ndim, core_dims)) core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( 'inconsistent size for core dimension %r: %r vs %r' % (dim, size, dim_sizes[dim])) else: dim_sizes[dim] = size def _parse_input_dimensions(args, input_core_dims): """ Parse broadcast and core dimensions for vectorize with a signature. Arguments --------- args : Tuple[ndarray, ...] Tuple of input arguments to examine. input_core_dims : List[Tuple[str, ...]] List of core dimensions corresponding to each input. Returns ------- broadcast_shape : Tuple[int, ...] Common shape to broadcast all non-core dimensions to. dim_sizes : Dict[str, int] Common sizes for named core dimensions. 
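For example, with ``args = (np.zeros((2, 3, 4)), np.zeros((4, 5)))`` and ``input_core_dims = [('n', 'p'), ('p', 'q')]``, the broadcast shape is ``(2,)`` and the core sizes are ``{'n': 3, 'p': 4, 'q': 5}``.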
""" broadcast_args = [] dim_sizes = {} for arg, core_dims in zip(args, input_core_dims): _update_dim_sizes(dim_sizes, arg, core_dims) ndim = arg.ndim - len(core_dims) dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) broadcast_args.append(dummy_array) broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) return broadcast_shape, dim_sizes def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): """Helper for calculating broadcast shapes with core dimensions.""" return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) for core_dims in list_of_core_dims] def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): """Helper for creating output arrays in vectorize.""" shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) arrays = tuple(np.empty(shape, dtype=dtype) for shape, dtype in zip(shapes, dtypes)) return arrays class vectorize(object): """ vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns an single or tuple of numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for vectorized matrix-vector multiplication. If provided, ``pyfunc`` will be called with (and expected to return) arrays with shapes given by the size of corresponding core dimensions. By default, ``pyfunc`` is assumed to take scalars as input and output. .. versionadded:: 1.12.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... 
return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified: >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified: >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a vectorized calculation of Pearson correlation coefficient and its p-value: >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, ... signature='(n),(n)->(),()') >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) array([[ 1., 2., 1., 0., 0., 0.], [ 0., 1., 2., 1., 0., 0.], [ 0., 0., 1., 2., 1., 0.], [ 0., 0., 0., 1., 2., 1.]]) See Also -------- frompyfunc : Takes an arbitrary Python function and returns a ufunc Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. References ---------- .. [1] NumPy Reference, section `Generalized Universal Function API <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_. 
""" def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None): self.pyfunc = pyfunc self.cache = cache self.signature = signature self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): for char in otypes: if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): otypes = ''.join([_nx.dtype(x).char for x in otypes]) elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) if signature is not None: self._in_and_out_core_dims = _parse_gufunc_signature(signature) else: self._in_and_out_core_dims = None def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes is not None: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) args = [asarray(arg) for arg in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') inputs = [arg.flat[0] for arg in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if self.signature is not None: res = self._vectorize_call_with_signature(func, args) elif not args: res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(a, copy=False, subok=True, dtype=object) for a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: res = tuple([array(x, copy=False, subok=True, dtype=t) for x, t in zip(outputs, otypes)]) return res def _vectorize_call_with_signature(self, func, args): """Vectorized call over positional arguments with a signature.""" input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): raise TypeError('wrong number of positional arguments: ' 'expected %r, got %r' % (len(input_core_dims), len(args))) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( args, input_core_dims) input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, input_core_dims) args = [np.broadcast_to(arg, shape, subok=True) for arg, shape in zip(args, input_shapes)] outputs = None otypes = self.otypes nout = len(output_core_dims) for index in np.ndindex(*broadcast_shape): results = func(*(arg[index] for arg in args)) n_results = len(results) if isinstance(results, tuple) else 1 if nout != n_results: raise ValueError( 'wrong number of outputs from pyfunc: expected %r, got %r' % (nout, n_results)) if nout == 1: results = (results,) if outputs is None: for result, core_dims in zip(results, output_core_dims): _update_dim_sizes(dim_sizes, result, core_dims) if otypes is None: otypes = [asarray(result).dtype for result in results] outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) for output, result in zip(outputs, results): output[index] = result if outputs is None: # did not call the function even once if otypes is None: raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') if builtins.any(dim not in dim_sizes for dims in output_core_dims for dim in dims): raise ValueError('cannot call `vectorize` with a signature ' 'including new output dimensions on size 0 ' 'inputs') outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) return outputs[0] if nout == 1 else outputs def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. 
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is True, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer frequency weights; the number of times each observation vector should be repeated. .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. .. versionadded:: 1.10 Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Notes ----- Assume that the observations are in the columns of the observation array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` as it should. Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.stack((x, y), axis=0) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x)) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if m.ndim > 2: raise ValueError("m has more than 2 dimensions") if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) if y.ndim > 2: raise ValueError("y has more than 2 dimensions") dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if not rowvar and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T X = np.concatenate((X, y), axis=0) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. 
deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- Due to floating point rounding the resulting array may not be Hermitian, the diagonal elements may not be 1, and the elements may not satisfy the inequality abs(a) <= 1. The real and imaginary parts are clipped to the interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c stddev = sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] # Clip real and imaginary parts to [-1, 1]. This does not guarantee # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The Fourier transform of the Bartlett window is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0.
]) Plot the window and its frequency response (requires matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0.
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html ..
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function capable of receiving a single axis argument. It is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = _nx.normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 keepdim = tuple(keepdim) else: keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0.
out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact) and sz > 0: # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) return np.lib.utils._median_nancheck(part, rout, axis, out) else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array.
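# `indexer` selects either the single middle element (odd length) or the # two middle elements (even length) along `axis`, so this mean is exactly # the median while also applying the dtype coercion and `out` handling noted above.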
return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile(s) of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. axis : {int, sequence of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `percentile`. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the contents of the input `a` after this function completes -- treat it as undefined. Default is False. If `a` is not already an array, this parameter will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, median, nanpercentile Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
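For example, given ``V = [1, 2, 3, 4]`` and ``q = 75``, the normalized rank is ``0.75 * (4 - 1) = 2.25``, so the default linear interpolation yields ``3 + 0.25 * (4 - 3) = 3.25``.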
Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: return r.reshape(q.shape + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partitioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower', 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.moveaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = np.moveaxis(ap, axis, 0) weights_below = np.moveaxis(weights_below, axis, 0) weights_above = np.moveaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's if
np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.moveaxis(x1, axis, 0) x2 = np.moveaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in percentile", RuntimeWarning, stacklevel=3) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional The sample points corresponding to the `y` values. If `x` is None, the sample points are assumed to be evenly spaced `dx` apart. The default is None. dx : scalar, optional The spacing between sample points when `x` is None. The default is 1. axis : int, optional The axis along which to integrate. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = y.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """ Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
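As a purely illustrative (hypothetical) call, ``add_newdoc('numpy.core', 'ufunc', ('reduce', 'text'))`` would attach ``'text'`` as the docstring of the ``reduce`` method of ``numpy.core.ufunc``.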
""" try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except Exception: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. 
>>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) for i, x in enumerate(xi)] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + s0[2:] output[1].shape = (-1, 1) + s0[2:] if not sparse: # Return the full N-D matrix (not only the 1-D vector) output = np.broadcast_arrays(*output, subok=True) if copy_: output = [x.copy() for x in output] return output def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. 
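Because `delete` allocates and fills a new array on every call, reusing a single mask in this way also avoids repeated copies when making several rounds of deletions.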
Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = -1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning, stacklevel=2) if wrap: return wrap(arr) else: return arr.copy(order=arrorder) axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy(order=arrorder)) else: return arr.copy(order=arrorder) # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunk if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn("in the future delete will treat boolean arrays and " "array-likes as boolean index instead of casting it " "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings.
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning, stacklevel=2) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning, stacklevel=2) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning, stacklevel=2) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 elif ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning, stacklevel=2) arr = arr.copy(order=arrorder) arr[...] 
= values if wrap: return wrap(arr) else: return arr else: axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning, stacklevel=2) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.moveaxis(values, 0, axis) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning, stacklevel=2) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: arrays must have same number of dimensions """ arr = asanyarray(arr) if axis is None: if arr.ndim != 1: arr = arr.ravel() values = ravel(values) axis = arr.ndim-1 return concatenate((arr, values), axis=axis)
170,032
31.90749
88
py
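The three editing routines dumped above (delete, insert, append) each allocate and return a new array rather than mutating in place. A minimal usage sketch (illustrative only, not part of the repository file):

import numpy as np

a = np.arange(12).reshape(3, 4)

# delete returns a copy with the indexed subarrays removed
assert np.delete(a, 1, axis=0).shape == (2, 4)

# insert broadcasts a scalar into the new slots; here a column of 99s
widened = np.insert(a, 2, 99, axis=1)
assert widened.shape == (3, 5) and (widened[:, 2] == 99).all()

# with axis=None, append flattens both operands before concatenating
flat = np.append(a, [100, 101])
assert flat.ndim == 1 and flat.size == a.size + 2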
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/info.py
""" Basic functions used by several sub-packages and useful to have in the main name-space. Type Handling ------------- ================ =================== iscomplexobj Test for complex object, scalar result isrealobj Test for real object, scalar result iscomplex Test for complex elements, array result isreal Test for real elements, array result imag Imaginary part real Real part real_if_close Turns complex number with tiny imaginary part to real isneginf Tests for negative infinity, array result isposinf Tests for positive infinity, array result isnan Tests for nans, array result isinf Tests for infinity, array result isfinite Tests for finite numbers, array result isscalar True if argument is a scalar nan_to_num Replaces NaN's with 0 and infinities with large numbers cast Dictionary of functions to force cast to each type common_type Determine the minimum common type code for a group of arrays mintypecode Return minimal allowed common typecode. ================ =================== Index Tricks ------------ ================ =================== mgrid Method which allows easy construction of N-d 'mesh-grids' ``r_`` Append and construct arrays: turns slice objects into ranges and concatenates them, for 2d arrays appends rows. index_exp Konrad Hinsen's index_expression class instance which can be useful for building complicated slicing syntax. ================ =================== Useful Functions ---------------- ================ =================== select Extension of where to multiple conditions and choices extract Extract 1d array from flattened array according to mask insert Insert 1d array of values into Nd array according to mask linspace Evenly spaced samples in linear space logspace Evenly spaced samples in logarithmic space fix Round x to nearest integer towards zero mod Modulo mod(x,y) = x % y except keeps sign of y amax Array maximum along axis amin Array minimum along axis ptp Array max-min along axis cumsum Cumulative sum along axis prod Product of elements along axis cumprod Cumluative product along axis diff Discrete differences along axis angle Returns angle of complex argument unwrap Unwrap phase along given axis (1-d algorithm) sort_complex Sort a complex-array (based on real, then imaginary) trim_zeros Trim the leading and trailing zeros from 1D array. vectorize A class that wraps a Python function taking scalar arguments into a generalized function which can handle arrays of arguments using the broadcast rules of numerix Python. ================ =================== Shape Manipulation ------------------ ================ =================== squeeze Return a with length-one dimensions removed. atleast_1d Force arrays to be >= 1D atleast_2d Force arrays to be >= 2D atleast_3d Force arrays to be >= 3D vstack Stack arrays vertically (row on row) hstack Stack arrays horizontally (column on column) column_stack Stack 1D arrays as columns into 2D array dstack Stack arrays depthwise (along third dimension) stack Stack arrays along a new axis split Divide array into a list of sub-arrays hsplit Split into columns vsplit Split into rows dsplit Split along third dimension ================ =================== Matrix (2D Array) Manipulations ------------------------------- ================ =================== fliplr 2D array with columns flipped flipud 2D array with rows flipped rot90 Rotate a 2D array a multiple of 90 degrees eye Return a 2D array with ones down a given diagonal diag Construct a 2D array from a vector, or return a given diagonal from a 2D array. 
mat Construct a Matrix bmat Build a Matrix from blocks ================ =================== Polynomials ----------- ================ =================== poly1d A one-dimensional polynomial class poly Return polynomial coefficients from roots roots Find roots of polynomial given coefficients polyint Integrate polynomial polyder Differentiate polynomial polyadd Add polynomials polysub Subtract polynomials polymul Multiply polynomials polydiv Divide polynomials polyval Evaluate polynomial at given argument ================ =================== Iterators --------- ================ =================== Arrayterator A buffered iterator for big arrays. ================ =================== Import Tricks ------------- ================ =================== ppimport Postpone module import until trying to use it ppimport_attr Postpone module import until trying to use its attribute ppresolve Import postponed module and return it. ================ =================== Machine Arithmetics ------------------- ================ =================== machar_single Single precision floating point arithmetic parameters machar_double Double precision floating point arithmetic parameters ================ =================== Threading Tricks ---------------- ================ =================== ParallelExec Execute commands in parallel thread. ================ =================== Array Set Operations ----------------------- Set operations for numeric arrays based on sort() function. ================ =================== unique Unique elements of an array. isin Test whether each element of an ND array is present anywhere within a second array. ediff1d Array difference (auxiliary function). intersect1d Intersection of 1D arrays with unique elements. setxor1d Set exclusive-or of 1D arrays with unique elements. in1d Test whether elements in a 1D array are also present in another array. union1d Union of 1D arrays with unique elements. setdiff1d Set difference of 1D arrays with unique elements. ================ =================== """ from __future__ import division, absolute_import, print_function depends = ['core', 'testing'] global_symbols = ['*']
6,616
40.099379
74
py
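info.py above is only a catalogue of names; two of the listed helpers in action (short illustrative sketch, not part of the dumped file):

import numpy as np

# trim_zeros strips leading/trailing zeros; interior zeros survive
x = np.array([0, 0, 1, 2, 0, 3, 0])
assert list(np.trim_zeros(x)) == [1, 2, 0, 3]

# unwrap removes 2*pi jumps from a phase signal (1-d algorithm)
phase = np.array([0.0, 3.0, 6.2])
assert abs(np.unwrap(phase)[-1] - (6.2 - 2 * np.pi)) < 1e-12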
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/__init__.py
from __future__ import division, absolute_import, print_function

import math

from .info import __doc__
from numpy.version import version as __version__

from .type_check import *
from .index_tricks import *
from .function_base import *
from .mixins import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *

from . import scimath as emath
from .polynomial import *
#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from .financial import *
from .arrayterator import Arrayterator
from .arraypad import *
from ._version import *
from numpy.core.multiarray import tracemalloc_domain

__all__ = ['emath', 'math', 'tracemalloc_domain']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += mixins.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__

from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
1,301
25.04
64
py
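The __init__ above assembles the flat numpy.lib namespace by concatenating submodule __all__ lists. A generic sketch of that re-export pattern (the names here are stand-ins, not the real submodules):

# pretend per-submodule export lists
type_check_all = ['iscomplexobj', 'isrealobj']
shape_base_all = ['vstack', 'hstack']

__all__ = ['emath', 'math']
__all__ += type_check_all
__all__ += shape_base_all

# `from package import *` would now expose the flattened union
assert __all__ == ['emath', 'math', 'iscomplexobj', 'isrealobj', 'vstack', 'hstack']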
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/arraysetops.py
""" Set operations for arrays based on sorting. :Contains: unique, isin, ediff1d, intersect1d, setxor1d, in1d, union1d, setdiff1d :Notes: For floating point arrays, inaccurate results may appear due to usual round-off and floating point comparison issues. Speed could be gained in some operations by an implementation of sort(), that can provide directly the permutation vectors, avoiding thus calls to argsort(). To do: Optionally return indices analogously to unique for all functions. :Author: Robert Cimrman """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = [ 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', 'in1d', 'isin' ] def ediff1d(ary, to_end=None, to_begin=None): """ The differences between consecutive elements of an array. Parameters ---------- ary : array_like If necessary, will be flattened before the differences are taken. to_end : array_like, optional Number(s) to append at the end of the returned differences. to_begin : array_like, optional Number(s) to prepend at the beginning of the returned differences. Returns ------- ediff1d : ndarray The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. See Also -------- diff, gradient Notes ----- When applied to masked arrays, this function drops the mask information if the `to_begin` and/or `to_end` parameters are used. Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.ediff1d(x) array([ 1, 2, 3, -7]) >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) array([-99, 1, 2, 3, -7, 88, 99]) The returned array is always 1D. >>> y = [[1, 2, 4], [1, 6, 24]] >>> np.ediff1d(y) array([ 1, 2, -3, 5, 18]) """ # force a 1d array ary = np.asanyarray(ary).ravel() # fast track default case if to_begin is None and to_end is None: return ary[1:] - ary[:-1] if to_begin is None: l_begin = 0 else: to_begin = np.asanyarray(to_begin).ravel() l_begin = len(to_begin) if to_end is None: l_end = 0 else: to_end = np.asanyarray(to_end).ravel() l_end = len(to_end) # do the calculation in place and copy to_begin and to_end l_diff = max(len(ary) - 1, 0) result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) result = ary.__array_wrap__(result) if l_begin > 0: result[:l_begin] = to_begin if l_end > 0: result[l_begin + l_diff:] = to_end np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) return result def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None): """ Find the unique elements of an array. Returns the sorted unique elements of an array. There are three optional outputs in addition to the unique elements: the indices of the input array that give the unique values, the indices of the unique array that reconstruct the input array, and the number of times each unique value comes up in the input array. Parameters ---------- ar : array_like Input array. Unless `axis` is specified, this will be flattened if it is not already 1-D. return_index : bool, optional If True, also return the indices of `ar` (along the specified axis, if provided, or in the flattened array) that result in the unique array. return_inverse : bool, optional If True, also return the indices of the unique array (for the specified axis, if provided) that can be used to reconstruct `ar`. return_counts : bool, optional If True, also return the number of times each unique item appears in `ar`. .. versionadded:: 1.9.0 axis : int or None, optional The axis to operate on. If None, `ar` will be flattened. 
If an integer, the subarrays indexed by the given axis will be flattened and treated as the elements of a 1-D array with the dimension of the given axis, see the notes for more details. Object arrays or structured arrays that contain objects are not supported if the `axis` kwarg is used. The default is None. .. versionadded:: 1.13.0 Returns ------- unique : ndarray The sorted unique values. unique_indices : ndarray, optional The indices of the first occurrences of the unique values in the original array. Only provided if `return_index` is True. unique_inverse : ndarray, optional The indices to reconstruct the original array from the unique array. Only provided if `return_inverse` is True. unique_counts : ndarray, optional The number of times each of the unique values comes up in the original array. Only provided if `return_counts` is True. .. versionadded:: 1.9.0 See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- When an axis is specified the subarrays indexed by the axis are sorted. This is done by making the specified axis the first dimension of the array and then flattening the subarrays in C order. The flattened subarrays are then viewed as a structured type with each element given a label, with the effect that we end up with a 1-D array of structured types that can be treated in the same way as any other 1-D array. The result is that the flattened subarrays are sorted in lexicographic order starting with the first element. Examples -------- >>> np.unique([1, 1, 2, 2, 3, 3]) array([1, 2, 3]) >>> a = np.array([[1, 1], [2, 3]]) >>> np.unique(a) array([1, 2, 3]) Return the unique rows of a 2D array >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) >>> np.unique(a, axis=0) array([[1, 0, 0], [2, 3, 4]]) Return the indices of the original array that give the unique values: >>> a = np.array(['a', 'b', 'b', 'c', 'a']) >>> u, indices = np.unique(a, return_index=True) >>> u array(['a', 'b', 'c'], dtype='|S1') >>> indices array([0, 1, 3]) >>> a[indices] array(['a', 'b', 'c'], dtype='|S1') Reconstruct the input array from the unique values: >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) >>> u, indices = np.unique(a, return_inverse=True) >>> u array([1, 2, 3, 4, 6]) >>> indices array([0, 1, 4, 3, 1, 2, 1]) >>> u[indices] array([1, 2, 6, 4, 2, 3, 2]) """ ar = np.asanyarray(ar) if axis is None: return _unique1d(ar, return_index, return_inverse, return_counts) if not (-ar.ndim <= axis < ar.ndim): raise ValueError('Invalid axis kwarg specified for unique') ar = np.swapaxes(ar, axis, 0) orig_shape, orig_dtype = ar.shape, ar.dtype # Must reshape to a contiguous 2D array for this to work... ar = ar.reshape(orig_shape[0], -1) ar = np.ascontiguousarray(ar) dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] try: consolidated = ar.view(dtype) except TypeError: # There's no good way to do this for object arrays, etc... 
msg = 'The axis argument to unique is not supported for dtype {dt}' raise TypeError(msg.format(dt=ar.dtype)) def reshape_uniq(uniq): uniq = uniq.view(orig_dtype) uniq = uniq.reshape(-1, *orig_shape[1:]) uniq = np.swapaxes(uniq, 0, axis) return uniq output = _unique1d(consolidated, return_index, return_inverse, return_counts) if not (return_index or return_inverse or return_counts): return reshape_uniq(output) else: uniq = reshape_uniq(output[0]) return (uniq,) + output[1:] def _unique1d(ar, return_index=False, return_inverse=False, return_counts=False): """ Find the unique elements of an array, ignoring shape. """ ar = np.asanyarray(ar).flatten() optional_indices = return_index or return_inverse optional_returns = optional_indices or return_counts if ar.size == 0: if not optional_returns: ret = ar else: ret = (ar,) if return_index: ret += (np.empty(0, np.intp),) if return_inverse: ret += (np.empty(0, np.intp),) if return_counts: ret += (np.empty(0, np.intp),) return ret if optional_indices: perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') aux = ar[perm] else: ar.sort() aux = ar flag = np.concatenate(([True], aux[1:] != aux[:-1])) if not optional_returns: ret = aux[flag] else: ret = (aux[flag],) if return_index: ret += (perm[flag],) if return_inverse: iflag = np.cumsum(flag) - 1 inv_idx = np.empty(ar.shape, dtype=np.intp) inv_idx[perm] = iflag ret += (inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(flag) + ([ar.size],)) ret += (np.diff(idx),) return ret def intersect1d(ar1, ar2, assume_unique=False): """ Find the intersection of two arrays. Return the sorted, unique values that are in both of the input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- intersect1d : ndarray Sorted 1D array of common and unique elements. See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) To intersect more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([3]) """ if not assume_unique: # Might be faster than unique( intersect1d( ar1, ar2 ) )? ar1 = unique(ar1) ar2 = unique(ar2) aux = np.concatenate((ar1, ar2)) aux.sort() return aux[:-1][aux[1:] == aux[:-1]] def setxor1d(ar1, ar2, assume_unique=False): """ Find the set exclusive-or of two arrays. Return the sorted, unique values that are in only one (not both) of the input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- setxor1d : ndarray Sorted 1D array of unique values that are in only one of the input arrays. Examples -------- >>> a = np.array([1, 2, 3, 2, 4]) >>> b = np.array([2, 3, 5, 7, 5]) >>> np.setxor1d(a,b) array([1, 4, 5, 7]) """ if not assume_unique: ar1 = unique(ar1) ar2 = unique(ar2) aux = np.concatenate((ar1, ar2)) if aux.size == 0: return aux aux.sort() flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) return aux[flag[1:] & flag[:-1]] def in1d(ar1, ar2, assume_unique=False, invert=False): """ Test whether each element of a 1-D array is also present in a second array. 
Returns a boolean array the same length as `ar1` that is True where an element of `ar1` is in `ar2` and False otherwise. We recommend using :func:`isin` instead of `in1d` for new code. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like The values against which to test each value of `ar1`. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. invert : bool, optional If True, the values in the returned array are inverted (that is, False where an element of `ar1` is in `ar2` and True otherwise). Default is False. ``np.in1d(a, b, invert=True)`` is equivalent to (but is faster than) ``np.invert(in1d(a, b))``. .. versionadded:: 1.8.0 Returns ------- in1d : (M,) ndarray, bool The values `ar1[in1d]` are in `ar2`. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. However, this idea fails if `ar2` is a set, or similar (non-sequence) container: As ``ar2`` is converted to an array, in those cases ``asarray(ar2)`` is an object array rather than the expected array of contained values. .. versionadded:: 1.4.0 Examples -------- >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask array([False, True, False, True, False]) >>> test[mask] array([1, 5]) """ # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx] def isin(element, test_elements, assume_unique=False, invert=False): """ Calculates `element in test_elements`, broadcasting over `element` only. Returns a boolean array of the same shape as `element` that is True where an element of `element` is in `test_elements` and False otherwise. Parameters ---------- element : array_like Input array. test_elements : array_like The values against which to test each value of `element`. 
This argument is flattened if it is an array or array_like. See notes for behavior with non-array-like parameters. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. invert : bool, optional If True, the values in the returned array are inverted, as if calculating `element not in test_elements`. Default is False. ``np.isin(a, b, invert=True)`` is equivalent to (but faster than) ``np.invert(np.isin(a, b))``. Returns ------- isin : ndarray, bool Has the same shape as `element`. The values `element[isin]` are in `test_elements`. See Also -------- in1d : Flattened version of this function. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `isin` is an element-wise function version of the python keyword `in`. ``isin(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. `element` and `test_elements` are converted to arrays if they are not already. If `test_elements` is a set (or other non-sequence collection) it will be converted to an object array with one element, rather than an array of the values contained in `test_elements`. This is a consequence of the `array` constructor's way of handling non-sequence collections. Converting the set to a list usually gives the desired behavior. .. versionadded:: 1.13.0 Examples -------- >>> element = 2*np.arange(4).reshape((2, 2)) >>> element array([[0, 2], [4, 6]]) >>> test_elements = [1, 2, 4, 8] >>> mask = np.isin(element, test_elements) >>> mask array([[ False, True], [ True, False]]) >>> element[mask] array([2, 4]) >>> mask = np.isin(element, test_elements, invert=True) >>> mask array([[ True, False], [ False, True]]) >>> element[mask] array([0, 6]) Because of how `array` handles sets, the following does not work as expected: >>> test_set = {1, 2, 4, 8} >>> np.isin(element, test_set) array([[ False, False], [ False, False]]) Casting the set to a list gives the expected result: >>> np.isin(element, list(test_set)) array([[ False, True], [ True, False]]) """ element = np.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, invert=invert).reshape(element.shape) def union1d(ar1, ar2): """ Find the union of two arrays. Return the unique, sorted array of values that are in either of the two input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. They are flattened if they are not already 1D. Returns ------- union1d : ndarray Unique, sorted union of the input arrays. See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) To find the union of more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6]) """ return unique(np.concatenate((ar1, ar2), axis=None)) def setdiff1d(ar1, ar2, assume_unique=False): """ Find the set difference of two arrays. Return the sorted, unique values in `ar1` that are not in `ar2`. Parameters ---------- ar1 : array_like Input array. ar2 : array_like Input comparison array. assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- setdiff1d : ndarray Sorted 1D array of values in `ar1` that are not in `ar2`. 
See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- >>> a = np.array([1, 2, 3, 2, 4, 1]) >>> b = np.array([3, 4, 5, 6]) >>> np.setdiff1d(a, b) array([1, 2]) """ if assume_unique: ar1 = np.asarray(ar1).ravel() else: ar1 = unique(ar1) ar2 = unique(ar2) return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
20,567
29.929323
80
py
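A compact tour of the sort-based set routines defined in arraysetops.py above (illustrative sketch, not part of the dumped file):

import numpy as np

a = np.array([3, 1, 2, 3, 1])
b = np.array([2, 3, 5])

vals, counts = np.unique(a, return_counts=True)
assert list(vals) == [1, 2, 3] and list(counts) == [2, 1, 2]

# in1d always returns a flat mask; isin keeps the shape of its first argument
assert np.isin(a.reshape(1, 5), b).shape == (1, 5)

assert list(np.intersect1d(a, b)) == [2, 3]   # sorted unique common values
assert list(np.setdiff1d(a, b)) == [1]        # in a but not in b
assert list(np.setxor1d(a, b)) == [1, 5]      # in exactly one of the two
assert list(np.union1d(a, b)) == [1, 2, 3, 5]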
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/mixins.py
"""Mixin classes for custom array types that don't inherit from ndarray.""" from __future__ import division, absolute_import, print_function import sys from numpy.core import umath as um # Nothing should be exposed in the top-level NumPy module. __all__ = [] def _disables_array_ufunc(obj): """True when __array_ufunc__ is set to None.""" try: return obj.__array_ufunc__ is None except AttributeError: return False def _binary_method(ufunc, name): """Implement a forward binary method with a ufunc, e.g., __add__.""" def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(self, other) func.__name__ = '__{}__'.format(name) return func def _reflected_binary_method(ufunc, name): """Implement a reflected binary method with a ufunc, e.g., __radd__.""" def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(other, self) func.__name__ = '__r{}__'.format(name) return func def _inplace_binary_method(ufunc, name): """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" def func(self, other): return ufunc(self, other, out=(self,)) func.__name__ = '__i{}__'.format(name) return func def _numeric_methods(ufunc, name): """Implement forward, reflected and inplace binary methods with a ufunc.""" return (_binary_method(ufunc, name), _reflected_binary_method(ufunc, name), _inplace_binary_method(ufunc, name)) def _unary_method(ufunc, name): """Implement a unary special method with a ufunc.""" def func(self): return ufunc(self) func.__name__ = '__{}__'.format(name) return func class NDArrayOperatorsMixin(object): """Mixin defining all operator special methods using __array_ufunc__. This class implements the special methods for almost all of Python's builtin operators defined in the `operator` module, including comparisons (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by deferring to the ``__array_ufunc__`` method, which subclasses must implement. This class does not yet implement the special operators corresponding to ``matmul`` (``@``), because ``np.matmul`` is not yet a NumPy ufunc. It is useful for writing classes that do not inherit from `numpy.ndarray`, but that should support arithmetic and numpy universal functions like arrays as described in :ref:`A Mechanism for Overriding Ufuncs <neps.ufunc-overrides>`. As an trivial example, consider this implementation of an ``ArrayLike`` class that simply wraps a NumPy array and ensures that the result of any arithmetic operation is also an ``ArrayLike`` object:: class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): def __init__(self, value): self.value = np.asarray(value) # One might also consider adding the built-in list type to this # list, to support operations like np.add(array_like, list) _HANDLED_TYPES = (np.ndarray, numbers.Number) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out = kwargs.get('out', ()) for x in inputs + out: # Only support operations with instances of _HANDLED_TYPES. # Use ArrayLike instead of type(self) for isinstance to # allow subclasses that don't override __array_ufunc__ to # handle ArrayLike objects. if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): return NotImplemented # Defer to the implementation of the ufunc on unwrapped values. 
inputs = tuple(x.value if isinstance(x, ArrayLike) else x for x in inputs) if out: kwargs['out'] = tuple( x.value if isinstance(x, ArrayLike) else x for x in out) result = getattr(ufunc, method)(*inputs, **kwargs) if type(result) is tuple: # multiple return values return tuple(type(self)(x) for x in result) elif method == 'at': # no return value return None else: # one return value return type(self)(result) def __repr__(self): return '%s(%r)' % (type(self).__name__, self.value) In interactions between ``ArrayLike`` objects and numbers or numpy arrays, the result is always another ``ArrayLike``: >>> x = ArrayLike([1, 2, 3]) >>> x - 1 ArrayLike(array([0, 1, 2])) >>> 1 - x ArrayLike(array([ 0, -1, -2])) >>> np.arange(3) - x ArrayLike(array([-1, -1, -1])) >>> x - np.arange(3) ArrayLike(array([1, 1, 1])) Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations with arbitrary, unrecognized types. This ensures that interactions with ArrayLike preserve a well-defined casting hierarchy. """ # Like np.ndarray, this mixin class implements "Option 1" from the ufunc # overrides NEP. # comparisons don't have reflected and in-place versions __lt__ = _binary_method(um.less, 'lt') __le__ = _binary_method(um.less_equal, 'le') __eq__ = _binary_method(um.equal, 'eq') __ne__ = _binary_method(um.not_equal, 'ne') __gt__ = _binary_method(um.greater, 'gt') __ge__ = _binary_method(um.greater_equal, 'ge') # numeric methods __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') if sys.version_info.major < 3: # Python 3 uses only __truediv__ and __floordiv__ __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div') __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( um.true_divide, 'truediv') __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( um.floor_divide, 'floordiv') __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') __divmod__ = _binary_method(um.divmod, 'divmod') __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') # __idivmod__ does not exist # TODO: handle the optional third argument for __pow__? __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') __lshift__, __rlshift__, __ilshift__ = _numeric_methods( um.left_shift, 'lshift') __rshift__, __rrshift__, __irshift__ = _numeric_methods( um.right_shift, 'rshift') __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') # unary methods __neg__ = _unary_method(um.negative, 'neg') __pos__ = _unary_method(um.positive, 'pos') __abs__ = _unary_method(um.absolute, 'abs') __invert__ = _unary_method(um.invert, 'invert')
7,284
39.027473
79
py
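A minimal duck array built on the mixin above. `Wrapped` is a hypothetical demo class: only __array_ufunc__ is written by hand, and every operator special method comes from NDArrayOperatorsMixin:

import numpy as np

class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
    """Hypothetical demo class: wraps an ndarray, defers ufuncs to it."""
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # unwrap Wrapped operands, apply the ufunc, rewrap the result
        inputs = tuple(x.value if isinstance(x, Wrapped) else x
                       for x in inputs)
        return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

w = Wrapped([1, 2, 3])
assert isinstance(w + 1, Wrapped)                    # __add__ from the mixin
assert ((1 - w).value == [0, -1, -2]).all()          # reflected __rsub__ too
assert (np.negative(w).value == [-1, -2, -3]).all()  # direct ufunc dispatch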
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_type_check.py
from __future__ import division, absolute_import, print_function import numpy as np from numpy.compat import long from numpy.testing import ( assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises ) from numpy.lib.type_check import ( common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close ) def assert_all(x): assert_(np.all(x), x) class TestCommonType(object): def test_basic(self): ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) assert_(common_type(ai32) == np.float64) assert_(common_type(af16) == np.float16) assert_(common_type(af32) == np.float32) assert_(common_type(af64) == np.float64) assert_(common_type(acs) == np.csingle) assert_(common_type(acd) == np.cdouble) class TestMintypecode(object): def test_default_1(self): for itype in '1bcsuwil': assert_equal(mintypecode(itype), 'd') assert_equal(mintypecode('f'), 'f') assert_equal(mintypecode('d'), 'd') assert_equal(mintypecode('F'), 'F') assert_equal(mintypecode('D'), 'D') def test_default_2(self): for itype in '1bcsuwil': assert_equal(mintypecode(itype+'f'), 'f') assert_equal(mintypecode(itype+'d'), 'd') assert_equal(mintypecode(itype+'F'), 'F') assert_equal(mintypecode(itype+'D'), 'D') assert_equal(mintypecode('ff'), 'f') assert_equal(mintypecode('fd'), 'd') assert_equal(mintypecode('fF'), 'F') assert_equal(mintypecode('fD'), 'D') assert_equal(mintypecode('df'), 'd') assert_equal(mintypecode('dd'), 'd') #assert_equal(mintypecode('dF',savespace=1),'F') assert_equal(mintypecode('dF'), 'D') assert_equal(mintypecode('dD'), 'D') assert_equal(mintypecode('Ff'), 'F') #assert_equal(mintypecode('Fd',savespace=1),'F') assert_equal(mintypecode('Fd'), 'D') assert_equal(mintypecode('FF'), 'F') assert_equal(mintypecode('FD'), 'D') assert_equal(mintypecode('Df'), 'D') assert_equal(mintypecode('Dd'), 'D') assert_equal(mintypecode('DF'), 'D') assert_equal(mintypecode('DD'), 'D') def test_default_3(self): assert_equal(mintypecode('fdF'), 'D') #assert_equal(mintypecode('fdF',savespace=1),'F') assert_equal(mintypecode('fdD'), 'D') assert_equal(mintypecode('fFD'), 'D') assert_equal(mintypecode('dFD'), 'D') assert_equal(mintypecode('ifd'), 'd') assert_equal(mintypecode('ifF'), 'F') assert_equal(mintypecode('ifD'), 'D') assert_equal(mintypecode('idF'), 'D') #assert_equal(mintypecode('idF',savespace=1),'F') assert_equal(mintypecode('idD'), 'D') class TestIsscalar(object): def test_basic(self): assert_(np.isscalar(3)) assert_(not np.isscalar([3])) assert_(not np.isscalar((3,))) assert_(np.isscalar(3j)) assert_(np.isscalar(long(10))) assert_(np.isscalar(4.0)) class TestReal(object): def test_real(self): y = np.random.rand(10,) assert_array_equal(y, np.real(y)) y = np.array(1) out = np.real(y) assert_array_equal(y, out) assert_(isinstance(out, np.ndarray)) y = 1 out = np.real(y) assert_equal(y, out) assert_(not isinstance(out, np.ndarray)) def test_cmplx(self): y = np.random.rand(10,)+1j*np.random.rand(10,) assert_array_equal(y.real, np.real(y)) y = np.array(1 + 1j) out = np.real(y) assert_array_equal(y.real, out) assert_(isinstance(out, np.ndarray)) y = 1 + 1j out = np.real(y) assert_equal(1.0, out) assert_(not isinstance(out, np.ndarray)) class TestImag(object): def test_real(self): y = 
np.random.rand(10,) assert_array_equal(0, np.imag(y)) y = np.array(1) out = np.imag(y) assert_array_equal(0, out) assert_(isinstance(out, np.ndarray)) y = 1 out = np.imag(y) assert_equal(0, out) assert_(not isinstance(out, np.ndarray)) def test_cmplx(self): y = np.random.rand(10,)+1j*np.random.rand(10,) assert_array_equal(y.imag, np.imag(y)) y = np.array(1 + 1j) out = np.imag(y) assert_array_equal(y.imag, out) assert_(isinstance(out, np.ndarray)) y = 1 + 1j out = np.imag(y) assert_equal(1.0, out) assert_(not isinstance(out, np.ndarray)) class TestIscomplex(object): def test_fail(self): z = np.array([-1, 0, 1]) res = iscomplex(z) assert_(not np.sometrue(res, axis=0)) def test_pass(self): z = np.array([-1j, 1, 0]) res = iscomplex(z) assert_array_equal(res, [1, 0, 0]) class TestIsreal(object): def test_pass(self): z = np.array([-1, 0, 1j]) res = isreal(z) assert_array_equal(res, [1, 1, 0]) def test_fail(self): z = np.array([-1j, 1, 0]) res = isreal(z) assert_array_equal(res, [0, 1, 1]) class TestIscomplexobj(object): def test_basic(self): z = np.array([-1, 0, 1]) assert_(not iscomplexobj(z)) z = np.array([-1j, 0, -1]) assert_(iscomplexobj(z)) def test_scalar(self): assert_(not iscomplexobj(1.0)) assert_(iscomplexobj(1+0j)) def test_list(self): assert_(iscomplexobj([3, 1+0j, True])) assert_(not iscomplexobj([3, 1, True])) def test_duck(self): class DummyComplexArray: @property def dtype(self): return np.dtype(complex) dummy = DummyComplexArray() assert_(iscomplexobj(dummy)) def test_pandas_duck(self): # This tests a custom np.dtype duck-typed class, such as used by pandas # (pandas.core.dtypes) class PdComplex(np.complex128): pass class PdDtype(object): name = 'category' names = None type = PdComplex kind = 'c' str = '<c16' base = np.dtype('complex128') class DummyPd: @property def dtype(self): return PdDtype dummy = DummyPd() assert_(iscomplexobj(dummy)) def test_custom_dtype_duck(self): class MyArray(list): @property def dtype(self): return complex a = MyArray([1+0j, 2+0j, 3+0j]) assert_(iscomplexobj(a)) class TestIsrealobj(object): def test_basic(self): z = np.array([-1, 0, 1]) assert_(isrealobj(z)) z = np.array([-1j, 0, -1]) assert_(not isrealobj(z)) class TestIsnan(object): def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isnan(z) == 0 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore'): assert_all(np.isnan(np.array((1.,))/0.) == 0) def test_neginf(self): with np.errstate(divide='ignore'): assert_all(np.isnan(np.array((-1.,))/0.) == 0) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isnan(np.array((0.,))/0.) == 1) def test_integer(self): assert_all(np.isnan(1) == 0) def test_complex(self): assert_all(np.isnan(1+1j) == 0) def test_complex1(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isnan(np.array(0+0j)/0.) == 1) class TestIsfinite(object): # Fixme, wrong place, isfinite now ufunc def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isfinite(z) == 1 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((1.,))/0.) == 0) def test_neginf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((-1.,))/0.) == 0) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array((0.,))/0.) 
== 0) def test_integer(self): assert_all(np.isfinite(1) == 1) def test_complex(self): assert_all(np.isfinite(1+1j) == 1) def test_complex1(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isfinite(np.array(1+1j)/0.) == 0) class TestIsinf(object): # Fixme, wrong place, isinf now ufunc def test_goodvalues(self): z = np.array((-1., 0., 1.)) res = np.isinf(z) == 0 assert_all(np.all(res, axis=0)) def test_posinf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((1.,))/0.) == 1) def test_posinf_scalar(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array(1.,)/0.) == 1) def test_neginf(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((-1.,))/0.) == 1) def test_neginf_scalar(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array(-1.)/0.) == 1) def test_ind(self): with np.errstate(divide='ignore', invalid='ignore'): assert_all(np.isinf(np.array((0.,))/0.) == 0) class TestIsposinf(object): def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = isposinf(np.array((-1., 0, 1))/0.) assert_(vals[0] == 0) assert_(vals[1] == 0) assert_(vals[2] == 1) class TestIsneginf(object): def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = isneginf(np.array((-1., 0, 1))/0.) assert_(vals[0] == 1) assert_(vals[1] == 0) assert_(vals[2] == 0) class TestNanToNum(object): def test_generic(self): with np.errstate(divide='ignore', invalid='ignore'): vals = nan_to_num(np.array((-1., 0, 1))/0.) assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): vals = np.array((-1., 0, 1))/0. result = nan_to_num(vals, copy=False) assert_(result is vals) assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) def test_integer(self): vals = nan_to_num(1) assert_all(vals == 1) vals = nan_to_num([1]) assert_array_equal(vals, np.array([1], int)) def test_complex_good(self): vals = nan_to_num(1+1j) assert_all(vals == 1+1j) def test_complex_bad(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j v += np.array(0+1.j)/0. vals = nan_to_num(v) # !! This is actually (unexpectedly) zero assert_all(np.isfinite(vals)) def test_complex_bad2(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j v += np.array(-1+1.j)/0. vals = nan_to_num(v) assert_all(np.isfinite(vals)) # Fixme #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) # !! This is actually (unexpectedly) positive # !! inf. Comment out for now, and see if it # !! 
changes #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) class TestRealIfClose(object): def test_basic(self): a = np.random.rand(10) b = real_if_close(a+1e-15j) assert_all(isrealobj(b)) assert_array_equal(a, b) b = real_if_close(a+1e-7j) assert_all(iscomplexobj(b)) b = real_if_close(a+1e-7j, tol=1e-6) assert_all(isrealobj(b)) class TestArrayConversion(object): def test_asfarray(self): a = asfarray(np.array([1, 2, 3])) assert_equal(a.__class__, np.ndarray) assert_(np.issubdtype(a.dtype, np.floating)) # previously this would infer dtypes from arrays, unlike every single # other numpy function assert_raises(TypeError, asfarray, np.array([1, 2, 3]), dtype=np.array(1.0)) if __name__ == "__main__": run_module_suite()
13,103
29.263279
79
py
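One detail worth flagging in TestNanToNum.test_generic above: `assert_all(...) and assert_all(...)` never runs its right-hand side, because assert_all returns None (falsy) and `and` short-circuits. Written as independent assertions, both checks actually execute (illustrative sketch):

import numpy as np

with np.errstate(divide='ignore', invalid='ignore'):
    vals = np.nan_to_num(np.array((-1., 0, 1)) / 0.)

assert vals[0] < -1e10         # -inf -> huge finite negative
assert np.isfinite(vals[0])    # now actually evaluated
assert vals[1] == 0            # nan -> 0
assert vals[2] > 1e10          # +inf -> huge finite positive
assert np.isfinite(vals[2])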
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_io.py
from __future__ import division, absolute_import, print_function import sys import gzip import os import threading from tempfile import NamedTemporaryFile import time import warnings import gc import io from io import BytesIO, StringIO from datetime import datetime import locale import re import numpy as np import numpy.ma as ma from numpy.lib._iotools import ConverterError, ConversionWarning from numpy.compat import asbytes, bytes, unicode, Path from numpy.ma.testutils import assert_equal from numpy.testing import ( run_module_suite, assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises, assert_allclose, assert_array_equal, temppath, tempdir, dec, IS_PYPY, suppress_warnings ) class TextIO(BytesIO): """Helper IO class. Writes encode strings to bytes if needed, reads return bytes. This makes it easier to emulate files opened in binary mode without needing to explicitly convert strings to bytes in setting up the test data. """ def __init__(self, s=""): BytesIO.__init__(self, asbytes(s)) def write(self, s): BytesIO.write(self, asbytes(s)) def writelines(self, lines): BytesIO.writelines(self, [asbytes(s) for s in lines]) MAJVER, MINVER = sys.version_info[:2] IS_64BIT = sys.maxsize > 2**32 try: import bz2 HAS_BZ2 = True except ImportError: HAS_BZ2 = False try: import lzma HAS_LZMA = True except ImportError: HAS_LZMA = False def strptime(s, fmt=None): """ This function is available in the datetime module only from Python >= 2.5. """ if type(s) == bytes: s = s.decode("latin1") return datetime(*time.strptime(s, fmt)[:3]) class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ save_func : callable Function used to save arrays to file. file_on_disk : bool If true, store the file on disk, instead of in a string buffer. save_kwds : dict Parameters passed to `save_func`. load_kwds : dict Parameters passed to `numpy.load`. args : tuple of arrays Arrays stored to file. 
""" save_kwds = kwargs.get('save_kwds', {}) load_kwds = kwargs.get('load_kwds', {}) file_on_disk = kwargs.get('file_on_disk', False) if file_on_disk: target_file = NamedTemporaryFile(delete=False) load_file = target_file.name else: target_file = BytesIO() load_file = target_file try: arr = args save_func(target_file, *arr, **save_kwds) target_file.flush() target_file.seek(0) if sys.platform == 'win32' and not isinstance(target_file, BytesIO): target_file.close() arr_reloaded = np.load(load_file, **load_kwds) self.arr = arr self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() # holds an open file descriptor so it can't be deleted on win if 'arr_reloaded' in locals(): if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) self.roundtrip(np.asfortranarray(a)) self.roundtrip(np.asfortranarray(a), file_on_disk=True) if a.shape[0] > 1: # neither C nor Fortran contiguous for 2D arrays or more self.roundtrip(np.asfortranarray(a)[1:]) self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) def test_array(self): a = np.array([], float) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], float) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], int) self.check_roundtrips(a) a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) self.check_roundtrips(a) a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) self.check_roundtrips(a) def test_array_object(self): a = np.array([], object) self.check_roundtrips(a) a = np.array([[1, 2], [3, 4]], object) self.check_roundtrips(a) def test_1D(self): a = np.array([1, 2, 3, 4], int) self.roundtrip(a) @dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) a = np.asfortranarray([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) self.check_roundtrips(a) @dec.slow def test_format_2_0(self): dt = [(("%d" % i) * 100, float) for i in range(500)] a = np.ones(1000, dtype=dt) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', UserWarning) self.check_roundtrips(a) class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.save, *args, **kwargs) assert_equal(self.arr[0], self.arr_reloaded) assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: for n, arr in enumerate(self.arr): reloaded = self.arr_reloaded['arr_%d' % n] assert_equal(arr, reloaded) assert_equal(arr.dtype, reloaded.dtype) assert_equal(arr.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows if self.arr_reloaded.fid: self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) @dec.skipif(not IS_64BIT, "Works only with 64bit systems") @dec.slow def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: np.savez(tmp, a=a) del a npfile = np.load(tmp) a = npfile['a'] # Should succeed npfile.close() del a # Avoid pyflakes unused variable warning. 
def test_multiple_arrays(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) self.roundtrip(a, b) def test_named_arrays(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) c = BytesIO() np.savez(c, file_a=a, file_b=b) c.seek(0) l = np.load(c) assert_equal(a, l['file_a']) assert_equal(b, l['file_b']) def test_BagObj(self): a = np.array([[1, 2], [3, 4]], float) b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) c = BytesIO() np.savez(c, file_a=a, file_b=b) c.seek(0) l = np.load(c) assert_equal(sorted(dir(l.f)), ['file_a','file_b']) assert_equal(a, l.f.file_a) assert_equal(b, l.f.file_b) def test_savez_filename_clashes(self): # Test that issue #852 is fixed # and savez functions in multithreaded environment def writer(error_list): with temppath(suffix='.npz') as tmp: arr = np.random.randn(500, 500) try: np.savez(tmp, arr=arr) except OSError as err: error_list.append(err) errors = [] threads = [threading.Thread(target=writer, args=(errors,)) for j in range(3)] for t in threads: t.start() for t in threads: t.join() if errors: raise AssertionError(errors) def test_not_closing_opened_fid(self): # Test that issue #2178 is fixed: # verify could seek on 'loaded' file with temppath(suffix='.npz') as tmp: with open(tmp, 'wb') as fp: np.savez(fp, data='LOVELY LOAD') with open(tmp, 'rb', 10000) as fp: fp.seek(0) assert_(not fp.closed) np.load(fp)['data'] # fp must not get closed by .load assert_(not fp.closed) fp.seek(0) assert_(not fp.closed) @dec.skipif(IS_PYPY, "context manager required on PyPy") def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test since failed to get triggered on # e.g. Debian sid of 2012 Jul 05 but was reported to # trigger the failure on Ubuntu 10.04: # http://projects.scipy.org/numpy/ticket/1517#comment:2 with temppath(suffix='.npz') as tmp: np.savez(tmp, data='LOVELY LOAD') # We need to check if the garbage collector can properly close # numpy npz file returned by np.load when their reference count # goes to zero. Python 3 running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. Because ResourceWarning # is unknown in Python < 3.x, we take the easy way out and # catch all warnings. with suppress_warnings() as sup: sup.filter(Warning) # TODO: specify exact message for i in range(1, 1025): try: np.load(tmp)["data"] except Exception as e: msg = "Failed to load data from a file: %s" % e raise AssertionError(msg) def test_closing_zipfile_after_load(self): # Check that zipfile owns file and can close it. This needs to # pass a file name to load for the test. On windows failure will # cause a second error will be raised when the attempt to remove # the open file is made. 
prefix = 'numpy_test_closing_zipfile_after_load_' with temppath(suffix='.npz', prefix=prefix) as tmp: np.savez(tmp, lab='place holder') data = np.load(tmp) fp = data.zip.fp data.close() assert_(fp.closed) class TestSaveTxt(object): def test_array(self): a = np.array([[1, 2], [3, 4]], float) fmt = "%.18e" c = BytesIO() np.savetxt(c, a, fmt=fmt) c.seek(0) assert_equal(c.readlines(), [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) a = np.array([[1, 2], [3, 4]], int) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) def test_1D(self): a = np.array([1, 2, 3, 4], int) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) lines = c.readlines() assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) def test_0D_3D(self): c = BytesIO() assert_raises(ValueError, np.savetxt, c, np.array(1)) assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) c = BytesIO() np.savetxt(c, a, fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) def test_delimiter(self): a = np.array([[1., 2.], [3., 4.]]) c = BytesIO() np.savetxt(c, a, delimiter=',', fmt='%d') c.seek(0) assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) def test_format(self): a = np.array([(1, 2), (3, 4)]) c = BytesIO() # Sequence of formats np.savetxt(c, a, fmt=['%02d', '%3.1f']) c.seek(0) assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) # A single multiformat string c = BytesIO() np.savetxt(c, a, fmt='%02d : %3.1f') c.seek(0) lines = c.readlines() assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) # Specify delimiter, should be overiden c = BytesIO() np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') c.seek(0) lines = c.readlines() assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) # Bad fmt, should raise a ValueError c = BytesIO() assert_raises(ValueError, np.savetxt, c, a, fmt=99) def test_header_footer(self): # Test the functionality of the header and footer keyword argument. 
c = BytesIO() a = np.array([(1, 2), (3, 4)], dtype=int) test_header_footer = 'Test header / footer' # Test the header keyword argument np.savetxt(c, a, fmt='%1d', header=test_header_footer) c.seek(0) assert_equal(c.read(), asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) # Test the footer keyword argument c = BytesIO() np.savetxt(c, a, fmt='%1d', footer=test_header_footer) c.seek(0) assert_equal(c.read(), asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) # Test the commentstr keyword argument used on the header c = BytesIO() commentstr = '% ' np.savetxt(c, a, fmt='%1d', header=test_header_footer, comments=commentstr) c.seek(0) assert_equal(c.read(), asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) # Test the commentstr keyword argument used on the footer c = BytesIO() commentstr = '% ' np.savetxt(c, a, fmt='%1d', footer=test_header_footer, comments=commentstr) c.seek(0) assert_equal(c.read(), asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) def test_file_roundtrip(self): with temppath() as name: a = np.array([(1, 2), (3, 4)]) np.savetxt(name, a) b = np.loadtxt(name) assert_array_equal(a, b) def test_complex_arrays(self): ncols = 2 nrows = 2 a = np.zeros((ncols, nrows), dtype=np.complex128) re = np.pi im = np.e a[:] = re + 1.0j * im # One format only c = BytesIO() np.savetxt(c, a, fmt=' %+.3e') c.seek(0) lines = c.readlines() assert_equal( lines, [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) # One format for each real and imaginary part c = BytesIO() np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) c.seek(0) lines = c.readlines() assert_equal( lines, [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) # One format for each complex number c = BytesIO() np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) c.seek(0) lines = c.readlines() assert_equal( lines, [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) def test_custom_writer(self): class CustomWriter(list): def write(self, text): self.extend(text.split(b'\n')) w = CustomWriter() a = np.array([(1, 2), (3, 4)]) np.savetxt(w, a) b = np.loadtxt(w) assert_array_equal(a, b) def test_unicode(self): utf8 = b'\xcf\x96'.decode('UTF-8') a = np.array([utf8], dtype=np.unicode) with tempdir() as tmpdir: # set encoding as on windows it may not be unicode even on py3 np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], encoding='UTF-8') def test_unicode_roundtrip(self): utf8 = b'\xcf\x96'.decode('UTF-8') a = np.array([utf8], dtype=np.unicode) # our gz wrapper support encoding suffixes = ['', '.gz'] # stdlib 2 versions do not support encoding if MAJVER > 2: if HAS_BZ2: suffixes.append('.bz2') if HAS_LZMA: suffixes.extend(['.xz', '.lzma']) with tempdir() as tmpdir: for suffix in suffixes: np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, fmt=['%s'], encoding='UTF-16-LE') b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), encoding='UTF-16-LE', dtype=np.unicode) assert_array_equal(a, b) def test_unicode_bytestream(self): utf8 = b'\xcf\x96'.decode('UTF-8') a = np.array([utf8], dtype=np.unicode) s = BytesIO() np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') s.seek(0) assert_equal(s.read().decode('UTF-8'), utf8 + '\n') def test_unicode_stringstream(self): utf8 = b'\xcf\x96'.decode('UTF-8') a = np.array([utf8], dtype=np.unicode) s = StringIO() np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') s.seek(0) assert_equal(s.read(), utf8 + '\n') 
class LoadTxtBase(object):
    def check_compressed(self, fopen, suffixes):
        # Test that we can load data from a compressed file
        wanted = np.arange(6).reshape((2, 3))
        linesep = ('\n', '\r\n', '\r')
        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            for suffix in suffixes:
                with temppath(suffix=suffix) as name:
                    with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
                        f.write(data)
                    res = self.loadfunc(name, encoding='UTF-32-LE')
                    assert_array_equal(res, wanted)
                    with fopen(name, "rt", encoding='UTF-32-LE') as f:
                        res = self.loadfunc(f)
                    assert_array_equal(res, wanted)

    # Python2 .open does not support encoding
    @dec.skipif(MAJVER == 2)
    def test_compressed_gzip(self):
        self.check_compressed(gzip.open, ('.gz',))

    @dec.skipif(MAJVER == 2 or not HAS_BZ2)
    def test_compressed_bz2(self):
        self.check_compressed(bz2.open, ('.bz2',))

    @dec.skipif(MAJVER == 2 or not HAS_LZMA)
    def test_compressed_lzma(self):
        self.check_compressed(lzma.open, ('.xz', '.lzma'))

    def test_encoding(self):
        with temppath() as path:
            with open(path, "wb") as f:
                f.write('0.\n1.\n2.'.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16")
            assert_array_equal(x, [0., 1., 2.])

    def test_stringload(self):
        # umlaute
        nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(nonascii.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
            assert_array_equal(x, nonascii)

    def test_binary_decode(self):
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))

    def test_converters_decode(self):
        # test converters that decode strings
        c = TextIO()
        c.write(b'\xcf\x96')
        c.seek(0)
        x = self.loadfunc(c, dtype=np.unicode,
                          converters={0: lambda x: x.decode('UTF-8')})
        a = np.array([b'\xcf\x96'.decode('UTF-8')])
        assert_array_equal(x, a)

    def test_converters_nodecode(self):
        # test native string converters enabled by setting an encoding
        utf8 = b'\xcf\x96'.decode('UTF-8')
        with temppath() as path:
            with io.open(path, 'wt', encoding='UTF-8') as f:
                f.write(utf8)
            x = self.loadfunc(path, dtype=np.unicode,
                              converters={0: lambda x: x + 't'},
                              encoding='UTF-8')
            a = np.array([utf8 + 't'])
            assert_array_equal(x, a)


class TestLoadTxt(LoadTxtBase):
    loadfunc = staticmethod(np.loadtxt)

    def setUp(self):
        # lower chunksize for testing
        self.orig_chunk = np.lib.npyio._loadtxt_chunksize
        np.lib.npyio._loadtxt_chunksize = 1

    def tearDown(self):
        np.lib.npyio._loadtxt_chunksize = self.orig_chunk

    def test_record(self):
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_array_equal(x, a)

        d = TextIO()
        d.write('M 64.0 75.0\nF 25.0 60.0')
        d.seek(0)
        mydescriptor = {'names': ('gender', 'age', 'weight'),
                        'formats': ('S1', 'i4', 'f4')}
        b = np.array([('M', 64.0, 75.0),
                      ('F', 25.0, 60.0)], dtype=mydescriptor)
        y = np.loadtxt(d, dtype=mydescriptor)
        assert_array_equal(y, b)

    def test_array(self):
        c = TextIO()
        c.write('1 2\n3 4')

        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([[1, 2], [3, 4]], int)
        assert_array_equal(x, a)

        c.seek(0)
        x = np.loadtxt(c, dtype=float)
        a = np.array([[1, 2], [3, 4]], float)
        assert_array_equal(x, a)

    def test_1D(self):
        c = TextIO()
        c.write('1\n2\n3\n4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('1,2,3,4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)

    def test_missing(self):
        c = TextIO()
        c.write('1,2,3,,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)})
        a = np.array([1, 2, 3, -999, 5], int)
        assert_array_equal(x, a)

    def test_converters_with_usecols(self):
        c = TextIO()
        c.write('1,2,3,,5\n6,7,8,9,10\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)},
                       usecols=(1, 3,))
        a = np.array([[2, -999], [7, 9]], int)
        assert_array_equal(x, a)

    def test_comments_unicode(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', comments=u'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_byte(self):
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', comments=b'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_comments_multiple(self):
        c = TextIO()
        c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=['#', '@', '//'])
        a = np.array([[1, 2, 3], [4, 5, 6]], int)
        assert_array_equal(x, a)

    def test_comments_multi_chars(self):
        c = TextIO()
        c.write('/* comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', comments='/*')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        # Check that '/*' is not transformed to ['/', '*']
        c = TextIO()
        c.write('*/ comment\n1,2,3,5\n')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
                      comments='/*')

    def test_skiprows(self):
        c = TextIO()
        c.write('comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)

    def test_usecols(self):
        a = np.array([[1, 2], [3, 4]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:, 1])

        a = np.array([[1, 2, 3], [3, 4, 5]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])

        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])

        # Testing with an integer instead of a sequence
        for int_type in [int, np.int8, np.int16,
                         np.int32, np.int64, np.uint8, np.uint16,
                         np.uint32, np.uint64]:
            to_read = int_type(1)
            c.seek(0)
            x = np.loadtxt(c, dtype=float, usecols=to_read)
            assert_array_equal(x, a[:, 1])

        # Testing with some crazy custom integer type
        class CrazyInt(object):
            def __index__(self):
                return 1

        crazy_int = CrazyInt()
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=crazy_int)
        assert_array_equal(x, a[:, 1])

        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
        assert_array_equal(x, a[:, 1])

        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
        c = TextIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(arr['stid'], [b"JOE", b"BOB"])
        assert_equal(arr['temp'], [25.3, 27.9])

        # Testing non-ints in usecols
        c.seek(0)
        bogus_idx = 1.5
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=bogus_idx
            )

        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=[0, bogus_idx, 0]
            )

    def test_fancy_dtype(self):
        c = TextIO()
        c.write('1,2,3.0\n4,5,6.0\n')
        c.seek(0)
        dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        x = np.loadtxt(c, dtype=dt, delimiter=',')
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
        assert_array_equal(x, a)

    def test_shaped_dtype(self):
        c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_3d_shaped_dtype(self):
        c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0,
                       [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_str_dtype(self):
        # see gh-8033
        c = ["str1", "str2"]

        for dt in (str, np.bytes_):
            a = np.array(["str1", "str2"], dtype=dt)
            x = np.loadtxt(c, dtype=dt)
            assert_array_equal(x, a)

    def test_empty_file(self):
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            c = TextIO()
            x = np.loadtxt(c)
            assert_equal(x.shape, (0,))
            x = np.loadtxt(c, dtype=np.int64)
            assert_equal(x.shape, (0,))
            assert_(x.dtype == np.int64)

    def test_unused_converter(self):
        c = TextIO()
        c.writelines(['1 21\n', '3 42\n'])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])

        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])

    def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
                          converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

    def test_uint64_type(self):
        tgt = (9223372043271415339, 9223372043271415853)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.uint64)
        assert_equal(res, tgt)

    def test_int64_type(self):
        tgt = (-9223372036854775807, 9223372036854775807)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.int64)
        assert_equal(res, tgt)

    def test_from_float_hex(self):
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt)

    def test_from_complex(self):
        tgt = (complex(1, 1), complex(1, -1))
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, tgt)

    def test_universal_newline(self):
        with temppath() as name:
            with open(name, 'w') as f:
                f.write('1 21\r3 42\r')
            data = np.loadtxt(name)
        assert_array_equal(data, [[1, 21], [3, 42]])

    def test_empty_field_after_tab(self):
        c = TextIO()
        c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
        c.seek(0)
        dt = {'names': ('x', 'y', 'z', 'comment'),
              'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        a = np.array([b'start ', b' ', b''])
        assert_array_equal(x['comment'], a)

    def test_structure_unpack(self):
        txt = TextIO("M 21 72\nF 35 58")
        dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
        a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
        assert_(a.dtype.str == '|S1')
        assert_(b.dtype.str == '<i4')
        assert_(c.dtype.str == '<f4')
        assert_array_equal(a, np.array([b'M', b'F']))
        assert_array_equal(b, np.array([21, 35]))
        assert_array_equal(c, np.array([72., 58.]))

    def test_ndmin_keyword(self):
        c = TextIO()
        c.write('1,2,3\n4,5,6')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)

        d = TextIO()
        d.write('0,1,2')
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        e = TextIO()
        e.write('0\n1\n2')
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))

        # Test ndmin kw with empty file.
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            f = TextIO()
            assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
            assert_(np.loadtxt(f, ndmin=1).shape == (0,))

    def test_generator_source(self):
        def count():
            for i in range(10):
                yield "%d" % i

        res = np.loadtxt(count())
        assert_array_equal(res, np.arange(10))

    def test_bad_line(self):
        c = TextIO()
        c.write('1 2 3\n4 5 6\n2 3')
        c.seek(0)

        # Check for exception and that exception contains line number
        assert_raises_regex(ValueError, "3", np.loadtxt, c)

    def test_none_as_string(self):
        # gh-5155, None should work as string when format demands it
        c = TextIO()
        c.write('100,foo,200\n300,None,400')
        c.seek(0)
        dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
        np.loadtxt(c, delimiter=',', dtype=dt, comments=None)  # Should succeed

    @dec.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968')
    def test_binary_load(self):
        butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
                b"20,2,3,\xc3\x95scar\n\r"
        sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(butf8)
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
            assert_array_equal(x, sutf8)
            # test broken latin1 conversion people now rely on
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype="S")
            x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello',
                 b'20,2,3,\xc3\x95scar']
            assert_array_equal(x, np.array(x, dtype="S"))


class Testfromregex(object):
    def test_record(self):
        c = TextIO()
        c.write('1.312 foo\n1.534 bar\n4.444 qux')
        c.seek(0)

        dt = [('num', np.float64), ('val', 'S3')]
        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_2(self):
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)

        dt = [('num', np.int32), ('val', 'S3')]
        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_record_3(self):
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)

        dt = [('num', np.float64)]
        x = np.fromregex(c, r"(\d+)\s+...", dt)
        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
        assert_array_equal(x, a)

    def test_record_unicode(self):
        utf8 = b'\xcf\x96'
        with temppath() as path:
            with open(path, 'wb') as f:
                f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')

            dt = [('num', np.float64), ('val', 'U4')]
            x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt,
                             encoding='UTF-8')
            a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')),
                          (1.534, 'bar'), (4.444, 'qux')], dtype=dt)
            assert_array_equal(x, a)

            regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
            x = np.fromregex(path, regexp, dt, encoding='UTF-8')
            assert_array_equal(x, a)


#####--------------------------------------------------------------------------


class TestFromTxt(LoadTxtBase):
    loadfunc = staticmethod(np.genfromtxt)

    def test_record(self):
        # Test w/ explicit dtype
        data = TextIO('1 2\n3 4')
        test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
        control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_equal(test, control)
        #
        data = TextIO('M 64.0 75.0\nF 25.0 60.0')
        descriptor = {'names': ('gender', 'age', 'weight'),
                      'formats': ('S1', 'i4', 'f4')}
        control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                           dtype=descriptor)
        test = np.ndfromtxt(data, dtype=descriptor)
        assert_equal(test, control)

    def test_array(self):
        # Test outputting a standard ndarray
        data = TextIO('1 2\n3 4')
        control = np.array([[1, 2], [3, 4]], dtype=int)
        test = np.ndfromtxt(data, dtype=int)
        assert_array_equal(test, control)
        #
        data.seek(0)
        control = np.array([[1, 2], [3, 4]], dtype=float)
        test = np.loadtxt(data, dtype=float)
        assert_array_equal(test, control)

    def test_1D(self):
        # Test squeezing to 1D
        control = np.array([1, 2, 3, 4], int)
        #
        data = TextIO('1\n2\n3\n4\n')
        test = np.ndfromtxt(data, dtype=int)
        assert_array_equal(test, control)
        #
        data = TextIO('1,2,3,4\n')
        test = np.ndfromtxt(data, dtype=int, delimiter=',')
        assert_array_equal(test, control)

    def test_comments(self):
        # Test the stripping of comments
        control = np.array([1, 2, 3, 5], int)
        # Comment on its own line
        data = TextIO('# comment\n1,2,3,5\n')
        test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
        assert_equal(test, control)
        # Comment at the end of a line
        data = TextIO('1,2,3,5# comment\n')
        test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
        assert_equal(test, control)

    def test_skiprows(self):
        # Test row skipping
        control = np.array([1, 2, 3, 5], int)
        kwargs = dict(dtype=int, delimiter=',')
        #
        data = TextIO('comment\n1,2,3,5\n')
        test = np.ndfromtxt(data, skip_header=1, **kwargs)
        assert_equal(test, control)
        #
        data = TextIO('# comment\n1,2,3,5\n')
        test = np.loadtxt(data, skiprows=1, **kwargs)
        assert_equal(test, control)

    def test_skip_footer(self):
        data = ["# %i" % i for i in range(1, 6)]
        data.append("A, B, C")
        data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
        data[-1] = "99,99"
        kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
        test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
        ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                        dtype=[(_, float) for _ in "ABC"])
        assert_equal(test, ctrl)

    def test_skip_footer_with_invalid(self):
        with suppress_warnings() as sup:
            sup.filter(ConversionWarning)
            basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
            # Footer too small to get rid of all invalid values
            assert_raises(ValueError, np.genfromtxt,
                          TextIO(basestr), skip_footer=1)
            # except ValueError:
            #     pass
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            a = np.genfromtxt(TextIO(basestr), skip_footer=3)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=3, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))

    def test_header(self):
        # Test retrieving a header
        data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.ndfromtxt(data, dtype=None, names=True)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = {'gender': np.array([b'M', b'F']),
                   'age': np.array([64.0, 25.0]),
                   'weight': np.array([75.0, 60.0])}
        assert_equal(test['gender'], control['gender'])
        assert_equal(test['age'], control['age'])
        assert_equal(test['weight'], control['weight'])

    def test_auto_dtype(self):
        # Test the automatic definition of the output dtype
        data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.ndfromtxt(data, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = [np.array([b'A', b'BCD']),
                   np.array([64, 25]),
                   np.array([75.0, 60.0]),
                   np.array([3 + 4j, 5 + 6j]),
                   np.array([True, False]), ]
        assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
        for (i, ctrl) in enumerate(control):
            assert_equal(test['f%i' % i], ctrl)

    def test_auto_dtype_uniform(self):
        # Tests whether the output dtype can be uniformized
        data = TextIO('1 2 3 4\n5 6 7 8\n')
        test = np.ndfromtxt(data, dtype=None)
        control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        assert_equal(test, control)

    def test_fancy_dtype(self):
        # Check that a nested dtype isn't MIA
        data = TextIO('1,2,3.0\n4,5,6.0\n')
        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
        control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
        assert_equal(test, control)

    def test_names_overwrite(self):
        # Test overwriting the names of the dtype
        descriptor = {'names': ('g', 'a', 'w'),
                      'formats': ('S1', 'i4', 'f4')}
        data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
        names = ('gender', 'age', 'weight')
        test = np.ndfromtxt(data, dtype=descriptor, names=names)
        descriptor['names'] = names
        control = np.array([('M', 64.0, 75.0),
                            ('F', 25.0, 60.0)], dtype=descriptor)
        assert_equal(test, control)

    def test_commented_header(self):
        # Check that names can be retrieved even if the line is commented out.
        data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        # The # is part of the first name and should be deleted automatically.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                        dtype=[('gender', '|S1'), ('age', int),
                               ('weight', float)])
        assert_equal(test, ctrl)
        # Ditto, but we should get rid of the first element
        data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test, ctrl)

    def test_autonames_and_usecols(self):
        # Tests names and usecols
        data = TextIO('A B C D\n aaaa 121 45 9.1')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
                                names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = np.array(('aaaa', 45, 9.1),
                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
        assert_equal(test, control)

    def test_converters_with_usecols(self):
        # Test the combination user-defined converters and usecol
        data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
        test = np.ndfromtxt(data, dtype=int, delimiter=',',
                            converters={3: lambda s: int(s or - 999)},
                            usecols=(1, 3,))
        control = np.array([[2, -999], [7, 9]], int)
        assert_equal(test, control)

    def test_converters_with_usecols_and_names(self):
        # Tests names and usecols
        data = TextIO('A B C D\n aaaa 121 45 9.1')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
                                dtype=None,
                                converters={'C': lambda s: 2 * int(s)})
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = np.array(('aaaa', 90, 9.1),
                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
        assert_equal(test, control)

    def test_converters_cornercases(self):
        # Test the conversion to datetime.
        converter = {
            'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.ndfromtxt(data, delimiter=',', dtype=None,
                            names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', np.object_), ('stid', float)])
        assert_equal(test, control)

    def test_converters_cornercases2(self):
        # Test the conversion to datetime64.
        converter = {
            'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.ndfromtxt(data, delimiter=',', dtype=None,
                            names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', 'datetime64[us]'), ('stid', float)])
        assert_equal(test, control)

    def test_unused_converter(self):
        # Test whether unused converters are forgotten
        data = TextIO("1 21\n 3 42\n")
        test = np.ndfromtxt(data, usecols=(1,),
                            converters={0: lambda s: int(s, 16)})
        assert_equal(test, [21, 42])
        #
        data.seek(0)
        test = np.ndfromtxt(data, usecols=(1,),
                            converters={1: lambda s: int(s, 16)})
        assert_equal(test, [33, 66])

    def test_invalid_converter(self):
        strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                     (b'r' not in x.lower() and x.strip() or
                                      0.0))
        strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                    (b'%' not in x.lower() and x.strip() or
                                     0.0))
        s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
                   "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
                   "D02N03,10/10/2004,R 1,,7,145.55")
        kwargs = dict(
            converters={2: strip_per, 3: strip_rand}, delimiter=",",
            dtype=None)
        assert_raises(ConverterError, np.genfromtxt, s, **kwargs)

    def test_tricky_converter_bug1666(self):
        # Test some corner cases
        s = TextIO('q1,2\nq3,4')
        cnv = lambda s: float(s[1:])
        test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
        control = np.array([[1., 2.], [3., 4.]])
        assert_equal(test, control)

    def test_dtype_with_converters(self):
        dstr = "2009; 23; 46"
        test = np.ndfromtxt(TextIO(dstr,),
                            delimiter=";", dtype=float, converters={0: bytes})
        control = np.array([('2009', 23., 46)],
                           dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
        assert_equal(test, control)
        test = np.ndfromtxt(TextIO(dstr,),
                            delimiter=";", dtype=float, converters={0: float})
        control = np.array([2009., 23., 46],)
        assert_equal(test, control)

    def test_dtype_with_converters_and_usecols(self):
        dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
        dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3}
        dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')]
        conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             names=None, converters=conv)
        control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)],
                               dtype=dtyp)
        assert_equal(test, control)
        dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')]
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             usecols=(0, 1, 3), names=None, converters=conv)
        control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp)
        assert_equal(test, control)

    def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                             converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

        ndtype = [('nest', [('idx', int), ('code', object)])]
        try:
            test = np.genfromtxt(TextIO(data), delimiter=";",
                                 dtype=ndtype, converters=converters)
        except NotImplementedError:
            pass
        else:
            errmsg = "Nested dtype involving objects should be supported."
            raise AssertionError(errmsg)

    def test_userconverters_with_explicit_dtype(self):
        # Test user_converters w/ explicit (standard) dtype
        data = TextIO('skip,skip,2001-01-01,1.0,skip')
        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: bytes})
        control = np.array([('2001-01-01', 1.)],
                           dtype=[('', '|S10'), ('', float)])
        assert_equal(test, control)

    def test_utf8_userconverters_with_explicit_dtype(self):
        utf8 = b'\xcf\x96'
        with temppath() as path:
            with open(path, 'wb') as f:
                f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
            test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
                                 usecols=(2, 3), converters={2: np.unicode},
                                 encoding='UTF-8')
        control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
                           dtype=[('', '|U11'), ('', float)])
        assert_equal(test, control)

    def test_spacedelimiter(self):
        # Test space delimiter
        data = TextIO("1 2 3 4 5\n6 7 8 9 10")
        test = np.ndfromtxt(data)
        control = np.array([[1., 2., 3., 4., 5.],
                            [6., 7., 8., 9., 10.]])
        assert_equal(test, control)

    def test_integer_delimiter(self):
        # Test using an integer for delimiter
        data = "  1  2  3\n  4  5 67\n890123  4"
        test = np.genfromtxt(TextIO(data), delimiter=3)
        control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
        assert_equal(test, control)

    def test_missing(self):
        data = TextIO('1,2,3,,5\n')
        test = np.ndfromtxt(data, dtype=int, delimiter=',',
                            converters={3: lambda s: int(s or - 999)})
        control = np.array([1, 2, 3, -999, 5], int)
        assert_equal(test, control)

    def test_missing_with_tabs(self):
        # Test w/ a delimiter tab
        txt = "1\t2\t3\n\t2\t\n1\t\t3"
        test = np.genfromtxt(TextIO(txt), delimiter="\t",
                             usemask=True,)
        ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
        ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
        assert_equal(test.data, ctrl_d)
        assert_equal(test.mask, ctrl_m)

    def test_usecols(self):
        # Test the selection of columns
        # Select 1 column
        control = np.array([[1, 2], [3, 4]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1,))
        assert_equal(test, control[:, 1])
        #
        control = np.array([[1, 2, 3], [3, 4, 5]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
        assert_equal(test, control[:, 1:])
        # Testing with arrays instead of tuples.
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
        assert_equal(test, control[:, 1:])

    def test_usecols_as_css(self):
        # Test giving usecols with a comma-separated string
        data = "1 2 3\n4 5 6"
        test = np.genfromtxt(TextIO(data),
                             names="a, b, c", usecols="a, c")
        ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
        assert_equal(test, ctrl)

    def test_usecols_with_structured_dtype(self):
        # Test usecols with an explicit structured dtype
        data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        test = np.ndfromtxt(
            data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(test['stid'], [b"JOE", b"BOB"])
        assert_equal(test['temp'], [25.3, 27.9])

    def test_usecols_with_integer(self):
        # Test usecols with an integer
        test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
        assert_equal(test, np.array([1., 4.]))

    def test_usecols_with_named_columns(self):
        # Test usecols with named columns
        ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
        data = "1 2 3\n4 5 6"
        kwargs = dict(names="a, b, c")
        test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
        assert_equal(test, ctrl)
        test = np.genfromtxt(TextIO(data),
                             usecols=('a', 'c'), **kwargs)
        assert_equal(test, ctrl)

    def test_empty_file(self):
        # Test that an empty file raises the proper warning.
        with suppress_warnings() as sup:
            sup.filter(message="genfromtxt: Empty input file:")
            data = TextIO()
            test = np.genfromtxt(data)
            assert_equal(test, np.array([]))

    def test_fancy_dtype_alt(self):
        # Check that a nested dtype isn't MIA
        data = TextIO('1,2,3.0\n4,5,6.0\n')
        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
        control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
        assert_equal(test, control)

    def test_shaped_dtype(self):
        c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.ndfromtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)

    def test_withmissing(self):
        data = TextIO('A,B\n0,1\n2,N/A')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.mafromtxt(data, dtype=None, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        #
        data.seek(0)
        test = np.mafromtxt(data, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', float), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_user_missing_values(self):
        data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
        basekwargs = dict(dtype=None, delimiter=",", names=True,)
        mdtype = [('A', int), ('B', float), ('C', complex)]
        #
        test = np.mafromtxt(TextIO(data), missing_values="N/A",
                            **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        #
        basekwargs['dtype'] = mdtype
        test = np.mafromtxt(TextIO(data),
                            missing_values={0: -9, 1: -99, 2: -999j},
                            **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        #
        test = np.mafromtxt(TextIO(data),
                            missing_values={0: -9, 'B': -99, 'C': -999j},
                            **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)

    def test_user_filling_values(self):
        # Test with missing and filling values
        ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
        data = "N/A, 2, 3\n4, ,???"
        kwargs = dict(delimiter=",",
                      dtype=int,
                      names="a,b,c",
                      missing_values={0: "N/A", 'b': " ", 2: "???"},
                      filling_values={0: 0, 'b': 0, 2: -999})
        test = np.genfromtxt(TextIO(data), **kwargs)
        ctrl = np.array([(0, 2, 3), (4, 0, -999)],
                        dtype=[(_, int) for _ in "abc"])
        assert_equal(test, ctrl)
        #
        test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
        ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
        assert_equal(test, ctrl)

        data2 = "1,2,*,4\n5,*,7,8\n"
        test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                             missing_values="*", filling_values=0)
        ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
        assert_equal(test, ctrl)
        test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
                             missing_values="*", filling_values=-1)
        ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
        assert_equal(test, ctrl)

    def test_withmissing_float(self):
        data = TextIO('A,B\n0,1.5\n2,-999.00')
        test = np.mafromtxt(data, dtype=None, delimiter=',',
                            missing_values='-999.0', names=True,)
        control = ma.array([(0, 1.5), (2, -1.)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_with_masked_column_uniform(self):
        # Test masked column
        data = TextIO('1 2 3\n4 5 6\n')
        test = np.genfromtxt(data, dtype=None,
                             missing_values='2,5', usemask=True)
        control = ma.array([[1, 2, 3], [4, 5, 6]],
                           mask=[[0, 1, 0], [0, 1, 0]])
        assert_equal(test, control)

    def test_with_masked_column_various(self):
        # Test masked column
        data = TextIO('True 2 3\nFalse 5 6\n')
        test = np.genfromtxt(data, dtype=None,
                             missing_values='2,5', usemask=True)
        control = ma.array([(1, 2, 3), (0, 5, 6)],
                           mask=[(0, 1, 0), (0, 1, 0)],
                           dtype=[('f0', bool), ('f1', bool), ('f2', int)])
        assert_equal(test, control)

    def test_invalid_raise(self):
        # Test invalid raise
        data = ["1, 1, 1, 1, 1"] * 50
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))
        #
        kwargs = dict(delimiter=",", dtype=None, names=True)
        # XXX: is there a better way to get the return value of the
        # callable in assert_warns ?
        ret = {}

        def f(_ret={}):
            _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
        #
        mdata.seek(0)
        assert_raises(ValueError, np.ndfromtxt, mdata,
                      delimiter=",", names=True)

    def test_invalid_raise_with_usecols(self):
        # Test invalid_raise with usecols
        data = ["1, 1, 1, 1, 1"] * 50
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))
        kwargs = dict(delimiter=",", dtype=None, names=True,
                      invalid_raise=False)
        # XXX: is there a better way to get the return value of the
        # callable in assert_warns ?
        ret = {}

        def f(_ret={}):
            _ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
        #
        mdata.seek(0)
        mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
        assert_equal(len(mtest), 50)
        control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
        control[[10 * _ for _ in range(5)]] = (2, 2)
        assert_equal(mtest, control)

    def test_inconsistent_dtype(self):
        # Test inconsistent dtype
        data = ["1, 1, 1, 1, -1.1"] * 50
        mdata = TextIO("\n".join(data))

        converters = {4: lambda x: "(%s)" % x}
        kwargs = dict(delimiter=",", converters=converters,
                      dtype=[(_, int) for _ in 'abcde'],)
        assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)

    def test_default_field_format(self):
        # Test default format
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=None, defaultfmt="f%02i")
        ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
                        dtype=[("f00", int), ("f01", int), ("f02", float)])
        assert_equal(mtest, ctrl)

    def test_single_dtype_wo_names(self):
        # Test single dtype w/o names
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=float, defaultfmt="f%02i")
        ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
        assert_equal(mtest, ctrl)

    def test_single_dtype_w_explicit_names(self):
        # Test single dtype w explicit names
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=float, names="a, b, c")
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)

    def test_single_dtype_w_implicit_names(self):
        # Test single dtype w implicit names
        data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=float, names=True)
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)

    def test_easy_structured_dtype(self):
        # Test easy structured dtype
        data = "0, 1, 2.3\n4, 5, 6.7"
        mtest = np.ndfromtxt(TextIO(data), delimiter=",",
                             dtype=(int, float, float), defaultfmt="f_%02i")
        ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
                        dtype=[("f_00", int), ("f_01", float),
                               ("f_02", float)])
        assert_equal(mtest, ctrl)

    def test_autostrip(self):
        # Test autostrip
        data = "01/01/2003  , 1.3,   abcde"
        kwargs = dict(delimiter=",", dtype=None)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            mtest = np.ndfromtxt(TextIO(data), **kwargs)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('01/01/2003  ', 1.3, '   abcde')],
                        dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
        assert_equal(mtest, ctrl)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
                        dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
        assert_equal(mtest, ctrl)

    def test_replace_space(self):
        # Test the 'replace_space' option
        txt = "A.A, B (B), C:C\n1, 2, 3.14"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)

    def test_replace_space_known_dtype(self):
        # Test the 'replace_space' (and related) options when dtype != None
        txt = "A.A, B (B), C:C\n1, 2, 3"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)

    def test_incomplete_names(self):
        # Test w/ incomplete names
        data = "A,,C\n0,1,2\n3,4,5"
        kwargs = dict(delimiter=",", names=True)
        # w/ dtype=None
        ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                        dtype=[(_, int) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
        assert_equal(test, ctrl)
        # w/ default dtype
        ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                        dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)

    def test_names_auto_completion(self):
        # Make sure that names are properly completed
        data = "1 2 3\n 4 5 6"
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, float, int), names="a")
        ctrl = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('a', int), ('f0', float), ('f1', int)])
        assert_equal(test, ctrl)

    def test_names_with_usecols_bug1636(self):
        # Make sure we pick up the right names w/ usecols
        data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
        ctrl_names = ("A", "C", "E")
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=(0, 2, 4), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        #
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        #
        test = np.genfromtxt(TextIO(data),
                             dtype=int, delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)

    def test_fixed_width_names(self):
        # Test fix-width w/ names
        data = "    A    B   C\n    0    1 2.3\n   45   67   9."
        kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
        #
        kwargs = dict(delimiter=5, names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)

    def test_filling_values(self):
        # Test missing values
        data = b"1, 2, 3\n1, , 5\n0, 6, \n"
        kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
        ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)

    def test_comments_is_none(self):
        # Github issue 329 (None was previously being converted to 'None').
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1], b'testNonetherestofthedata')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1], b' testNonetherestofthedata')

    def test_latin1(self):
        latin1 = b'\xf6\xfc\xf6'
        norm = b"norm1,norm2,norm3\n"
        enc = b"test1,testNonethe" + latin1 + b",test3\n"
        s = norm + enc + norm
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO(s),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test[1, 0], b"test1")
        assert_equal(test[1, 1], b"testNonethe" + latin1)
        assert_equal(test[1, 2], b"test3")
        test = np.genfromtxt(TextIO(s),
                             dtype=None, comments=None, delimiter=',',
                             encoding='latin1')
        assert_equal(test[1, 0], u"test1")
        assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
        assert_equal(test[1, 2], u"test3")

        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test['f0'], 0)
        assert_equal(test['f1'], b"testNonethe" + latin1)

    def test_binary_decode_autodtype(self):
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))

    def test_utf8_byte_encoding(self):
        utf8 = b"\xcf\x96"
        norm = b"norm1,norm2,norm3\n"
        enc = b"test1,testNonethe" + utf8 + b",test3\n"
        s = norm + enc + norm
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(TextIO(s),
                                 dtype=None, comments=None, delimiter=',')
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctl = np.array([
                 [b'norm1', b'norm2', b'norm3'],
                 [b'test1', b'testNonethe' + utf8, b'test3'],
                 [b'norm1', b'norm2', b'norm3']])
        assert_array_equal(test, ctl)

    def test_utf8_file(self):
        utf8 = b"\xcf\x96"
        latin1 = b"\xf6\xfc\xf6"
        with temppath() as path:
            with open(path, "wb") as f:
                f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
            test = np.genfromtxt(path,
                                 dtype=None, comments=None, delimiter=',',
                                 encoding="UTF-8")
            ctl = np.array([
                     ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
                     ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
                     dtype=np.unicode)
            assert_array_equal(test, ctl)

            # test a mixed dtype
            with open(path, "wb") as f:
                f.write(b"0,testNonethe" + utf8)
            test = np.genfromtxt(path,
                                 dtype=None, comments=None, delimiter=',',
                                 encoding="UTF-8")
            assert_equal(test['f0'], 0)
            assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))

    def test_utf8_file_nodtype_unicode(self):
        # bytes encoding with non-latin1 -> unicode upcast
        utf8 = u'\u03d6'
        latin1 = u'\xf6\xfc\xf6'

        # skip test if cannot encode utf8 test string with preferred
        # encoding. The preferred encoding is assumed to be the default
        # encoding of io.open. Will need to change this for PyTest, maybe
        # using pytest.mark.xfail(raises=***).
        try:
            import locale
            encoding = locale.getpreferredencoding()
            utf8.encode(encoding)
        except (UnicodeError, ImportError):
            raise SkipTest('Skipping test_utf8_file_nodtype_unicode, '
                           'unable to encode utf8 in preferred encoding')

        with temppath() as path:
            with io.open(path, "wt") as f:
                f.write(u"norm1,norm2,norm3\n")
                f.write(u"norm1," + latin1 + u",norm3\n")
                f.write(u"test1,testNonethe" + utf8 + u",test3\n")
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings('always', '',
                                        np.VisibleDeprecationWarning)
                test = np.genfromtxt(path, dtype=None, comments=None,
                                     delimiter=',')
                # Check for warning when encoding not specified.
                assert_(w[0].category is np.VisibleDeprecationWarning)
            ctl = np.array([
                     ["norm1", "norm2", "norm3"],
                     ["norm1", latin1, "norm3"],
                     ["test1", "testNonethe" + utf8, "test3"]],
                     dtype=np.unicode)
            assert_array_equal(test, ctl)

    def test_recfromtxt(self):
        #
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.recfromtxt(data, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', int), ('B', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        #
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])

    def test_recfromcsv(self):
        #
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
        test = np.recfromcsv(data, dtype=None, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', int), ('B', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        #
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', int), ('B', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])
        #
        data = TextIO('A,B\n0,1\n2,3')
        test = np.recfromcsv(data, missing_values='N/A',)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('a', int), ('b', int)])
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)
        #
        data = TextIO('A,B\n0,1\n2,3')
        dtype = [('a', int), ('b', float)]
        test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
        control = np.array([(0, 1), (2, 3)], dtype=dtype)
        assert_(isinstance(test, np.recarray))
        assert_equal(test, control)

        # gh-10394
        data = TextIO('color\n"red"\n"blue"')
        test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
        control = np.array([('red',), ('blue',)],
                           dtype=[('color', (bytes, 4))])
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)

    def test_max_rows(self):
        # Test the `max_rows` keyword argument.
        data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
        txt = TextIO(data)
        a1 = np.genfromtxt(txt, max_rows=3)
        a2 = np.genfromtxt(txt)
        assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
        assert_equal(a2, [[7, 8], [9, 10]])

        # max_rows must be at least 1.
        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)

        # An input with several invalid rows.
        data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'

        test = np.genfromtxt(TextIO(data), max_rows=2)
        control = np.array([[1., 1.], [2., 2.]])
        assert_equal(test, control)

        # Test keywords conflict
        assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
                      max_rows=4)

        # Test with invalid value
        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)

        # Test with invalid not raise
        with suppress_warnings() as sup:
            sup.filter(ConversionWarning)

            test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
            assert_equal(test, control)

            test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
            assert_equal(test, control)

        # Structured array with field names.
        data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'

        # Test with header, names and comments
        txt = TextIO(data)
        test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
        control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
                           dtype=[('c', '<f8'), ('d', '<f8')])
        assert_equal(test, control)
        # To continue reading the same "file", don't use skip_header or
        # names, and use the previously determined dtype.
        test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
        control = np.array([(4.0, 4.0), (5.0, 5.0)],
                           dtype=[('c', '<f8'), ('d', '<f8')])
        assert_equal(test, control)

    def test_gft_using_filename(self):
        # Test that we can load data from a filename as well as a file
        # object
        tgt = np.arange(6).reshape((2, 3))
        linesep = ('\n', '\r\n', '\r')

        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            with temppath() as name:
                with open(name, 'w') as f:
                    f.write(data)
                res = np.genfromtxt(name)
            assert_array_equal(res, tgt)

    def test_gft_from_gzip(self):
        # Test that we can load data from a gzipped file
        wanted = np.arange(6).reshape((2, 3))
        linesep = ('\n', '\r\n', '\r')

        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            s = BytesIO()
            with gzip.GzipFile(fileobj=s, mode='w') as g:
                g.write(asbytes(data))

            with temppath(suffix='.gz2') as name:
                with open(name, 'w') as f:
                    f.write(data)
                assert_array_equal(np.genfromtxt(name), wanted)

    def test_gft_using_generator(self):
        # gft doesn't work with unicode.
        def count():
            for i in range(10):
                yield asbytes("%d" % i)

        res = np.genfromtxt(count())
        assert_array_equal(res, np.arange(10))

    def test_auto_dtype_largeint(self):
        # Regression test for numpy/numpy#5635 whereby large integers could
        # cause OverflowErrors.

        # Test the automatic definition of the output dtype
        #
        # 2**66 = 73786976294838206464 => should convert to float
        # 2**34 = 17179869184 => should convert to int64
        # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
        #                 int64 on 64-bit systems)

        data = TextIO('73786976294838206464 17179869184 1024')
        test = np.ndfromtxt(data, dtype=None)

        assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])

        assert_(test.dtype['f0'] == float)
        assert_(test.dtype['f1'] == np.int64)
        assert_(test.dtype['f2'] == np.integer)

        assert_allclose(test['f0'], 73786976294838206464.)
        assert_equal(test['f1'], 17179869184)
        assert_equal(test['f2'], 1024)


class TestPathUsage(object):
    # Test that pathlib.Path can be used
    @dec.skipif(Path is None, "No pathlib.Path")
    def test_loadtxt(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([[1.1, 2], [3, 4]])
            np.savetxt(path, a)
            x = np.loadtxt(path)
            assert_array_equal(x, a)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_save_load(self):
        # Test that pathlib.Path instances can be used with save.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path)
            assert_array_equal(data, a)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_savez_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez(path, lab='place holder')
            with np.load(path) as data:
                assert_array_equal(data['lab'], 'place holder')

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_savez_compressed_load(self):
        # Test that pathlib.Path instances can be used with savez_compressed.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez_compressed(path, lab='place holder')
            data = np.load(path)
            assert_array_equal(data['lab'], 'place holder')
            data.close()

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_genfromtxt(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(path, a)
            data = np.genfromtxt(path)
            assert_array_equal(a, data)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_ndfromtxt(self):
        # Test outputting a standard ndarray
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1 2\n3 4')

            control = np.array([[1, 2], [3, 4]], dtype=int)
            test = np.ndfromtxt(path, dtype=int)
            assert_array_equal(test, control)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_mafromtxt(self):
        # From `test_fancy_dtype_alt` above
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1,2,3.0\n4,5,6.0\n')

            test = np.mafromtxt(path, delimiter=',')
            control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
            assert_equal(test, control)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_recfromtxt(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')

            kwargs = dict(delimiter=",", missing_values="N/A", names=True)
            test = np.recfromtxt(path, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)

    @dec.skipif(Path is None, "No pathlib.Path")
    def test_recfromcsv(self):
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')

            kwargs = dict(missing_values="N/A", names=True,
                          case_sensitive=True)
            test = np.recfromcsv(path, dtype=None, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)


def test_gzip_load():
    a = np.random.random((5, 5))

    s = BytesIO()
    f = gzip.GzipFile(fileobj=s, mode="w")

    np.save(f, a)
    f.close()
    s.seek(0)

    f = gzip.GzipFile(fileobj=s, mode="r")
    assert_array_equal(np.load(f), a)


def test_gzip_loadtxt():
    # Thanks to another windows brokenness, we can't use
    # NamedTemporaryFile: a file created from this function cannot be
    # reopened by another open call. So we first build the gzipped string
    # of the test reference array, write it to a securely opened file,
    # which is then read from by the loadtxt function
    s = BytesIO()
    g = gzip.GzipFile(fileobj=s, mode='w')
    g.write(b'1 2 3\n')
    g.close()

    s.seek(0)
    with temppath(suffix='.gz') as name:
        with open(name, 'wb') as f:
            f.write(s.read())
        res = np.loadtxt(name)
    s.close()

    assert_array_equal(res, [1, 2, 3])


def test_gzip_loadtxt_from_string():
    s = BytesIO()
    f = gzip.GzipFile(fileobj=s, mode="w")
    f.write(b'1 2 3\n')
    f.close()
    s.seek(0)

    f = gzip.GzipFile(fileobj=s, mode="r")
    assert_array_equal(np.loadtxt(f), [1, 2, 3])


def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys())


def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.

    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    assert_(gc.isenabled())
    gc.disable()
    try:
        gc.collect()
        np.load(f)
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created by np.load
        n_objects_in_cycles = gc.collect()
    finally:
        gc.enable()
    assert_equal(n_objects_in_cycles, 0)


if __name__ == "__main__":
    run_module_suite()
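
# --- Hedged usage sketch (added for illustration; not part of the original
# test suite). It condenses the gzip round trip tested above: np.loadtxt
# accepts any file-like object, so a gzip.GzipFile wrapped around an
# in-memory buffer works without touching the filesystem. Only documented
# numpy/stdlib APIs are used, reusing this module's gzip/BytesIO/np
# imports; the function name itself is made up for this sketch.
def _gzip_loadtxt_inmemory_sketch():
    buf = BytesIO()
    # compress two whitespace-delimited rows into the in-memory buffer
    with gzip.GzipFile(fileobj=buf, mode='w') as g:
        g.write(b'1 2 3\n4 5 6\n')
    buf.seek(0)
    # hand the decompressing file object straight to np.loadtxt
    with gzip.GzipFile(fileobj=buf, mode='r') as g:
        data = np.loadtxt(g)
    assert data.shape == (2, 3)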
92,466
37.81906
84
py
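The TestPathUsage cases above pin down that the I/O entry points (np.save, np.load, np.savetxt, np.genfromtxt and friends) accept pathlib.Path objects wherever a filename string is expected. A minimal standalone sketch of that round trip; the temporary-file handling here is illustrative and not taken from the suite:

import os
import tempfile
from pathlib import Path

import numpy as np

# Create a named temporary file ourselves (mirroring what temppath does).
fd, name = tempfile.mkstemp(suffix='.npy')
os.close(fd)
try:
    path = Path(name)
    a = np.array([[1, 2], [3, 4]])
    np.save(path, a)                      # a Path works in place of a str filename
    assert np.array_equal(np.load(path), a)
finally:
    os.remove(name)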
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_regression.py
from __future__ import division, absolute_import, print_function

import os
import sys

import numpy as np
from numpy.testing import (
    run_module_suite, assert_, assert_equal, assert_array_equal,
    assert_array_almost_equal, assert_raises, _assert_valid_refcount,
    )
from numpy.compat import unicode


class TestRegression(object):
    def test_poly1d(self):
        # Ticket #28
        assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
                     np.poly1d([-1, 1]))

    def test_cov_parameters(self):
        # Ticket #91
        x = np.random.random((3, 3))
        y = x.copy()
        np.cov(x, rowvar=1)
        np.cov(y, rowvar=0)
        assert_array_equal(x, y)

    def test_mem_digitize(self):
        # Ticket #95
        for i in range(100):
            np.digitize([1, 2, 3, 4], [1, 3])
            np.digitize([0, 1, 2, 3, 4], [1, 3])

    def test_unique_zero_sized(self):
        # Ticket #205
        assert_array_equal([], np.unique(np.array([])))

    def test_mem_vectorise(self):
        # Ticket #325
        vt = np.vectorize(lambda *args: args)
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)),
           np.zeros((2, 2)))

    def test_mgrid_single_element(self):
        # Ticket #339
        assert_array_equal(np.mgrid[0:0:1j], [0])
        assert_array_equal(np.mgrid[0:0], [])

    def test_refcount_vectorize(self):
        # Ticket #378
        def p(x, y):
            return 123
        v = np.vectorize(p)
        _assert_valid_refcount(v)

    def test_poly1d_nan_roots(self):
        # Ticket #396
        p = np.poly1d([np.nan, np.nan, 1], r=0)
        assert_raises(np.linalg.LinAlgError, getattr, p, "r")

    def test_mem_polymul(self):
        # Ticket #448
        np.polymul([], [1.])

    def test_mem_string_concat(self):
        # Ticket #469
        x = np.array([])
        np.append(x, 'asdasd\tasdasd')

    def test_poly_div(self):
        # Ticket #553
        u = np.poly1d([1, 2, 3])
        v = np.poly1d([1, 2, 3, 4, 5])
        q, r = np.polydiv(u, v)
        assert_equal(q*v + r, u)

    def test_poly_eq(self):
        # Ticket #554
        x = np.poly1d([1, 2, 3])
        y = np.poly1d([3, 4])
        assert_(x != y)
        assert_(x == x)

    def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)

    def test_polydiv_type(self):
        # Make polydiv work for complex types
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == float, msg)

    def test_histogramdd_too_many_bins(self):
        # Ticket 928.
        assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)

    def test_polyint_type(self):
        # Ticket #944
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        assert_(np.polyint(x).dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        assert_(np.polyint(x).dtype == float, msg)

    def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))

    def test_asfarray_none(self):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))

    def test_large_fancy_indexing(self):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0/5.0)+1)

        def dp():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)] = 0

        def dp2():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)]

        assert_raises(ValueError, dp)
        assert_raises(ValueError, dp2)

    def test_void_coercion(self):
        dt = np.dtype([('a', 'f4'), ('b', 'i4')])
        x = np.zeros((1,), dt)
        assert_(np.r_[x, x].dtype == dt)

    def test_who_with_0dim_array(self):
        # ticket #1243
        import os
        import sys

        oldstdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        try:
            try:
                np.who({'foo': np.array(1)})
            except Exception:
                raise AssertionError("ticket #1243")
        finally:
            sys.stdout.close()
            sys.stdout = oldstdout

    def test_include_dirs(self):
        # As a sanity check, just test that get_include
        # includes something reasonable.  Somewhat
        # related to ticket #1405.
        include_dirs = [np.get_include()]
        for path in include_dirs:
            assert_(isinstance(path, (str, unicode)))
            assert_(path != '')

    def test_polyder_return_type(self):
        # Ticket #1249
        assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
        assert_(isinstance(np.polyder([1], 0), np.ndarray))
        assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
        assert_(isinstance(np.polyder([1], 1), np.ndarray))

    def test_append_fields_dtype_list(self):
        # Ticket #1676
        from numpy.lib.recfunctions import append_fields

        base = np.array([1, 2, 3], dtype=np.int32)
        names = ['a', 'b', 'c']
        data = np.eye(3).astype(np.int32)
        dlist = [np.float64, np.int32, np.int32]
        try:
            append_fields(base, names, data, dlist)
        except Exception:
            raise AssertionError()

    def test_loadtxt_fields_subarrays(self):
        # For ticket #1936
        if sys.version_info[0] >= 3:
            from io import StringIO
        else:
            from StringIO import StringIO

        dt = [("a", 'u1', 2), ("b", 'u1', 2)]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))

        dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))

        dt = [("a", 'u1', (2, 2))]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))

        dt = [("a", 'u1', (2, 3, 2))]
        x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
        data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
        assert_equal(x, np.array(data, dtype=dt))

    def test_nansum_with_boolean(self):
        # gh-2978
        a = np.zeros(2, dtype=bool)
        try:
            np.nansum(a)
        except Exception:
            raise AssertionError()

    def test_py3_compat(self):
        # gh-2561
        # Test if the oldstyle class test is bypassed in python3
        class C():
            """Old-style class in python2, normal class in python3"""
            pass

        out = open(os.devnull, 'w')
        try:
            np.info(C(), output=out)
        except AttributeError:
            raise AssertionError()
        finally:
            out.close()


if __name__ == "__main__":
    run_module_suite()
8,542
31.984556
79
py
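One behavior worth calling out from the regression suite: as test_polydiv_type asserts, np.polydiv promotes integer input to float and keeps complex input complex. A quick sketch of the same check outside the test harness:

import numpy as np

q, r = np.polydiv(np.ones(3, dtype=int), np.ones(3, dtype=int))
assert q.dtype == np.float64          # integer input is promoted to float

q, r = np.polydiv(np.ones(3, dtype=complex), np.ones(3, dtype=complex))
assert q.dtype == np.complex128       # complex input stays complex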
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_stride_tricks.py
from __future__ import division, absolute_import, print_function

import numpy as np
from numpy.core.test_rational import rational
from numpy.testing import (
    run_module_suite, assert_equal, assert_array_equal,
    assert_raises, assert_
    )
from numpy.lib.stride_tricks import (
    as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
    )


def assert_shapes_correct(input_shapes, expected_shape):
    # Broadcast a list of arrays with the given input shapes and check the
    # common output shape.
    inarrays = [np.zeros(s) for s in input_shapes]
    outarrays = broadcast_arrays(*inarrays)
    outshapes = [a.shape for a in outarrays]
    expected = [expected_shape] * len(inarrays)
    assert_equal(outshapes, expected)


def assert_incompatible_shapes_raise(input_shapes):
    # Broadcast a list of arrays with the given (incompatible) input shapes
    # and check that they raise a ValueError.
    inarrays = [np.zeros(s) for s in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *inarrays)


def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
    # Broadcast two shapes against each other and check that the data layout
    # is the same as if a ufunc did the broadcasting.
    x0 = np.zeros(shape0, dtype=int)
    # Note that multiply.reduce's identity element is 1.0, so when shape1==(),
    # this gives the desired n==1.
    n = int(np.multiply.reduce(shape1))
    x1 = np.arange(n).reshape(shape1)
    if transposed:
        x0 = x0.T
        x1 = x1.T
    if flipped:
        x0 = x0[::-1]
        x1 = x1[::-1]
    # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1,
    # the result should be exactly the same as the broadcasted view of x1.
    y = x0 + x1
    b0, b1 = broadcast_arrays(x0, x1)
    assert_array_equal(y, b1)


def test_same():
    x = np.arange(10)
    y = np.arange(10)
    bx, by = broadcast_arrays(x, y)
    assert_array_equal(x, bx)
    assert_array_equal(y, by)


def test_one_off():
    x = np.array([[1, 2, 3]])
    y = np.array([[1], [2], [3]])
    bx, by = broadcast_arrays(x, y)
    bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    by0 = bx0.T
    assert_array_equal(bx0, bx)
    assert_array_equal(by0, by)


def test_same_input_shapes():
    # Check that the final shape is just the input shape.
    data = [
        (),
        (1,),
        (3,),
        (0, 1),
        (0, 3),
        (1, 0),
        (3, 0),
        (1, 3),
        (3, 1),
        (3, 3),
    ]
    for shape in data:
        input_shapes = [shape]
        # Single input.
        assert_shapes_correct(input_shapes, shape)
        # Double input.
        input_shapes2 = [shape, shape]
        assert_shapes_correct(input_shapes2, shape)
        # Triple input.
        input_shapes3 = [shape, shape, shape]
        assert_shapes_correct(input_shapes3, shape)


def test_two_compatible_by_ones_input_shapes():
    # Check that two different input shapes of the same length, but some have
    # ones, broadcast to the correct shape.
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)


def test_two_compatible_by_prepending_ones_input_shapes():
    # Check that two different input shapes (of different lengths) broadcast
    # to the correct shape.
    data = [
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)


def test_incompatible_shapes_raise_valueerror():
    # Check that a ValueError is raised for incompatible shapes.
    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for input_shapes in data:
        assert_incompatible_shapes_raise(input_shapes)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_incompatible_shapes_raise(input_shapes[::-1])


def test_same_as_ufunc():
    # Check that the data layout is the same as if a ufunc did the operation.
    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        # Note: the original call passed a "Shapes: %s %s" message string as
        # the third positional argument, which silently set transposed=True;
        # assert_same_as_ufunc takes no message parameter, so it is dropped.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
        # Try them transposed, too.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
        # ... and flipped for non-rank-0 inputs in order to test negative
        # strides.
        if () not in input_shapes:
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)


def test_broadcast_to_succeeds():
    data = [
        [np.array(0), (0,), np.array(0)],
        [np.array(0), (1,), np.zeros(1)],
        [np.array(0), (3,), np.zeros(3)],
        [np.ones(1), (1,), np.ones(1)],
        [np.ones(1), (2,), np.ones(2)],
        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
        [np.arange(3), (3,), np.arange(3)],
        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
        # test if shape is not a tuple
        [np.ones(0), 0, np.ones(0)],
        [np.ones(1), 1, np.ones(1)],
        [np.ones(1), 2, np.ones(2)],
        # these cases with size 0 are strange, but they reproduce the behavior
        # of broadcasting with ufuncs (see test_same_as_ufunc above)
        [np.ones(1), (0,), np.ones(0)],
        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
    ]
    for input_array, shape, expected in data:
        actual = broadcast_to(input_array, shape)
        assert_array_equal(expected, actual)


def test_broadcast_to_raises():
    data = [
        [(0,), ()],
        [(1,), ()],
        [(3,), ()],
        [(3,), (1,)],
        [(3,), (2,)],
        [(3,), (4,)],
        [(1, 2), (2, 1)],
        [(1, 1), (1,)],
        [(1,), -1],
        [(1,), (-1,)],
        [(1, 2), (-1, 2)],
    ]
    for orig_shape, target_shape in data:
        arr = np.zeros(orig_shape)
        assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))


def test_broadcast_shape():
    # broadcast_shape is already exercised indirectly by broadcast_arrays
    assert_equal(_broadcast_shape(), ())
    assert_equal(_broadcast_shape([1, 2]), (2,))
    assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
    assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))

    # regression tests for gh-5862
    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
    bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
    assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))


def test_as_strided():
    a = np.array([None])
    a_view = as_strided(a)
    expected = np.array([None])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)

    # Regression test for gh-5081
    dt = np.dtype([('num', 'i4'), ('obj', 'O')])
    a = np.empty((4,), dtype=dt)
    a['num'] = np.arange(1, 5)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    expected_num = [[1, 2, 3, 4]] * 3
    expected_obj = [[None]*4]*3
    assert_equal(a_view.dtype, dt)
    assert_array_equal(expected_num, a_view['num'])
    assert_array_equal(expected_obj, a_view['obj'])

    # Make sure that void types without fields are kept unchanged
    a = np.empty((4,), dtype='V4')
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Make sure that the only type that could fail is properly handled
    dt = np.dtype({'names': [''], 'formats': ['V4']})
    a = np.empty((4,), dtype=dt)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Custom dtypes should not be lost (gh-9161)
    r = [rational(i) for i in range(4)]
    a = np.array(r, dtype=rational)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)
    assert_array_equal([r] * 3, a_view)


def test_as_strided_writeable():
    # Renamed from as_strided_writeable: without the test_ prefix the test
    # runner never collected this function.
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)


class VerySimpleSubClass(np.ndarray):
    def __new__(cls, *args, **kwargs):
        kwargs['subok'] = True
        return np.array(*args, **kwargs).view(cls)


class SimpleSubClass(VerySimpleSubClass):
    def __new__(cls, *args, **kwargs):
        kwargs['subok'] = True
        self = np.array(*args, **kwargs).view(cls)
        self.info = 'simple'
        return self

    def __array_finalize__(self, obj):
        self.info = getattr(obj, 'info', '') + ' finalized'


def test_subclasses():
    # test that subclass is preserved only if subok=True
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(a_view) is np.ndarray)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is VerySimpleSubClass)
    # test that if a subclass has __array_finalize__, it is used
    a = SimpleSubClass([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')

    # similar tests for broadcast_arrays
    b = np.arange(len(a)).reshape(-1, 1)
    a_view, b_view = broadcast_arrays(a, b)
    assert_(type(a_view) is np.ndarray)
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    a_view, b_view = broadcast_arrays(a, b, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)

    # and for broadcast_to
    shape = (2, 4)
    a_view = broadcast_to(a, shape)
    assert_(type(a_view) is np.ndarray)
    assert_(a_view.shape == shape)
    a_view = broadcast_to(a, shape, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(a_view.shape == shape)


def test_writeable():
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)

    # but the result of broadcast_arrays needs to be writeable (for now), to
    # preserve backwards compatibility
    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            assert_equal(result.flags.writeable, True)
    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)
    # regression test for GH6491
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)


def test_reference_types():
    input_array = np.array('a', dtype=object)
    expected = np.array(['a'] * 3, dtype=object)
    actual = broadcast_to(input_array, (3,))
    assert_array_equal(expected, actual)

    actual, _ = broadcast_arrays(input_array, np.ones(3))
    assert_array_equal(expected, actual)


if __name__ == "__main__":
    run_module_suite()
15,042
33.266515
82
py
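Two facts the file above keeps exercising, in a compact standalone form: as_strided builds zero-copy views from an explicit shape and strides, and broadcast_to returns a read-only view:

import numpy as np
from numpy.lib.stride_tricks import as_strided, broadcast_to

a = np.array([1, 2, 3, 4])
# Step over every other element without copying, by doubling the stride.
view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
assert np.array_equal(view, [1, 3])

# broadcast_to gives a view whose writeable flag is cleared.
b = broadcast_to(np.array([1, 2, 3]), (2, 3))
assert not b.flags.writeable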
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test__version.py
"""Tests for the NumpyVersion class. """ from __future__ import division, absolute_import, print_function from numpy.testing import assert_, run_module_suite, assert_raises from numpy.lib import NumpyVersion def test_main_versions(): assert_(NumpyVersion('1.8.0') == '1.8.0') for ver in ['1.9.0', '2.0.0', '1.8.1']: assert_(NumpyVersion('1.8.0') < ver) for ver in ['1.7.0', '1.7.1', '0.9.9']: assert_(NumpyVersion('1.8.0') > ver) def test_version_1_point_10(): # regression test for gh-2998. assert_(NumpyVersion('1.9.0') < '1.10.0') assert_(NumpyVersion('1.11.0') < '1.11.1') assert_(NumpyVersion('1.11.0') == '1.11.0') assert_(NumpyVersion('1.99.11') < '1.99.12') def test_alpha_beta_rc(): assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') for ver in ['1.8.0', '1.8.0rc2']: assert_(NumpyVersion('1.8.0rc1') < ver) for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: assert_(NumpyVersion('1.8.0rc1') > ver) assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') def test_dev_version(): assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') def test_dev_a_b_rc_mixed(): assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') def test_dev0_version(): assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') def test_dev0_a_b_rc_mixed(): assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') def test_raises(): for ver in ['1.9', '1,9.0', '1.7.x']: assert_raises(ValueError, NumpyVersion, ver) if __name__ == "__main__": run_module_suite()
2,125
28.943662
77
py
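The reason NumpyVersion exists is that its comparisons are semantic rather than lexical, so '1.10.0' sorts after '1.9.0' and pre-releases sort before the final release; for instance:

from numpy.lib import NumpyVersion

assert NumpyVersion('1.10.0') > '1.9.0'    # plain string comparison would get this wrong
assert NumpyVersion('1.8.0rc1') < '1.8.0'  # release candidates precede the release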
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_packbits.py
from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import ( assert_array_equal, assert_equal, assert_raises, run_module_suite ) def test_packbits(): # Copied from the docstring. a = [[[1, 0, 1], [0, 1, 0]], [[1, 1, 0], [0, 0, 1]]] for dt in '?bBhHiIlLqQ': arr = np.array(a, dtype=dt) b = np.packbits(arr, axis=-1) assert_equal(b.dtype, np.uint8) assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) def test_packbits_empty(): shapes = [ (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), (0, 0, 20), (0, 0, 0), ] for dt in '?bBhHiIlLqQ': for shape in shapes: a = np.empty(shape, dtype=dt) b = np.packbits(a) assert_equal(b.dtype, np.uint8) assert_equal(b.shape, (0,)) def test_packbits_empty_with_axis(): # Original shapes and lists of packed shapes for different axes. shapes = [ ((0,), [(0,)]), ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), ] for dt in '?bBhHiIlLqQ': for in_shape, out_shapes in shapes: for ax, out_shape in enumerate(out_shapes): a = np.empty(in_shape, dtype=dt) b = np.packbits(a, axis=ax) assert_equal(b.dtype, np.uint8) assert_equal(b.shape, out_shape) def test_packbits_large(): # test data large enough for 16 byte vectorization a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) a = a.repeat(3) for dtype in '?bBhHiIlLqQ': arr = np.array(a, dtype=dtype) b = np.packbits(arr, axis=None) assert_equal(b.dtype, np.uint8) r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, 129, 248, 227, 129, 199, 31, 128] assert_array_equal(b, r) # equal for size being multiple of 8 assert_array_equal(np.unpackbits(b)[:-4], a) # check last byte of different remainders (16 byte vectorization) b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, 198, 196, 192]) arr = arr.reshape(36, 25) b = np.packbits(arr, axis=0) 
assert_equal(b.dtype, np.uint8) assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, 107, 75, 74, 88], [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, 41, 104, 122, 90, 18], [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, 150, 150, 146, 210, 210, 246, 255, 255, 223, 151, 21, 17, 17, 131, 163], [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, 202, 234, 170, 168], [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, 240, 208, 144, 128, 160, 224, 240, 208, 144, 144, 176, 240, 224, 192, 128]]) b = np.packbits(arr, axis=1) assert_equal(b.dtype, np.uint8) assert_array_equal(b, [[252, 127, 192, 0], [ 7, 252, 15, 128], [240, 0, 28, 0], [255, 128, 0, 128], [192, 31, 255, 128], [142, 63, 0, 0], [255, 240, 7, 0], [ 7, 224, 14, 0], [126, 0, 224, 0], [255, 255, 199, 0], [ 56, 28, 126, 0], [113, 248, 227, 128], [227, 142, 63, 0], [ 0, 28, 112, 0], [ 15, 248, 3, 128], [ 28, 126, 56, 0], [ 56, 255, 241, 128], [240, 7, 224, 0], [227, 129, 192, 128], [255, 255, 254, 0], [126, 0, 224, 0], [ 3, 241, 248, 0], [ 0, 255, 241, 128], [128, 0, 255, 128], [224, 1, 255, 128], [248, 252, 126, 0], [ 0, 7, 3, 128], [224, 113, 248, 0], [ 0, 252, 127, 128], [142, 63, 224, 0], [224, 14, 63, 0], [ 7, 3, 128, 0], [113, 255, 255, 128], [ 28, 113, 199, 0], [ 7, 227, 142, 0], [ 14, 56, 252, 0]]) arr = arr.T.copy() b = np.packbits(arr, axis=0) assert_equal(b.dtype, np.uint8) assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, 7, 113, 28, 7, 14], [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, 3, 255, 113, 227, 56], [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, 128, 255, 199, 142, 252], [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) b = np.packbits(arr, axis=1) assert_equal(b.dtype, np.uint8) assert_array_equal(b, [[190, 72, 113, 214, 0], [186, 216, 120, 210, 128], [178, 248, 248, 210, 128], [178, 241, 216, 64, 192], [150, 227, 152, 68, 80], [215, 195, 24, 5, 112], [ 87, 202, 60, 5, 48], [ 83, 90, 52, 1, 160], [ 83, 90, 182, 72, 160], [195, 83, 150, 88, 224], [199, 83, 150, 92, 240], [206, 119, 150, 92, 208], [204, 127, 146, 78, 144], [204, 109, 210, 110, 128], [140, 73, 210, 39, 160], [140, 64, 246, 181, 224], [136, 208, 255, 149, 240], [136, 244, 255, 220, 208], [ 8, 189, 223, 222, 144], [ 40, 45, 151, 218, 144], [105, 41, 21, 218, 176], [107, 104, 17, 202, 240], [ 75, 122, 17, 234, 224], [ 74, 90, 131, 170, 192], [ 88, 18, 163, 168, 128]]) # result is the same if input is multiplied with a nonzero value for dtype in 'bBhHiIlLqQ': arr = np.array(a, dtype=dtype) rnd = np.random.randint(low=np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=arr.size, dtype=dtype) rnd[rnd == 0] = 1 arr *= rnd.astype(dtype) b = np.packbits(arr, axis=-1) assert_array_equal(np.unpackbits(b)[:-4], a) assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) def test_packbits_very_large(): # test some with a larger arrays gh-8637 # code is covered earlier but larger array makes crash on bug more likely for s in range(950, 1050): for dt in '?bBhHiIlLqQ': x = np.ones((200, s), dtype=bool) np.packbits(x, axis=1) def 
test_unpackbits(): # Copied from the docstring. a = np.array([[2], [7], [23]], dtype=np.uint8) b = np.unpackbits(a, axis=1) assert_equal(b.dtype, np.uint8) assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1, 1]])) def test_unpackbits_empty(): a = np.empty((0,), dtype=np.uint8) b = np.unpackbits(a) assert_equal(b.dtype, np.uint8) assert_array_equal(b, np.empty((0,))) def test_unpackbits_empty_with_axis(): # Lists of packed shapes for different axes and unpacked shapes. shapes = [ ([(0,)], (0,)), ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), ] for in_shapes, out_shape in shapes: for ax, in_shape in enumerate(in_shapes): a = np.empty(in_shape, dtype=np.uint8) b = np.unpackbits(a, axis=ax) assert_equal(b.dtype, np.uint8) assert_equal(b.shape, out_shape) def test_unpackbits_large(): # test all possible numbers via comparison to already tested packbits d = np.arange(277, dtype=np.uint8) assert_array_equal(np.packbits(np.unpackbits(d)), d) assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) d = np.tile(d, (3, 1)) assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) d = d.T.copy() assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) if __name__ == "__main__": run_module_suite()
12,929
46.018182
88
py
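The invariant these tests keep returning to is that np.unpackbits inverts np.packbits, with bits packed most-significant first and the last byte zero-padded; a one-byte example:

import numpy as np

bits = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)
packed = np.packbits(bits)
assert packed[0] == 0b10110010        # MSB-first packing of the eight input bits
assert np.array_equal(np.unpackbits(packed), bits)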
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_arrayterator.py
from __future__ import division, absolute_import, print_function

from operator import mul
from functools import reduce

import numpy as np
from numpy.random import randint
from numpy.lib import Arrayterator
from numpy.testing import assert_


def test():
    np.random.seed(np.arange(10))

    # Create a random array
    ndims = randint(5)+1
    shape = tuple(randint(10)+1 for dim in range(ndims))
    els = reduce(mul, shape)
    a = np.arange(els)
    a.shape = shape

    buf_size = randint(2*els)
    b = Arrayterator(a, buf_size)

    # Check that each block has at most ``buf_size`` elements
    for block in b:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that all elements are iterated correctly
    assert_(list(b.flat) == list(a.flat))

    # Slice arrayterator
    start = [randint(dim) for dim in shape]
    stop = [randint(dim)+1 for dim in shape]
    step = [randint(dim)+1 for dim in shape]
    slice_ = tuple(slice(*t) for t in zip(start, stop, step))
    c = b[slice_]
    d = a[slice_]

    # Check that each block has at most ``buf_size`` elements
    for block in c:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that the arrayterator is sliced correctly
    assert_(np.all(c.__array__() == d))

    # Check that all elements are iterated correctly
    assert_(list(c.flat) == list(d.flat))


if __name__ == '__main__':
    from numpy.testing import run_module_suite
    run_module_suite()
1,455
26.471698
64
py
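Arrayterator walks an array in blocks of at most buf_size elements while still supporting slicing, which is exactly what the randomized test above probes; a small deterministic sketch:

import numpy as np
from numpy.lib import Arrayterator

a = np.arange(24).reshape(2, 3, 4)
it = Arrayterator(a, buf_size=5)
assert all(block.size <= 5 for block in it)   # each buffered block stays small
assert list(it.flat) == list(a.flat)          # and iteration covers every element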
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_arraypad.py
"""Tests for the array padding functions. """ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,) from numpy.lib import pad class TestConditionalShortcuts(object): def test_zero_padding_shortcuts(self): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(0, 0) for axis in test.shape] modes = ['constant', 'edge', 'linear_ramp', 'maximum', 'mean', 'median', 'minimum', 'reflect', 'symmetric', 'wrap', ] for mode in modes: assert_array_equal(test, pad(test, pad_amt, mode=mode)) def test_shallow_statistic_range(self): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(1, 1) for axis in test.shape] modes = ['maximum', 'mean', 'median', 'minimum', ] for mode in modes: assert_array_equal(pad(test, pad_amt, mode='edge'), pad(test, pad_amt, mode=mode, stat_length=1)) def test_clip_statistic_range(self): test = np.arange(30).reshape(5, 6) pad_amt = [(3, 3) for axis in test.shape] modes = ['maximum', 'mean', 'median', 'minimum', ] for mode in modes: assert_array_equal(pad(test, pad_amt, mode=mode), pad(test, pad_amt, mode=mode, stat_length=30)) class TestStatistic(object): def test_check_mean_stat_length(self): a = np.arange(100).astype('f') a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) b = np.array( [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. 
]) assert_array_equal(a, b) def test_check_maximum_1(self): a = np.arange(100) a = pad(a, (25, 20), 'maximum') b = np.array( [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] ) assert_array_equal(a, b) def test_check_maximum_2(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'maximum') b = np.array( [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] ) assert_array_equal(a, b) def test_check_maximum_stat_length(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'maximum', stat_length=10) b = np.array( [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] ) assert_array_equal(a, b) def test_check_minimum_1(self): a = np.arange(100) a = pad(a, (25, 20), 'minimum') b = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) assert_array_equal(a, b) def test_check_minimum_2(self): a = np.arange(100) + 2 a = pad(a, (25, 20), 'minimum') b = np.array( [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] ) assert_array_equal(a, b) def 
test_check_minimum_stat_length(self): a = np.arange(100) + 1 a = pad(a, (25, 20), 'minimum', stat_length=10) b = np.array( [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] ) assert_array_equal(a, b) def test_check_median(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'median') b = np.array( [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] ) assert_array_equal(a, b) def test_check_median_01(self): a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) a = pad(a, 1, 'median') b = np.array( [[4, 4, 5, 4, 4], [3, 3, 1, 4, 3], [5, 4, 5, 9, 5], [8, 9, 8, 2, 8], [4, 4, 5, 4, 4]] ) assert_array_equal(a, b) def test_check_median_02(self): a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) a = pad(a.T, 1, 'median').T b = np.array( [[5, 4, 5, 4, 5], [3, 3, 1, 4, 3], [5, 4, 5, 9, 5], [8, 9, 8, 2, 8], [5, 4, 5, 4, 5]] ) assert_array_equal(a, b) def test_check_median_stat_length(self): a = np.arange(100).astype('f') a[1] = 2. a[97] = 96. a = pad(a, (25, 20), 'median', stat_length=(3, 5)) b = np.array( [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] 
) assert_array_equal(a, b) def test_check_mean_shape_one(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'mean', stat_length=2) b = np.array( [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] ) assert_array_equal(a, b) def test_check_mean_2(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'mean') b = np.array( [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] ) assert_array_equal(a, b) class TestConstant(object): def test_check_constant(self): a = np.arange(100) a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) b = np.array( [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] ) assert_array_equal(a, b) def test_check_constant_zeros(self): a = np.arange(100) a = pad(a, (25, 20), 'constant') b = np.array( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) assert_array_equal(a, b) def test_check_constant_float(self): # If input array is int, but constant_values are float, the dtype of # the array to be padded is kept arr = np.arange(30).reshape(5, 6) test = pad(arr, (1, 2), mode='constant', constant_values=1.1) expected = np.array( [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], [ 1, 0, 1, 2, 3, 4, 5, 1, 1], [ 1, 6, 7, 8, 9, 10, 11, 1, 1], [ 1, 12, 13, 14, 15, 16, 17, 1, 1], [ 1, 18, 19, 20, 21, 22, 23, 1, 1], [ 1, 24, 25, 26, 27, 28, 29, 1, 1], [ 1, 1, 1, 1, 1, 1, 1, 1, 
1], [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) assert_allclose(test, expected) def test_check_constant_float2(self): # If input array is float, and constant_values are float, the dtype of # the array to be padded is kept - here retaining the float constants arr = np.arange(30).reshape(5, 6) arr_float = arr.astype(np.float64) test = pad(arr_float, ((1, 2), (1, 2)), mode='constant', constant_values=1.1) expected = np.array( [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] ) assert_allclose(test, expected) def test_check_constant_float3(self): a = np.arange(100, dtype=float) a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) b = np.array( [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] ) assert_allclose(a, b) def test_check_constant_odd_pad_amount(self): arr = np.arange(30).reshape(5, 6) test = pad(arr, ((1,), (2,)), mode='constant', constant_values=3) expected = np.array( [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] ) assert_allclose(test, expected) def test_check_constant_pad_2d(self): arr = np.arange(4).reshape(2, 2) test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant', constant_values=((1, 2), (3, 4))) expected = np.array( [[3, 1, 1, 4, 4, 4], [3, 0, 1, 4, 4, 4], [3, 2, 3, 4, 4, 4], [3, 2, 2, 4, 4, 4], [3, 2, 2, 4, 4, 4]] ) assert_allclose(test, expected) class TestLinearRamp(object): def test_check_simple(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) b = np.array( [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, 0.80, 0.64, 0.48, 0.32, 0.16, 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] 
) assert_allclose(a, b, rtol=1e-5, atol=1e-5) def test_check_2d(self): arr = np.arange(20).reshape(4, 5).astype(np.float64) test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) expected = np.array( [[0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], [0., 0., 0., 1., 2., 3., 4., 2., 0.], [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], [0., 5., 10., 11., 12., 13., 14., 7., 0.], [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) assert_allclose(test, expected) class TestReflect(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'reflect') b = np.array( [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] ) assert_array_equal(a, b) def test_check_odd_method(self): a = np.arange(100) a = pad(a, (25, 20), 'reflect', reflect_type='odd') b = np.array( [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] ) assert_array_equal(a, b) def test_check_large_pad(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'reflect') b = np.array( [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] ) assert_array_equal(a, b) def test_check_shape(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'reflect') b = np.array( [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] ) assert_array_equal(a, b) def 
test_check_01(self): a = pad([1, 2, 3], 2, 'reflect') b = np.array([3, 2, 1, 2, 3, 2, 1]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 3, 'reflect') b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) assert_array_equal(a, b) def test_check_03(self): a = pad([1, 2, 3], 4, 'reflect') b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) def test_check_padding_an_empty_array(self): a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect') b = np.zeros((0, 5)) assert_array_equal(a, b) class TestSymmetric(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'symmetric') b = np.array( [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] ) assert_array_equal(a, b) def test_check_odd_method(self): a = np.arange(100) a = pad(a, (25, 20), 'symmetric', reflect_type='odd') b = np.array( [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] ) assert_array_equal(a, b) def test_check_large_pad(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'symmetric') b = np.array( [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] ) assert_array_equal(a, b) def test_check_large_pad_odd(self): a = [[4, 5, 6], [6, 7, 8]] a = pad(a, (5, 7), 'symmetric', reflect_type='odd') b = np.array( [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], [ 9, 10, 10, 11, 
12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] ) assert_array_equal(a, b) def test_check_shape(self): a = [[4, 5, 6]] a = pad(a, (5, 7), 'symmetric') b = np.array( [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] ) assert_array_equal(a, b) def test_check_01(self): a = pad([1, 2, 3], 2, 'symmetric') b = np.array([2, 1, 1, 2, 3, 3, 2]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 3, 'symmetric') b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) assert_array_equal(a, b) def test_check_03(self): a = pad([1, 2, 3], 6, 'symmetric') b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) assert_array_equal(a, b) class TestWrap(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'wrap') b = np.array( [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] ) assert_array_equal(a, b) def test_check_large_pad(self): a = np.arange(12) a = np.reshape(a, (3, 4)) a = pad(a, (10, 12), 'wrap') b = np.array( [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 
4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11], [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3], [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7], [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11]] ) assert_array_equal(a, b) def test_check_01(self): a = pad([1, 2, 3], 3, 'wrap') b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) assert_array_equal(a, b) def test_check_02(self): a = pad([1, 2, 3], 4, 'wrap') b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) assert_array_equal(a, b) class TestStatLen(object): def test_check_simple(self): a = np.arange(30) a = np.reshape(a, (6, 5)) a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) b = np.array( [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] ) assert_array_equal(a, b) class TestEdge(object): def test_check_simple(self): a = np.arange(12) a = np.reshape(a, (4, 3)) a = pad(a, ((2, 3), (3, 2)), 'edge') b = np.array( [[0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [3, 3, 3, 3, 4, 5, 5, 5], [6, 6, 6, 6, 7, 8, 8, 8], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11]] ) assert_array_equal(a, b) def test_check_width_shape_1_2(self): # Check a pad_width of the form ((1, 2),). # Regression test for issue gh-7808. 
a = np.array([1, 2, 3]) padded = pad(a, ((1, 2),), 'edge') expected = np.array([1, 1, 2, 3, 3, 3]) assert_array_equal(padded, expected) a = np.array([[1, 2, 3], [4, 5, 6]]) padded = pad(a, ((1, 2),), 'edge') expected = pad(a, ((1, 2), (1, 2)), 'edge') assert_array_equal(padded, expected) a = np.arange(24).reshape(2, 3, 4) padded = pad(a, ((1, 2),), 'edge') expected = pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') assert_array_equal(padded, expected) class TestZeroPadWidth(object): def test_zero_pad_width(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) for pad_width in (0, (0, 0), ((0, 0), (0, 0))): assert_array_equal(arr, pad(arr, pad_width, mode='constant')) class TestLegacyVectorFunction(object): def test_legacy_vector_functionality(self): def _padwithtens(vector, pad_width, iaxis, kwargs): vector[:pad_width[0]] = 10 vector[-pad_width[1]:] = 10 return vector a = np.arange(6).reshape(2, 3) a = pad(a, 2, _padwithtens) b = np.array( [[10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 0, 1, 2, 10, 10], [10, 10, 3, 4, 5, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10]] ) assert_array_equal(a, b) class TestNdarrayPadWidth(object): def test_check_simple(self): a = np.arange(12) a = np.reshape(a, (4, 3)) a = pad(a, np.array(((2, 3), (3, 2))), 'edge') b = np.array( [[0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 2, 2], [3, 3, 3, 3, 4, 5, 5, 5], [6, 6, 6, 6, 7, 8, 8, 8], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11], [9, 9, 9, 9, 10, 11, 11, 11]] ) assert_array_equal(a, b) class TestUnicodeInput(object): def test_unicode_mode(self): constant_mode = u'constant' a = np.pad([1], 2, mode=constant_mode) b = np.array([0, 0, 1, 0, 0]) assert_array_equal(a, b) class TestValueError1(object): def test_check_simple(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)), **kwargs) def test_check_negative_stat_length(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(-3, )) assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)), **kwargs) def test_check_negative_pad_width(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), **kwargs) def test_check_empty_array(self): assert_raises(ValueError, pad, [], 4, mode='reflect') assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect') assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)), mode='reflect') class TestValueError2(object): def test_check_negative_pad_amount(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), **kwargs) class TestValueError3(object): def test_check_kwarg_not_allowed(self): arr = np.arange(30).reshape(5, 6) assert_raises(ValueError, pad, arr, 4, mode='mean', reflect_type='odd') def test_mode_not_set(self): arr = np.arange(30).reshape(5, 6) assert_raises(TypeError, pad, arr, 4) def test_malformed_pad_amount(self): arr = np.arange(30).reshape(5, 6) assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant') def test_malformed_pad_amount2(self): arr = np.arange(30).reshape(5, 6) assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)), mode='constant') def test_pad_too_many_axes(self): arr = np.arange(30).reshape(5, 6) # Attempt to pad using a 3D array 
equivalent bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,))) assert_raises(ValueError, pad, arr, bad_shape, mode='constant') class TestTypeError1(object): def test_float(self): arr = np.arange(30) assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2))) assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2)))) def test_str(self): arr = np.arange(30) assert_raises(TypeError, pad, arr, 'foo') assert_raises(TypeError, pad, arr, np.array('foo')) def test_object(self): class FooBar(object): pass arr = np.arange(30) assert_raises(TypeError, pad, arr, FooBar()) def test_complex(self): arr = np.arange(30) assert_raises(TypeError, pad, arr, complex(1, -1)) assert_raises(TypeError, pad, arr, np.array(complex(1, -1))) def test_check_wrong_pad_amount(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)), **kwargs) if __name__ == "__main__": np.testing.run_module_suite()
43647
38.788514
79
py
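Editor's note (illustrative sketch, not part of the dataset row above): the arraypad tests pin down the exact fill values each np.pad mode produces. The snippet below reproduces the small 1-D expectations from the 'symmetric', 'wrap', and 'edge' cases; note that on the NumPy version captured here the mode argument is mandatory (TestValueError3.test_mode_not_set asserts a TypeError when it is omitted).

import numpy as np

a = np.array([1, 2, 3])
print(np.pad(a, 2, mode='symmetric'))   # [2 1 1 2 3 3 2]
print(np.pad(a, 3, mode='wrap'))        # [1 2 3 1 2 3 1 2 3]
print(np.pad(a, (1, 2), mode='edge'))   # [1 1 2 3 3 3]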
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_twodim_base.py
"""Test functions for matrix module """ from __future__ import division, absolute_import, print_function from numpy.testing import ( run_module_suite, assert_equal, assert_array_equal, assert_array_max_ulp, assert_array_almost_equal, assert_raises, ) from numpy import ( arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, tri, mask_indices, triu_indices, triu_indices_from, tril_indices, tril_indices_from, vander, ) import numpy as np def get_mat(n): data = arange(n) data = add.outer(data, data) return data class TestEye(object): def test_basic(self): assert_equal(eye(4), array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])) assert_equal(eye(4, dtype='f'), array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], 'f')) assert_equal(eye(3) == 1, eye(3, dtype=bool)) def test_diag(self): assert_equal(eye(4, k=1), array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])) assert_equal(eye(4, k=-1), array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])) def test_2d(self): assert_equal(eye(4, 3), array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])) assert_equal(eye(3, 4), array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])) def test_diag2d(self): assert_equal(eye(3, 4, k=2), array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])) assert_equal(eye(4, 3, k=-2), array([[0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 1, 0]])) def test_eye_bounds(self): assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) def test_strings(self): assert_equal(eye(2, 2, dtype='S3'), [[b'1', b''], [b'', b'1']]) def test_bool(self): assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) def test_order(self): mat_c = eye(4, 3, k=-1) mat_f = eye(4, 3, k=-1, order='F') assert_equal(mat_c, mat_f) assert mat_c.flags.c_contiguous assert not mat_c.flags.f_contiguous assert not mat_f.flags.c_contiguous assert mat_f.flags.f_contiguous class TestDiag(object): def test_vector(self): vals = (100 * arange(5)).astype('l') b = zeros((5, 5)) for k in range(5): b[k, k] = vals[k] assert_equal(diag(vals), b) b = zeros((7, 7)) c = b.copy() for k in range(5): b[k, k + 2] = vals[k] c[k + 2, k] = vals[k] assert_equal(diag(vals, k=2), b) assert_equal(diag(vals, k=-2), c) def test_matrix(self, vals=None): if vals is None: vals = (100 * get_mat(5) + 1).astype('l') b = zeros((5,)) for k in range(5): b[k] = vals[k, k] assert_equal(diag(vals), b) b = b * 0 for k in range(3): b[k] = vals[k, k + 2] assert_equal(diag(vals, 2), b[:3]) for k in range(3): b[k] = vals[k + 2, k] assert_equal(diag(vals, -2), b[:3]) def test_fortran_order(self): vals = array((100 * get_mat(5) + 1), order='F', dtype='l') self.test_matrix(vals) def test_diag_bounds(self): A = [[1, 2], [3, 4], [5, 6]] assert_equal(diag(A, k=2), []) assert_equal(diag(A, k=1), [2]) assert_equal(diag(A, k=0), [1, 4]) assert_equal(diag(A, k=-1), [3, 6]) assert_equal(diag(A, k=-2), [5]) assert_equal(diag(A, k=-3), []) def test_failure(self): assert_raises(ValueError, diag, [[[1]]]) class TestFliplr(object): def test_basic(self): assert_raises(ValueError, fliplr, ones(4)) a = get_mat(4) b = a[:, ::-1] assert_equal(fliplr(a), b) a = [[0, 1, 2], [3, 4, 5]] b = [[2, 1, 
0], [5, 4, 3]] assert_equal(fliplr(a), b) class TestFlipud(object): def test_basic(self): a = get_mat(4) b = a[::-1, :] assert_equal(flipud(a), b) a = [[0, 1, 2], [3, 4, 5]] b = [[3, 4, 5], [0, 1, 2]] assert_equal(flipud(a), b) class TestHistogram2d(object): def test_simple(self): x = array( [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) y = array( [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) xedges = np.linspace(0, 1, 10) yedges = np.linspace(0, 1, 10) H = histogram2d(x, y, (xedges, yedges))[0] answer = array( [[0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]) assert_array_equal(H.T, answer) H = histogram2d(x, y, xedges)[0] assert_array_equal(H.T, answer) H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) assert_array_equal(H, eye(10, 10)) assert_array_equal(xedges, np.linspace(0, 9, 11)) assert_array_equal(yedges, np.linspace(0, 9, 11)) def test_asym(self): x = array([1, 1, 2, 3, 4, 4, 4, 5]) y = array([1, 3, 2, 0, 1, 2, 3, 4]) H, xed, yed = histogram2d( x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True) answer = array( [[0., 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 1]]) assert_array_almost_equal(H, answer/8., 3) assert_array_equal(xed, np.linspace(0, 6, 7)) assert_array_equal(yed, np.linspace(0, 5, 6)) def test_norm(self): x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) H, xed, yed = histogram2d( x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True) answer = array([[1, 1, .5], [1, 1, .5], [.5, .5, .25]])/9. assert_array_almost_equal(H, answer, 3) def test_all_outliers(self): r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) assert_array_equal(H, 0) def test_empty(self): a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) assert_array_max_ulp(a, array([[0.]])) a, edge1, edge2 = histogram2d([], [], bins=4) assert_array_max_ulp(a, np.zeros((4, 4))) def test_binparameter_combination(self): x = array( [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, 0.59944483, 1]) y = array( [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, 0.15886423, 1]) edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) H, xe, ye = histogram2d(x, y, (edges, 4)) answer = array( [[ 2., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 1., 0., 0.], [ 1., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 1.]]) assert_array_equal(H, answer) assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) H, xe, ye = histogram2d(x, y, (4, edges)) answer = array( [[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) assert_array_equal(H, answer) assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) class TestTri(object): def test_dtype(self): out = array([[1, 0, 0], [1, 1, 0], [1, 1, 1]]) assert_array_equal(tri(3), out) assert_array_equal(tri(3, dtype=bool), out.astype(bool)) def test_tril_triu_ndim2(): for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: a = np.ones((2, 2), dtype=dtype) b = np.tril(a) c = np.triu(a) yield assert_array_equal, b, [[1, 0], [1, 1]] yield assert_array_equal, c, b.T # should return the same dtype as the original array yield assert_equal, b.dtype, a.dtype yield assert_equal, c.dtype, a.dtype def test_tril_triu_ndim3(): for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: a = np.array([ [[1, 1], [1, 1]], [[1, 1], [1, 0]], [[1, 1], [0, 0]], ], dtype=dtype) a_tril_desired = np.array([ [[1, 0], [1, 1]], [[1, 0], [1, 0]], [[1, 0], [0, 0]], ], dtype=dtype) a_triu_desired = np.array([ [[1, 1], [0, 1]], [[1, 1], [0, 0]], [[1, 1], [0, 0]], ], dtype=dtype) a_triu_observed = np.triu(a) a_tril_observed = np.tril(a) yield assert_array_equal, a_triu_observed, a_triu_desired yield assert_array_equal, a_tril_observed, a_tril_desired yield assert_equal, a_triu_observed.dtype, a.dtype yield assert_equal, a_tril_observed.dtype, a.dtype def test_tril_triu_with_inf(): # Issue 4859 arr = np.array([[1, 1, np.inf], [1, 1, 1], [np.inf, 1, 1]]) out_tril = np.array([[1, 0, 0], [1, 1, 0], [np.inf, 1, 1]]) out_triu = out_tril.T assert_array_equal(np.triu(arr), out_triu) assert_array_equal(np.tril(arr), out_tril) def test_tril_triu_dtype(): # Issue 4916 # tril and triu should return the same dtype as input for c in np.typecodes['All']: if c == 'V': continue arr = np.zeros((3, 3), dtype=c) assert_equal(np.triu(arr).dtype, arr.dtype) assert_equal(np.tril(arr).dtype, arr.dtype) # check special cases arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], ['2004-01-01T12:00', '2003-01-03T13:45']], dtype='datetime64') assert_equal(np.triu(arr).dtype, arr.dtype) assert_equal(np.tril(arr).dtype, arr.dtype) arr = np.zeros((3,3), dtype='f4,f4') assert_equal(np.triu(arr).dtype, arr.dtype) assert_equal(np.tril(arr).dtype, arr.dtype) def test_mask_indices(): # simple test without offset iu = mask_indices(3, np.triu) a = np.arange(9).reshape(3, 3) assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) # Now with an offset iu1 = 
mask_indices(3, np.triu, 1) assert_array_equal(a[iu1], array([1, 2, 5])) def test_tril_indices(): # indices without and with offset il1 = tril_indices(4) il2 = tril_indices(4, k=2) il3 = tril_indices(4, m=5) il4 = tril_indices(4, k=2, m=5) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) b = np.arange(1, 21).reshape(4, 5) # indexing: assert_array_equal(a[il1], array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) assert_array_equal(b[il3], array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) # And for assigning values: a[il1] = -1 assert_array_equal(a, array([[-1, 2, 3, 4], [-1, -1, 7, 8], [-1, -1, -1, 12], [-1, -1, -1, -1]])) b[il3] = -1 assert_array_equal(b, array([[-1, 2, 3, 4, 5], [-1, -1, 8, 9, 10], [-1, -1, -1, 14, 15], [-1, -1, -1, -1, 20]])) # These cover almost the whole array (two diagonals right of the main one): a[il2] = -10 assert_array_equal(a, array([[-10, -10, -10, 4], [-10, -10, -10, -10], [-10, -10, -10, -10], [-10, -10, -10, -10]])) b[il4] = -10 assert_array_equal(b, array([[-10, -10, -10, 4, 5], [-10, -10, -10, -10, 10], [-10, -10, -10, -10, -10], [-10, -10, -10, -10, -10]])) class TestTriuIndices(object): def test_triu_indices(self): iu1 = triu_indices(4) iu2 = triu_indices(4, k=2) iu3 = triu_indices(4, m=5) iu4 = triu_indices(4, k=2, m=5) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) b = np.arange(1, 21).reshape(4, 5) # Both for indexing: assert_array_equal(a[iu1], array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) assert_array_equal(b[iu3], array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20])) # And for assigning values: a[iu1] = -1 assert_array_equal(a, array([[-1, -1, -1, -1], [5, -1, -1, -1], [9, 10, -1, -1], [13, 14, 15, -1]])) b[iu3] = -1 assert_array_equal(b, array([[-1, -1, -1, -1, -1], [6, -1, -1, -1, -1], [11, 12, -1, -1, -1], [16, 17, 18, -1, -1]])) # These cover almost the whole array (two diagonals right of the # main one): a[iu2] = -10 assert_array_equal(a, array([[-1, -1, -10, -10], [5, -1, -1, -10], [9, 10, -1, -1], [13, 14, 15, -1]])) b[iu4] = -10 assert_array_equal(b, array([[-1, -1, -10, -10, -10], [6, -1, -1, -10, -10], [11, 12, -1, -1, -10], [16, 17, 18, -1, -1]])) class TestTrilIndicesFrom(object): def test_exceptions(self): assert_raises(ValueError, tril_indices_from, np.ones((2,))) assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) class TestTriuIndicesFrom(object): def test_exceptions(self): assert_raises(ValueError, triu_indices_from, np.ones((2,))) assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) class TestVander(object): def test_basic(self): c = np.array([0, 1, -2, 3]) v = vander(c) powers = np.array([[0, 0, 0, 0, 1], [1, 1, 1, 1, 1], [16, -8, 4, -2, 1], [81, 27, 9, 3, 1]]) # Check default value of N: yield (assert_array_equal, v, powers[:, 1:]) # Check a range of N values, including 0 and 5 (greater than default) m = powers.shape[1] for n in range(6): v = vander(c, N=n) yield (assert_array_equal, v, powers[:, m-n:m]) def test_dtypes(self): c = array([11, -12, 13], dtype=np.int8) v = vander(c) expected = np.array([[121, 11, 1], [144, -12, 1], [169, 13, 1]]) yield (assert_array_equal, v, expected) c = array([1.0+1j, 1.0-1j]) v = vander(c, N=3) expected = np.array([[2j, 1+1j, 1], [-2j, 1-1j, 1]]) # The data is floating point, but the values are small integers, # so assert_array_equal *should* be safe here (rather than, say, # 
assert_array_almost_equal). yield (assert_array_equal, v, expected) if __name__ == "__main__": run_module_suite()
17754
33.542802
79
py
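Editor's note (illustrative sketch, not dataset content): the twodim_base suite mostly checks diagonal offsets (the k argument) and the triangle index helpers. A minimal sketch of the behaviours asserted above:

import numpy as np

print(np.eye(3, 4, k=1))            # ones on the first superdiagonal of a 3x4 matrix
a = np.arange(1, 17).reshape(4, 4)
print(a[np.triu_indices(4)])        # [ 1  2  3  4  6  7  8 11 12 16]
print(np.tril(np.ones((2, 2))))     # keeps the lower triangle, zeros the rest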
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_ufunclike.py
from __future__ import division, absolute_import, print_function import numpy as np import numpy.core as nx import numpy.lib.ufunclike as ufl from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_warns ) class TestUfunclike(object): def test_isposinf(self): a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) out = nx.zeros(a.shape, bool) tgt = nx.array([True, False, False, False, False, False]) res = ufl.isposinf(a) assert_equal(res, tgt) res = ufl.isposinf(a, out) assert_equal(res, tgt) assert_equal(out, tgt) def test_isneginf(self): a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) out = nx.zeros(a.shape, bool) tgt = nx.array([False, True, False, False, False, False]) res = ufl.isneginf(a) assert_equal(res, tgt) res = ufl.isneginf(a, out) assert_equal(res, tgt) assert_equal(out, tgt) def test_fix(self): a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) out = nx.zeros(a.shape, float) tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) res = ufl.fix(a) assert_equal(res, tgt) res = ufl.fix(a, out) assert_equal(res, tgt) assert_equal(out, tgt) assert_equal(ufl.fix(3.14), 3) def test_fix_with_subclass(self): class MyArray(nx.ndarray): def __new__(cls, data, metadata=None): res = nx.array(data, copy=True).view(cls) res.metadata = metadata return res def __array_wrap__(self, obj, context=None): obj.metadata = self.metadata return obj a = nx.array([1.1, -1.1]) m = MyArray(a, metadata='foo') f = ufl.fix(m) assert_array_equal(f, nx.array([1, -1])) assert_(isinstance(f, MyArray)) assert_equal(f.metadata, 'foo') # check 0d arrays don't decay to scalars m0d = m[0,...] m0d.metadata = 'bar' f0d = ufl.fix(m0d) assert_(isinstance(f0d, MyArray)) assert_equal(f0d.metadata, 'bar') def test_deprecated(self): # NumPy 1.13.0, 2017-04-26 assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2)) assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2)) assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2)) def test_scalar(self): x = np.inf actual = np.isposinf(x) expected = np.True_ assert_equal(actual, expected) assert_equal(type(actual), type(expected)) x = -3.4 actual = np.fix(x) expected = np.float64(-3.0) assert_equal(actual, expected) assert_equal(type(actual), type(expected)) out = np.array(0.0) actual = np.fix(x, out=out) assert_(actual is out) if __name__ == "__main__": run_module_suite()
3018
30.123711
77
py
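Editor's note (illustrative sketch only): test_ufunclike covers a tiny API surface. np.fix truncates toward zero, while isposinf/isneginf split np.isinf by sign; the y= keyword exercised in test_deprecated was deprecated in NumPy 1.13 in favour of out=.

import numpy as np

print(np.fix(np.array([1.1, 1.8, -1.1, -1.8])))           # [ 1.  1. -1. -1.]
print(np.isposinf(np.array([np.inf, -np.inf, np.nan])))   # [ True False False]
print(np.isneginf(-np.inf))                               # True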
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_nanfunctions.py
from __future__ import division, absolute_import, print_function import warnings import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_almost_equal, assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings ) # Test data _ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) # Rows of _ndat with nans removed _rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), np.array([0.1042, -0.5954]), np.array([0.1610, 0.1859, 0.3146])] # Rows of _ndat with nans converted to ones _ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) # Rows of _ndat with nans converted to zeros _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) class TestNanFunctions_MinMax(object): nanfuncs = [np.nanmin, np.nanmax] stdfuncs = [np.min, np.max] def test_mutation(self): # Check that passed array is not modified. ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): for axis in [None, 0, 1]: tgt = rf(mat, axis=axis, keepdims=True) res = nf(mat, axis=axis, keepdims=True) assert_(res.ndim == tgt.ndim) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.zeros(3) tgt = rf(mat, axis=1) res = nf(mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_dtype_from_input(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: mat = np.eye(3, dtype=c) tgt = rf(mat, axis=1).dtype.type res = nf(mat, axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, axis=None).dtype.type res = nf(mat, axis=None).dtype.type assert_(res is tgt) def test_result_values(self): for nf, rf in zip(self.nanfuncs, self.stdfuncs): tgt = [rf(d) for d in _rdat] res = nf(_ndat, axis=1) assert_almost_equal(res, tgt) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) # Check scalars with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(np.nan))) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) def test_masked(self): mat = np.ma.fix_invalid(_ndat) msk = mat._mask.copy() for f in [np.nanmin]: res = f(mat, axis=1) tgt = f(_ndat, axis=1) assert_equal(res, tgt) assert_equal(mat._mask, msk) assert_(not np.isinf(mat).any()) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) # check that rows of nan are dealt with for subclasses (#4628) mat[1] = np.nan for f in self.nanfuncs: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(not np.any(np.isnan(res))) assert_(len(w) == 0) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) and not np.isnan(res[2, 0])) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(mat) assert_(np.isscalar(res)) assert_(res != np.nan) assert_(len(w) == 0) def test_object_array(self): arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) assert_equal(np.nanmin(arr), 1.0) assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') # assert_equal does not work on object arrays of nan assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) assert_(len(w) == 1, 'no warning raised') assert_(issubclass(w[0].category, RuntimeWarning)) class TestNanFunctions_ArgminArgmax(object): nanfuncs = [np.nanargmin, np.nanargmax] def test_mutation(self): # Check that passed array is not modified. ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in") ind = f(row) val = row[ind] # comparing with NaN is tricky as the result # is always false except for NaN != NaN assert_(not np.isnan(val)) assert_(not fcmp(val, row).any()) assert_(not np.equal(val, row[:ind]).any()) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: assert_raises(ValueError, f, mat, axis=axis) assert_raises(ValueError, f, np.nan) def test_empty(self): mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: assert_raises(ValueError, f, mat, axis=axis) for axis in [1]: res = f(mat, axis=axis) assert_equal(res, np.zeros(0)) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) class TestNanFunctions_IntTypes(object): int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64) mat = np.array([127, 39, 93, 87, 46]) def integer_arrays(self): for dtype in self.int_types: yield self.mat.astype(dtype) def test_nanmin(self): tgt = np.min(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmin(mat), tgt) def test_nanmax(self): tgt = np.max(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmax(mat), tgt) def test_nanargmin(self): tgt = np.argmin(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanargmin(mat), tgt) def test_nanargmax(self): tgt = np.argmax(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanargmax(mat), tgt) def test_nansum(self): tgt = np.sum(self.mat) for mat in self.integer_arrays(): assert_equal(np.nansum(mat), tgt) def test_nanprod(self): tgt = np.prod(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanprod(mat), tgt) def test_nancumsum(self): tgt = np.cumsum(self.mat) for mat in self.integer_arrays(): assert_equal(np.nancumsum(mat), tgt) def test_nancumprod(self): tgt = np.cumprod(self.mat) for mat in self.integer_arrays(): assert_equal(np.nancumprod(mat), tgt) def test_nanmean(self): tgt = np.mean(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanmean(mat), tgt) def test_nanvar(self): tgt = np.var(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanvar(mat), tgt) tgt = np.var(mat, ddof=1) for mat in self.integer_arrays(): assert_equal(np.nanvar(mat, ddof=1), tgt) def test_nanstd(self): tgt = np.std(self.mat) for mat in self.integer_arrays(): assert_equal(np.nanstd(mat), tgt) tgt = np.std(self.mat, ddof=1) for mat in self.integer_arrays(): assert_equal(np.nanstd(mat, ddof=1), tgt) class SharedNanFunctionsTestsMixin(object): def test_mutation(self): # Check that passed array is not modified. 
ndat = _ndat.copy() for f in self.nanfuncs: f(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): for axis in [None, 0, 1]: tgt = rf(mat, axis=axis, keepdims=True) res = nf(mat, axis=axis, keepdims=True) assert_(res.ndim == tgt.ndim) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.zeros(3) tgt = rf(mat, axis=1) res = nf(mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_dtype_from_dtype(self): mat = np.eye(3) codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: with suppress_warnings() as sup: if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 sup.filter(np.ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type assert_(res is tgt) def test_dtype_from_char(self): mat = np.eye(3) codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: with suppress_warnings() as sup: if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 sup.filter(np.ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) # scalar case tgt = rf(mat, dtype=c, axis=None).dtype.type res = nf(mat, dtype=c, axis=None).dtype.type assert_(res is tgt) def test_dtype_from_input(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: mat = np.eye(3, dtype=c) tgt = rf(mat, axis=1).dtype.type res = nf(mat, axis=1).dtype.type assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) # scalar case tgt = rf(mat, axis=None).dtype.type res = nf(mat, axis=None).dtype.type assert_(res is tgt) def test_result_values(self): for nf, rf in zip(self.nanfuncs, self.stdfuncs): tgt = [rf(d) for d in _rdat] res = nf(_ndat, axis=1) assert_almost_equal(res, tgt) def test_scalar(self): for f in self.nanfuncs: assert_(f(0.) == 0.) 
def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: res = f(mat, axis=0) assert_(isinstance(res, np.matrix)) assert_(res.shape == (1, 3)) res = f(mat, axis=1) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 1)) res = f(mat) assert_(np.isscalar(res)) class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nansum, np.nanprod] stdfuncs = [np.sum, np.prod] def test_allnans(self): # Check for FutureWarning with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = np.nansum([np.nan]*3, axis=None) assert_(res == 0, 'result is not 0') assert_(len(w) == 0, 'warning raised') # Check scalar res = np.nansum(np.nan) assert_(res == 0, 'result is not 0') assert_(len(w) == 0, 'warning raised') # Check there is no warning for not all-nan np.nansum([0]*3, axis=None) assert_(len(w) == 0, 'unwanted warning raised') def test_empty(self): for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): mat = np.zeros((0, 3)) tgt = [tgt_value]*3 res = f(mat, axis=0) assert_equal(res, tgt) tgt = [] res = f(mat, axis=1) assert_equal(res, tgt) tgt = tgt_value res = f(mat, axis=None) assert_equal(res, tgt) class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nancumsum, np.nancumprod] stdfuncs = [np.cumsum, np.cumprod] def test_allnans(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input with assert_no_warnings(): res = f([np.nan]*3, axis=None) tgt = tgt_value*np.ones((3)) assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) # Check scalar res = f(np.nan) tgt = tgt_value*np.ones((1)) assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) # Check there is no warning for not all-nan f([0]*3, axis=None) def test_empty(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): mat = np.zeros((0, 3)) tgt = tgt_value*np.ones((0, 3)) res = f(mat, axis=0) assert_equal(res, tgt) tgt = mat res = f(mat, axis=1) assert_equal(res, tgt) tgt = np.zeros((0)) res = f(mat, axis=None) assert_equal(res, tgt) def test_keepdims(self): for f, g in zip(self.nanfuncs, self.stdfuncs): mat = np.eye(3) for axis in [None, 0, 1]: tgt = f(mat, axis=axis, out=None) res = g(mat, axis=axis, out=None) assert_(res.ndim == tgt.ndim) for f in self.nanfuncs: d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: rs = np.random.RandomState(0) d[rs.rand(*d.shape) < 0.5] = np.nan res = f(d, axis=None) assert_equal(res.shape, (1155,)) for axis in np.arange(4): res = f(d, axis=axis) assert_equal(res.shape, (3, 5, 7, 11)) def test_matrices(self): # Check that it works and that type and # shape are preserved mat = np.matrix(np.eye(3)) for f in self.nanfuncs: for axis in np.arange(2): res = f(mat, axis=axis) assert_(isinstance(res, np.matrix)) assert_(res.shape == (3, 3)) res = f(mat) assert_(res.shape == (1, 3*3)) def test_result_values(self): for axis in (-2, -1, 0, 1, None): tgt = np.cumprod(_ndat_ones, axis=axis) res = np.nancumprod(_ndat, axis=axis) assert_almost_equal(res, tgt) tgt = np.cumsum(_ndat_zeros,axis=axis) res = np.nancumsum(_ndat, axis=axis) assert_almost_equal(res, tgt) def test_out(self): mat = np.eye(3) for nf, rf in zip(self.nanfuncs, self.stdfuncs): resout = np.eye(3) for axis in (-2, -1, 0, 1): tgt = rf(mat, axis=axis) res = nf(mat, axis=axis, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) class 
TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nanmean, np.nanvar, np.nanstd] stdfuncs = [np.mean, np.var, np.std] def test_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool_, np.int_, np.object_]: assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) def test_out_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool_, np.int_, np.object_]: out = np.empty(_ndat.shape[0], dtype=dtype) assert_raises(TypeError, f, _ndat, axis=1, out=out) def test_ddof(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in [0, 1]: tgt = [rf(d, ddof=ddof) for d in _rdat] res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) def test_ddof_too_big(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): with suppress_warnings() as sup: sup.record(RuntimeWarning) sup.filter(np.ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) if any(tgt): assert_(len(sup.log) == 1) else: assert_(len(sup.log) == 0) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for f in self.nanfuncs: for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) # Check scalar assert_(np.isnan(f(np.nan))) assert_(len(w) == 2) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) class TestNanFunctions_Median(object): def test_mutation(self): # Check that passed array is not modified. 
ndat = _ndat.copy() np.nanmedian(ndat) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for axis in [None, 0, 1]: tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) assert_(res.ndim == tgt.ndim) d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan with suppress_warnings() as sup: sup.filter(RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) assert_equal(res.shape, (1, 1, 7, 11)) res = np.nanmedian(d, axis=(0, 3), keepdims=True) assert_equal(res.shape, (1, 5, 7, 1)) res = np.nanmedian(d, axis=(1,), keepdims=True) assert_equal(res.shape, (3, 1, 7, 11)) res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) assert_equal(res.shape, (1, 1, 7, 1)) def test_out(self): mat = np.random.rand(3, 3) nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) resout = np.zeros(3) tgt = np.median(mat, axis=1) res = np.nanmedian(nan_mat, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) # 0-d output: resout = np.zeros(()) tgt = np.median(mat, axis=None) res = np.nanmedian(nan_mat, axis=None, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_small_large(self): # test the small and large code paths, current cutoff 400 elements for s in [5, 20, 51, 200, 1000]: d = np.random.randn(4, s) # Randomly set some elements to NaN: w = np.random.randint(0, d.size, size=d.size // 5) d.ravel()[w] = np.nan d[:,0] = 1. # ensure at least one good value # use normal median without nans to compare tgt = [] for x in d: nonan = np.compress(~np.isnan(x), x) tgt.append(np.median(nonan, overwrite_input=True)) assert_array_equal(np.nanmedian(d, axis=-1), tgt) def test_result_values(self): tgt = [np.median(d) for d in _rdat] res = np.nanmedian(_ndat, axis=1) assert_almost_equal(res, tgt) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for axis in [None, 0, 1]: with suppress_warnings() as sup: sup.record(RuntimeWarning) assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) if axis is None: assert_(len(sup.log) == 1) else: assert_(len(sup.log) == 3) # Check scalar assert_(np.isnan(np.nanmedian(np.nan))) if axis is None: assert_(len(sup.log) == 2) else: assert_(len(sup.log) == 4) def test_empty(self): mat = np.zeros((0, 3)) for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) def test_scalar(self): assert_(np.nanmedian(0.) == 0.) 
def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(np.AxisError, np.nanmedian, d, axis=-5) assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) assert_raises(np.AxisError, np.nanmedian, d, axis=4) assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): with suppress_warnings() as sup: sup.filter(RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) assert_equal(np.nanmedian(a), inf) # minimum fill value check a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) assert_equal(np.nanmedian(a), inf) assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) assert_equal(np.nanmedian(a, axis=1), inf) # no mask path a = np.array([[inf, inf], [inf, inf]]) assert_equal(np.nanmedian(a, axis=1), inf) a = np.array([[inf, 7, -inf, -9], [-10, np.nan, np.nan, 5], [4, np.nan, np.nan, inf]], dtype=np.float32) if inf > 0: assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) assert_equal(np.nanmedian(a), 4.5) else: assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) assert_equal(np.nanmedian(a), -2.5) assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) for i in range(0, 10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) assert_equal(np.nanmedian(a), inf) assert_equal(np.nanmedian(a, axis=1), inf) assert_equal(np.nanmedian(a, axis=0), ([np.nan] * i) + [inf] * j) a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) assert_equal(np.nanmedian(a), -inf) assert_equal(np.nanmedian(a, axis=1), -inf) assert_equal(np.nanmedian(a, axis=0), ([np.nan] * i) + [-inf] * j) class TestNanFunctions_Percentile(object): def test_mutation(self): # Check that passed array is not modified. 
ndat = _ndat.copy() np.nanpercentile(ndat, 30) assert_equal(ndat, _ndat) def test_keepdims(self): mat = np.eye(3) for axis in [None, 0, 1]: tgt = np.percentile(mat, 70, axis=axis, out=None, overwrite_input=False) res = np.nanpercentile(mat, 70, axis=axis, out=None, overwrite_input=False) assert_(res.ndim == tgt.ndim) d = np.ones((3, 5, 7, 11)) # Randomly set some elements to NaN: w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan with suppress_warnings() as sup: sup.filter(RuntimeWarning) res = np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) assert_equal(res.shape, (1, 1, 7, 11)) res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) assert_equal(res.shape, (1, 5, 7, 1)) res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) assert_equal(res.shape, (3, 1, 7, 11)) res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) assert_equal(res.shape, (1, 1, 7, 1)) def test_out(self): mat = np.random.rand(3, 3) nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) resout = np.zeros(3) tgt = np.percentile(mat, 42, axis=1) res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) # 0-d output: resout = np.zeros(()) tgt = np.percentile(mat, 42, axis=None) res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) assert_almost_equal(res, resout) assert_almost_equal(res, tgt) def test_result_values(self): tgt = [np.percentile(d, 28) for d in _rdat] res = np.nanpercentile(_ndat, 28, axis=1) assert_almost_equal(res, tgt) # Transpose the array to fit the output convention of numpy.percentile tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) res = np.nanpercentile(_ndat, (28, 98), axis=1) assert_almost_equal(res, tgt) def test_allnans(self): mat = np.array([np.nan]*9).reshape(3, 3) for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) if axis is None: assert_(len(w) == 1) else: assert_(len(w) == 3) assert_(issubclass(w[0].category, RuntimeWarning)) # Check scalar assert_(np.isnan(np.nanpercentile(np.nan, 60))) if axis is None: assert_(len(w) == 2) else: assert_(len(w) == 4) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): mat = np.zeros((0, 3)) for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) assert_(len(w) == 0) def test_scalar(self): assert_equal(np.nanpercentile(0., 100), 0.) 
a = np.arange(6) r = np.nanpercentile(a, 50, axis=0) assert_equal(r, 2.5) assert_(np.isscalar(r)) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5) assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4) assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) def test_multiple_percentiles(self): perc = [50, 100] mat = np.ones((4, 3)) nan_mat = np.nan * mat # For checking consistency in higher dimensional case large_mat = np.ones((3, 4, 5)) large_mat[:, 0:2:4, :] = 0 large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "All-NaN slice encountered") val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) assert_equal(nan_val.shape, val.shape) val = np.percentile(large_mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(large_mat, perc, axis=axis, keepdims=keepdim) assert_equal(nan_val, val) megamat = np.ones((3, 4, 5, 6)) assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)) if __name__ == "__main__": run_module_suite()
34835
38.010078
98
py
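Editor's note (illustrative sketch only): the nanfunctions tests encode one contract. NaNs are skipped; an all-NaN slice yields NaN plus a RuntimeWarning for the min/max/mean/var-style reductions, but 0 (or 1 for nanprod) with no warning for the sum/product family.

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [np.nan, np.nan, 6.0]])
print(np.nanmean(a, axis=1))        # [2. 6.]
print(np.nansum([np.nan, np.nan]))  # 0.0, silently
print(np.nanmax([np.nan, np.nan]))  # nan, plus a RuntimeWarning on stderr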
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_index_tricks.py
from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_raises_regex ) from numpy.lib.index_tricks import ( mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, index_exp, ndindex, r_, s_, ix_ ) class TestRavelUnravelIndex(object): def test_basic(self): assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) assert_raises(ValueError, np.unravel_index, -1, (2, 2)) assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) assert_raises(ValueError, np.unravel_index, 4, (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) assert_equal( np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) arr = np.array([[3, 6, 6], [4, 5, 1]]) assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) assert_equal( np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) assert_equal( np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), [12, 13, 13]) assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), [[3, 6, 6], [4, 5, 1]]) assert_equal( np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), [[3, 6, 6], [4, 5, 1]]) assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) def test_big_indices(self): # ravel_multi_index for big indices (issue #7546) if np.intp == np.int64: arr = ([1, 29], [3, 5], [3, 117], [19, 2], [2379, 1284], [2, 2], [0, 1]) assert_equal( np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), [5627771580, 117259570957]) # test overflow checking for too big array (issue #7546) dummy_arr = ([0],[0]) half_max = np.iinfo(np.intp).max // 2 assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) assert_raises(ValueError, np.ravel_multi_index, dummy_arr, (half_max+1, 2)) assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) assert_raises(ValueError, np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') def test_dtypes(self): # Test with different data types for dtype in [np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64]: coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) shape = (5, 8) uncoords = 8*coords[0]+coords[1] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*coords[1] assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], dtype=dtype) shape = (5, 8, 10) uncoords = 10*(8*coords[0]+coords[1])+coords[2] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*(coords[1]+8*coords[2]) assert_equal( 
np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) def test_clipmodes(self): # Test clipmodes assert_equal( np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode=( 'wrap', 'raise', 'clip', 'raise')), np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) assert_raises( ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) def test_writeability(self): # See gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) assert_(x.flags.writeable) assert_(y.flags.writeable) def test_0d(self): # gh-580 x = np.unravel_index(0, ()) assert_equal(x, ()) assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) assert_raises_regex( ValueError, "out of bounds", np.unravel_index, [1], ()) class TestGrid(object): def test_basic(self): a = mgrid[-1:1:10j] b = mgrid[-1:1:0.1] assert_(a.shape == (10,)) assert_(b.shape == (20,)) assert_(a[0] == -1) assert_almost_equal(a[-1], 1) assert_(b[0] == -1) assert_almost_equal(b[1]-b[0], 0.1, 11) assert_almost_equal(b[-1], b[0]+19*0.1, 11) assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) def test_linspace_equivalence(self): y, st = np.linspace(2, 10, retstep=1) assert_almost_equal(st, 8/49.0) assert_array_almost_equal(y, mgrid[2:10:50j], 13) def test_nd(self): c = mgrid[-1:1:10j, -2:2:10j] d = mgrid[-1:1:0.1, -2:2:0.2] assert_(c.shape == (2, 10, 10)) assert_(d.shape == (2, 20, 20)) assert_array_equal(c[0][0, :], -np.ones(10, 'd')) assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], 0.1*np.ones(20, 'd'), 11) assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], 0.2*np.ones(20, 'd'), 11) class TestConcatenator(object): def test_1d(self): assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) b = np.ones(5) c = r_[b, 0, 0, b] assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) def test_mixed_type(self): g = r_[10.1, 1:10] assert_(g.dtype == 'f8') def test_more_mixed_type(self): g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] assert_(g.dtype == 'f8') def test_2d(self): b = np.random.rand(5, 5) c = np.random.rand(5, 5) d = r_['1', b, c] # append columns assert_(d.shape == (5, 10)) assert_array_equal(d[:, :5], b) assert_array_equal(d[:, 5:], c) d = r_[b, c] assert_(d.shape == (10, 5)) assert_array_equal(d[:5, :], b) assert_array_equal(d[5:, :], c) def test_matrix(self): a = [1, 2] b = [3, 4] ab_r = np.r_['r', a, b] ab_c = np.r_['c', a, b] assert_equal(type(ab_r), np.matrix) assert_equal(type(ab_c), np.matrix) assert_equal(np.array(ab_r), [[1,2,3,4]]) assert_equal(np.array(ab_c), [[1],[2],[3],[4]]) assert_raises(ValueError, lambda: np.r_['rc', a, b]) def test_matrix_scalar(self): r = np.r_['r', [1, 2], 3] assert_equal(type(r), np.matrix) assert_equal(np.array(r), [[1,2,3]]) def test_matrix_builder(self): a = np.array([1]) b = np.array([2]) c = np.array([3]) d = np.array([4]) actual = np.r_['a, b; c, d'] expected = np.bmat([[a, b], [c, d]]) assert_equal(actual, expected) assert_equal(type(actual), type(expected)) class TestNdenumerate(object): def test_basic(self): a = np.array([[1, 2], [3, 4]]) assert_equal(list(ndenumerate(a)), [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) class TestIndexExpression(object): def test_regression_1(self): # ticket #1196 a = 
np.arange(2) assert_equal(a[:-1], a[s_[:-1]]) assert_equal(a[:-1], a[index_exp[:-1]]) def test_simple_1(self): a = np.random.rand(4, 5, 6) assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) class TestIx_(object): def test_regression_1(self): # Test empty inputs create ouputs of indexing type, gh-5804 # Test both lists and arrays for func in (range, np.arange): a, = np.ix_(func(0)) assert_equal(a.dtype, np.intp) def test_shape_and_dtype(self): sizes = (4, 5, 3, 2) # Test both lists and arrays for func in (range, np.arange): arrays = np.ix_(*[func(sz) for sz in sizes]) for k, (a, sz) in enumerate(zip(arrays, sizes)): assert_equal(a.shape[k], sz) assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) assert_(np.issubdtype(a.dtype, np.integer)) def test_bool(self): bool_a = [True, False, True, True] int_a, = np.nonzero(bool_a) assert_equal(np.ix_(bool_a)[0], int_a) def test_1d_only(self): idx2d = [[1, 2, 3], [4, 5, 6]] assert_raises(ValueError, np.ix_, idx2d) def test_repeated_input(self): length_of_vector = 5 x = np.arange(length_of_vector) out = ix_(x, x) assert_equal(out[0].shape, (length_of_vector, 1)) assert_equal(out[1].shape, (1, length_of_vector)) # check that input shape is not modified assert_equal(x.shape, (length_of_vector,)) def test_c_(): a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) def test_fill_diagonal(): a = np.zeros((3, 3), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5]])) #Test tall matrix a = np.zeros((10, 3), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]])) #Test tall matrix wrap a = np.zeros((10, 3), int) fill_diagonal(a, 5, True) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0]])) #Test wide matrix a = np.zeros((3, 10), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])) # The same function can operate on a 4-d array: a = np.zeros((3, 3, 3, 3), int) fill_diagonal(a, 4) i = np.array([0, 1, 2]) yield (assert_equal, np.where(a != 0), (i, i, i, i)) def test_diag_indices(): di = diag_indices(4) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) a[di] = 100 yield (assert_array_equal, a, np.array([[100, 2, 3, 4], [5, 100, 7, 8], [9, 10, 100, 12], [13, 14, 15, 100]])) # Now, we create indices to manipulate a 3-d array: d3 = diag_indices(2, 3) # And use it to set the diagonal of a zeros array to 1: a = np.zeros((2, 2, 2), int) a[d3] = 1 yield (assert_array_equal, a, np.array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]])) def test_diag_indices_from(): x = np.random.random((4, 4)) r, c = diag_indices_from(x) assert_array_equal(r, np.arange(4)) assert_array_equal(c, np.arange(4)) def test_ndindex(): x = list(ndindex(1, 2, 3)) expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] assert_array_equal(x, expected) x = list(ndindex((1, 2, 3))) assert_array_equal(x, expected) # Test use of scalars and tuples x = list(ndindex((3,))) assert_array_equal(x, list(ndindex(3))) # Make sure size argument is optional x = list(ndindex()) assert_equal(x, [()]) x = list(ndindex(())) assert_equal(x, [()]) # Make sure 0-sized ndindex works correctly x = 
list(ndindex(*[0])) assert_equal(x, []) if __name__ == "__main__": run_module_suite()
13,689
33.570707
81
py
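The index-tricks test file above pins down the behaviour of NumPy's indexing helpers. As a quick orientation, here is a compact standalone sketch of those same behaviours; it is illustrative only, assumes nothing beyond the public NumPy API (np.ravel_multi_index, np.unravel_index, np.ix_, np.fill_diagonal), and takes its values from the assertions above.

import numpy as np

# Flat index <-> multi-index round trip (TestRavelUnravelIndex).
shape = (4, 3, 7, 12)
flat = np.ravel_multi_index((1, 1, 6, 2), shape)
assert np.unravel_index(flat, shape) == (1, 1, 6, 2)

# mode='wrap' folds out-of-range coordinates back into range (test_clipmodes).
assert np.ravel_multi_index((5, 1, -1, 2), shape, mode='wrap') == flat

# np.ix_ builds an open mesh: shapes (n, 1) and (1, m) that broadcast together.
rows, cols = np.ix_([0, 2], [1, 3])
assert rows.shape == (2, 1) and cols.shape == (1, 2)

# np.fill_diagonal writes in place; wrap=True resumes the diagonal on a tall
# matrix after a gap row, matching the expected array in test_fill_diagonal.
a = np.zeros((10, 3), int)
np.fill_diagonal(a, 5, wrap=True)
assert a[4, 0] == 5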
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_recfunctions.py
from __future__ import division, absolute_import, print_function import numpy as np import numpy.ma as ma from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal from numpy.testing import ( run_module_suite, assert_, assert_raises, dec ) from numpy.lib.recfunctions import ( drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, repack_fields) get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat zip_descr = np.lib.recfunctions.zip_descr class TestRecFunctions(object): # Misc tests def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_zip_descr(self): # Test zip_descr (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) assert_equal(test, np.dtype([('', int), ('', int)])) test = zip_descr((x, x), flatten=False) assert_equal(test, np.dtype([('', int), ('', int)])) # Std & flexible-dtype test = zip_descr((x, z), flatten=True) assert_equal(test, np.dtype([('', int), ('A', '|S3'), ('B', float)])) test = zip_descr((x, z), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('A', '|S3'), ('B', float)])])) # Standard & nested dtype test = zip_descr((x, w), flatten=True) assert_equal(test, np.dtype([('', int), ('a', int), ('ba', float), ('bb', int)])) test = zip_descr((x, w), flatten=False) assert_equal(test, np.dtype([('', int), ('', [('a', int), ('b', [('ba', float), ('bb', int)])])])) def test_drop_fields(self): # Test drop_fields a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) # A basic field test = drop_fields(a, 'a') control = np.array([((2, 3.0),), ((5, 6.0),)], dtype=[('b', [('ba', float), ('bb', int)])]) assert_equal(test, control) # Another basic field (but nesting two fields) test = drop_fields(a, 'b') control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) # A nested sub-field test = drop_fields(a, ['ba', ]) control = np.array([(1, (3.0,)), (4, (6.0,))], dtype=[('a', int), ('b', [('bb', int)])]) assert_equal(test, control) # All the nested sub-field from a field: zap that field test = drop_fields(a, ['ba', 'bb']) control = np.array([(1,), (4,)], dtype=[('a', int)]) assert_equal(test, control) test = drop_fields(a, ['a', 'b']) assert_(test is None) def test_rename_fields(self): # Test rename fields a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], dtype=[('a', int), ('b', [('ba', float), ('bb', (float, 2))])]) test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] control = a.view(newdtype) assert_equal(test.dtype, newdtype) assert_equal(test, control) def test_get_names(self): # Test get_names ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names(ndtype) assert_equal(test, ('a', ('b', ('ba', 'bb')))) def test_get_names_flat(self): # Test get_names_flat ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_names_flat(ndtype) assert_equal(test, ('A', 'B')) ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) test = get_names_flat(ndtype) assert_equal(test, ('a', 'b', 'ba', 'bb')) def 
test_get_fieldstructure(self): # Test get_fieldstructure # No nested fields ndtype = np.dtype([('A', '|S3'), ('B', float)]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': []}) # One 1-nested field ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = get_fieldstructure(ndtype) assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) # One 2-nested fields ndtype = np.dtype([('A', int), ('B', [('BA', int), ('BB', [('BBA', int), ('BBB', int)])])]) test = get_fieldstructure(ndtype) control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} assert_equal(test, control) def test_find_duplicates(self): # Test find_duplicates a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 2] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='A', return_index=True) control = [0, 1, 2, 3, 5] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='B', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BA', return_index=True) control = [0, 1, 2, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, key='BB', return_index=True) control = [0, 1, 2, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) def test_find_duplicates_ignoremask(self): # Test the ignoremask option of find_duplicates ndtype = [('a', int)] a = ma.array([1, 1, 1, 2, 2, 3, 3], mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) test = find_duplicates(a, ignoremask=True, return_index=True) control = [0, 1, 3, 4] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) test = find_duplicates(a, ignoremask=False, return_index=True) control = [0, 1, 2, 3, 4, 6] assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) def test_repack_fields(self): dt = np.dtype('u1,f4,i8', align=True) a = np.zeros(2, dtype=dt) assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) assert_equal(repack_fields(a).itemsize, 13) assert_equal(repack_fields(repack_fields(dt), align=True), dt) # make sure type is preserved dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) class TestRecursiveFillFields(object): # Test recursive_fill_fields. 
def test_simple_flexible(self): # Test recursive_fill_fields on flexible-array a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) b = np.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = np.array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) def test_masked_flexible(self): # Test recursive_fill_fields on masked flexible-array a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], dtype=[('A', int), ('B', float)]) b = ma.zeros((3,), dtype=a.dtype) test = recursive_fill_fields(a, b) control = ma.array([(1, 10.), (2, 20.), (0, 0.)], mask=[(0, 1), (1, 0), (0, 0)], dtype=[('A', int), ('B', float)]) assert_equal(test, control) class TestMergeArrays(object): # Test merge_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array( [(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test merge_arrays on a single array. (_, x, _, z) = self.data test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) assert_equal(test, control) test = merge_arrays((x,)) assert_equal(test, control) test = merge_arrays(z, flatten=False) assert_equal(test, z) test = merge_arrays(z, flatten=True) assert_equal(test, z) def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening w = self.data[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) test = merge_arrays(w, flatten=True) control = np.array([(1, 2, 3.0), (4, 5, 6.0)], dtype=[('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) def test_standard(self): # Test standard & standard # Test merge arrays (_, x, y, _) = self.data test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, y), usemask=True) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_flatten(self): # Test standard & flexible (_, x, _, z) = self.data test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) test = merge_arrays((x, z), flatten=False) control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], dtype=[('f0', int), ('f1', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) def test_flatten_wflexible(self): # Test flatten standard & nested (w, x, _, _) = self.data test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), ('a', int), ('ba', float), ('bb', int)]) assert_equal(test, control) test = merge_arrays((x, w), flatten=False) controldtype = [('f0', int), ('f1', [('a', int), ('b', [('ba', float), ('bb', int)])])] control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], dtype=controldtype) assert_equal(test, control) def test_wmasked_arrays(self): # Test merge_arrays masked arrays (_, x, _, _) = self.data mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], mask=[(0, 1), (0, 0), (1, 0)], dtype=[('f0', int), ('f1', int)]) assert_equal(test, control) test = merge_arrays((x, mx), usemask=True, asrecarray=True) assert_equal(test, control) 
assert_(isinstance(test, MaskedRecords)) def test_w_singlefield(self): # Test single field test = merge_arrays((np.array([1, 2]).view([('a', int)]), np.array([10., 20., 30.])),) control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('a', int), ('f1', float)]) assert_equal(test, control) def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. z = self.data[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], # dtype=[('A', '|S3'), ('B', float), ('C', int)]) #assert_equal(test, control) # Hack to avoid pyflakes warnings about unused variables merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): (_, x, y, z) = self.data test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), ('f1', int), ('f2', [('A', '|S3'), ('B', float)])]) assert_equal(test, control) class TestAppendFields(object): # Test append_fields def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_append_single(self): # Test simple case (_, x, _, _) = self.data test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], dtype=[('f0', int), ('A', int)],) assert_equal(test, control) def test_append_double(self): # Test simple case (_, x, _, _) = self.data test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], dtype=[('f0', int), ('A', int), ('B', int)],) assert_equal(test, control) def test_append_on_flex(self): # Test append_fields on flexible type arrays z = self.data[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('C', int)],) assert_equal(test, control) def test_append_on_nested(self): # Test append_fields on nested fields w = self.data[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), (-1, (-1, -1.), 30)], mask=[( 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], dtype=[('a', int), ('b', [('ba', float), ('bb', int)]), ('C', int)],) assert_equal(test, control) class TestStackArrays(object): # Test stack_arrays def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) self.data = (w, x, y, z) def test_solo(self): # Test stack_arrays on single arrays (_, x, _, _) = self.data test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) test = stack_arrays(x) assert_equal(test, x) assert_(test is x) def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields (_, x, y, _) = self.data test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) assert_equal(test, control) test = stack_arrays((x, y), usemask=False) control = 
np.array([1, 2, 10, 20, 30]) assert_equal(test, control) test = stack_arrays((y, x), usemask=False) control = np.array([10, 20, 30, 1, 2]) assert_equal(test, control) def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields (_, x, _, z) = self.data test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), (-1, 'A', 1), (-1, 'B', 2)], mask=[(0, 1, 1), (0, 1, 1), (1, 0, 0), (1, 0, 0)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, z, x)) control = ma.array([('A', 1, -1), ('B', 2, -1), ('A', 1, -1), ('B', 2, -1), (-1, -1, 1), (-1, -1, 2), ], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)], dtype=[('A', '|S3'), ('B', float), ('f2', int)]) assert_equal(test, control) def test_matching_named_fields(self): # Test combination of arrays w/ matching field names (_, x, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) control = ma.array([('A', 1, -1), ('B', 2, -1), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) test = stack_arrays((z, zz, x)) ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), ('a', 10., 100., -1), ('b', 20., 200., -1), ('c', 30., 300., -1), (-1, -1, -1, 1), (-1, -1, -1, 2)], dtype=ndtype, mask=[(0, 0, 1, 1), (0, 0, 1, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (1, 1, 1, 0), (1, 1, 1, 0)]) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. 
(_, _, _, z) = self.data zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} test = stack_arrays((z, zz), defaults=defaults) control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), ( 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)], mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_autoconversion(self): # Tests autoconversion adtype = [('A', int), ('B', bool), ('C', float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [('A', int), ('B', float), ('C', float)] b = ma.array([(4, 5, 6)], dtype=bdtype) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) test = stack_arrays((a, b), autoconvert=True) assert_equal(test, control) assert_equal(test.mask, control.mask) try: test = stack_arrays((a, b), autoconvert=False) except TypeError: pass else: raise AssertionError def test_checktitles(self): # Test using titles in the field names adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] b = ma.array([(4, 5, 6)], dtype=bdtype) test = stack_arrays((a, b)) control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], dtype=bdtype) assert_equal(test, control) assert_equal(test.mask, control.mask) def test_subdtype(self): z = np.array([ ('A', 1), ('B', 2) ], dtype=[('A', '|S3'), ('B', float, (1,))]) zz = np.array([ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) res = stack_arrays((z, zz)) expected = ma.array( data=[ (b'A', [1.0], 0), (b'B', [2.0], 0), (b'a', [10.0], 100.0), (b'b', [20.0], 200.0), (b'c', [30.0], 300.0)], mask=[ (False, [False], True), (False, [False], True), (False, [False], False), (False, [False], False), (False, [False], False) ], dtype=zz.dtype ) assert_equal(res.dtype, expected.dtype) assert_equal(res, expected) assert_equal(res.mask, expected.mask) class TestJoinBy(object): def setup(self): self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) def test_inner_join(self): # Basic test of join_by a, b = self.a, self.b test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), (9, 59, 69, 109, 104)], dtype=[('a', int), ('b1', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_join(self): a, b = self.a, self.b # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), # (7, 57, 107, 102), (8, 58, 108, 103), # (9, 59, 109, 104)], # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) # Hack to avoid pyflakes unused variable warnings join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), (9, 59, 109, 104)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) def test_join_subdtype(self): # tests the bug in https://stackoverflow.com/q/44769632/102441 from numpy.lib import recfunctions as rfn foo = np.array([(1,)], dtype=[('key', int)]) bar = np.array([(1, np.array([1,2,3]))], dtype=[('key', int), ('value', 'uint16', 3)]) res = join_by('key', foo, bar) assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (5, 65, -1, 100), (6, 56, 106, -1), (6, 66, -1, 101), (7, 57, 107, -1), (7, 67, -1, 102), (8, 58, 108, -1), (8, 68, -1, 103), (9, 59, 109, -1), (9, 69, -1, 104), (10, 70, -1, 105), (11, 71, -1, 106), (12, 72, -1, 107), (13, 73, -1, 108), (14, 74, -1, 109)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_leftouter_join(self): a, b = self.a, self.b test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (6, 56, 106, -1), (7, 57, 107, -1), (8, 58, 108, -1), (9, 59, 109, -1)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_different_field_order(self): # gh-8940 a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), 
('b', 'f4'), ('a', 'i4')]) # this should not give a FutureWarning: j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) def test_duplicate_keys(self): a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) @dec.knownfailureif(True) def test_same_name_different_dtypes_key(self): a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) expected_dtype = np.dtype([ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_same_name_different_dtypes(self): # gh-9338 a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')]) expected_dtype = np.dtype([ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_subarray_key(self): a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')]) a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype) b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')]) b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype) expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')]) expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype) res = join_by('pos', a, b) assert_equal(res.dtype, expected_dtype) assert_equal(res, expected) def test_padded_dtype(self): dt = np.dtype('i1,f4', align=True) dt.names = ('k', 'v') assert_equal(len(dt.descr), 3) # padding field is inserted a = np.array([(1, 3), (3, 2)], dt) b = np.array([(1, 1), (2, 2)], dt) res = join_by('k', a, b) # no padding fields remain expected_dtype = np.dtype([ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4') ]) assert_equal(res.dtype, expected_dtype) class TestJoinBy2(object): @classmethod def setup(cls): cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) def test_no_r1postfix(self): # Basic test of join_by no_r1postfix a, b = self.a, self.b test = join_by( 'a', a, b, r1postfix='', r2postfix='2', jointype='inner') control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], dtype=[('a', int), ('b', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_no_postfix(self): assert_raises(ValueError, join_by, 'a', self.a, self.b, r1postfix='', r2postfix='') def test_no_r2postfix(self): # Basic test of join_by no_r2postfix a, b = self.a, self.b test = join_by( 'a', a, b, r1postfix='1', r2postfix='', jointype='inner') control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], dtype=[('a', int), ('b1', int), ('b', int), ('c', 
int), ('d', int)]) assert_equal(test, control) def test_two_keys_two_vars(self): a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), np.arange(50, 60), np.arange(10, 20))), dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), np.arange(65, 75), np.arange(0, 10))), dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1), (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3), (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5), (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7), (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)], dtype=[('k', int), ('a', int), ('b1', int), ('b2', int), ('c1', int), ('c2', int)]) test = join_by( ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') assert_equal(test.dtype, control.dtype) assert_equal(test, control) class TestAppendFieldsObj(object): """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 def setup(self): from datetime import date self.data = dict(obj=date(2000, 1, 1)) def test_append_to_objects(self): "Test append_fields when the base array contains objects" obj = self.data['obj'] x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) test = append_fields(x, 'C', data=y, usemask=False) control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)], dtype=[('A', object), ('B', float), ('C', int)]) assert_equal(test, control) if __name__ == '__main__': run_module_suite()
35,208
40.569067
85
py
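Likewise, before the next code field, a short sketch of the numpy.lib.recfunctions calls that test_recfunctions.py above revolves around. The arrays and field names here are invented for illustration; only the public merge_arrays, stack_arrays, and join_by APIs are assumed.

import numpy as np
from numpy.lib import recfunctions as rfn

x = np.array([(1, 10.0), (2, 20.0)], dtype=[('a', int), ('b', float)])
y = np.array([(1, b'A'), (3, b'B')], dtype=[('a', int), ('c', 'S1')])

# merge_arrays zips its inputs field-by-field into one structured array;
# unnamed inputs receive auto-generated names f0, f1, ...
merged = rfn.merge_arrays((x['a'], x['b']), usemask=False)
assert merged.dtype.names == ('f0', 'f1')

# stack_arrays concatenates records, masking fields absent from an input.
stacked = rfn.stack_arrays((x, y), usemask=True)
assert stacked.shape == (4,) and stacked.dtype.names == ('a', 'b', 'c')

# join_by performs an SQL-style join on a key field; jointype='inner'
# keeps only keys present in both inputs (here just a == 1).
joined = rfn.join_by('a', x, y, jointype='inner')
assert joined['a'].tolist() == [1]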
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_function_base.py
from __future__ import division, absolute_import, print_function import operator import warnings import sys import decimal import numpy as np from numpy import ma from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, dec, suppress_warnings, HAS_REFCOUNT, ) import numpy.lib.function_base as nfb from numpy.random import rand from numpy.lib import ( add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov, delete, diff, digitize, extract, flipud, gradient, hamming, hanning, histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros, unwrap, unique, vectorize ) from numpy.compat import long def get_mat(n): data = np.arange(n) data = np.add.outer(data, data) return data class TestRot90(object): def test_basic(self): assert_raises(ValueError, rot90, np.ones(4)) assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1)) assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1)) a = [[0, 1, 2], [3, 4, 5]] b1 = [[2, 5], [1, 4], [0, 3]] b2 = [[5, 4, 3], [2, 1, 0]] b3 = [[3, 0], [4, 1], [5, 2]] b4 = [[0, 1, 2], [3, 4, 5]] for k in range(-3, 13, 4): assert_equal(rot90(a, k=k), b1) for k in range(-2, 13, 4): assert_equal(rot90(a, k=k), b2) for k in range(-1, 13, 4): assert_equal(rot90(a, k=k), b3) for k in range(0, 13, 4): assert_equal(rot90(a, k=k), b4) assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a) assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1))) def test_axes(self): a = np.ones((50, 40, 3)) assert_equal(rot90(a).shape, (40, 50, 3)) assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1))) assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1))) def test_rotation_axes(self): a = np.arange(8).reshape((2,2,2)) a_rot90_01 = [[[2, 3], [6, 7]], [[0, 1], [4, 5]]] a_rot90_12 = [[[1, 3], [0, 2]], [[5, 7], [4, 6]]] a_rot90_20 = [[[4, 0], [6, 2]], [[5, 1], [7, 3]]] a_rot90_10 = [[[4, 5], [0, 1]], [[6, 7], [2, 3]]] assert_equal(rot90(a, axes=(0, 1)), a_rot90_01) assert_equal(rot90(a, axes=(1, 0)), a_rot90_10) assert_equal(rot90(a, axes=(1, 2)), a_rot90_12) for k in range(1,5): assert_equal(rot90(a, k=k, axes=(2, 0)), rot90(a_rot90_20, k=k-1, axes=(2, 0))) class TestFlip(object): def test_axes(self): assert_raises(ValueError, np.flip, np.ones(4), axis=1) assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2) assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3) def test_basic_lr(self): a = get_mat(4) b = a[:, ::-1] assert_equal(np.flip(a, 1), b) a = [[0, 1, 2], [3, 4, 5]] b = [[2, 1, 0], [5, 4, 3]] assert_equal(np.flip(a, 1), b) def test_basic_ud(self): a = get_mat(4) b = a[::-1, :] assert_equal(np.flip(a, 0), b) a = [[0, 1, 2], [3, 4, 5]] b = [[3, 4, 5], [0, 1, 2]] assert_equal(np.flip(a, 0), b) def test_3d_swap_axis0(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) assert_equal(np.flip(a, 0), b) def test_3d_swap_axis1(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) assert_equal(np.flip(a, 1), b) def test_3d_swap_axis2(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[1, 0], [3, 2]], [[5, 4], [7, 6]]]) assert_equal(np.flip(a, 2), b) 
def test_4d(self): a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5) for i in range(a.ndim): assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) class TestAny(object): def test_basic(self): y1 = [0, 0, 1, 0] y2 = [0, 0, 0, 0] y3 = [1, 0, 1, 0] assert_(np.any(y1)) assert_(np.any(y3)) assert_(not np.any(y2)) def test_nd(self): y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] assert_(np.any(y1)) assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0]) assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) class TestAll(object): def test_basic(self): y1 = [0, 1, 1, 0] y2 = [0, 0, 0, 0] y3 = [1, 1, 1, 1] assert_(not np.all(y1)) assert_(np.all(y3)) assert_(not np.all(y2)) assert_(np.all(~np.array(y2))) def test_nd(self): y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] assert_(not np.all(y1)) assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1]) assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) class TestCopy(object): def test_basic(self): a = np.array([[1, 2], [3, 4]]) a_copy = np.copy(a) assert_array_equal(a, a_copy) a_copy[0, 0] = 10 assert_equal(a[0, 0], 1) assert_equal(a_copy[0, 0], 10) def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) a_fort = np.array([[1, 2], [3, 4]], order="F") assert_(not a_fort.flags.c_contiguous) assert_(a_fort.flags.f_contiguous) a_copy = np.copy(a) assert_(a_copy.flags.c_contiguous) assert_(not a_copy.flags.f_contiguous) a_fort_copy = np.copy(a_fort) assert_(not a_fort_copy.flags.c_contiguous) assert_(a_fort_copy.flags.f_contiguous) class TestAverage(object): def test_basic(self): y1 = np.array([1, 2, 3]) assert_(average(y1, axis=0) == 2.) y2 = np.array([1., 2., 3.]) assert_(average(y2, axis=0) == 2.) y3 = [0., 0., 0.] assert_(average(y3, axis=0) == 0.) y4 = np.ones((4, 4)) y4[0, 1] = 0 y4[1, 0] = 2 assert_almost_equal(y4.mean(0), average(y4, 0)) assert_almost_equal(y4.mean(1), average(y4, 1)) y5 = rand(5, 5) assert_almost_equal(y5.mean(0), average(y5, 0)) assert_almost_equal(y5.mean(1), average(y5, 1)) y6 = np.matrix(rand(5, 5)) assert_array_equal(y6.mean(0), average(y6, 0)) def test_weights(self): y = np.arange(10) w = np.arange(10) actual = average(y, weights=w) desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum() assert_almost_equal(actual, desired) y1 = np.array([[1, 2, 3], [4, 5, 6]]) w0 = [1, 2] actual = average(y1, weights=w0, axis=0) desired = np.array([3., 4., 5.]) assert_almost_equal(actual, desired) w1 = [0, 0, 1] actual = average(y1, weights=w1, axis=1) desired = np.array([3., 6.]) assert_almost_equal(actual, desired) # This should raise an error. Can we test for that ? # assert_equal(average(y1, weights=w1), 9./2.) # 2D Case w2 = [[0, 0, 1], [0, 0, 2]] desired = np.array([3., 6.]) assert_array_equal(average(y1, weights=w2, axis=1), desired) assert_equal(average(y1, weights=w2), 5.) y3 = rand(5).astype(np.float32) w3 = rand(5).astype(np.float64) assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3)) def test_returned(self): y = np.array([[1, 2, 3], [4, 5, 6]]) # No weights avg, scl = average(y, returned=True) assert_equal(scl, 6.) 
avg, scl = average(y, 0, returned=True) assert_array_equal(scl, np.array([2., 2., 2.])) avg, scl = average(y, 1, returned=True) assert_array_equal(scl, np.array([3., 3.])) # With weights w0 = [1, 2] avg, scl = average(y, weights=w0, axis=0, returned=True) assert_array_equal(scl, np.array([3., 3., 3.])) w1 = [1, 2, 3] avg, scl = average(y, weights=w1, axis=1, returned=True) assert_array_equal(scl, np.array([6., 6.])) w2 = [[0, 0, 1], [1, 2, 3]] avg, scl = average(y, weights=w2, axis=1, returned=True) assert_array_equal(scl, np.array([1., 6.])) def test_subclasses(self): class subclass(np.ndarray): pass a = np.array([[1,2],[3,4]]).view(subclass) w = np.array([[1,2],[3,4]]).view(subclass) assert_equal(type(np.average(a)), subclass) assert_equal(type(np.average(a, weights=w)), subclass) # also test matrices a = np.matrix([[1,2],[3,4]]) w = np.matrix([[1,2],[3,4]]) r = np.average(a, axis=0, weights=w) assert_equal(type(r), np.matrix) assert_equal(r, [[2.5, 10.0/3]]) def test_upcasting(self): types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] for at, wt, rt in types: a = np.array([[1,2],[3,4]], dtype=at) w = np.array([[1,2],[3,4]], dtype=wt) assert_equal(np.average(a, weights=w).dtype, np.dtype(rt)) def test_object_dtype(self): a = np.array([decimal.Decimal(x) for x in range(10)]) w = np.array([decimal.Decimal(1) for _ in range(10)]) w /= w.sum() assert_almost_equal(a.mean(0), average(a, weights=w)) class TestSelect(object): choices = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])] conditions = [np.array([False, False, False]), np.array([False, True, False]), np.array([False, False, True])] def _select(self, cond, values, default=0): output = [] for m in range(len(cond)): output += [V[m] for V, C in zip(values, cond) if C[m]] or [default] return output def test_basic(self): choices = self.choices conditions = self.conditions assert_array_equal(select(conditions, choices, default=15), self._select(conditions, choices, default=15)) assert_equal(len(choices), 3) assert_equal(len(conditions), 3) def test_broadcasting(self): conditions = [np.array(True), np.array([False, True, False])] choices = [1, np.arange(12).reshape(4, 3)] assert_array_equal(select(conditions, choices), np.ones((4, 3))) # default can broadcast too: assert_equal(select([True], [0], default=[0]).shape, (1,)) def test_return_dtype(self): assert_equal(select(self.conditions, self.choices, 1j).dtype, np.complex_) # But the conditions need to be stronger than the scalar default # if it is scalar. 
choices = [choice.astype(np.int8) for choice in self.choices] assert_equal(select(self.conditions, choices).dtype, np.int8) d = np.array([1, 2, 3, np.nan, 5, 7]) m = np.isnan(d) assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) def test_deprecated_empty(self): with warnings.catch_warnings(record=True): warnings.simplefilter("always") assert_equal(select([], [], 3j), 3j) with warnings.catch_warnings(): warnings.simplefilter("always") assert_warns(DeprecationWarning, select, [], []) warnings.simplefilter("error") assert_raises(DeprecationWarning, select, [], []) def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] with warnings.catch_warnings(): warnings.filterwarnings("always") conditions[0] = conditions[0].astype(np.int_) assert_warns(DeprecationWarning, select, conditions, choices) conditions[0] = conditions[0].astype(np.uint8) assert_warns(DeprecationWarning, select, conditions, choices) warnings.filterwarnings("error") assert_raises(DeprecationWarning, select, conditions, choices) def test_many_arguments(self): # This used to be limited by NPY_MAXARGS == 32 conditions = [np.array([False])] * 100 choices = [np.array([1])] * 100 select(conditions, choices) class TestInsert(object): def test_basic(self): a = [1, 2, 3] assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9]) assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3]) assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9]) b = np.array([0, 1], dtype=np.float64) assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) assert_equal(insert(b, [], []), b) # Bools will be treated differently in the future: # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', FutureWarning) assert_equal( insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3]) assert_(w[0].category is FutureWarning) def test_multidim(self): a = [[1, 1, 1]] r = [[2, 2, 2], [1, 1, 1]] assert_equal(insert(a, 0, [1]), [1, 1, 1, 1]) assert_equal(insert(a, 0, [2, 2, 2], axis=0), r) assert_equal(insert(a, 0, 2, axis=0), r) assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]]) a = np.array([[1, 1], [2, 2], [3, 3]]) b = np.arange(1, 4).repeat(3).reshape(3, 3) c = np.concatenate( (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T, a[:, 1:2]), axis=1) assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b) assert_equal(insert(a, [1], [1, 2, 3], axis=1), c) # scalars behave differently, in this case exactly opposite: assert_equal(insert(a, 1, [1, 2, 3], axis=1), b) assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c) a = np.arange(4).reshape(2, 2) assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a) assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a) # negative axis value a = np.arange(24).reshape((2, 3, 4)) assert_equal(insert(a, 1, a[:,:, 3], axis=-1), insert(a, 1, a[:,:, 3], axis=2)) assert_equal(insert(a, 1, a[:, 2,:], axis=-2), insert(a, 1, a[:, 2,:], axis=1)) # invalid axis value assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3) assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4) # negative axis value a = np.arange(24).reshape((2, 3, 4)) assert_equal(insert(a, 1, a[:, :, 3], axis=-1), insert(a, 1, a[:, :, 3], axis=2)) assert_equal(insert(a, 1, a[:, 2, :], axis=-2), 
insert(a, 1, a[:, 2, :], axis=1)) def test_0d(self): # This is an error in the future a = np.array(1) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(insert(a, [], 2, axis=0), np.array(2)) assert_(w[0].category is DeprecationWarning) def test_subclass(self): class SubClass(np.ndarray): pass a = np.arange(10).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), SubClass)) assert_(isinstance(np.insert(a, [], []), SubClass)) assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass)) assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass)) assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass)) # This is an error in the future: a = np.array(1).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), SubClass)) def test_index_array_copied(self): x = np.array([1, 1, 1]) np.insert([0, 1, 2], x, [3, 4, 5]) assert_equal(x, np.array([1, 1, 1])) def test_structured_array(self): a = np.array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=[('foo', 'i'), ('bar', 'a1')]) val = (4, 'd') b = np.insert(a, 0, val) assert_array_equal(b[0], np.array(val, dtype=b.dtype)) val = [(4, 'd')] * 2 b = np.insert(a, [0, 2], val) assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) class TestAmax(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.amax(a), 10.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0]) assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0]) class TestAmin(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.amin(a), -5.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0]) assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0]) class TestPtp(object): def test_basic(self): a = np.array([3, 4, 5, 10, -3, -5, 6.0]) assert_equal(a.ptp(axis=0), 15.0) b = np.array([[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]]) assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0]) assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0]) class TestCumsum(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype) assert_array_equal(np.cumsum(a, axis=0), tgt) tgt = np.array( [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype) assert_array_equal(np.cumsum(a2, axis=0), tgt) tgt = np.array( [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype) assert_array_equal(np.cumsum(a2, axis=1), tgt) class TestProd(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: assert_raises(ArithmeticError, np.prod, a) assert_raises(ArithmeticError, np.prod, a2, 1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), np.array([50, 36, 84, 180], ctype)) assert_array_equal(a2.prod(axis=-1), np.array([24, 1890, 600], ctype)) class TestCumprod(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = 
np.array(ba2, ctype) if ctype in ['1', 'b']: assert_raises(ArithmeticError, np.cumprod, a) assert_raises(ArithmeticError, np.cumprod, a2, 1) assert_raises(ArithmeticError, np.cumprod, a) else: assert_array_equal(np.cumprod(a, axis=-1), np.array([1, 2, 20, 220, 1320, 6600, 26400], ctype)) assert_array_equal(np.cumprod(a2, axis=0), np.array([[1, 2, 3, 4], [5, 12, 21, 36], [50, 36, 84, 180]], ctype)) assert_array_equal(np.cumprod(a2, axis=-1), np.array([[1, 2, 6, 24], [5, 30, 210, 1890], [10, 30, 120, 600]], ctype)) class TestDiff(object): def test_basic(self): x = [1, 4, 6, 7, 12] out = np.array([3, 2, 1, 5]) out2 = np.array([-1, -1, 4]) out3 = np.array([0, 5]) assert_array_equal(diff(x), out) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, n=3), out3) x = [1.1, 2.2, 3.0, -0.2, -0.1] out = np.array([1.1, 0.8, -3.2, 0.1]) assert_almost_equal(diff(x), out) x = [True, True, False, False] out = np.array([False, True, False]) out2 = np.array([True, True]) assert_array_equal(diff(x), out) assert_array_equal(diff(x, n=2), out2) def test_axis(self): x = np.zeros((10, 20, 30)) x[:, 1::2, :] = 1 exp = np.ones((10, 19, 30)) exp[:, 1::2, :] = -1 assert_array_equal(diff(x), np.zeros((10, 20, 29))) assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29))) assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30))) assert_array_equal(diff(x, axis=1), exp) assert_array_equal(diff(x, axis=-2), exp) assert_raises(np.AxisError, diff, x, axis=3) assert_raises(np.AxisError, diff, x, axis=-4) def test_nd(self): x = 20 * rand(10, 20, 30) out1 = x[:, :, 1:] - x[:, :, :-1] out2 = out1[:, :, 1:] - out1[:, :, :-1] out3 = x[1:, :, :] - x[:-1, :, :] out4 = out3[1:, :, :] - out3[:-1, :, :] assert_array_equal(diff(x), out1) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, axis=0), out3) assert_array_equal(diff(x, n=2, axis=0), out4) def test_n(self): x = list(range(3)) assert_raises(ValueError, diff, x, n=-1) output = [diff(x, n=n) for n in range(1, 5)] expected = [[1, 1], [0], [], []] assert_(diff(x, n=0) is x) for n, (expected, out) in enumerate(zip(expected, output), start=1): assert_(type(out) is np.ndarray) assert_array_equal(out, expected) assert_equal(out.dtype, np.int_) assert_equal(len(out), max(0, len(x) - n)) def test_times(self): x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) expected = [ np.array([1, 1], dtype='timedelta64[D]'), np.array([0], dtype='timedelta64[D]'), ] expected.extend([np.array([], dtype='timedelta64[D]')] * 3) for n, exp in enumerate(expected, start=1): out = diff(x, n=n) assert_array_equal(out, exp) assert_equal(out.dtype, exp.dtype) def test_subclass(self): x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], mask=[[False, False], [True, False], [False, True], [True, True], [False, False]]) out = diff(x) assert_array_equal(out.data, [[1], [1], [1], [1], [1]]) assert_array_equal(out.mask, [[False], [True], [True], [True], [False]]) assert_(type(out) is type(x)) out3 = diff(x, n=3) assert_array_equal(out3.data, [[], [], [], [], []]) assert_array_equal(out3.mask, [[], [], [], [], []]) assert_(type(out3) is type(x)) class TestDelete(object): def setup(self): self.a = np.arange(5) self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) def _check_inverse_of_slicing(self, indices): a_del = delete(self.a, indices) nd_a_del = delete(self.nd_a, indices, axis=1) msg = 'Delete failed for obj: %r' % indices # NOTE: The cast should be removed after warning phase for bools if not isinstance(indices, (slice, int, long, np.integer)): indices = 
np.asarray(indices, dtype=np.intp) indices = indices[(indices >= 0) & (indices < 5)] assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, err_msg=msg) xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0]) assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] steps = [-3, -1, 1, 3] for start in lims: for stop in lims: for step in steps: s = slice(start, stop, step) self._check_inverse_of_slicing(s) def test_fancy(self): # Deprecation/FutureWarning tests should be kept after change. self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) with warnings.catch_warnings(): warnings.filterwarnings('error', category=DeprecationWarning) assert_raises(DeprecationWarning, delete, self.a, [100]) assert_raises(DeprecationWarning, delete, self.a, [-100]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=FutureWarning) self._check_inverse_of_slicing([0, -1, 2, 2]) obj = np.array([True, False, False], dtype=bool) self._check_inverse_of_slicing(obj) assert_(w[0].category is FutureWarning) assert_(w[1].category is FutureWarning) def test_single(self): self._check_inverse_of_slicing(0) self._check_inverse_of_slicing(-4) def test_0d(self): a = np.array(1) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(delete(a, [], axis=0), a) assert_(w[0].category is DeprecationWarning) def test_subclass(self): class SubClass(np.ndarray): pass a = self.a.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) assert_(isinstance(delete(a, [0, 1]), SubClass)) assert_(isinstance(delete(a, slice(1, 2)), SubClass)) assert_(isinstance(delete(a, slice(1, -2)), SubClass)) def test_array_order_preserve(self): # See gh-7113 k = np.arange(10).reshape(2, 5, order='F') m = delete(k, slice(60, None), axis=1) # 'k' is Fortran ordered, and 'm' should have the # same ordering as 'k' and NOT become C ordered assert_equal(m.flags.c_contiguous, k.flags.c_contiguous) assert_equal(m.flags.f_contiguous, k.flags.f_contiguous) class TestGradient(object): def test_basic(self): v = [[1, 1], [3, 4]] x = np.array(v) dx = [np.array([[2., 3.], [2., 3.]]), np.array([[0., 0.], [1., 1.]])] assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) def test_args(self): dx = np.cumsum(np.ones(5)) dx_uneven = [1., 2., 5., 9., 11.] f_2d = np.arange(25).reshape(5, 5) # distances must be scalars or have size equal to gradient[axis] gradient(np.arange(5), 3.) 
gradient(np.arange(5), np.array(3.)) gradient(np.arange(5), dx) # dy is set equal to dx because scalar gradient(f_2d, 1.5) gradient(f_2d, np.array(1.5)) gradient(f_2d, dx_uneven, dx_uneven) # mix between even and uneven spaces and # mix between scalar and vector gradient(f_2d, dx, 2) # 2D but axis specified gradient(f_2d, dx, axis=1) # 2d coordinate arguments are not yet allowed assert_raises_regex(ValueError, '.*scalars or 1d', gradient, f_2d, np.stack([dx]*2, axis=-1), 1) def test_badargs(self): f_2d = np.arange(25).reshape(5, 5) x = np.cumsum(np.ones(5)) # wrong sizes assert_raises(ValueError, gradient, f_2d, x, np.ones(2)) assert_raises(ValueError, gradient, f_2d, 1, np.ones(2)) assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2)) # wrong number of arguments assert_raises(TypeError, gradient, f_2d, x) assert_raises(TypeError, gradient, f_2d, x, axis=(0,1)) assert_raises(TypeError, gradient, f_2d, x, x, x) assert_raises(TypeError, gradient, f_2d, 1, 1, 1) assert_raises(TypeError, gradient, f_2d, x, x, axis=1) assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1) def test_datetime64(self): # Make sure gradient() can handle special types like datetime64 x = np.array( ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', '1910-10-12', '1910-12-12', '1912-12-12'], dtype='datetime64[D]') dx = np.array( [-5, -3, 0, 31, 61, 396, 731], dtype='timedelta64[D]') assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) def test_masked(self): # Make sure that gradient supports subclasses like masked arrays x = np.ma.array([[1, 1], [3, 4]], mask=[[False, False], [False, False]]) out = gradient(x)[0] assert_equal(type(out), type(x)) # And make sure that the output and input don't have aliased mask # arrays assert_(x.mask is not out.mask) # Also check that edge_order=2 doesn't alter the original mask x2 = np.ma.arange(5) x2[2] = np.ma.masked np.gradient(x2, edge_order=2) assert_array_equal(x2.mask, [False, False, True, False, False]) def test_second_order_accurate(self): # Testing that the relative numerical error is less than 3% for # this example problem. This corresponds to second order # accurate finite differences for all interior and boundary # points. x = np.linspace(0, 1, 10) dx = x[1] - x[0] y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03) == True) # test with unevenly spaced np.random.seed(0) x = np.sort(np.random.random(10)) y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03) == True) def test_spacing(self): f = np.array([0, 2., 3., 4., 5., 5.]) f = np.tile(f, (6,1)) + f.reshape(-1, 1) x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) x_even = np.arange(6.) 
fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) # evenly spaced for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) res2 = gradient(f, x_even, x_even, axis=(0,1), edge_order=edge_order) res3 = gradient(f, x_even, x_even, axis=None, edge_order=edge_order) assert_array_equal(res1, res2) assert_array_equal(res2, res3) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) res1 = gradient(f, 1., axis=0, edge_order=edge_order) res2 = gradient(f, x_even, axis=0, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_almost_equal(res2, exp_res.T) res1 = gradient(f, 1., axis=1, edge_order=edge_order) res2 = gradient(f, x_even, axis=1, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_array_equal(res2, exp_res) # unevenly spaced for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: res1 = gradient(f, x_uneven, x_uneven, axis=(0,1), edge_order=edge_order) res2 = gradient(f, x_uneven, x_uneven, axis=None, edge_order=edge_order) assert_array_equal(res1, res2) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) assert_almost_equal(res1, exp_res.T) res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) assert_almost_equal(res1, exp_res) # mixed res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord1.T) assert_almost_equal(res1[1], fdx_uneven_ord1) res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord2.T) assert_almost_equal(res1[1], fdx_uneven_ord2) def test_specific_axes(self): # Testing that gradient can work on a given axis only v = [[1, 1], [3, 4]] x = np.array(v) dx = [np.array([[2., 3.], [2., 3.]]), np.array([[0., 0.], [1., 1.]])] assert_array_equal(gradient(x, axis=0), dx[0]) assert_array_equal(gradient(x, axis=1), dx[1]) assert_array_equal(gradient(x, axis=-1), dx[1]) assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]]) # test axis=None which means all axes assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]]) # and is the same as no axis keyword given assert_almost_equal(gradient(x, axis=None), gradient(x)) # test vararg order assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0]) # test maximal number of varargs assert_raises(TypeError, gradient, x, 1, 2, axis=1) assert_raises(np.AxisError, gradient, x, axis=3) assert_raises(np.AxisError, gradient, x, axis=-3) # assert_raises(TypeError, gradient, x, axis=[1,]) def test_timedelta64(self): # Make sure gradient() can handle special types like timedelta64 x = np.array( [-5, -3, 10, 12, 61, 321, 300], dtype='timedelta64[D]') dx = np.array( [2, 7, 7, 25, 154, 119, -21], dtype='timedelta64[D]') assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) def test_inexact_dtypes(self): for dt in [np.float16, np.float32, np.float64]: # dtypes should not be promoted in a different way to what diff does x 
= np.array([1, 2, 3], dtype=dt) assert_equal(gradient(x).dtype, np.diff(x).dtype) def test_values(self): # needs at least 2 points for edge_order ==1 gradient(np.arange(2), edge_order=1) # needs at least 3 points for edge_order ==2 gradient(np.arange(3), edge_order=2) assert_raises(ValueError, gradient, np.arange(0), edge_order=1) assert_raises(ValueError, gradient, np.arange(0), edge_order=2) assert_raises(ValueError, gradient, np.arange(1), edge_order=1) assert_raises(ValueError, gradient, np.arange(1), edge_order=2) assert_raises(ValueError, gradient, np.arange(2), edge_order=2) class TestAngle(object): def test_basic(self): x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] y = angle(x) yo = [ np.arctan(3.0 / 1.0), np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0, -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)] z = angle(x, deg=1) zo = np.array(yo) * 180 / np.pi assert_array_almost_equal(y, yo, 11) assert_array_almost_equal(z, zo, 11) class TestTrimZeros(object): """ Only testing for integer splits. """ def test_basic(self): a = np.array([0, 0, 1, 2, 3, 4, 0]) res = trim_zeros(a) assert_array_equal(res, np.array([1, 2, 3, 4])) def test_leading_skip(self): a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) res = trim_zeros(a) assert_array_equal(res, np.array([1, 0, 2, 3, 4])) def test_trailing_skip(self): a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0]) res = trim_zeros(a) assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) class TestExtins(object): def test_basic(self): a = np.array([1, 3, 2, 1, 2, 3, 3]) b = extract(a > 1, a) assert_array_equal(b, [3, 2, 2, 3, 3]) def test_place(self): # Make sure that non-np.ndarray objects # raise an error instead of doing nothing assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) a = np.array([1, 4, 3, 2, 5, 8, 7]) place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) place(a, np.zeros(7), []) assert_array_equal(a, np.arange(1, 8)) place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) assert_raises_regex(ValueError, "Cannot insert from an empty array", lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) # See Issue #6974 a = np.array(['12', '34']) place(a, [0, 1], '9') assert_array_equal(a, ['12', '9']) def test_both(self): a = rand(10) mask = a > 0.5 ac = a.copy() c = extract(mask, a) place(a, mask, 0) place(a, mask, c) assert_array_equal(a, ac) class TestVectorize(object): def test_simple(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract) r = f([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) def test_scalar(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract) r = f([0, 3, 6, 9], 5) assert_array_equal(r, [5, 8, 1, 4]) def test_large(self): x = np.linspace(-3, 2, 10000) f = vectorize(lambda x: x) y = f(x) assert_array_equal(y, x) def test_ufunc(self): import math f = vectorize(math.cos) args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) r1 = f(args) r2 = np.cos(args) assert_array_almost_equal(r1, r2) def test_keywords(self): def foo(a, b=1): return a + b f = vectorize(foo) args = np.array([1, 2, 3]) r1 = f(args) r2 = np.array([2, 3, 4]) assert_array_equal(r1, r2) r1 = f(args, 2) r2 = np.array([3, 4, 5]) assert_array_equal(r1, r2) def test_keywords_no_func_code(self): # This needs to test a function that has keywords but # no func_code attribute, since otherwise vectorize will # inspect the func_code. 
import random try: vectorize(random.randrange) # Should succeed except Exception: raise AssertionError() def test_keywords2_ticket_2100(self): # Test kwarg support: enhancement ticket 2100 def foo(a, b=1): return a + b f = vectorize(foo) args = np.array([1, 2, 3]) r1 = f(a=args) r2 = np.array([2, 3, 4]) assert_array_equal(r1, r2) r1 = f(b=1, a=args) assert_array_equal(r1, r2) r1 = f(args, b=2) r2 = np.array([3, 4, 5]) assert_array_equal(r1, r2) def test_keywords3_ticket_2100(self): # Test excluded with mixed positional and kwargs: ticket 2100 def mypolyval(x, p): _p = list(p) res = _p.pop(0) while _p: res = res * x + _p.pop(0) return res vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) ans = [3, 6] assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) def test_keywords4_ticket_2100(self): # Test vectorizing function with no positional args. @vectorize def f(**kw): res = 1.0 for _k in kw: res *= kw[_k] return res assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) def test_keywords5_ticket_2100(self): # Test vectorizing function with no keyword args. @vectorize def f(*v): return np.prod(v) assert_array_equal(f([1, 2], [3, 4]), [3, 8]) def test_coverage1_ticket_2100(self): def foo(): return 1 f = vectorize(foo) assert_array_equal(f(), 1) def test_assigning_docstring(self): def foo(x): """Original documentation""" return x f = vectorize(foo) assert_equal(f.__doc__, foo.__doc__) doc = "Provided documentation" f = vectorize(foo, doc=doc) assert_equal(f.__doc__, doc) def test_UnboundMethod_ticket_1156(self): # Regression test for issue 1156 class Foo: b = 2 def bar(self, a): return a ** self.b assert_array_equal(vectorize(Foo().bar)(np.arange(9)), np.arange(9) ** 2) assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), np.arange(9) ** 2) def test_execution_order_ticket_1487(self): # Regression test for dependence on execution order: issue 1487 f1 = vectorize(lambda x: x) res1a = f1(np.arange(3)) res1b = f1(np.arange(0.1, 3)) f2 = vectorize(lambda x: x) res2b = f2(np.arange(0.1, 3)) res2a = f2(np.arange(3)) assert_equal(res1a, res2a) assert_equal(res1b, res2b) def test_string_ticket_1892(self): # Test vectorization over strings: issue 1892. f = np.vectorize(lambda x: x) s = '0123456789' * 10 assert_equal(s, f(s)) def test_cache(self): # Ensure that the vectorized func is called exactly once per argument.
_calls = [0] @vectorize def f(x): _calls[0] += 1 return x ** 2 f.cache = True x = np.arange(5) assert_array_equal(f(x), x * x) assert_equal(_calls[0], len(x)) def test_otypes(self): f = np.vectorize(lambda x: x) f.otypes = 'i' x = np.arange(5) assert_array_equal(f(x), x) def test_parse_gufunc_signature(self): assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), ([('x', 'y')], [()])) assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), ([('x',), ('y',)], [()])) assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), ([('x',)], [('y',)])) assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), ([('x',)], [('y',), ()])) assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) with assert_raises(ValueError): nfb._parse_gufunc_signature('(x)(y)->()') with assert_raises(ValueError): nfb._parse_gufunc_signature('(x),(y)->') with assert_raises(ValueError): nfb._parse_gufunc_signature('((x))->(x)') def test_signature_simple(self): def addsubtract(a, b): if a > b: return a - b else: return a + b f = vectorize(addsubtract, signature='(),()->()') r = f([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) def test_signature_mean_last(self): def mean(a): return a.mean() f = vectorize(mean, signature='(n)->()') r = f([[1, 3], [2, 4]]) assert_array_equal(r, [2, 3]) def test_signature_center(self): def center(a): return a - a.mean() f = vectorize(center, signature='(n)->(n)') r = f([[1, 3], [2, 4]]) assert_array_equal(r, [[-1, 1], [-1, 1]]) def test_signature_two_outputs(self): f = vectorize(lambda x: (x, x), signature='()->(),()') r = f([1, 2, 3]) assert_(isinstance(r, tuple) and len(r) == 2) assert_array_equal(r[0], [1, 2, 3]) assert_array_equal(r[1], [1, 2, 3]) def test_signature_outer(self): f = vectorize(np.outer, signature='(a),(b)->(a,b)') r = f([1, 2], [1, 2, 3]) assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) r = f([[[1, 2]]], [1, 2, 3]) assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) r = f([[1, 0], [2, 0]], [1, 2, 3]) assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], [[2, 4, 6], [0, 0, 0]]]) r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], [[0, 0, 0], [0, 0, 0]]]) def test_signature_computed_size(self): f = vectorize(lambda x: x[:-1], signature='(n)->(m)') r = f([1, 2, 3]) assert_array_equal(r, [1, 2]) r = f([[1, 2, 3], [2, 3, 4]]) assert_array_equal(r, [[1, 2], [2, 3]]) def test_signature_excluded(self): def foo(a, b=1): return a + b f = vectorize(foo, signature='()->()', excluded={'b'}) assert_array_equal(f([1, 2, 3]), [2, 3, 4]) assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) def test_signature_otypes(self): f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) r = f([1, 2, 3]) assert_equal(r.dtype, np.dtype('float64')) assert_array_equal(r, [1, 2, 3]) def test_signature_invalid_inputs(self): f = vectorize(operator.add, signature='(n),(n)->(n)') with assert_raises_regex(TypeError, 'wrong number of positional'): f([1, 2]) with assert_raises_regex( ValueError, 'does not have enough dimensions'): f(1, 2) with assert_raises_regex( ValueError, 'inconsistent size for core dimension'): f([1, 2], [1, 2, 3]) f = vectorize(operator.add, signature='()->()') with assert_raises_regex(TypeError, 'wrong number of positional'): f(1, 2) def test_signature_invalid_outputs(self): f = vectorize(lambda x: x[:-1], signature='(n)->(n)') with assert_raises_regex( ValueError, 'inconsistent size for core dimension'): f([1, 2, 3]) 
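# Aside: a minimal, self-contained sketch of the gufunc signature
# mini-language exercised above (the underscore-prefixed names here are
# illustrative assumptions, not part of this suite). A signature such as
# '(m,n),(n,p)->(m,p)' declares the core dimensions of each argument;
# np.vectorize then loops the core operation over any remaining
# (broadcast) dimensions.
import numpy as np
_vec_matmul = np.vectorize(np.matmul, signature='(m,n),(n,p)->(m,p)')
_stacked = np.ones((3, 2, 2))  # a stack of three 2x2 matrices
assert _vec_matmul(_stacked, np.eye(2)).shape == (3, 2, 2)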
f = vectorize(lambda x: x, signature='()->(),()') with assert_raises_regex(ValueError, 'wrong number of outputs'): f(1) f = vectorize(lambda x: (x, x), signature='()->()') with assert_raises_regex(ValueError, 'wrong number of outputs'): f([1, 2]) def test_size_zero_output(self): # see issue 5868 f = np.vectorize(lambda x: x) x = np.zeros([0, 5], dtype=int) with assert_raises_regex(ValueError, 'otypes'): f(x) f.otypes = 'i' assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='()->()') with assert_raises_regex(ValueError, 'otypes'): f(x) f = np.vectorize(lambda x: x, signature='()->()', otypes='i') assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') assert_array_equal(f(x), x) f = np.vectorize(lambda x: x, signature='(n)->(n)') assert_array_equal(f(x.T), x.T) f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') with assert_raises_regex(ValueError, 'new output dimensions'): f(x) class TestDigitize(object): def test_forward(self): x = np.arange(-6, 5) bins = np.arange(-5, 5) assert_array_equal(digitize(x, bins), np.arange(11)) def test_reverse(self): x = np.arange(5, -6, -1) bins = np.arange(5, -5, -1) assert_array_equal(digitize(x, bins), np.arange(11)) def test_random(self): x = rand(10) bin = np.linspace(x.min(), x.max(), 10) assert_(np.all(digitize(x, bin) != 0)) def test_right_basic(self): x = [1, 5, 4, 10, 8, 11, 0] bins = [1, 5, 10] default_answer = [1, 2, 1, 3, 2, 3, 0] assert_array_equal(digitize(x, bins), default_answer) right_answer = [0, 1, 1, 2, 2, 3, 0] assert_array_equal(digitize(x, bins, True), right_answer) def test_right_open(self): x = np.arange(-6, 5) bins = np.arange(-6, 4) assert_array_equal(digitize(x, bins, True), np.arange(11)) def test_right_open_reverse(self): x = np.arange(5, -6, -1) bins = np.arange(4, -6, -1) assert_array_equal(digitize(x, bins, True), np.arange(11)) def test_right_open_random(self): x = rand(10) bins = np.linspace(x.min(), x.max(), 10) assert_(np.all(digitize(x, bins, True) != 10)) def test_monotonic(self): x = [-1, 0, 1, 2] bins = [0, 0, 1] assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) bins = [1, 1, 0] assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) bins = [1, 1, 1, 1] assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) bins = [0, 0, 1, 0] assert_raises(ValueError, digitize, x, bins) bins = [1, 1, 0, 1] assert_raises(ValueError, digitize, x, bins) def test_casting_error(self): x = [1, 2, 3 + 1.j] bins = [1, 2, 3] assert_raises(TypeError, digitize, x, bins) x, bins = bins, x assert_raises(TypeError, digitize, x, bins) def test_return_type(self): # Functions returning indices should always return base ndarrays class A(np.ndarray): pass a = np.arange(5).view(A) b = np.arange(1, 3).view(A) assert_(not isinstance(digitize(b, a, False), A)) assert_(not isinstance(digitize(b, a, True), A)) class TestUnwrap(object): def test_simple(self): # check that unwrap removes jumps greater than 2*pi assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) class TestFilterwindows(object): def test_hanning(self): # check symmetry w = hanning(10) assert_array_almost_equal(w, flipud(w), 7) # check known value assert_almost_equal(np.sum(w, axis=0), 4.500, 4) def test_hamming(self): # check symmetry w =
hamming(10) assert_array_almost_equal(w, flipud(w), 7) # check known value assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) def test_bartlett(self): # check symmetry w = bartlett(10) assert_array_almost_equal(w, flipud(w), 7) # check known value assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) def test_blackman(self): # check symmetry w = blackman(10) assert_array_almost_equal(w, flipud(w), 7) # check known value assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) class TestTrapz(object): def test_simple(self): x = np.arange(-10, 10, .1) r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) # check integral of normal equals 1 assert_almost_equal(r, 1, 7) def test_ndim(self): x = np.linspace(0, 1, 3) y = np.linspace(0, 2, 8) z = np.linspace(0, 3, 13) wx = np.ones_like(x) * (x[1] - x[0]) wx[0] /= 2 wx[-1] /= 2 wy = np.ones_like(y) * (y[1] - y[0]) wy[0] /= 2 wy[-1] /= 2 wz = np.ones_like(z) * (z[1] - z[0]) wz[0] /= 2 wz[-1] /= 2 q = x[:, None, None] + y[None,:, None] + z[None, None,:] qx = (q * wx[:, None, None]).sum(axis=0) qy = (q * wy[None, :, None]).sum(axis=1) qz = (q * wz[None, None, :]).sum(axis=2) # n-d `x` r = trapz(q, x=x[:, None, None], axis=0) assert_almost_equal(r, qx) r = trapz(q, x=y[None,:, None], axis=1) assert_almost_equal(r, qy) r = trapz(q, x=z[None, None,:], axis=2) assert_almost_equal(r, qz) # 1-d `x` r = trapz(q, x=x, axis=0) assert_almost_equal(r, qx) r = trapz(q, x=y, axis=1) assert_almost_equal(r, qy) r = trapz(q, x=z, axis=2) assert_almost_equal(r, qz) def test_masked(self): # Testing that intervals adjacent to a masked point are excluded # from the sum, so they contribute nothing to the integral x = np.arange(5) y = x * x mask = x == 2 ym = np.ma.array(y, mask=mask) r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) assert_almost_equal(trapz(ym, x), r) xm = np.ma.array(x, mask=mask) assert_almost_equal(trapz(ym, xm), r) xm = np.ma.array(x, mask=mask) assert_almost_equal(trapz(y, xm), r) def test_matrix(self): # Test to make sure matrices give the same answer as ndarrays x = np.linspace(0, 5) y = x * x r = trapz(y, x) mx = np.matrix(x) my = np.matrix(y) mr = trapz(my, mx) assert_almost_equal(mr, r) class TestSinc(object): def test_simple(self): assert_(sinc(0) == 1) w = sinc(np.linspace(-1, 1, 100)) # check symmetry assert_array_almost_equal(w, flipud(w), 7) def test_array_like(self): x = [0, 0.5] y1 = sinc(np.array(x)) y2 = sinc(list(x)) y3 = sinc(tuple(x)) assert_array_equal(y1, y2) assert_array_equal(y1, y3) class TestHistogram(object): def setup(self): pass def teardown(self): pass def test_simple(self): n = 100 v = rand(n) (a, b) = histogram(v) # check if the sum of the bins equals the number of samples assert_equal(np.sum(a, axis=0), n) # check that the bin counts are evenly spaced when the data is from # a linear function (a, b) = histogram(np.linspace(0, 10, 100)) assert_array_equal(a, 10) def test_one_bin(self): # Ticket 632 hist, edges = histogram([1, 2, 3, 4], [1, 2]) assert_array_equal(hist, [2, ]) assert_array_equal(edges, [1, 2]) assert_raises(ValueError, histogram, [1, 2], bins=0) h, e = histogram([1, 2], bins=1) assert_equal(h, np.array([2])) assert_allclose(e, np.array([1., 2.])) def test_normed(self): # Check that the integral of the density equals 1.
n = 100 v = rand(n) a, b = histogram(v, normed=True) area = np.sum(a * diff(b)) assert_almost_equal(area, 1) # Check with non-constant bin widths (buggy but backwards # compatible) v = np.arange(10) bins = [0, 1, 5, 9, 10] a, b = histogram(v, bins, normed=True) area = np.sum(a * diff(b)) assert_almost_equal(area, 1) def test_density(self): # Check that the integral of the density equals 1. n = 100 v = rand(n) a, b = histogram(v, density=True) area = np.sum(a * diff(b)) assert_almost_equal(area, 1) # Check with non-constant bin widths v = np.arange(10) bins = [0, 1, 3, 6, 10] a, b = histogram(v, bins, density=True) assert_array_equal(a, .1) assert_equal(np.sum(a * diff(b)), 1) # Variable bin widths are especially useful to deal with # infinities. v = np.arange(10) bins = [0, 1, 3, 6, np.inf] a, b = histogram(v, bins, density=True) assert_array_equal(a, [.1, .1, .1, 0.]) # Taken from a bug report from N. Becker on the numpy-discussion # mailing list Aug. 6, 2010. counts, dmy = np.histogram( [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) assert_equal(counts, [.25, 0]) def test_outliers(self): # Check that outliers are not tallied a = np.arange(10) + .5 # Lower outliers h, b = histogram(a, range=[0, 9]) assert_equal(h.sum(), 9) # Upper outliers h, b = histogram(a, range=[1, 10]) assert_equal(h.sum(), 9) # Normalization h, b = histogram(a, range=[1, 9], normed=True) assert_almost_equal((h * diff(b)).sum(), 1, decimal=15) # Weights w = np.arange(10) + .5 h, b = histogram(a, range=[1, 9], weights=w, normed=True) assert_equal((h * diff(b)).sum(), 1) h, b = histogram(a, bins=8, range=[1, 9], weights=w) assert_equal(h, w[1:-1]) def test_type(self): # Check the type of the returned histogram a = np.arange(10) + .5 h, b = histogram(a) assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, normed=True) assert_(np.issubdtype(h.dtype, np.floating)) h, b = histogram(a, weights=np.ones(10, int)) assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, weights=np.ones(10, float)) assert_(np.issubdtype(h.dtype, np.floating)) def test_f32_rounding(self): # gh-4799, check that the rounding of the edges works with float32 x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) assert_equal(counts_hist.sum(), 3.) def test_weights(self): v = rand(100) w = np.ones(100) * 5 a, b = histogram(v) na, nb = histogram(v, normed=True) wa, wb = histogram(v, weights=w) nwa, nwb = histogram(v, weights=w, normed=True) assert_array_almost_equal(a * 5, wa) assert_array_almost_equal(na, nwa) # Check weights are properly applied. v = np.linspace(0, 10, 10) w = np.concatenate((np.zeros(5), np.ones(5))) wa, wb = histogram(v, bins=np.arange(11), weights=w) assert_array_almost_equal(wa, w) # Check with integer weights wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) assert_array_equal(wa, [4, 5, 0, 1]) wa, wb = histogram( [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) # Check weights with non-uniform bin widths a, b = histogram( np.arange(9), [0, 1, 3, 6, 10], weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) assert_almost_equal(a, [.2, .1, .1, .075]) def test_exotic_weights(self): # Test the use of weights that are not integers or floats, but e.g. # complex numbers or object types.
# Complex weights values = np.array([1.3, 2.5, 2.3]) weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) # Check with custom bins wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) # Check with even bins wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) # Decimal weights from decimal import Decimal values = np.array([1.3, 2.5, 2.3]) weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) # Check with custom bins wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) # Check with even bins wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) def test_no_side_effects(self): # This is a regression test that ensures that values passed to # ``histogram`` are unchanged. values = np.array([1.3, 2.5, 2.3]) np.histogram(values, range=[-10, 10], bins=100) assert_array_almost_equal(values, [1.3, 2.5, 2.3]) def test_empty(self): a, b = histogram([], bins=([0, 1])) assert_array_equal(a, np.array([0])) assert_array_equal(b, np.array([0, 1])) def test_error_binnum_type(self): # Tests that the right error is raised if the bins argument is a float vals = np.linspace(0.0, 1.0, num=100) histogram(vals, 5) assert_raises(TypeError, histogram, vals, 2.4) def test_finite_range(self): # Normal ranges should be fine vals = np.linspace(0.0, 1.0, num=100) histogram(vals, range=[0.25,0.75]) assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) def test_bin_edge_cases(self): # Ensure that floating-point computations correctly place edge cases. arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) mask = hist > 0 left_edges = edges[:-1][mask] right_edges = edges[1:][mask] for x, left, right in zip(arr, left_edges, right_edges): assert_(x >= left) assert_(x < right) def test_last_bin_inclusive_range(self): arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) assert_equal(hist[-1], 1) def test_unsigned_monotonicity_check(self): # Ensures ValueError is raised if bins are not monotonically increasing # when bins contain unsigned values (see #9222) arr = np.array([2]) bins = np.array([1, 3, 1], dtype='uint64') with assert_raises(ValueError): hist, edges = np.histogram(arr, bins=bins) class TestHistogramOptimBinNums(object): """ Provide test coverage when using provided estimators for optimal number of bins """ def test_empty(self): estimator_list = ['fd', 'scott', 'rice', 'sturges', 'doane', 'sqrt', 'auto'] # check it can deal with empty data for estimator in estimator_list: a, b = histogram([], bins=estimator) assert_array_equal(a, np.array([0])) assert_array_equal(b, np.array([0, 1])) def test_simple(self): """ Straightforward testing with a mixture of linspace data (for consistency). All test values have been precomputed and they shouldn't change; a closed-form sanity check of the simpler estimators follows just below. """ # Some basic sanity checking, with some fixed data.
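# Aside: a hedged, standalone sanity check (not part of the precomputed
# expectations below) of the two simplest bin-count estimators. For a
# sample of size n, numpy's documentation gives 'sturges' as
# ceil(log2(n) + 1) bins and 'sqrt' as ceil(sqrt(n)) bins, which
# reproduces the n = 500 entries in the table that follows.
import numpy as np
_n = 500
assert int(np.ceil(np.log2(_n) + 1)) == 10  # 'sturges' for n = 500
assert int(np.ceil(np.sqrt(_n))) == 23      # 'sqrt' for n = 500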
# Checking for the correct number of bins basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, 'doane': 8, 'sqrt': 8, 'auto': 7}, 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, 'doane': 12, 'sqrt': 23, 'auto': 10}, 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, 'doane': 17, 'sqrt': 71, 'auto': 17}} for testlen, expectedResults in basic_test.items(): # Create some sort of non-uniform data to test with # (2 peak uniform mixture) x1 = np.linspace(-10, -1, testlen // 5 * 2) x2 = np.linspace(1, 10, testlen // 5 * 3) x = np.concatenate((x1, x2)) for estimator, numbins in expectedResults.items(): a, b = np.histogram(x, estimator) assert_equal(len(a), numbins, err_msg="For the {0} estimator " "with datasize of {1}".format(estimator, testlen)) def test_small(self): """ Smaller datasets have the potential to cause issues with the data adaptive methods, especially the FD method. All bin numbers have been precalculated. """ small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, 'doane': 1, 'sqrt': 1}, 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, 'doane': 1, 'sqrt': 2}, 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, 'doane': 3, 'sqrt': 2}} for testlen, expectedResults in small_dat.items(): testdat = np.arange(testlen) for estimator, expbins in expectedResults.items(): a, b = np.histogram(testdat, estimator) assert_equal(len(a), expbins, err_msg="For the {0} estimator " "with datasize of {1}".format(estimator, testlen)) def test_incorrect_methods(self): """ Check a ValueError is thrown when an unknown string is passed in """ check_list = ['mad', 'freeman', 'histograms', 'IQR'] for estimator in check_list: assert_raises(ValueError, histogram, [1, 2, 3], estimator) def test_novariance(self): """ Check that methods handle no variance in data Primarily for Scott and FD as the SD and IQR are both 0 in this case """ novar_dataset = np.ones(100) novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, 'doane': 1, 'sqrt': 1, 'auto': 1} for estimator, numbins in novar_resultdict.items(): a, b = np.histogram(novar_dataset, estimator) assert_equal(len(a), numbins, err_msg="{0} estimator, " "No Variance test".format(estimator)) def test_outlier(self): """ Check the FD, Scott and Doane estimators with outliers. The FD estimator selects a smaller binwidth since it's less affected by outliers. Since the range is so (artificially) large, this means more bins, most of which will be empty, but the data of interest usually is unaffected. The Scott estimator is more affected and returns fewer bins, despite most of the variance being in one area of the data. The Doane estimator lies somewhere between the other two. """ xcenter = np.linspace(-10, 10, 50) outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11} for estimator, numbins in outlier_resultdict.items(): a, b = np.histogram(outlier_dataset, estimator) assert_equal(len(a), numbins) def test_simple_range(self): """ Straightforward testing with a mixture of linspace data (for consistency). Adding in a 3rd mixture that will then be completely ignored. All test values have been precomputed and they shouldn't change. """ # some basic sanity checking, with some fixed data.
# Checking for the correct number of bins basic_test = { 50: {'fd': 8, 'scott': 8, 'rice': 15, 'sturges': 14, 'auto': 14}, 500: {'fd': 15, 'scott': 16, 'rice': 32, 'sturges': 20, 'auto': 20}, 5000: {'fd': 33, 'scott': 33, 'rice': 69, 'sturges': 27, 'auto': 33} } for testlen, expectedResults in basic_test.items(): # create some sort of non uniform data to test with # (3 peak uniform mixture) x1 = np.linspace(-10, -1, testlen // 5 * 2) x2 = np.linspace(1, 10, testlen // 5 * 3) x3 = np.linspace(-100, -50, testlen) x = np.hstack((x1, x2, x3)) for estimator, numbins in expectedResults.items(): a, b = np.histogram(x, estimator, range = (-20, 20)) msg = "For the {0} estimator".format(estimator) msg += " with datasize of {0}".format(testlen) assert_equal(len(a), numbins, err_msg=msg) def test_simple_weighted(self): """ Check that weighted data raises a TypeError """ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] for estimator in estimator_list: assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) class TestHistogramdd(object): def test_simple(self): x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) H, edges = histogramdd(x, (2, 3, 3), range=[[-1, 1], [0, 3], [0, 3]]) answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) assert_array_equal(H, answer) # Check normalization ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] H, edges = histogramdd(x, bins=ed, normed=True) assert_(np.all(H == answer / 12.)) # Check that H has the correct shape. H, edges = histogramdd(x, (2, 3, 4), range=[[-1, 1], [0, 3], [0, 4]], normed=True) answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) assert_array_almost_equal(H, answer / 6., 4) # Check that a sequence of arrays is accepted and H has the correct # shape. z = [np.squeeze(y) for y in split(x, 3, axis=1)] H, edges = histogramdd( z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) answer = np.array([[[0, 0], [0, 0], [0, 0]], [[0, 1], [0, 0], [1, 0]], [[0, 1], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0]]]) assert_array_equal(H, answer) Z = np.zeros((5, 5, 5)) Z[list(range(5)), list(range(5)), list(range(5))] = 1. H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) assert_array_equal(H, Z) def test_shape_3d(self): # All possible permutations for bins of different lengths in 3D. bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), (4, 5, 6)) r = rand(10, 3) for b in bins: H, edges = histogramdd(r, b) assert_(H.shape == b) def test_shape_4d(self): # All possible permutations for bins of different lengths in 4D. 
bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) r = rand(10, 4) for b in bins: H, edges = histogramdd(r, b) assert_(H.shape == b) def test_weights(self): v = rand(100, 2) hist, edges = histogramdd(v) n_hist, edges = histogramdd(v, normed=True) w_hist, edges = histogramdd(v, weights=np.ones(100)) assert_array_equal(w_hist, hist) w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True) assert_array_equal(w_hist, n_hist) w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) assert_array_equal(w_hist, 2 * hist) def test_identical_samples(self): x = np.zeros((10, 2), int) hist, edges = histogramdd(x, bins=2) assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) def test_empty(self): a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) assert_array_max_ulp(a, np.array([[0.]])) a, b = np.histogramdd([[], [], []], bins=2) assert_array_max_ulp(a, np.zeros((2, 2, 2))) def test_bins_errors(self): # There are two ways to specify bins. Check for the right errors # when mixing those. x = np.arange(8).reshape(2, 4) assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) assert_raises( ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) assert_raises( ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) def test_inf_edges(self): # Test using +/-inf bin edges works. See #1788. with np.errstate(invalid='ignore'): x = np.arange(6).reshape(3, 2) expected = np.array([[1, 0], [0, 1], [0, 1]]) h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) assert_allclose(h, expected) h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) assert_allclose(h, expected) h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) assert_allclose(h, expected) def test_rightmost_binedge(self): # Test event very close to rightmost binedge. See Github issue #4266 x = [0.9999999995] bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) assert_(hist[0] == 0.0) assert_(hist[1] == 1.) x = [1.0] bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) assert_(hist[0] == 0.0) assert_(hist[1] == 1.) x = [1.0000000001] bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) assert_(hist[0] == 0.0) assert_(hist[1] == 1.) 
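# Aside: a standalone, hedged illustration (not one of the original test
# cases) of the convention probed here -- the last histogram bin is closed
# on both sides, [a, b] rather than [a, b), so a sample exactly equal to
# the final edge is still counted.
import numpy as np
_h, _ = np.histogram([1.0], bins=[0.0, 0.5, 1.0])
assert _h.tolist() == [0, 1]  # 1.0 falls in the closed last bin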
x = [1.0001] bins = [[0., 0.5, 1.0]] hist, _ = histogramdd(x, bins=bins) assert_(hist[0] == 0.0) assert_(hist[1] == 0.0) def test_finite_range(self): vals = np.random.random((100, 3)) histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) assert_raises(ValueError, histogramdd, vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) assert_raises(ValueError, histogramdd, vals, range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) class TestUnique(object): def test_simple(self): x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) class TestCheckFinite(object): def test_simple(self): a = [1, 2, 3] b = [1, 2, np.inf] c = [1, 2, np.nan] np.lib.asarray_chkfinite(a) assert_raises(ValueError, np.lib.asarray_chkfinite, b) assert_raises(ValueError, np.lib.asarray_chkfinite, c) def test_dtype_order(self): # Regression test for missing dtype and order arguments a = [1, 2, 3] a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) assert_(a.dtype == np.float64) class TestCorrCoef(object): A = np.array( [[0.15391142, 0.18045767, 0.14197213], [0.70461506, 0.96474128, 0.27906989], [0.9297531, 0.32296769, 0.19267156]]) B = np.array( [[0.10377691, 0.5417086, 0.49807457], [0.82872117, 0.77801674, 0.39226705], [0.9314666, 0.66800209, 0.03538394]]) res1 = np.array( [[1., 0.9379533, -0.04931983], [0.9379533, 1., 0.30007991], [-0.04931983, 0.30007991, 1.]]) res2 = np.array( [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) def test_non_array(self): assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), [[1., -1.], [-1., 1.]]) def test_simple(self): tgt1 = corrcoef(self.A) assert_almost_equal(tgt1, self.res1) assert_(np.all(np.abs(tgt1) <= 1.0)) tgt2 = corrcoef(self.A, self.B) assert_almost_equal(tgt2, self.res2) assert_(np.all(np.abs(tgt2) <= 1.0)) def test_ddof(self): # ddof raises DeprecationWarning with suppress_warnings() as sup: warnings.simplefilter("always") assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) sup.filter(DeprecationWarning) # ddof has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) def test_bias(self): # bias raises DeprecationWarning with suppress_warnings() as sup: warnings.simplefilter("always") assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) sup.filter(DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, bias=1), self.res1) def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) res = corrcoef(x) tgt = np.array([[1., -1.j], [1.j, 1.]]) assert_allclose(res, tgt) assert_(np.all(np.abs(res) <= 1.0)) def test_xy(self): x = np.array([[1, 2, 
3]]) y = np.array([[1j, 2j, 3j]]) assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) def test_empty(self): with warnings.catch_warnings(record=True): warnings.simplefilter('always', RuntimeWarning) assert_array_equal(corrcoef(np.array([])), np.nan) assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), np.array([]).reshape(0, 0)) assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), np.array([[np.nan, np.nan], [np.nan, np.nan]])) def test_extreme(self): x = [[1e-100, 1e100], [1e100, 1e-100]] with np.errstate(all='raise'): c = corrcoef(x) assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) class TestCov(object): x1 = np.array([[0, 2], [1, 1], [2, 0]]).T res1 = np.array([[1., -1.], [-1., 1.]]) x2 = np.array([0.0, 1.0, 2.0], ndmin=2) frequencies = np.array([1, 4, 1]) x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) unit_frequencies = np.ones(3, dtype=np.integer) weights = np.array([1.0, 4.0, 1.0]) res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) unit_weights = np.ones(3) x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) def test_basic(self): assert_allclose(cov(self.x1), self.res1) def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]])) def test_xy(self): x = np.array([[1, 2, 3]]) y = np.array([[1j, 2j, 3j]]) assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) def test_empty(self): with warnings.catch_warnings(record=True): warnings.simplefilter('always', RuntimeWarning) assert_array_equal(cov(np.array([])), np.nan) assert_array_equal(cov(np.array([]).reshape(0, 2)), np.array([]).reshape(0, 0)) assert_array_equal(cov(np.array([]).reshape(2, 0)), np.array([[np.nan, np.nan], [np.nan, np.nan]])) def test_wrong_ddof(self): with warnings.catch_warnings(record=True): warnings.simplefilter('always', RuntimeWarning) assert_array_equal(cov(self.x1, ddof=5), np.array([[np.inf, -np.inf], [-np.inf, np.inf]])) def test_1D_rowvar(self): assert_allclose(cov(self.x3), cov(self.x3, rowvar=0)) y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0)) def test_1D_variance(self): assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) def test_fweights(self): assert_allclose(cov(self.x2, fweights=self.frequencies), cov(self.x2_repeats)) assert_allclose(cov(self.x1, fweights=self.frequencies), self.res2) assert_allclose(cov(self.x1, fweights=self.unit_frequencies), self.res1) nonint = self.frequencies + 0.5 assert_raises(TypeError, cov, self.x1, fweights=nonint) f = np.ones((2, 3), dtype=np.integer) assert_raises(RuntimeError, cov, self.x1, fweights=f) f = np.ones(2, dtype=np.integer) assert_raises(RuntimeError, cov, self.x1, fweights=f) f = -1 * np.ones(3, dtype=np.integer) assert_raises(ValueError, cov, self.x1, fweights=f) def test_aweights(self): assert_allclose(cov(self.x1, aweights=self.weights), self.res3) assert_allclose(cov(self.x1, aweights=3.0 * self.weights), cov(self.x1, aweights=self.weights)) assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) w = np.ones((2, 3)) assert_raises(RuntimeError, cov, self.x1, aweights=w) w = np.ones(2) assert_raises(RuntimeError, cov, self.x1, aweights=w) w = -1.0 * np.ones(3) assert_raises(ValueError, cov, self.x1, aweights=w) def test_unit_fweights_and_aweights(self): assert_allclose(cov(self.x2, fweights=self.frequencies, aweights=self.unit_weights), 
cov(self.x2_repeats)) assert_allclose(cov(self.x1, fweights=self.frequencies, aweights=self.unit_weights), self.res2) assert_allclose(cov(self.x1, fweights=self.unit_frequencies, aweights=self.unit_weights), self.res1) assert_allclose(cov(self.x1, fweights=self.unit_frequencies, aweights=self.weights), self.res3) assert_allclose(cov(self.x1, fweights=self.unit_frequencies, aweights=3.0 * self.weights), cov(self.x1, aweights=self.weights)) assert_allclose(cov(self.x1, fweights=self.unit_frequencies, aweights=self.unit_weights), self.res1) class Test_I0(object): def test_simple(self): assert_almost_equal( i0(0.5), np.array(1.0634833707413234)) A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549]) assert_almost_equal( i0(A), np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])) B = np.array([[0.827002, 0.99959078], [0.89694769, 0.39298162], [0.37954418, 0.05206293], [0.36465447, 0.72446427], [0.48164949, 0.50324519]]) assert_almost_equal( i0(B), np.array([[1.17843223, 1.26583466], [1.21147086, 1.03898290], [1.03633899, 1.00067775], [1.03352052, 1.13557954], [1.05884290, 1.06432317]])) class TestKaiser(object): def test_simple(self): assert_(np.isfinite(kaiser(1, 1.0))) assert_almost_equal(kaiser(0, 1.0), np.array([])) assert_almost_equal(kaiser(2, 1.0), np.array([0.78984831, 0.78984831])) assert_almost_equal(kaiser(5, 1.0), np.array([0.78984831, 0.94503323, 1., 0.94503323, 0.78984831])) assert_almost_equal(kaiser(5, 1.56789), np.array([0.58285404, 0.88409679, 1., 0.88409679, 0.58285404])) def test_int_beta(self): kaiser(3, 4) class TestMsort(object): def test_simple(self): A = np.array([[0.44567325, 0.79115165, 0.54900530], [0.36844147, 0.37325583, 0.96098397], [0.64864341, 0.52929049, 0.39172155]]) assert_almost_equal( msort(A), np.array([[0.36844147, 0.37325583, 0.39172155], [0.44567325, 0.52929049, 0.54900530], [0.64864341, 0.79115165, 0.96098397]])) class TestMeshgrid(object): def test_simple(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) assert_array_equal(X, np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])) assert_array_equal(Y, np.array([[4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]])) def test_single_input(self): [X] = meshgrid([1, 2, 3, 4]) assert_array_equal(X, np.array([1, 2, 3, 4])) def test_no_input(self): args = [] assert_array_equal([], meshgrid(*args)) assert_array_equal([], meshgrid(*args, copy=False)) def test_indexing(self): x = [1, 2, 3] y = [4, 5, 6, 7] [X, Y] = meshgrid(x, y, indexing='ij') assert_array_equal(X, np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])) assert_array_equal(Y, np.array([[4, 5, 6, 7], [4, 5, 6, 7], [4, 5, 6, 7]])) # Test expected shapes: z = [8, 9] assert_(meshgrid(x, y)[0].shape == (4, 3)) assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') def test_sparse(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments # Regression test for issue #4755: # https://github.com/numpy/numpy/issues/4755 assert_raises(TypeError, meshgrid, [1, 2, 3], [4, 5, 6, 7], indices='ij') def test_return_type(self): # Test for appropriate dtype in returned arrays. 
# Regression test for issue #5297 # https://github.com/numpy/numpy/issues/5297 x = np.arange(0, 10, dtype=np.float32) y = np.arange(10, 20, dtype=np.float64) X, Y = np.meshgrid(x,y) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # copy X, Y = np.meshgrid(x,y, copy=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # sparse X, Y = np.meshgrid(x,y, sparse=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) def test_writeback(self): # Issue 8561 X = np.array([1.1, 2.2]) Y = np.array([3.3, 4.4]) x, y = np.meshgrid(X, Y, sparse=False, copy=True) x[0, :] = 0 assert_equal(x[0, :], 0) assert_equal(x[1, :], X) class TestPiecewise(object): def test_simple(self): # Condition is single bool list x = piecewise([0, 0], [True, False], [1]) assert_array_equal(x, [1, 0]) # List of conditions: single bool list x = piecewise([0, 0], [[True, False]], [1]) assert_array_equal(x, [1, 0]) # Conditions is single bool array x = piecewise([0, 0], np.array([True, False]), [1]) assert_array_equal(x, [1, 0]) # Condition is single int array x = piecewise([0, 0], np.array([1, 0]), [1]) assert_array_equal(x, [1, 0]) # List of conditions: int array x = piecewise([0, 0], [np.array([1, 0])], [1]) assert_array_equal(x, [1, 0]) x = piecewise([0, 0], [[False, True]], [lambda x:-1]) assert_array_equal(x, [0, -1]) assert_raises_regex(ValueError, '1 or 2 functions are expected', piecewise, [0, 0], [[False, True]], []) assert_raises_regex(ValueError, '1 or 2 functions are expected', piecewise, [0, 0], [[False, True]], [1, 2, 3]) def test_two_conditions(self): x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) assert_array_equal(x, [3, 4]) def test_scalar_domains_three_conditions(self): x = piecewise(3, [True, False, False], [4, 2, 0]) assert_equal(x, 4) def test_default(self): # No value specified for x[1], should be 0 x = piecewise([1, 2], [True, False], [2]) assert_array_equal(x, [2, 0]) # Should set x[1] to 3 x = piecewise([1, 2], [True, False], [2, 3]) assert_array_equal(x, [2, 3]) def test_0d(self): x = np.array(3) y = piecewise(x, x > 3, [4, 0]) assert_(y.ndim == 0) assert_(y == 0) x = 5 y = piecewise(x, [True, False], [1, 0]) assert_(y.ndim == 0) assert_(y == 1) # With 3 ranges (It was failing, before) y = piecewise(x, [False, False, True], [1, 2, 3]) assert_array_equal(y, 3) def test_0d_comparison(self): x = 3 y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. 
assert_equal(y, 4) # With 3 ranges (It was failing, before) x = 4 y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) assert_array_equal(y, 2) assert_raises_regex(ValueError, '2 or 3 functions are expected', piecewise, x, [x <= 3, x > 3], [1]) assert_raises_regex(ValueError, '2 or 3 functions are expected', piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) def test_0d_0d_condition(self): x = np.array(3) c = np.array(x > 3) y = piecewise(x, [c], [1, 2]) assert_equal(y, 2) def test_multidimensional_extrafunc(self): x = np.array([[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) assert_array_equal(y, np.array([[-1., -1., -1.], [3., 3., 1.]])) class TestBincount(object): def test_simple(self): y = np.bincount(np.arange(4)) assert_array_equal(y, np.ones(4)) def test_simple2(self): y = np.bincount(np.array([1, 5, 2, 4, 1])) assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) def test_simple_weight(self): x = np.arange(4) w = np.array([0.2, 0.3, 0.5, 0.1]) y = np.bincount(x, w) assert_array_equal(y, w) def test_simple_weight2(self): x = np.array([1, 2, 4, 5, 2]) w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) y = np.bincount(x, w) assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) def test_with_minlength(self): x = np.array([0, 1, 0, 1, 1]) y = np.bincount(x, minlength=3) assert_array_equal(y, np.array([2, 3, 0])) x = [] y = np.bincount(x, minlength=0) assert_array_equal(y, np.array([])) def test_with_minlength_smaller_than_maxvalue(self): x = np.array([0, 1, 1, 2, 2, 3, 3]) y = np.bincount(x, minlength=2) assert_array_equal(y, np.array([1, 2, 2, 2])) y = np.bincount(x, minlength=0) assert_array_equal(y, np.array([1, 2, 2, 2])) def test_with_minlength_and_weights(self): x = np.array([1, 2, 4, 5, 2]) w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) y = np.bincount(x, w, 8) assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) def test_empty(self): x = np.array([], dtype=int) y = np.bincount(x) assert_array_equal(x, y) def test_empty_with_minlength(self): x = np.array([], dtype=int) y = np.bincount(x, minlength=5) assert_array_equal(y, np.zeros(5, dtype=int)) def test_with_incorrect_minlength(self): x = np.array([], dtype=int) assert_raises_regex(TypeError, "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, "must not be negative", lambda: np.bincount(x, minlength=-1)) x = np.arange(5) assert_raises_regex(TypeError, "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, "must not be negative", lambda: np.bincount(x, minlength=-1)) @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") def test_dtype_reference_leaks(self): # gh-6805 intp_refcount = sys.getrefcount(np.dtype(np.intp)) double_refcount = sys.getrefcount(np.dtype(np.double)) for j in range(10): np.bincount([1, 2, 3]) assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) for j in range(10): np.bincount([1, 2, 3], [4, 5, 6]) assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) class TestInterp(object): def test_exceptions(self): assert_raises(ValueError, interp, 0, [], []) assert_raises(ValueError, interp, 0, [0], [1, 2]) assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) assert_raises(ValueError, interp, 0, [], [], period=360) assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) def 
test_basic(self): x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) x0 = np.linspace(0, 1, 50) assert_almost_equal(np.interp(x0, x, y), x0) def test_right_left_behavior(self): # Needs range of sizes to test different code paths. # size ==1 is special cased, 1 < size < 5 is linear search, and # size >= 5 goes through local search and possibly binary search. for size in range(1, 10): xp = np.arange(size, dtype=np.double) yp = np.ones(size, dtype=np.double) incpts = np.array([-1, 0, size - 1, size], dtype=np.double) decpts = incpts[::-1] incres = interp(incpts, xp, yp) decres = interp(decpts, xp, yp) inctgt = np.array([1, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0) decres = interp(decpts, xp, yp, left=0) inctgt = np.array([0, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, right=2) decres = interp(decpts, xp, yp, right=2) inctgt = np.array([1, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0, right=2) decres = interp(decpts, xp, yp, left=0, right=2) inctgt = np.array([0, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) def test_scalar_interpolation_point(self): x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) x0 = 0 assert_almost_equal(np.interp(x0, x, y), x0) x0 = .3 assert_almost_equal(np.interp(x0, x, y), x0) x0 = np.float32(.3) assert_almost_equal(np.interp(x0, x, y), x0) x0 = np.float64(.3) assert_almost_equal(np.interp(x0, x, y), x0) x0 = np.nan assert_almost_equal(np.interp(x0, x, y), x0) def test_complex_interp(self): # test complex interpolation x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j x0 = 0.3 y0 = x0 + (1+x0)*1.0j assert_almost_equal(np.interp(x0, x, y), y0) # test complex left and right x0 = -1 left = 2 + 3.0j assert_almost_equal(np.interp(x0, x, y, left=left), left) x0 = 2.0 right = 2 + 3.0j assert_almost_equal(np.interp(x0, x, y, right=right), right) # test complex periodic x = [-180, -170, -185, 185, -10, -5, 0, 365] xp = [190, -190, 350, -350] fp = [5+1.0j, 10+2j, 3+3j, 4+4j] y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, 3.5+3.5j, 3.75+3.75j] assert_almost_equal(np.interp(x, xp, fp, period=360), y) def test_zero_dimensional_interpolation_point(self): x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) x0 = np.array(.3) assert_almost_equal(np.interp(x0, x, y), x0) x0 = np.array(.3, dtype=object) assert_almost_equal(np.interp(x0, x, y), .3) def test_if_len_x_is_small(self): xp = np.arange(0, 10, 0.0001) fp = np.sin(xp) assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) def test_period(self): x = [-180, -170, -185, 185, -10, -5, 0, 365] xp = [190, -190, 350, -350] fp = [5, 10, 3, 4] y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] assert_almost_equal(np.interp(x, xp, fp, period=360), y) x = np.array(x, order='F').reshape(2, -1) y = np.array(y, order='C').reshape(2, -1) assert_almost_equal(np.interp(x, xp, fp, period=360), y) def compare_results(res, desired): for i in range(len(desired)): assert_array_equal(res[i], desired[i]) class TestPercentile(object): def test_basic(self): x = np.arange(8) * 0.5 assert_equal(np.percentile(x, 0), 0.) 
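# Aside: a hedged sketch of the default 'linear' interpolation rule that
# the percentile assertions around here rely on -- for a sorted sample of
# size n, the qth percentile is read off at fractional index
# q / 100 * (n - 1), interpolating linearly between neighbours.
import numpy as np
_x = np.arange(8) * 0.5  # 0.0, 0.5, ..., 3.5, so n = 8
# q = 50 gives index 3.5, i.e. halfway between _x[3] and _x[4]:
assert np.percentile(_x, 50) == (_x[3] + _x[4]) / 2 == 1.75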
assert_equal(np.percentile(x, 100), 3.5) assert_equal(np.percentile(x, 50), 1.75) x[1] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(x, 0), np.nan) assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) assert_(w[0].category is RuntimeWarning) def test_api(self): d = np.ones(5) np.percentile(d, 5, None, None, False) np.percentile(d, 5, None, None, False, 'linear') o = np.ones((1,)) np.percentile(d, 5, None, o, False, 'linear') def test_2D(self): x = np.array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]]) assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) def test_linear(self): # Test defaults assert_equal(np.percentile(range(10), 50), 4.5) # explicitly specify interpolation_method 'linear' (the default) assert_equal(np.percentile(range(10), 50, interpolation='linear'), 4.5) def test_lower_higher(self): # interpolation_method 'lower'/'higher' assert_equal(np.percentile(range(10), 50, interpolation='lower'), 4) assert_equal(np.percentile(range(10), 50, interpolation='higher'), 5) def test_midpoint(self): assert_equal(np.percentile(range(10), 51, interpolation='midpoint'), 4.5) assert_equal(np.percentile(range(11), 51, interpolation='midpoint'), 5.5) assert_equal(np.percentile(range(11), 50, interpolation='midpoint'), 5) def test_nearest(self): assert_equal(np.percentile(range(10), 51, interpolation='nearest'), 5) assert_equal(np.percentile(range(10), 49, interpolation='nearest'), 4) def test_sequence(self): x = np.arange(8) * 0.5 assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) def test_axis(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) # ensure qth axis is always first as with np.array(old_percentile(..)) x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) assert_equal(np.percentile(x, (25, 50)).shape, (2,)) assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) assert_equal( np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) assert_equal(np.percentile(x, (25, 50), interpolation="higher").shape, (2,)) assert_equal(np.percentile(x, (25, 50, 75), interpolation="higher").shape, (3,)) assert_equal(np.percentile(x, (25, 50), axis=0, interpolation="higher").shape, (2, 4, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=1, interpolation="higher").shape, (2, 3, 5, 6)) assert_equal(np.percentile(x, (25, 50), axis=2, interpolation="higher").shape, (2, 3, 4, 6)) assert_equal(np.percentile(x, (25, 50), axis=3, interpolation="higher").shape, (2, 3, 4, 5)) assert_equal(np.percentile(x, (25, 50, 75), axis=1, interpolation="higher").shape, (3, 3, 5, 6)) def test_scalar_q(self): # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) r1 = 
np.array([1.5, 5.5, 9.5]) assert_almost_equal(np.percentile(x, 50, axis=1), r1) assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) out = np.empty(1) assert_equal(np.percentile(x, 50, out=out), 5.5) assert_equal(out, 5.5) out = np.empty(4) assert_equal(np.percentile(x, 50, axis=0, out=out), r0) assert_equal(out, r0) out = np.empty(3) assert_equal(np.percentile(x, 50, axis=1, out=out), r1) assert_equal(out, r1) # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, interpolation='lower', axis=0) assert_equal(c0, r0) assert_equal(c0.shape, r0.shape) r1 = np.array([1., 5., 9.]) c1 = np.percentile(x, 50, interpolation='lower', axis=1) assert_almost_equal(c1, r1) assert_equal(c1.shape, r1.shape) out = np.empty((), dtype=x.dtype) c = np.percentile(x, 50, interpolation='lower', out=out) assert_equal(c, 5) assert_equal(out, 5) out = np.empty(4, dtype=x.dtype) c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) out = np.empty(3, dtype=x.dtype) c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) assert_equal(c, r1) assert_equal(out, r1) def test_exception(self): assert_raises(ValueError, np.percentile, [1, 2], 56, interpolation='foobar') assert_raises(ValueError, np.percentile, [1], 101) assert_raises(ValueError, np.percentile, [1], -1) assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) def test_percentile_list(self): assert_equal(np.percentile([1, 2, 3], 0), 1) def test_percentile_out(self): x = np.array([1, 2, 3]) y = np.zeros((3,)) p = (1, 2, 3) np.percentile(x, p, out=y) assert_equal(y, np.percentile(x, p)) x = np.array([[1, 2, 3], [4, 5, 6]]) y = np.zeros((3, 3)) np.percentile(x, p, axis=0, out=y) assert_equal(y, np.percentile(x, p, axis=0)) y = np.zeros((3, 2)) np.percentile(x, p, axis=1, out=y) assert_equal(y, np.percentile(x, p, axis=1)) x = np.arange(12).reshape(3, 4) # q.dim > 1, float r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) out = np.empty((2, 4)) assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) assert_equal(out, r0) r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) out = np.empty((2, 3)) assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) assert_equal(out, r1) # q.dim > 1, int r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) out = np.empty((2, 4), dtype=x.dtype) c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) r1 = np.array([[0, 4, 8], [1, 5, 9]]) out = np.empty((2, 3), dtype=x.dtype) c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) assert_equal(c, r1) assert_equal(out, r1) def test_percentile_empty_dim(self): # empty dims are preserved d = np.arange(11 * 2).reshape(11, 1, 2, 1) assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 
1)) assert_array_equal(np.percentile(d, 50, axis=2, interpolation='midpoint').shape, (11, 1, 1)) assert_array_equal(np.percentile(d, 50, axis=-2, interpolation='midpoint').shape, (11, 1, 1)) assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, (2, 1, 2, 1)) assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, (2, 11, 2, 1)) assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, (2, 11, 1, 1)) assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, (2, 11, 1, 2)) def test_percentile_no_overwrite(self): a = np.array([2, 3, 4, 1]) np.percentile(a, [50], overwrite_input=False) assert_equal(a, np.array([2, 3, 4, 1])) a = np.array([2, 3, 4, 1]) np.percentile(a, [50]) assert_equal(a, np.array([2, 3, 4, 1])) def test_no_p_overwrite(self): p = np.linspace(0., 100., num=5) np.percentile(np.arange(100.), p, interpolation="midpoint") assert_array_equal(p, np.linspace(0., 100., num=5)) p = np.linspace(0., 100., num=5).tolist() np.percentile(np.arange(100.), p, interpolation="midpoint") assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) def test_percentile_overwrite(self): a = np.array([2, 3, 4, 1]) b = np.percentile(a, [50], overwrite_input=True) assert_equal(b, np.array([2.5])) b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) assert_equal(b, np.array([2.5])) def test_extended_axis(self): o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) x = np.moveaxis(x, -1, 0) assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) x = x.swapaxes(0, 1).copy() assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) x = x.swapaxes(0, 1).copy() assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), np.percentile(x, [25, 60], axis=None)) assert_equal(np.percentile(x, [25, 60], axis=(0,)), np.percentile(x, [25, 60], axis=0)) d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], np.percentile(d[:,:,:, 0].flatten(), 25)) assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], np.percentile(d[:,:, 1,:].flatten(), [10, 90])) assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], np.percentile(d[:,:, 2,:].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], np.percentile(d[2,:,:,:].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], np.percentile(d[2, 1,:,:].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], np.percentile(d[2,:,:, 1].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], np.percentile(d[2,:, 2,:].flatten(), 25)) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) assert_raises(np.AxisError, np.percentile, d, axis=4, q=25) assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) # each of these refers to the same axis twice assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) def test_keepdims(self): d = np.ones((3, 5, 7, 11)) assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) 
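# Aside: an illustrative, hedged sketch of what keepdims buys -- every
# reduced axis is retained with length one, so the reduced result still
# broadcasts against the original array.
import numpy as np
_d = np.ones((3, 5, 7, 11))
_p = np.percentile(_d, 7, axis=(0, 1), keepdims=True)  # shape (1, 1, 7, 11)
assert (_d - _p).shape == _d.shape  # broadcasting works thanks to keepdims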
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), keepdims=True).shape, (2, 1, 1, 7, 1)) assert_equal(np.percentile(d, [1, 7], axis=(0, 3), keepdims=True).shape, (2, 1, 5, 7, 1)) def test_out(self): o = np.zeros((4,)) d = np.ones((3, 4)) assert_equal(np.percentile(d, 0, 0, out=o), o) assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o) o = np.zeros((3,)) assert_equal(np.percentile(d, 1, 1, out=o), o) assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o) o = np.zeros(()) assert_equal(np.percentile(d, 2, out=o), o) assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o) def test_out_nan(self): with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', RuntimeWarning) o = np.zeros((4,)) d = np.ones((3, 4)) d[2, 1] = np.nan assert_equal(np.percentile(d, 0, 0, out=o), o) assert_equal( np.percentile(d, 0, 0, interpolation='nearest', out=o), o) o = np.zeros((3,)) assert_equal(np.percentile(d, 1, 1, out=o), o) assert_equal( np.percentile(d, 1, 1, interpolation='nearest', out=o), o) o = np.zeros(()) assert_equal(np.percentile(d, 1, out=o), o) assert_equal( np.percentile(d, 1, interpolation='nearest', out=o), o) def test_nan_behavior(self): a = np.arange(24, dtype=float) a[2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, 0.3), np.nan) assert_equal(np.percentile(a, 0.3, axis=0), np.nan) assert_equal(np.percentile(a, [0.3, 0.6], axis=0), np.array([np.nan] * 2)) assert_(w[0].category is RuntimeWarning) assert_(w[1].category is RuntimeWarning) assert_(w[2].category is RuntimeWarning) a = np.arange(24, dtype=float).reshape(2, 3, 4) a[1, 2, 3] = np.nan a[1, 1, 2] = np.nan # no axis with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, 0.3), np.nan) assert_equal(np.percentile(a, 0.3).ndim, 0) assert_(w[0].category is RuntimeWarning) # axis0 zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) b[2, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, 0.3, 0), b) # axis0 not zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 0) b[:, 2, 3] = np.nan b[:, 1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, [0.3, 0.6], 0), b) # axis1 zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) b[1, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, 0.3, 1), b) # axis1 not zerod b = np.percentile( np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) b[:, 1, 3] = np.nan b[:, 1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, [0.3, 0.6], 1), b) # axis02 zerod b = np.percentile( np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) b[1] = np.nan b[2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', 
RuntimeWarning) assert_equal(np.percentile(a, 0.3, (0, 2)), b) # axis02 not zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], (0, 2)) b[:, 1] = np.nan b[:, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) # axis02 not zerod with nearest interpolation b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], (0, 2), interpolation='nearest') b[:, 1] = np.nan b[:, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.percentile( a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) class TestMedian(object): def test_basic(self): a0 = np.array(1) a1 = np.arange(2) a2 = np.arange(6).reshape(2, 3) assert_equal(np.median(a0), 1) assert_allclose(np.median(a1), 0.5) assert_allclose(np.median(a2), 2.5) assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) assert_equal(np.median(a2, axis=1), [1, 4]) assert_allclose(np.median(a2, axis=None), 2.5) a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) a = np.array([0.0463301, 0.0444502, 0.141249]) assert_equal(a[0], np.median(a)) a = np.array([0.0444502, 0.141249, 0.0463301]) assert_equal(a[-1], np.median(a)) # check array scalar result assert_equal(np.median(a).ndim, 0) a[1] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a).ndim, 0) assert_(w[0].category is RuntimeWarning) def test_axis_keyword(self): a3 = np.array([[2, 3], [0, 1], [6, 7], [4, 5]]) for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: orig = a.copy() np.median(a, axis=None) for ax in range(a.ndim): np.median(a, axis=ax) assert_array_equal(a, orig) assert_allclose(np.median(a3, axis=0), [3, 4]) assert_allclose(np.median(a3.T, axis=1), [3, 4]) assert_allclose(np.median(a3), 3.5) assert_allclose(np.median(a3, axis=None), 3.5) assert_allclose(np.median(a3.T), 3.5) def test_overwrite_keyword(self): a3 = np.array([[2, 3], [0, 1], [6, 7], [4, 5]]) a0 = np.array(1) a1 = np.arange(2) a2 = np.arange(6).reshape(2, 3) assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) assert_allclose( np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) np.random.shuffle(a4.ravel()) assert_allclose(np.median(a4, axis=None), np.median(a4.copy(), axis=None, overwrite_input=True)) assert_allclose(np.median(a4, axis=0), np.median(a4.copy(), axis=0, overwrite_input=True)) assert_allclose(np.median(a4, axis=1), np.median(a4.copy(), axis=1, overwrite_input=True)) assert_allclose(np.median(a4, axis=2), np.median(a4.copy(), axis=2, overwrite_input=True)) def test_array_like(self): x = [1, 2, 3] assert_almost_equal(np.median(x), 2) x2 = [x] assert_almost_equal(np.median(x2), 2) assert_allclose(np.median(x2, axis=0), x) def test_subclass(self): # gh-3846 class MySubClass(np.ndarray): def __new__(cls, input_array, 
info=None): obj = np.asarray(input_array).view(cls) obj.info = info return obj def mean(self, axis=None, dtype=None, out=None): return -7 a = MySubClass([1, 2, 3]) assert_equal(np.median(a), -7) def test_out(self): o = np.zeros((4,)) d = np.ones((3, 4)) assert_equal(np.median(d, 0, out=o), o) o = np.zeros((3,)) assert_equal(np.median(d, 1, out=o), o) o = np.zeros(()) assert_equal(np.median(d, out=o), o) def test_out_nan(self): with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', RuntimeWarning) o = np.zeros((4,)) d = np.ones((3, 4)) d[2, 1] = np.nan assert_equal(np.median(d, 0, out=o), o) o = np.zeros((3,)) assert_equal(np.median(d, 1, out=o), o) o = np.zeros(()) assert_equal(np.median(d, out=o), o) def test_nan_behavior(self): a = np.arange(24, dtype=float) a[2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a), np.nan) assert_equal(np.median(a, axis=0), np.nan) assert_(w[0].category is RuntimeWarning) assert_(w[1].category is RuntimeWarning) a = np.arange(24, dtype=float).reshape(2, 3, 4) a[1, 2, 3] = np.nan a[1, 1, 2] = np.nan # no axis with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a), np.nan) assert_equal(np.median(a).ndim, 0) assert_(w[0].category is RuntimeWarning) # axis0 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) b[2, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a, 0), b) assert_equal(len(w), 1) # axis1 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) b[1, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a, 1), b) assert_equal(len(w), 1) # axis02 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) b[1] = np.nan b[2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a, (0, 2)), b) assert_equal(len(w), 1) def test_empty(self): # empty arrays a = np.array([], dtype=float) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a), np.nan) assert_(w[0].category is RuntimeWarning) # multiple dimensions a = np.array([], dtype=float, ndmin=3) # no axis with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a), np.nan) assert_(w[0].category is RuntimeWarning) # axis 0 and 1 b = np.array([], dtype=float, ndmin=2) assert_equal(np.median(a, axis=0), b) assert_equal(np.median(a, axis=1), b) # axis 2 b = np.array(np.nan, dtype=float, ndmin=2) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a, axis=2), b) assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.arange(7.) 
assert_(type(np.median(o.astype(object))), float) o[2] = np.nan assert_(type(np.median(o.astype(object))), float) def test_extended_axis(self): o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.median(x, axis=(0, 1)), np.median(o)) x = np.moveaxis(x, -1, 0) assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) x = x.swapaxes(0, 1).copy() assert_equal(np.median(x, axis=(0, -1)), np.median(o)) assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) assert_equal(np.median(d, axis=(0, 1, 2))[0], np.median(d[:,:,:, 0].flatten())) assert_equal(np.median(d, axis=(0, 1, 3))[1], np.median(d[:,:, 1,:].flatten())) assert_equal(np.median(d, axis=(3, 1, -4))[2], np.median(d[:,:, 2,:].flatten())) assert_equal(np.median(d, axis=(3, 1, 2))[2], np.median(d[2,:,:,:].flatten())) assert_equal(np.median(d, axis=(3, 2))[2, 1], np.median(d[2, 1,:,:].flatten())) assert_equal(np.median(d, axis=(1, -2))[2, 1], np.median(d[2,:,:, 1].flatten())) assert_equal(np.median(d, axis=(1, 3))[2, 2], np.median(d[2,:, 2,:].flatten())) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(np.AxisError, np.median, d, axis=-5) assert_raises(np.AxisError, np.median, d, axis=(0, -5)) assert_raises(np.AxisError, np.median, d, axis=4) assert_raises(np.AxisError, np.median, d, axis=(0, 4)) assert_raises(ValueError, np.median, d, axis=(1, 1)) def test_keepdims(self): d = np.ones((3, 5, 7, 11)) assert_equal(np.median(d, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) assert_equal(np.median(d, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) class TestAdd_newdoc_ufunc(object): def test_ufunc_arg(self): assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") def test_string_arg(self): assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) class TestAdd_newdoc(object): @dec.skipif(sys.flags.optimize == 2) def test_add_doc(self): # test np.add_newdoc tgt = "Current flat index into the array." assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) assert_(len(np.core.ufunc.identity.__doc__) > 300) assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) if __name__ == "__main__": run_module_suite()
132,980
37.00543
102
py
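The tests above pin down np.percentile and np.median semantics: axis reduction, out= buffers, keepdims, and NaN propagation. A minimal usage sketch of the behavior under test, assuming this vintage of NumPy where the keyword is still `interpolation` (later renamed to `method`); the expected values are the ones the tests assert:

import numpy as np

x = np.arange(12).reshape(3, 4)
np.percentile(x, 50)                         # 5.5 -- scalar over the flattened array
np.percentile(x, 50, axis=0)                 # array([4., 5., 6., 7.])
np.percentile(x, 50, axis=1, keepdims=True)  # shape (3, 1): reduced axis kept as length 1
np.percentile(x, 50, interpolation='lower')  # 5.0 -- selects a data point instead of averaging
np.median(x, axis=1)                         # array([1.5, 5.5, 9.5])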
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test__datasource.py
from __future__ import division, absolute_import, print_function import os import sys from tempfile import mkdtemp, mkstemp, NamedTemporaryFile from shutil import rmtree from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_raises, SkipTest, ) import numpy.lib._datasource as datasource if sys.version_info[0] >= 3: import urllib.request as urllib_request from urllib.parse import urlparse from urllib.error import URLError else: import urllib2 as urllib_request from urlparse import urlparse from urllib2 import URLError def urlopen_stub(url, data=None): '''Stub to replace urlopen for testing.''' if url == valid_httpurl(): tmpfile = NamedTemporaryFile(prefix='urltmp_') return tmpfile else: raise URLError('Name or service not known') # setup and teardown old_urlopen = None def setup(): global old_urlopen old_urlopen = urllib_request.urlopen urllib_request.urlopen = urlopen_stub def teardown(): urllib_request.urlopen = old_urlopen # A valid website for more robust testing http_path = 'http://www.google.com/' http_file = 'index.html' http_fakepath = 'http://fake.abc.web/site/' http_fakefile = 'fake.txt' malicious_files = ['/etc/shadow', '../../shadow', '..\\system.dat', 'c:\\windows\\system.dat'] magic_line = b'three is the magic number' # Utility functions used by many tests def valid_textfile(filedir): # Generate and return a valid temporary file. fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) os.close(fd) return path def invalid_textfile(filedir): # Generate and return an invalid filename. fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) os.close(fd) os.remove(path) return path def valid_httpurl(): return http_path+http_file def invalid_httpurl(): return http_fakepath+http_fakefile def valid_baseurl(): return http_path def invalid_baseurl(): return http_fakepath def valid_httpfile(): return http_file def invalid_httpfile(): return http_fakefile class TestDataSourceOpen(object): def setup(self): self.tmpdir = mkdtemp() self.ds = datasource.DataSource(self.tmpdir) def teardown(self): rmtree(self.tmpdir) del self.ds def test_ValidHTTP(self): fh = self.ds.open(valid_httpurl()) assert_(fh) fh.close() def test_InvalidHTTP(self): url = invalid_httpurl() assert_raises(IOError, self.ds.open, url) try: self.ds.open(url) except IOError as e: # Regression test for bug fixed in r4342. assert_(e.errno is None) def test_InvalidHTTPCacheURLError(self): assert_raises(URLError, self.ds._cache, invalid_httpurl()) def test_ValidFile(self): local_file = valid_textfile(self.tmpdir) fh = self.ds.open(local_file) assert_(fh) fh.close() def test_InvalidFile(self): invalid_file = invalid_textfile(self.tmpdir) assert_raises(IOError, self.ds.open, invalid_file) def test_ValidGzipFile(self): try: import gzip except ImportError: # We don't have the gzip capabilities to test. raise SkipTest # Test datasource's internal file_opener for Gzip files. filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') fp = gzip.open(filepath, 'w') fp.write(magic_line) fp.close() fp = self.ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) def test_ValidBz2File(self): try: import bz2 except ImportError: # We don't have the bz2 capabilities to test. raise SkipTest # Test datasource's internal file_opener for BZip2 files. 
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') fp = bz2.BZ2File(filepath, 'w') fp.write(magic_line) fp.close() fp = self.ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) class TestDataSourceExists(object): def setup(self): self.tmpdir = mkdtemp() self.ds = datasource.DataSource(self.tmpdir) def teardown(self): rmtree(self.tmpdir) del self.ds def test_ValidHTTP(self): assert_(self.ds.exists(valid_httpurl())) def test_InvalidHTTP(self): assert_equal(self.ds.exists(invalid_httpurl()), False) def test_ValidFile(self): # Test valid file in destpath tmpfile = valid_textfile(self.tmpdir) assert_(self.ds.exists(tmpfile)) # Test valid local file not in destpath localdir = mkdtemp() tmpfile = valid_textfile(localdir) assert_(self.ds.exists(tmpfile)) rmtree(localdir) def test_InvalidFile(self): tmpfile = invalid_textfile(self.tmpdir) assert_equal(self.ds.exists(tmpfile), False) class TestDataSourceAbspath(object): def setup(self): self.tmpdir = os.path.abspath(mkdtemp()) self.ds = datasource.DataSource(self.tmpdir) def teardown(self): rmtree(self.tmpdir) del self.ds def test_ValidHTTP(self): scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) local_path = os.path.join(self.tmpdir, netloc, upath.strip(os.sep).strip('/')) assert_equal(local_path, self.ds.abspath(valid_httpurl())) def test_ValidFile(self): tmpfile = valid_textfile(self.tmpdir) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only assert_equal(tmpfile, self.ds.abspath(tmpfilename)) # Test filename with complete path assert_equal(tmpfile, self.ds.abspath(tmpfile)) def test_InvalidHTTP(self): scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) invalidhttp = os.path.join(self.tmpdir, netloc, upath.strip(os.sep).strip('/')) assert_(invalidhttp != self.ds.abspath(valid_httpurl())) def test_InvalidFile(self): invalidfile = valid_textfile(self.tmpdir) tmpfile = valid_textfile(self.tmpdir) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only assert_(invalidfile != self.ds.abspath(tmpfilename)) # Test filename with complete path assert_(invalidfile != self.ds.abspath(tmpfile)) def test_sandboxing(self): tmpfile = valid_textfile(self.tmpdir) tmpfilename = os.path.split(tmpfile)[-1] tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) assert_(tmp_path(tmpfile).startswith(self.tmpdir)) assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) for fn in malicious_files: assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) assert_(tmp_path(fn).startswith(self.tmpdir)) def test_windows_os_sep(self): orig_os_sep = os.sep try: os.sep = '\\' self.test_ValidHTTP() self.test_ValidFile() self.test_InvalidHTTP() self.test_InvalidFile() self.test_sandboxing() finally: os.sep = orig_os_sep class TestRepositoryAbspath(object): def setup(self): self.tmpdir = os.path.abspath(mkdtemp()) self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) def teardown(self): rmtree(self.tmpdir) del self.repos def test_ValidHTTP(self): scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) local_path = os.path.join(self.repos._destpath, netloc, upath.strip(os.sep).strip('/')) filepath = self.repos.abspath(valid_httpfile()) assert_equal(local_path, filepath) def test_sandboxing(self): tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) for fn in malicious_files: 
assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) assert_(tmp_path(fn).startswith(self.tmpdir)) def test_windows_os_sep(self): orig_os_sep = os.sep try: os.sep = '\\' self.test_ValidHTTP() self.test_sandboxing() finally: os.sep = orig_os_sep class TestRepositoryExists(object): def setup(self): self.tmpdir = mkdtemp() self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) def teardown(self): rmtree(self.tmpdir) del self.repos def test_ValidFile(self): # Create local temp file tmpfile = valid_textfile(self.tmpdir) assert_(self.repos.exists(tmpfile)) def test_InvalidFile(self): tmpfile = invalid_textfile(self.tmpdir) assert_equal(self.repos.exists(tmpfile), False) def test_RemoveHTTPFile(self): assert_(self.repos.exists(valid_httpurl())) def test_CachedHTTPFile(self): localfile = valid_httpurl() # Create a locally cached temp file with an URL based # directory structure. This is similar to what Repository.open # would do. scheme, netloc, upath, pms, qry, frg = urlparse(localfile) local_path = os.path.join(self.repos._destpath, netloc) os.mkdir(local_path, 0o0700) tmpfile = valid_textfile(local_path) assert_(self.repos.exists(tmpfile)) class TestOpenFunc(object): def setup(self): self.tmpdir = mkdtemp() def teardown(self): rmtree(self.tmpdir) def test_DataSourceOpen(self): local_file = valid_textfile(self.tmpdir) # Test case where destpath is passed in fp = datasource.open(local_file, destpath=self.tmpdir) assert_(fp) fp.close() # Test case where default destpath is used fp = datasource.open(local_file) assert_(fp) fp.close() if __name__ == "__main__": run_module_suite()
10,330
28.601719
78
py
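test__datasource.py above covers numpy.lib._datasource.DataSource, which abstracts local files and URLs behind a common open/exists/abspath interface with a sandboxed cache directory. A short sketch under stated assumptions -- the cache path is hypothetical, and the URL fetch needs real network access (the tests themselves stub urlopen out):

import numpy.lib._datasource as datasource

ds = datasource.DataSource('/tmp/ds_cache')              # hypothetical destination directory
print(ds.abspath('http://www.google.com/index.html'))    # sandboxed local path under the cache
if ds.exists('http://www.google.com/index.html'):        # downloads into the cache if reachable
    fh = ds.open('http://www.google.com/index.html')
    fh.close()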
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_polynomial.py
from __future__ import division, absolute_import, print_function ''' >>> p = np.poly1d([1.,2,3]) >>> p poly1d([ 1., 2., 3.]) >>> print(p) 2 1 x + 2 x + 3 >>> q = np.poly1d([3.,2,1]) >>> q poly1d([ 3., 2., 1.]) >>> print(q) 2 3 x + 2 x + 1 >>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j])) 3 2 (1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j) >>> print(np.poly1d([-3, -2, -1])) 2 -3 x - 2 x - 1 >>> p(0) 3.0 >>> p(5) 38.0 >>> q(0) 1.0 >>> q(5) 86.0 >>> p * q poly1d([ 3., 8., 14., 8., 3.]) >>> p / q (poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667])) >>> p + q poly1d([ 4., 4., 4.]) >>> p - q poly1d([-2., 0., 2.]) >>> p ** 4 poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.]) >>> p(q) poly1d([ 9., 12., 16., 8., 6.]) >>> q(p) poly1d([ 3., 12., 32., 40., 34.]) >>> np.asarray(p) array([ 1., 2., 3.]) >>> len(p) 2 >>> p[0], p[1], p[2], p[3] (3.0, 2.0, 1.0, 0) >>> p.integ() poly1d([ 0.33333333, 1. , 3. , 0. ]) >>> p.integ(1) poly1d([ 0.33333333, 1. , 3. , 0. ]) >>> p.integ(5) poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. , 0. , 0. , 0. ]) >>> p.deriv() poly1d([ 2., 2.]) >>> p.deriv(2) poly1d([ 2.]) >>> q = np.poly1d([1.,2,3], variable='y') >>> print(q) 2 1 y + 2 y + 3 >>> q = np.poly1d([1.,2,3], variable='lambda') >>> print(q) 2 1 lambda + 2 lambda + 3 >>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1])) (poly1d([ 1., -1.]), poly1d([ 0.])) ''' import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs ) class TestDocs(object): def test_doctests(self): return rundocs() def test_poly(self): assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), [1, -3, -2, 6]) # From matlab docs A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) # Should produce real output for perfect conjugates assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, 1-2j, 1.+3.5j, 1-3.5j]))) assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) assert_(np.isrealobj(np.poly([1j, -1j]))) assert_(np.isrealobj(np.poly([1, -1]))) assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) np.random.seed(42) a = np.random.randn(100) + 1j*np.random.randn(100) assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) def test_roots(self): assert_array_equal(np.roots([1, 0, 0]), [0, 0]) def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 assert_equal(str(p), " 2\n" "3 x + 2 x + 1") p = np.poly1d([1, 2]) p[0] = 0 p[1] = 0 assert_equal(str(p), " \n0") def test_polyfit(self): c = np.array([3., 2., 1.]) x = np.linspace(0, 2, 7) y = np.polyval(c, x) err = [1, -1, 1, -1, 1, -1, 1] weights = np.arange(8, 1, -1)**2/7.0 # Check exception when too few points for variance estimate. Note that # the Bayesian estimate requires the number of data points to exceed # degree + 3. 
assert_raises(ValueError, np.polyfit, [0, 1, 3], [0, 1, 3], deg=0, cov=True) # check 1D case m, cov = np.polyfit(x, y+err, 2, cov=True) est = [3.8571, 0.2857, 1.619] assert_almost_equal(est, m, decimal=4) val0 = [[2.9388, -5.8776, 1.6327], [-5.8776, 12.7347, -4.2449], [1.6327, -4.2449, 2.3220]] assert_almost_equal(val0, cov, decimal=4) m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) val = [[8.7929, -10.0103, 0.9756], [-10.0103, 13.6134, -1.8178], [0.9756, -1.8178, 0.6674]] assert_almost_equal(val, cov2, decimal=4) # check 2D (n,1) case y = y[:, np.newaxis] c = c[:, np.newaxis] assert_almost_equal(c, np.polyfit(x, y, 2)) # check 2D (n,2) case yy = np.concatenate((y, y), axis=1) cc = np.concatenate((c, c), axis=1) assert_almost_equal(cc, np.polyfit(x, yy, 2)) m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) assert_almost_equal(est, m[:, 0], decimal=4) assert_almost_equal(est, m[:, 1], decimal=4) assert_almost_equal(val0, cov[:, :, 0], decimal=4) assert_almost_equal(val0, cov[:, :, 1], decimal=4) def test_objects(self): from decimal import Decimal p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) p2 = p * Decimal('1.333333333333333') assert_(p2[1] == Decimal("3.9999999999999990")) p2 = p.deriv() assert_(p2[1] == Decimal('8.0')) p2 = p.integ() assert_(p2[3] == Decimal("1.333333333333333333333333333")) assert_(p2[2] == Decimal('1.5')) assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) p = np.poly([Decimal(1), Decimal(2)]) assert_equal(np.poly([Decimal(1), Decimal(2)]), [1, Decimal(-3), Decimal(2)]) def test_complex(self): p = np.poly1d([3j, 2j, 1j]) p2 = p.integ() assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) p2 = p.deriv() assert_((p2.coeffs == [6j, 2j]).all()) def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) assert_( (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) def test_zero_dims(self): try: np.poly(np.zeros((0, 0))) except ValueError: pass def test_poly_int_overflow(self): """ Regression test for gh-5096. """ v = np.arange(1, 21) assert_almost_equal(np.poly(v), np.poly(np.diag(v))) def test_poly_eq(self): p = np.poly1d([1, 2, 3]) p2 = np.poly1d([1, 2, 4]) assert_equal(p == None, False) assert_equal(p != None, True) assert_equal(p == p, True) assert_equal(p == p2, False) assert_equal(p != p2, True) def test_poly_coeffs_immutable(self): """ Coefficients should not be modifiable """ p = np.poly1d([1, 2, 3]) try: # despite throwing an exception, this used to change state p.coeffs += 1 except Exception: pass assert_equal(p.coeffs, [1, 2, 3]) p.coeffs[2] += 10 assert_equal(p.coeffs, [1, 2, 3]) if __name__ == "__main__": run_module_suite()
7,152
28.557851
78
py
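test_polynomial.py above fixes the np.poly1d conventions: coefficients run from highest degree to lowest, and the object supports evaluation, arithmetic, and calculus. A compact sketch, with results matching the doctests:

import numpy as np

p = np.poly1d([1., 2., 3.])            # represents 1*x**2 + 2*x + 3
p(5)                                   # 38.0
p.deriv()                              # poly1d([2., 2.]), i.e. 2*x + 2
p.integ()                              # antiderivative, integration constant k=0
np.polyfit([0, 1, 2], [3, 6, 11], 2)   # recovers approximately [1., 2., 3.]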
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_financial.py
from __future__ import division, absolute_import, print_function from decimal import Decimal import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises ) class TestFinancial(object): def test_rate(self): assert_almost_equal( np.rate(10, 0, -3500, 10000), 0.1107, 4) def test_rate_decimal(self): rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000')) assert_equal(Decimal('0.1106908537142689284704528100'), rate) def test_irr(self): v = [-150000, 15000, 25000, 35000, 45000, 60000] assert_almost_equal(np.irr(v), 0.0524, 2) v = [-100, 0, 0, 74] assert_almost_equal(np.irr(v), -0.0955, 2) v = [-100, 39, 59, 55, 20] assert_almost_equal(np.irr(v), 0.28095, 2) v = [-100, 100, 0, -7] assert_almost_equal(np.irr(v), -0.0833, 2) v = [-100, 100, 0, 7] assert_almost_equal(np.irr(v), 0.06206, 2) v = [-5, 10.5, 1, -8, 1] assert_almost_equal(np.irr(v), 0.0886, 2) # Test that if there is no solution then np.irr returns nan # Fixes gh-6744 v = [-1, -2, -3] assert_equal(np.irr(v), np.nan) def test_pv(self): assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) def test_pv_decimal(self): assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), Decimal('-127128.1709461939327295222005')) def test_fv(self): assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924) def test_fv_decimal(self): assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0), Decimal('86609.36267304300040536731624')) def test_pmt(self): res = np.pmt(0.08 / 12, 5 * 12, 15000) tgt = -304.145914 assert_allclose(res, tgt) # Test the edge case where rate == 0.0 res = np.pmt(0.0, 5 * 12, 15000) tgt = -250.0 assert_allclose(res, tgt) # Test the case where we use broadcast and # the arguments passed in are arrays. res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000]) tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]]) assert_allclose(res, tgt) def test_pmt_decimal(self): res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000) tgt = Decimal('-304.1459143262052370338701494') assert_equal(res, tgt) # Test the edge case where rate == 0.0 res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000')) tgt = -250 assert_equal(res, tgt) # Test the case where we use broadcast and # the arguments passed in are arrays. 
res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]], [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')]) tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')], [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]]) # Cannot use the `assert_allclose` because it uses isfinite under the covers # which does not support the Decimal type # See issue: https://github.com/numpy/numpy/issues/9954 assert_equal(res[0][0], tgt[0][0]) assert_equal(res[0][1], tgt[0][1]) assert_equal(res[1][0], tgt[1][0]) assert_equal(res[1][1], tgt[1][1]) def test_ppmt(self): assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25) def test_ppmt_decimal(self): assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')), Decimal('-710.2541257864217612489830917')) # Two tests showing how Decimal is actually getting at a more exact result # .23 / 12 does not come out nicely as a float but does as a decimal def test_ppmt_special_rate(self): assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036) def test_ppmt_special_rate_decimal(self): # When rounded out to 8 decimal places like the float based test, this should not equal the same value # as the float, substituted for the decimal def raise_error_because_not_equal(): assert_equal( round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8), Decimal('-90238044.232277036')) assert_raises(AssertionError, raise_error_because_not_equal) assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), Decimal('-90238044.2322778884413969909')) def test_ipmt(self): assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67) def test_ipmt_decimal(self): result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000) assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667')) def test_nper(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), 21.54, 2) def test_nper2(self): assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), 50.0, 1) def test_npv(self): assert_almost_equal( np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), 122.89, 2) def test_npv_decimal(self): assert_equal( np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]), Decimal('122.894854950942692161628715')) def test_mirr(self): val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) val = [-120000, 39000, 30000, 21000, 37000, 46000] assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) val = [100, 200, -50, 300, -200] assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) val = [39000, 30000, 21000, 37000, 46000] assert_(np.isnan(np.mirr(val, 0.10, 0.12))) def test_mirr_decimal(self): val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'), Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'), Decimal('700'), Decimal('3000')] assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')), Decimal('0.066597175031553548874239618')) val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880')) val = [Decimal('100'), Decimal('200'), Decimal('-50'), Decimal('300'), Decimal('-200')] assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868')) val 
= [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12')))) def test_when(self): # begin assert_equal(np.rate(10, 20, -3500, 10000, 1), np.rate(10, 20, -3500, 10000, 'begin')) # end assert_equal(np.rate(10, 20, -3500, 10000), np.rate(10, 20, -3500, 10000, 'end')) assert_equal(np.rate(10, 20, -3500, 10000, 0), np.rate(10, 20, -3500, 10000, 'end')) # begin assert_equal(np.pv(0.07, 20, 12000, 0, 1), np.pv(0.07, 20, 12000, 0, 'begin')) # end assert_equal(np.pv(0.07, 20, 12000, 0), np.pv(0.07, 20, 12000, 0, 'end')) assert_equal(np.pv(0.07, 20, 12000, 0, 0), np.pv(0.07, 20, 12000, 0, 'end')) # begin assert_equal(np.fv(0.075, 20, -2000, 0, 1), np.fv(0.075, 20, -2000, 0, 'begin')) # end assert_equal(np.fv(0.075, 20, -2000, 0), np.fv(0.075, 20, -2000, 0, 'end')) assert_equal(np.fv(0.075, 20, -2000, 0, 0), np.fv(0.075, 20, -2000, 0, 'end')) # begin assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1), np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin')) # end assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0), np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0), np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) # begin assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1), np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin')) # end assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0), np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0), np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) # begin assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1), np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin')) # end assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0), np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0), np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) # begin assert_equal(np.nper(0.075, -2000, 0, 100000., 1), np.nper(0.075, -2000, 0, 100000., 'begin')) # end assert_equal(np.nper(0.075, -2000, 0, 100000.), np.nper(0.075, -2000, 0, 100000., 'end')) assert_equal(np.nper(0.075, -2000, 0, 100000., 0), np.nper(0.075, -2000, 0, 100000., 'end')) def test_decimal_with_when(self): """Test that decimals are still supported if the when argument is passed""" # begin assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')), np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin')) # end assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')), np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')), np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) # begin assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')), np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin')) # end assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')), np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) # begin assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')), np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin')) # end 
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')), np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')), np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) # begin assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0'), Decimal('1')), np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0'), 'begin')) # end assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0')), np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0'), 'end')) assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0'), Decimal('0')), np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), Decimal('0'), 'end')) # begin assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0'), Decimal('1')), np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0'), 'begin')) # end assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0')), np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0'), 'end')) assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0'), Decimal('0')), np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), Decimal('0'), 'end')) # begin assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), Decimal('1')).flat[0], np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), 'begin').flat[0]) # end assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0')).flat[0], np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), 'end').flat[0]) assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), Decimal('0')).flat[0], np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), Decimal('0'), 'end').flat[0]) def test_broadcast(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), [21.5449442, 20.76156441], 4) assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000), [-17.29165168, -16.66666667, -16.03647345, -15.40102862, -14.76028842], 4) assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000), [-74.998201, -75.62318601, -76.25337923, -76.88882405, -77.52956425], 4) assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0, [0, 0, 1, 'end', 'begin']), [-74.998201, -75.62318601, -75.62318601, -76.88882405, -76.88882405], 4) def test_broadcast_decimal(self): # Use almost equal because precision is tested in the explicit tests, this test is to ensure # broadcast with Decimal is not broken. 
assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'), Decimal('-15.40102862'), Decimal('-14.76028842')], 4) assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'), Decimal('-76.88882405'), Decimal('-77.52956425')], 4) assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'), Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']), [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'), Decimal('-76.88882405'), Decimal('-76.88882405')], 4) if __name__ == "__main__": run_module_suite()
17,168
48.621387
116
py
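test_financial.py above covers the financial routines that shipped inside NumPy at this version (they were later split out into the separate numpy-financial package). The conventions: rates are per period, and cash paid out is negative. A brief sketch, with values matching the tests:

import numpy as np

np.pmt(0.08 / 12, 5 * 12, 15000)   # payment per month on a 15000 loan, ~ -304.15
np.pv(0.07, 20, 12000, 0)          # present value of 20 payments of 12000, ~ -127128.17
np.fv(0.075, 20, -2000, 0)         # future value of 20 deposits of 2000, ~ 86609.36
np.irr([-150000, 15000, 25000, 35000, 45000, 60000])   # internal rate of return, ~ 0.052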
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_format.py
from __future__ import division, absolute_import, print_function r''' Test the .npy file format. Set up: >>> import sys >>> from io import BytesIO >>> from numpy.lib import format >>> >>> scalars = [ ... np.uint8, ... np.int8, ... np.uint16, ... np.int16, ... np.uint32, ... np.int32, ... np.uint64, ... np.int64, ... np.float32, ... np.float64, ... np.complex64, ... np.complex128, ... object, ... ] >>> >>> basic_arrays = [] >>> >>> for scalar in scalars: ... for endian in '<>': ... dtype = np.dtype(scalar).newbyteorder(endian) ... basic = np.arange(15).astype(dtype) ... basic_arrays.extend([ ... np.array([], dtype=dtype), ... np.array(10, dtype=dtype), ... basic, ... basic.reshape((3,5)), ... basic.reshape((3,5)).T, ... basic.reshape((3,5))[::-1,::2], ... ]) ... >>> >>> Pdescr = [ ... ('x', 'i4', (2,)), ... ('y', 'f8', (2, 2)), ... ('z', 'u1')] >>> >>> >>> PbufferT = [ ... ([3,2], [[6.,4.],[6.,4.]], 8), ... ([4,3], [[7.,5.],[7.,5.]], 9), ... ] >>> >>> >>> Ndescr = [ ... ('x', 'i4', (2,)), ... ('Info', [ ... ('value', 'c16'), ... ('y2', 'f8'), ... ('Info2', [ ... ('name', 'S2'), ... ('value', 'c16', (2,)), ... ('y3', 'f8', (2,)), ... ('z3', 'u4', (2,))]), ... ('name', 'S2'), ... ('z2', 'b1')]), ... ('color', 'S2'), ... ('info', [ ... ('Name', 'U8'), ... ('Value', 'c16')]), ... ('y', 'f8', (2, 2)), ... ('z', 'u1')] >>> >>> >>> NbufferT = [ ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), ... ] >>> >>> >>> record_arrays = [ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), ... ] Test the magic string writing. >>> format.magic(1, 0) '\x93NUMPY\x01\x00' >>> format.magic(0, 0) '\x93NUMPY\x00\x00' >>> format.magic(255, 255) '\x93NUMPY\xff\xff' >>> format.magic(2, 5) '\x93NUMPY\x02\x05' Test the magic string reading. >>> format.read_magic(BytesIO(format.magic(1, 0))) (1, 0) >>> format.read_magic(BytesIO(format.magic(0, 0))) (0, 0) >>> format.read_magic(BytesIO(format.magic(255, 255))) (255, 255) >>> format.read_magic(BytesIO(format.magic(2, 5))) (2, 5) Test the header writing. >>> for arr in basic_arrays + record_arrays: ... f = BytesIO() ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it ... print(repr(f.getvalue())) ... 
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u4', 
'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>f4', 'fortran_order': 
True, 'shape': (5, 3)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" "v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n 
('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" ''' import sys import os import shutil import tempfile import warnings from io import BytesIO import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_array_equal, assert_raises, raises, dec, SkipTest ) from numpy.lib import format tempdir = None # Module-level setup. def setup_module(): global tempdir tempdir = tempfile.mkdtemp() def teardown_module(): global tempdir if tempdir is not None and os.path.isdir(tempdir): shutil.rmtree(tempdir) tempdir = None # Generate some basic arrays to test with. scalars = [ np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64, np.float32, np.float64, np.complex64, np.complex128, object, ] basic_arrays = [] for scalar in scalars: for endian in '<>': dtype = np.dtype(scalar).newbyteorder(endian) basic = np.arange(1500).astype(dtype) basic_arrays.extend([ # Empty np.array([], dtype=dtype), # Rank-0 np.array(10, dtype=dtype), # 1-D basic, # 2-D C-contiguous basic.reshape((30, 50)), # 2-D F-contiguous basic.reshape((30, 50)).T, # 2-D non-contiguous basic.reshape((30, 50))[::-1, ::2], ]) # More complicated record arrays. 
# This is the structure of the table used for plain objects: # # +-+-+-+ # |x|y|z| # +-+-+-+ # Structure of a plain array description: Pdescr = [ ('x', 'i4', (2,)), ('y', 'f8', (2, 2)), ('z', 'u1')] # A plain list of tuples with values for testing: PbufferT = [ # x y z ([3, 2], [[6., 4.], [6., 4.]], 8), ([4, 3], [[7., 5.], [7., 5.]], 9), ] # This is the structure of the table used for nested objects (DON'T PANIC!): # # +-+---------------------------------+-----+----------+-+-+ # |x|Info |color|info |y|z| # | +-----+--+----------------+----+--+ +----+-----+ | | # | |value|y2|Info2 |name|z2| |Name|Value| | | # | | | +----+-----+--+--+ | | | | | | | # | | | |name|value|y3|z3| | | | | | | | # +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ # # The corresponding nested array description: Ndescr = [ ('x', 'i4', (2,)), ('Info', [ ('value', 'c16'), ('y2', 'f8'), ('Info2', [ ('name', 'S2'), ('value', 'c16', (2,)), ('y3', 'f8', (2,)), ('z3', 'u4', (2,))]), ('name', 'S2'), ('z2', 'b1')]), ('color', 'S2'), ('info', [ ('Name', 'U8'), ('Value', 'c16')]), ('y', 'f8', (2, 2)), ('z', 'u1')] NbufferT = [ # x Info color info y z # value y2 Info2 name z2 Name Value # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), ] record_arrays = [ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), ] #BytesIO that reads a random number of bytes at a time class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random size = random.randint(1, size) return super(BytesIOSRandomSize, self).read(size) def roundtrip(arr): f = BytesIO() format.write_array(f, arr) f2 = BytesIO(f.getvalue()) arr2 = format.read_array(f2) return arr2 def roundtrip_randsize(arr): f = BytesIO() format.write_array(f, arr) f2 = BytesIOSRandomSize(f.getvalue()) arr2 = format.read_array(f2) return arr2 def roundtrip_truncated(arr): f = BytesIO() format.write_array(f, arr) #BytesIO is one byte short f2 = BytesIO(f.getvalue()[0:-1]) arr2 = format.read_array(f2) return arr2 def assert_equal_(o1, o2): assert_(o1 == o2) def test_roundtrip(): for arr in basic_arrays + record_arrays: arr2 = roundtrip(arr) yield assert_array_equal, arr, arr2 def test_roundtrip_randsize(): for arr in basic_arrays + record_arrays: if arr.dtype != object: arr2 = roundtrip_randsize(arr) yield assert_array_equal, arr, arr2 def test_roundtrip_truncated(): for arr in basic_arrays: if arr.dtype != object: yield assert_raises, ValueError, roundtrip_truncated, arr def test_long_str(): # check items larger than internal buffer size, gh-4027 long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1))) long_str_arr2 = roundtrip(long_str_arr) assert_array_equal(long_str_arr, long_str_arr2) @dec.slow def test_memmap_roundtrip(): # Fixme: test crashes nose on windows. if not (sys.platform == 'win32' or sys.platform == 'cygwin'): for arr in basic_arrays + record_arrays: if arr.dtype.hasobject: # Skip these since they can't be mmap'ed. continue # Write it out normally and through mmap. 
nfn = os.path.join(tempdir, 'normal.npy') mfn = os.path.join(tempdir, 'memmap.npy') fp = open(nfn, 'wb') try: format.write_array(fp, arr) finally: fp.close() fortran_order = ( arr.flags.f_contiguous and not arr.flags.c_contiguous) ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, shape=arr.shape, fortran_order=fortran_order) ma[...] = arr del ma # Check that both of these files' contents are the same. fp = open(nfn, 'rb') normal_bytes = fp.read() fp.close() fp = open(mfn, 'rb') memmap_bytes = fp.read() fp.close() yield assert_equal_, normal_bytes, memmap_bytes # Check that reading the file using memmap works. ma = format.open_memmap(nfn, mode='r') del ma def test_compressed_roundtrip(): arr = np.random.rand(200, 200) npz_file = os.path.join(tempdir, 'compressed.npz') np.savez_compressed(npz_file, arr=arr) arr1 = np.load(npz_file)['arr'] assert_array_equal(arr, arr1) def test_python2_python3_interoperability(): if sys.version_info[0] >= 3: fname = 'win64python2.npy' else: fname = 'python3.npy' path = os.path.join(os.path.dirname(__file__), 'data', fname) data = np.load(path) assert_array_equal(data, np.ones(2)) def test_pickle_python2_python3(): # Test that loading object arrays saved on Python 2 works both on # Python 2 and Python 3 and vice versa data_dir = os.path.join(os.path.dirname(__file__), 'data') if sys.version_info[0] >= 3: xrange = range else: import __builtin__ xrange = __builtin__.xrange expected = np.array([None, xrange, u'\u512a\u826f', b'\xe4\xb8\x8d\xe8\x89\xaf'], dtype=object) for fname in ['py2-objarr.npy', 'py2-objarr.npz', 'py3-objarr.npy', 'py3-objarr.npz']: path = os.path.join(data_dir, fname) for encoding in ['bytes', 'latin1']: data_f = np.load(path, encoding=encoding) if fname.endswith('.npz'): data = data_f['x'] data_f.close() else: data = data_f if sys.version_info[0] >= 3: if encoding == 'latin1' and fname.startswith('py2'): assert_(isinstance(data[3], str)) assert_array_equal(data[:-1], expected[:-1]) # mojibake occurs assert_array_equal(data[-1].encode(encoding), expected[-1]) else: assert_(isinstance(data[3], bytes)) assert_array_equal(data, expected) else: assert_array_equal(data, expected) if sys.version_info[0] >= 3: if fname.startswith('py2'): if fname.endswith('.npz'): data = np.load(path) assert_raises(UnicodeError, data.__getitem__, 'x') data.close() data = np.load(path, fix_imports=False, encoding='latin1') assert_raises(ImportError, data.__getitem__, 'x') data.close() else: assert_raises(UnicodeError, np.load, path) assert_raises(ImportError, np.load, path, encoding='latin1', fix_imports=False) def test_pickle_disallow(): data_dir = os.path.join(os.path.dirname(__file__), 'data') path = os.path.join(data_dir, 'py2-objarr.npy') assert_raises(ValueError, np.load, path, allow_pickle=False, encoding='latin1') path = os.path.join(data_dir, 'py2-objarr.npz') f = np.load(path, allow_pickle=False, encoding='latin1') assert_raises(ValueError, f.__getitem__, 'x') path = os.path.join(tempdir, 'pickle-disabled.npy') assert_raises(ValueError, np.save, path, np.array([None], dtype=object), allow_pickle=False) def test_version_2_0(): f = BytesIO() # requires more than 2 byte for header dt = [(("%d" % i) * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) format.write_array(f, d, version=(2, 0)) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', UserWarning) format.write_array(f, d) assert_(w[0].category is UserWarning) # check alignment of data portion f.seek(0) header = f.readline() assert_(len(header) % 
format.ARRAY_ALIGN == 0) f.seek(0) n = format.read_array(f) assert_array_equal(d, n) # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.write_array, f, d, (1, 0)) @dec.slow def test_version_2_0_memmap(): # requires more than 2 byte for header dt = [(("%d" % i) * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) tf = tempfile.mktemp('', 'mmap', dir=tempdir) # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype, shape=d.shape, version=(1, 0)) ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, shape=d.shape, version=(2, 0)) ma[...] = d del ma with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', UserWarning) ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, shape=d.shape, version=None) assert_(w[0].category is UserWarning) ma[...] = d del ma ma = format.open_memmap(tf, mode='r') assert_array_equal(ma, d) def test_write_version(): f = BytesIO() arr = np.arange(1) # These should pass. format.write_array(f, arr, version=(1, 0)) format.write_array(f, arr) format.write_array(f, arr, version=None) format.write_array(f, arr) format.write_array(f, arr, version=(2, 0)) format.write_array(f, arr) # These should all fail. bad_versions = [ (1, 1), (0, 0), (0, 1), (2, 2), (255, 255), ] for version in bad_versions: try: format.write_array(f, arr, version=version) except ValueError: pass else: raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,)) bad_version_magic = [ b'\x93NUMPY\x01\x01', b'\x93NUMPY\x00\x00', b'\x93NUMPY\x00\x01', b'\x93NUMPY\x02\x00', b'\x93NUMPY\x02\x02', b'\x93NUMPY\xff\xff', ] malformed_magic = [ b'\x92NUMPY\x01\x00', b'\x00NUMPY\x01\x00', b'\x93numpy\x01\x00', b'\x93MATLB\x01\x00', b'\x93NUMPY\x01', b'\x93NUMPY', b'', ] def test_read_magic(): s1 = BytesIO() s2 = BytesIO() arr = np.ones((3, 6), dtype=float) format.write_array(s1, arr, version=(1, 0)) format.write_array(s2, arr, version=(2, 0)) s1.seek(0) s2.seek(0) version1 = format.read_magic(s1) version2 = format.read_magic(s2) assert_(version1 == (1, 0)) assert_(version2 == (2, 0)) assert_(s1.tell() == format.MAGIC_LEN) assert_(s2.tell() == format.MAGIC_LEN) def test_read_magic_bad_magic(): for magic in malformed_magic: f = BytesIO(magic) yield raises(ValueError)(format.read_magic), f def test_read_version_1_0_bad_magic(): for magic in bad_version_magic + malformed_magic: f = BytesIO(magic) yield raises(ValueError)(format.read_array), f def test_bad_magic_args(): assert_raises(ValueError, format.magic, -1, 1) assert_raises(ValueError, format.magic, 256, 1) assert_raises(ValueError, format.magic, 1, -1) assert_raises(ValueError, format.magic, 1, 256) def test_large_header(): s = BytesIO() d = {'a': 1, 'b': 2} format.write_array_header_1_0(s, d) s = BytesIO() d = {'a': 1, 'b': 2, 'c': 'x'*256*256} assert_raises(ValueError, format.write_array_header_1_0, s, d) def test_read_array_header_1_0(): s = BytesIO() arr = np.ones((3, 6), dtype=float) format.write_array(s, arr, version=(1, 0)) s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_1_0(s) assert_(s.tell() % format.ARRAY_ALIGN == 0) assert_((shape, fortran, dtype) == ((3, 6), False, float)) def test_read_array_header_2_0(): s = BytesIO() arr = np.ones((3, 6), dtype=float) format.write_array(s, arr, version=(2, 0)) s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_2_0(s) assert_(s.tell() % format.ARRAY_ALIGN == 0) assert_((shape, fortran, dtype) == 
            ((3, 6), False, float))


def test_bad_header():
    # header of length less than 2 should fail
    s = BytesIO()
    assert_raises(ValueError, format.read_array_header_1_0, s)
    s = BytesIO(b'1')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # header shorter than indicated size should fail
    s = BytesIO(b'\x01\x00')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # headers without the exact keys required should fail
    d = {"shape": (1, 2),
         "descr": "x"}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)

    d = {"shape": (1, 2),
         "fortran_order": False,
         "descr": "x",
         "extrakey": -1}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)


def test_large_file_support():
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        raise SkipTest("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])
    except Exception:
        raise SkipTest("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)


@dec.slow
@dec.skipif(np.dtype(np.intp).itemsize < 8, "test requires 64-bit system")
def test_large_archive():
    # Regression test for saving arrays whose total element count does not
    # fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        raise SkipTest("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape)


if __name__ == "__main__":
    run_module_suite()
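# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): a minimal round
# trip through the numpy.lib.format helpers exercised above.  It assumes only
# write_array, read_magic, read_array_header_1_0 and read_array, all of which
# appear in the tests; the array values are arbitrary.

from io import BytesIO

import numpy as np
from numpy.lib import format as npy_format

buf = BytesIO()
npy_format.write_array(buf, np.arange(6).reshape(2, 3))

# The stream opens with the magic string and a (major, minor) version...
buf.seek(0)
assert npy_format.read_magic(buf) == (1, 0)

# ...followed by a header recording shape, memory order and dtype.
shape, fortran_order, dtype = npy_format.read_array_header_1_0(buf)
assert shape == (2, 3) and not fortran_order and dtype == np.dtype(int)

# read_array re-parses magic, header and data in one call.
buf.seek(0)
assert np.array_equal(npy_format.read_array(buf), np.arange(6).reshape(2, 3))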
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_shape_base.py
from __future__ import division, absolute_import, print_function

import numpy as np
import warnings

from numpy.lib.shape_base import (
    apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
    vsplit, dstack, column_stack, kron, tile, expand_dims,
    )
from numpy.testing import (
    run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
    assert_warns
    )


class TestApplyAlongAxis(object):
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_simple101(self):
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])

    def test_preserve_subclass(self):
        # this test is particularly malicious because matrix
        # refuses to become 1d
        def double(row):
            return row * 2

        m = np.matrix([[0, 1], [2, 3]])
        expected = np.matrix([[0, 2], [4, 6]])

        result = apply_along_axis(double, 0, m)
        assert_(isinstance(result, np.matrix))
        assert_array_equal(result, expected)

        result = apply_along_axis(double, 1, m)
        assert_(isinstance(result, np.matrix))
        assert_array_equal(result, expected)

    def test_subclass(self):
        class MinimalSubclass(np.ndarray):
            data = 1

        def minimal_function(array):
            return array.data

        a = np.zeros((6, 3)).view(MinimalSubclass)

        assert_array_equal(
            apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
            )

    def test_scalar_array(self, cls=np.ndarray):
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(np.sum, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

    def test_0d_array(self, cls=np.ndarray):
        def sum_to_0d(x):
            """ Sum x, returning a 0d array of the same class """
            assert_equal(x.ndim, 1)
            return np.squeeze(np.sum(x, keepdims=True))
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(sum_to_0d, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

        res = apply_along_axis(sum_to_0d, 1, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))

    def test_axis_insertion(self, cls=np.ndarray):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            return (x[::-1] * x[1:,None]).view(cls)

        a2d = np.arange(6*3).reshape((6, 3))

        # 2d insertion along first axis
        actual = apply_along_axis(f1to2, 0, a2d)
        expected = np.stack([
            f1to2(a2d[:,i]) for i in range(a2d.shape[1])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 2d insertion along last axis
        actual = apply_along_axis(f1to2, 1, a2d)
        expected = np.stack([
            f1to2(a2d[i,:]) for i in range(a2d.shape[0])
        ], axis=0).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 3d insertion along middle axis
        a3d = np.arange(6*5*3).reshape((6, 5, 3))

        actual = apply_along_axis(f1to2, 1, a3d)
        expected = np.stack([
            np.stack([
                f1to2(a3d[i,:,j]) for i in range(a3d.shape[0])
            ], axis=0)
            for j in range(a3d.shape[2])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

    def test_subclass_preservation(self):
        class MinimalSubclass(np.ndarray):
            pass
        self.test_scalar_array(MinimalSubclass)
        self.test_0d_array(MinimalSubclass)
        self.test_axis_insertion(MinimalSubclass)

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:,None]
return np.ma.masked_where(res%5==0, res) a = np.arange(6*3).reshape((6, 3)) res = apply_along_axis(f1to2, 0, a) assert_(isinstance(res, np.ma.masked_array)) assert_equal(res.ndim, 3) assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) def test_tuple_func1d(self): def sample_1d(x): return x[1], x[0] res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) assert_array_equal(res, np.array([[2, 1], [4, 3]])) def test_empty(self): # can't apply_along_axis when there's no chance to call the function def never_call(x): assert_(False) # should never be reached a = np.empty((0, 0)) assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) # but it's sometimes ok with some non-zero dimensions def empty_to_1(x): assert_(len(x) == 0) return 1 a = np.empty((10, 0)) actual = np.apply_along_axis(empty_to_1, 1, a) assert_equal(actual, np.ones(10)) assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) def test_with_iterable_object(self): # from issue 5248 d = np.array([ [set([1, 11]), set([2, 22]), set([3, 33])], [set([4, 44]), set([5, 55]), set([6, 66])] ]) actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) assert_equal(actual, expected) # issue 8642 - assert_equal doesn't detect this! for i in np.ndindex(actual.shape): assert_equal(type(actual[i]), type(expected[i])) class TestApplyOverAxes(object): def test_simple(self): a = np.arange(24).reshape(2, 3, 4) aoa_a = apply_over_axes(np.sum, a, [0, 2]) assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) class TestExpandDims(object): def test_functionality(self): s = (2, 3, 4, 5) a = np.empty(s) for axis in range(-5, 4): b = expand_dims(a, axis) assert_(b.shape[axis] == 1) assert_(np.squeeze(b).shape == s) def test_deprecations(self): # 2017-05-17, 1.13.0 s = (2, 3, 4, 5) a = np.empty(s) with warnings.catch_warnings(): warnings.simplefilter("always") assert_warns(DeprecationWarning, expand_dims, a, -6) assert_warns(DeprecationWarning, expand_dims, a, 5) class TestArraySplit(object): def test_integer_0_split(self): a = np.arange(10) assert_raises(ValueError, array_split, a, 0) def test_integer_split(self): a = np.arange(10) res = array_split(a, 1) desired = [np.arange(10)] compare_results(res, desired) res = array_split(a, 2) desired = [np.arange(5), np.arange(5, 10)] compare_results(res, desired) res = array_split(a, 3) desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] compare_results(res, desired) res = array_split(a, 4) desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), np.arange(8, 10)] compare_results(res, desired) res = array_split(a, 5) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 8), np.arange(8, 10)] compare_results(res, desired) res = array_split(a, 6) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 7) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 8) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 9) desired = [np.arange(2), 
                   np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)

    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

        # Same thing for manual splits:
        res = array_split(a, [0, 1, 2], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    def test_index_split_simple(self):
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)


class TestSplit(object):
    # The split function is essentially the same as array_split,
    # except that it tests whether splitting will result in an
    # equal split.  Only test for this case.
def test_equal_split(self): a = np.arange(10) res = split(a, 2) desired = [np.arange(5), np.arange(5, 10)] compare_results(res, desired) def test_unequal_split(self): a = np.arange(10) assert_raises(ValueError, split, a, 3) class TestColumnStack(object): def test_non_iterable(self): assert_raises(TypeError, column_stack, 1) class TestDstack(object): def test_non_iterable(self): assert_raises(TypeError, dstack, 1) def test_0D_array(self): a = np.array(1) b = np.array(2) res = dstack([a, b]) desired = np.array([[[1, 2]]]) assert_array_equal(res, desired) def test_1D_array(self): a = np.array([1]) b = np.array([2]) res = dstack([a, b]) desired = np.array([[[1, 2]]]) assert_array_equal(res, desired) def test_2D_array(self): a = np.array([[1], [2]]) b = np.array([[1], [2]]) res = dstack([a, b]) desired = np.array([[[1, 1]], [[2, 2, ]]]) assert_array_equal(res, desired) def test_2D_array2(self): a = np.array([1, 2]) b = np.array([1, 2]) res = dstack([a, b]) desired = np.array([[[1, 1], [2, 2]]]) assert_array_equal(res, desired) # array_split has more comprehensive test of splitting. # only do simple test on hsplit, vsplit, and dsplit class TestHsplit(object): """Only testing for integer splits. """ def test_non_iterable(self): assert_raises(ValueError, hsplit, 1, 1) def test_0D_array(self): a = np.array(1) try: hsplit(a, 2) assert_(0) except ValueError: pass def test_1D_array(self): a = np.array([1, 2, 3, 4]) res = hsplit(a, 2) desired = [np.array([1, 2]), np.array([3, 4])] compare_results(res, desired) def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) res = hsplit(a, 2) desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] compare_results(res, desired) class TestVsplit(object): """Only testing for integer splits. """ def test_non_iterable(self): assert_raises(ValueError, vsplit, 1, 1) def test_0D_array(self): a = np.array(1) assert_raises(ValueError, vsplit, a, 2) def test_1D_array(self): a = np.array([1, 2, 3, 4]) try: vsplit(a, 2) assert_(0) except ValueError: pass def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) res = vsplit(a, 2) desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] compare_results(res, desired) class TestDsplit(object): # Only testing for integer splits. 
def test_non_iterable(self): assert_raises(ValueError, dsplit, 1, 1) def test_0D_array(self): a = np.array(1) assert_raises(ValueError, dsplit, a, 2) def test_1D_array(self): a = np.array([1, 2, 3, 4]) assert_raises(ValueError, dsplit, a, 2) def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) try: dsplit(a, 2) assert_(0) except ValueError: pass def test_3D_array(self): a = np.array([[[1, 2, 3, 4], [1, 2, 3, 4]], [[1, 2, 3, 4], [1, 2, 3, 4]]]) res = dsplit(a, 2) desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] compare_results(res, desired) class TestSqueeze(object): def test_basic(self): from numpy.random import rand a = rand(20, 10, 10, 1, 1) b = rand(20, 1, 10, 1, 20) c = rand(1, 1, 20, 10) assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) # Squeezing to 0-dim should still give an ndarray a = [[[1.5]]] res = np.squeeze(a) assert_equal(res, 1.5) assert_equal(res.ndim, 0) assert_equal(type(res), np.ndarray) class TestKron(object): def test_return_type(self): a = np.ones([2, 2]) m = np.asmatrix(a) assert_equal(type(kron(a, a)), np.ndarray) assert_equal(type(kron(m, m)), np.matrix) assert_equal(type(kron(a, m)), np.matrix) assert_equal(type(kron(m, a)), np.matrix) class myarray(np.ndarray): __array_priority__ = 0.0 ma = myarray(a.shape, a.dtype, a.data) assert_equal(type(kron(a, a)), np.ndarray) assert_equal(type(kron(ma, ma)), myarray) assert_equal(type(kron(a, ma)), np.ndarray) assert_equal(type(kron(ma, a)), myarray) class TestTile(object): def test_basic(self): a = np.array([0, 1, 2]) b = [[1, 2], [3, 4]] assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]) def test_tile_one_repetition_on_array_gh4679(self): a = np.arange(5) b = tile(a, 1) b += 2 assert_equal(a, np.arange(5)) def test_empty(self): a = np.array([[[]]]) b = np.array([[], []]) c = tile(b, 2).shape d = tile(a, (3, 2, 5)).shape assert_equal(c, (2, 0)) assert_equal(d, (3, 2, 0)) def test_kroncompare(self): from numpy.random import randint reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] for s in shape: b = randint(0, 10, size=s) for r in reps: a = np.ones(r, b.dtype) large = tile(b, r) klarge = kron(a, b) assert_equal(large, klarge) class TestMayShareMemory(object): def test_basic(self): d = np.ones((50, 60)) d2 = np.ones((30, 60, 6)) assert_(np.may_share_memory(d, d)) assert_(np.may_share_memory(d, d[::-1])) assert_(np.may_share_memory(d, d[::2])) assert_(np.may_share_memory(d, d[1:, ::-1])) assert_(not np.may_share_memory(d[::-1], d2)) assert_(not np.may_share_memory(d[::2], d2)) assert_(not np.may_share_memory(d[1:, ::-1], d2)) assert_(np.may_share_memory(d2[1:, ::-1], d2)) # Utility def compare_results(res, desired): for i in range(len(desired)): assert_array_equal(res[i], desired[i]) if __name__ == "__main__": run_module_suite()
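# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): the two
# behaviours the classes above probe most often, using only public numpy
# API.  apply_along_axis maps a 1-D function over slices of one axis, and
# array_split -- unlike split -- tolerates divisions that leave a remainder.

import numpy as np

a = np.arange(12).reshape(3, 4)
assert np.array_equal(np.apply_along_axis(np.sum, 0, a), a.sum(axis=0))

parts = np.array_split(np.arange(10), 3)     # split(a, 3) would raise here
assert [p.size for p in parts] == [4, 3, 3]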
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_mixins.py
from __future__ import division, absolute_import, print_function import numbers import operator import sys import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_raises ) PY2 = sys.version_info.major < 3 # NOTE: This class should be kept as an exact copy of the example from the # docstring for NDArrayOperatorsMixin. class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): def __init__(self, value): self.value = np.asarray(value) # One might also consider adding the built-in list type to this # list, to support operations like np.add(array_like, list) _HANDLED_TYPES = (np.ndarray, numbers.Number) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out = kwargs.get('out', ()) for x in inputs + out: # Only support operations with instances of _HANDLED_TYPES. # Use ArrayLike instead of type(self) for isinstance to # allow subclasses that don't override __array_ufunc__ to # handle ArrayLike objects. if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): return NotImplemented # Defer to the implementation of the ufunc on unwrapped values. inputs = tuple(x.value if isinstance(x, ArrayLike) else x for x in inputs) if out: kwargs['out'] = tuple( x.value if isinstance(x, ArrayLike) else x for x in out) result = getattr(ufunc, method)(*inputs, **kwargs) if type(result) is tuple: # multiple return values return tuple(type(self)(x) for x in result) elif method == 'at': # no return value return None else: # one return value return type(self)(result) def __repr__(self): return '%s(%r)' % (type(self).__name__, self.value) def wrap_array_like(result): if type(result) is tuple: return tuple(ArrayLike(r) for r in result) else: return ArrayLike(result) def _assert_equal_type_and_value(result, expected, err_msg=None): assert_equal(type(result), type(expected), err_msg=err_msg) if isinstance(result, tuple): assert_equal(len(result), len(expected), err_msg=err_msg) for result_item, expected_item in zip(result, expected): _assert_equal_type_and_value(result_item, expected_item, err_msg) else: assert_equal(result.value, expected.value, err_msg=err_msg) assert_equal(getattr(result.value, 'dtype', None), getattr(expected.value, 'dtype', None), err_msg=err_msg) _ALL_BINARY_OPERATORS = [ operator.lt, operator.le, operator.eq, operator.ne, operator.gt, operator.ge, operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv, # TODO: test div on Python 2, only operator.mod, divmod, pow, operator.lshift, operator.rshift, operator.and_, operator.xor, operator.or_, ] class TestNDArrayOperatorsMixin(object): def test_array_like_add(self): def check(result): _assert_equal_type_and_value(result, ArrayLike(0)) check(ArrayLike(0) + 0) check(0 + ArrayLike(0)) check(ArrayLike(0) + np.array(0)) check(np.array(0) + ArrayLike(0)) check(ArrayLike(np.array(0)) + 0) check(0 + ArrayLike(np.array(0))) check(ArrayLike(np.array(0)) + np.array(0)) check(np.array(0) + ArrayLike(np.array(0))) def test_inplace(self): array_like = ArrayLike(np.array([0])) array_like += 1 _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) array = np.array([0]) array += ArrayLike(1) _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) def test_opt_out(self): class OptOut(object): """Object that opts out of __array_ufunc__.""" __array_ufunc__ = None def __add__(self, other): return self def __radd__(self, other): return self array_like = ArrayLike(1) opt_out = OptOut() # supported operations assert_(array_like + opt_out is opt_out) assert_(opt_out + array_like is opt_out) # 
not supported with assert_raises(TypeError): # don't use the Python default, array_like = array_like + opt_out array_like += opt_out with assert_raises(TypeError): array_like - opt_out with assert_raises(TypeError): opt_out - array_like def test_subclass(self): class SubArrayLike(ArrayLike): """Should take precedence over ArrayLike.""" x = ArrayLike(0) y = SubArrayLike(1) _assert_equal_type_and_value(x + y, y) _assert_equal_type_and_value(y + x, y) def test_object(self): x = ArrayLike(0) obj = object() with assert_raises(TypeError): x + obj with assert_raises(TypeError): obj + x with assert_raises(TypeError): x += obj def test_unary_methods(self): array = np.array([-1, 0, 1, 2]) array_like = ArrayLike(array) for op in [operator.neg, operator.pos, abs, operator.invert]: _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) def test_forward_binary_methods(self): array = np.array([-1, 0, 1, 2]) array_like = ArrayLike(array) for op in _ALL_BINARY_OPERATORS: expected = wrap_array_like(op(array, 1)) actual = op(array_like, 1) err_msg = 'failed for operator {}'.format(op) _assert_equal_type_and_value(expected, actual, err_msg=err_msg) def test_reflected_binary_methods(self): for op in _ALL_BINARY_OPERATORS: expected = wrap_array_like(op(2, 1)) actual = op(2, ArrayLike(1)) err_msg = 'failed for operator {}'.format(op) _assert_equal_type_and_value(expected, actual, err_msg=err_msg) def test_ufunc_at(self): array = ArrayLike(np.array([1, 2, 3, 4])) assert_(np.negative.at(array, np.array([0, 1])) is None) _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) def test_ufunc_two_outputs(self): mantissa, exponent = np.frexp(2 ** -3) expected = (ArrayLike(mantissa), ArrayLike(exponent)) _assert_equal_type_and_value( np.frexp(ArrayLike(2 ** -3)), expected) _assert_equal_type_and_value( np.frexp(ArrayLike(np.array(2 ** -3))), expected) if __name__ == "__main__": run_module_suite()
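# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): how the
# ArrayLike wrapper defined at the top of this file behaves in practice.
# Operators inherited from NDArrayOperatorsMixin and direct ufunc calls both
# funnel through __array_ufunc__, so results come back re-wrapped.

x = ArrayLike(np.array([1.0, 2.0]))

y = x + np.array([10.0, 20.0])      # mixin operator -> np.add -> ArrayLike
assert isinstance(y, ArrayLike)
assert np.array_equal(y.value, [11.0, 22.0])

z = np.multiply(x, 3)               # explicit ufunc call also re-wraps
assert isinstance(z, ArrayLike)
assert np.array_equal(z.value, [3.0, 6.0])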
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_arraysetops.py
"""Test functions for 1D array set operations. """ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import ( run_module_suite, assert_array_equal, assert_equal, assert_raises, ) from numpy.lib.arraysetops import ( ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin ) class TestSetOps(object): def test_intersect1d(self): # unique inputs a = np.array([5, 7, 1, 2]) b = np.array([2, 4, 3, 1, 5]) ec = np.array([1, 2, 5]) c = intersect1d(a, b, assume_unique=True) assert_array_equal(c, ec) # non-unique inputs a = np.array([5, 5, 7, 1, 2]) b = np.array([2, 1, 4, 3, 3, 1, 5]) ed = np.array([1, 2, 5]) c = intersect1d(a, b) assert_array_equal(c, ed) assert_array_equal([], intersect1d([], [])) def test_setxor1d(self): a = np.array([5, 7, 1, 2]) b = np.array([2, 4, 3, 1, 5]) ec = np.array([3, 4, 7]) c = setxor1d(a, b) assert_array_equal(c, ec) a = np.array([1, 2, 3]) b = np.array([6, 5, 4]) ec = np.array([1, 2, 3, 4, 5, 6]) c = setxor1d(a, b) assert_array_equal(c, ec) a = np.array([1, 8, 2, 3]) b = np.array([6, 5, 4, 8]) ec = np.array([1, 2, 3, 4, 5, 6]) c = setxor1d(a, b) assert_array_equal(c, ec) assert_array_equal([], setxor1d([], [])) def test_ediff1d(self): zero_elem = np.array([]) one_elem = np.array([1]) two_elem = np.array([1, 2]) assert_array_equal([], ediff1d(zero_elem)) assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) assert_array_equal([0], ediff1d(zero_elem, to_end=0)) assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) assert_array_equal([], ediff1d(one_elem)) assert_array_equal([1], ediff1d(two_elem)) assert_array_equal([7,1,9], ediff1d(two_elem, to_begin=7, to_end=9)) assert_array_equal([5,6,1,7,8], ediff1d(two_elem, to_begin=[5,6], to_end=[7,8])) assert_array_equal([1,9], ediff1d(two_elem, to_end=9)) assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8])) assert_array_equal([7,1], ediff1d(two_elem, to_begin=7)) assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6])) assert(isinstance(ediff1d(np.matrix(1)), np.matrix)) assert(isinstance(ediff1d(np.matrix(1), to_begin=1), np.matrix)) def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test # isin instead. def _isin_slow(a, b): b = np.asarray(b).flatten().tolist() return a in b isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) def assert_isin_equal(a, b): x = isin(a, b) y = isin_slow(a, b) assert_array_equal(x, y) #multidimensional arrays in both arguments a = np.arange(24).reshape([2, 3, 4]) b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) assert_isin_equal(a, b) #array-likes as both arguments c = [(9, 8), (7, 6)] d = (9, 7) assert_isin_equal(c, d) #zero-d array: f = np.array(3) assert_isin_equal(f, b) assert_isin_equal(a, f) assert_isin_equal(f, f) #scalar: assert_isin_equal(5, b) assert_isin_equal(a, 6) assert_isin_equal(5, 6) #empty array-like: x = [] assert_isin_equal(x, b) assert_isin_equal(a, x) assert_isin_equal(x, x) def test_in1d(self): # we use two different sizes for the b array here to test the # two different paths in in1d(). 
        for mult in (1, 10):
            # One check without np.array to make sure lists are handled
            # correctly
            a = [5, 7, 1, 2]
            b = [2, 4, 3, 1, 5] * mult
            ec = np.array([True, False, True, True])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)

            a[0] = 8
            ec = np.array([False, False, True, True])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)

            a[0], a[3] = 4, 8
            ec = np.array([True, False, True, False])
            c = in1d(a, b, assume_unique=True)
            assert_array_equal(c, ec)

            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            ec = [False, True, False, True, True, True, True, True, True,
                  False, True, False, False, False]
            c = in1d(a, b)
            assert_array_equal(c, ec)

            b = b + [5, 5, 4] * mult
            ec = [True, True, True, True, True, True, True, True, True, True,
                  True, False, True, True]
            c = in1d(a, b)
            assert_array_equal(c, ec)

            a = np.array([5, 7, 1, 2])
            b = np.array([2, 4, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True])
            c = in1d(a, b)
            assert_array_equal(c, ec)

            a = np.array([5, 7, 1, 1, 2])
            b = np.array([2, 4, 3, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True, True])
            c = in1d(a, b)
            assert_array_equal(c, ec)

            a = np.array([5, 5])
            b = np.array([2, 2] * mult)
            ec = np.array([False, False])
            c = in1d(a, b)
            assert_array_equal(c, ec)

        a = np.array([5])
        b = np.array([2])
        ec = np.array([False])
        c = in1d(a, b)
        assert_array_equal(c, ec)

        assert_array_equal(in1d([], []), [])

    def test_in1d_char_array(self):
        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
        b = np.array(['a', 'c'])
        ec = np.array([True, False, True, False, False, True, False, False])
        c = in1d(a, b)
        assert_array_equal(c, ec)

    def test_in1d_invert(self):
        "Test in1d's invert parameter"
        # We use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

    def test_in1d_ravel(self):
        # Test that in1d ravels its input arrays.  This is not documented
        # behavior however.  The test is to ensure consistency.
a = np.arange(6).reshape(2, 3) b = np.arange(3, 9).reshape(3, 2) long_b = np.arange(3, 63).reshape(30, 2) ec = np.array([False, False, False, True, True, True]) assert_array_equal(in1d(a, b, assume_unique=True), ec) assert_array_equal(in1d(a, b, assume_unique=False), ec) assert_array_equal(in1d(a, long_b, assume_unique=True), ec) assert_array_equal(in1d(a, long_b, assume_unique=False), ec) def test_in1d_first_array_is_object(self): ar1 = [None] ar2 = np.array([1]*10) expected = np.array([False]) result = np.in1d(ar1, ar2) assert_array_equal(result, expected) def test_in1d_second_array_is_object(self): ar1 = 1 ar2 = np.array([None]*10) expected = np.array([False]) result = np.in1d(ar1, ar2) assert_array_equal(result, expected) def test_in1d_both_arrays_are_object(self): ar1 = [None] ar2 = np.array([None]*10) expected = np.array([True]) result = np.in1d(ar1, ar2) assert_array_equal(result, expected) def test_in1d_both_arrays_have_structured_dtype(self): # Test arrays of a structured data type containing an integer field # and a field of dtype `object` allowing for arbitrary Python objects dt = np.dtype([('field1', int), ('field2', object)]) ar1 = np.array([(1, None)], dtype=dt) ar2 = np.array([(1, None)]*10, dtype=dt) expected = np.array([True]) result = np.in1d(ar1, ar2) assert_array_equal(result, expected) def test_union1d(self): a = np.array([5, 4, 7, 1, 2]) b = np.array([2, 4, 3, 3, 2, 1, 5]) ec = np.array([1, 2, 3, 4, 5, 7]) c = union1d(a, b) assert_array_equal(c, ec) # Tests gh-10340, arguments to union1d should be # flattened if they are not already 1D x = np.array([[0, 1, 2], [3, 4, 5]]) y = np.array([0, 1, 2, 3, 4]) ez = np.array([0, 1, 2, 3, 4, 5]) z = union1d(x, y) assert_array_equal(z, ez) assert_array_equal([], union1d([], [])) def test_setdiff1d(self): a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) b = np.array([2, 4, 3, 3, 2, 1, 5]) ec = np.array([6, 7]) c = setdiff1d(a, b) assert_array_equal(c, ec) a = np.arange(21) b = np.arange(19) ec = np.array([19, 20]) c = setdiff1d(a, b) assert_array_equal(c, ec) assert_array_equal([], setdiff1d([], [])) a = np.array((), np.uint32) assert_equal(setdiff1d(a, []).dtype, np.uint32) def test_setdiff1d_char_array(self): a = np.array(['a', 'b', 'c']) b = np.array(['a', 'b', 's']) assert_array_equal(setdiff1d(a, b), np.array(['c'])) def test_manyways(self): a = np.array([5, 7, 1, 2, 8]) b = np.array([9, 8, 2, 4, 3, 1, 5]) c1 = setxor1d(a, b) aux1 = intersect1d(a, b) aux2 = union1d(a, b) c2 = setdiff1d(aux2, aux1) assert_array_equal(c1, c2) class TestUnique(object): def test_unique_1d(self): def check_all(a, b, i1, i2, c, dt): base_msg = 'check {0} failed for type {1}' msg = base_msg.format('values', dt) v = unique(a) assert_array_equal(v, b, msg) msg = base_msg.format('return_index', dt) v, j = unique(a, 1, 0, 0) assert_array_equal(v, b, msg) assert_array_equal(j, i1, msg) msg = base_msg.format('return_inverse', dt) v, j = unique(a, 0, 1, 0) assert_array_equal(v, b, msg) assert_array_equal(j, i2, msg) msg = base_msg.format('return_counts', dt) v, j = unique(a, 0, 0, 1) assert_array_equal(v, b, msg) assert_array_equal(j, c, msg) msg = base_msg.format('return_index and return_inverse', dt) v, j1, j2 = unique(a, 1, 1, 0) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) msg = base_msg.format('return_index and return_counts', dt) v, j1, j2 = unique(a, 1, 0, 1) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, c, msg) msg = base_msg.format('return_inverse and return_counts', dt) 
v, j1, j2 = unique(a, 0, 1, 1) assert_array_equal(v, b, msg) assert_array_equal(j1, i2, msg) assert_array_equal(j2, c, msg) msg = base_msg.format(('return_index, return_inverse ' 'and return_counts'), dt) v, j1, j2, j3 = unique(a, 1, 1, 1) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) assert_array_equal(j3, c, msg) a = [5, 7, 1, 2, 1, 5, 7]*10 b = [1, 2, 5, 7] i1 = [2, 3, 0, 1] i2 = [2, 3, 0, 1, 0, 2, 3]*10 c = np.multiply([2, 1, 2, 2], 10) # test for numeric arrays types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) types.append('datetime64[D]') types.append('timedelta64[D]') for dt in types: aa = np.array(a, dt) bb = np.array(b, dt) check_all(aa, bb, i1, i2, c, dt) # test for object arrays dt = 'O' aa = np.empty(len(a), dt) aa[:] = a bb = np.empty(len(b), dt) bb[:] = b check_all(aa, bb, i1, i2, c, dt) # test for structured arrays dt = [('', 'i'), ('', 'i')] aa = np.array(list(zip(a, a)), dt) bb = np.array(list(zip(b, b)), dt) check_all(aa, bb, i1, i2, c, dt) # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] unq = [1, 2, 3] inv = [0, 1, 0, 1, 1, 2] a1 = unique(a) assert_array_equal(a1, unq) a2, a2_inv = unique(a, return_inverse=True) assert_array_equal(a2, unq) assert_array_equal(a2_inv, inv) # test for chararrays with return_inverse (gh-5099) a = np.chararray(5) a[...] = '' a2, a2_inv = np.unique(a, return_inverse=True) assert_array_equal(a2_inv, np.zeros(5)) # test for ticket #9137 a = [] a1_idx = np.unique(a, return_index=True)[1] a2_inv = np.unique(a, return_inverse=True)[1] a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:] assert_equal(a1_idx.dtype, np.intp) assert_equal(a2_inv.dtype, np.intp) assert_equal(a3_idx.dtype, np.intp) assert_equal(a3_inv.dtype, np.intp) def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, [('a', int), ('b', object)]) assert_raises(ValueError, unique, np.arange(10), axis=2) assert_raises(ValueError, unique, np.arange(10), axis=-2) def test_unique_axis_list(self): msg = "Unique failed on list of lists" inp = [[0, 1, 0], [0, 1, 0]] inp_arr = np.asarray(inp) assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) def test_unique_axis(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) types.append('datetime64[D]') types.append('timedelta64[D]') types.append([('a', int), ('b', int)]) types.append([('a', int), ('b', float)]) for dtype in types: self._run_axis_tests(dtype) msg = 'Non-bitwise-equal booleans test failed' data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) result = np.array([[False, True], [True, True]], dtype=bool) assert_array_equal(unique(data, axis=0), result, msg) msg = 'Negative zero equality test failed' data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) result = np.array([[-0.0, 0.0]]) assert_array_equal(unique(data, axis=0), result, msg) def test_unique_masked(self): # issue 8664 x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8') y = np.ma.masked_equal(x, 0) v = np.unique(y) v2, i, c = np.unique(y, return_index=True, return_counts=True) msg = 'Unique returned different results when asked for index' assert_array_equal(v.data, v2.data, msg) 
assert_array_equal(v.mask, v2.mask, msg) def test_unique_sort_order_with_axis(self): # These tests fail if sorting along axis is done by treating subarrays # as unsigned byte strings. See gh-10495. fmt = "sort order incorrect for integer type '%s'" for dt in 'bhilq': a = np.array([[-1],[0]], dt) b = np.unique(a, axis=0) assert_array_equal(a, b, fmt % dt) def _run_axis_tests(self, dtype): data = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]]).astype(dtype) msg = 'Unique with 1d array and axis=0 failed' result = np.array([0, 1]) assert_array_equal(unique(data), result.astype(dtype), msg) msg = 'Unique with 2d array and axis=0 failed' result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) msg = 'Unique with 2d array and axis=1 failed' result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) msg = 'Unique with 3d array and axis=2 failed' data3d = np.dstack([data] * 3) result = data3d[..., :1] assert_array_equal(unique(data3d, axis=2), result, msg) uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, return_inverse=True, return_counts=True) msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" assert_array_equal(uniq[inv], data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, return_inverse=True, return_counts=True) msg = "Unique's return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" assert_array_equal(uniq[:, inv], data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) if __name__ == "__main__": run_module_suite()
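# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): the shape
# contract separating in1d from isin, plus unique's return_counts, all
# covered at length above and using only public numpy API.

import numpy as np

grid = np.arange(6).reshape(2, 3)
flat = np.in1d(grid, [1, 4])                 # always 1-D: shape (6,)
kept = np.isin(grid, [1, 4])                 # matches the input: shape (2, 3)
assert flat.shape == (6,)
assert np.array_equal(kept, flat.reshape(grid.shape))

values, counts = np.unique([1, 2, 2, 3, 3, 3], return_counts=True)
assert np.array_equal(values, [1, 2, 3])
assert np.array_equal(counts, [1, 2, 3])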
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test__iotools.py
from __future__ import division, absolute_import, print_function import sys import time from datetime import date import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_allclose, assert_raises, ) from numpy.lib._iotools import ( LineSplitter, NameValidator, StringConverter, has_nested_fields, easy_dtype, flatten_dtype ) from numpy.compat import unicode class TestLineSplitter(object): "Tests the LineSplitter class." def test_no_delimiter(self): "Test LineSplitter w/o delimiter" strg = " 1 2 3 4 5 # test" test = LineSplitter()(strg) assert_equal(test, ['1', '2', '3', '4', '5']) test = LineSplitter('')(strg) assert_equal(test, ['1', '2', '3', '4', '5']) def test_space_delimiter(self): "Test space delimiter" strg = " 1 2 3 4 5 # test" test = LineSplitter(' ')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) test = LineSplitter(' ')(strg) assert_equal(test, ['1 2 3 4', '5']) def test_tab_delimiter(self): "Test tab delimiter" strg = " 1\t 2\t 3\t 4\t 5 6" test = LineSplitter('\t')(strg) assert_equal(test, ['1', '2', '3', '4', '5 6']) strg = " 1 2\t 3 4\t 5 6" test = LineSplitter('\t')(strg) assert_equal(test, ['1 2', '3 4', '5 6']) def test_other_delimiter(self): "Test LineSplitter on delimiter" strg = "1,2,3,4,,5" test = LineSplitter(',')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) # strg = " 1,2,3,4,,5 # test" test = LineSplitter(',')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) # gh-11028 bytes comment/delimiters should get decoded strg = b" 1,2,3,4,,5 % test" test = LineSplitter(delimiter=b',', comments=b'%')(strg) assert_equal(test, ['1', '2', '3', '4', '', '5']) def test_constant_fixed_width(self): "Test LineSplitter w/ fixed-width fields" strg = " 1 2 3 4 5 # test" test = LineSplitter(3)(strg) assert_equal(test, ['1', '2', '3', '4', '', '5', '']) # strg = " 1 3 4 5 6# test" test = LineSplitter(20)(strg) assert_equal(test, ['1 3 4 5 6']) # strg = " 1 3 4 5 6# test" test = LineSplitter(30)(strg) assert_equal(test, ['1 3 4 5 6']) def test_variable_fixed_width(self): strg = " 1 3 4 5 6# test" test = LineSplitter((3, 6, 6, 3))(strg) assert_equal(test, ['1', '3', '4 5', '6']) # strg = " 1 3 4 5 6# test" test = LineSplitter((6, 6, 9))(strg) assert_equal(test, ['1', '3 4', '5 6']) # ----------------------------------------------------------------------------- class TestNameValidator(object): def test_case_sensitivity(self): "Test case sensitivity" names = ['A', 'a', 'b', 'c'] test = NameValidator().validate(names) assert_equal(test, ['A', 'a', 'b', 'c']) test = NameValidator(case_sensitive=False).validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='upper').validate(names) assert_equal(test, ['A', 'A_1', 'B', 'C']) test = NameValidator(case_sensitive='lower').validate(names) assert_equal(test, ['a', 'a_1', 'b', 'c']) # check exceptions assert_raises(ValueError, NameValidator, case_sensitive='foobar') def test_excludelist(self): "Test excludelist" names = ['dates', 'data', 'Other Data', 'mask'] validator = NameValidator(excludelist=['dates', 'data', 'mask']) test = validator.validate(names) assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) def test_missing_names(self): "Test validate missing names" namelist = ('a', 'b', 'c') validator = NameValidator() assert_equal(validator(namelist), ['a', 'b', 'c']) namelist = ('', 'b', 'c') assert_equal(validator(namelist), ['f0', 'b', 'c']) namelist = ('a', 'b', '') assert_equal(validator(namelist), ['a', 'b', 'f0']) namelist = ('', 
'f0', '') assert_equal(validator(namelist), ['f1', 'f0', 'f2']) def test_validate_nb_names(self): "Test validate nb names" namelist = ('a', 'b', 'c') validator = NameValidator() assert_equal(validator(namelist, nbfields=1), ('a',)) assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), ['a', 'b', 'c', 'g0', 'g1']) def test_validate_wo_names(self): "Test validate no names" namelist = None validator = NameValidator() assert_(validator(namelist) is None) assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) # ----------------------------------------------------------------------------- def _bytes_to_date(s): return date(*time.strptime(s, "%Y-%m-%d")[:3]) class TestStringConverter(object): "Test StringConverter" def test_creation(self): "Test creation of a StringConverter" converter = StringConverter(int, -99999) assert_equal(converter._status, 1) assert_equal(converter.default, -99999) def test_upgrade(self): "Tests the upgrade method." converter = StringConverter() assert_equal(converter._status, 0) # test int assert_equal(converter.upgrade('0'), 0) assert_equal(converter._status, 1) # On systems where long defaults to 32-bit, the statuses will be # offset by one, so we check for this here. import numpy.core.numeric as nx status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) # test int > 2**32 assert_equal(converter.upgrade('17179869184'), 17179869184) assert_equal(converter._status, 1 + status_offset) # test float assert_allclose(converter.upgrade('0.'), 0.0) assert_equal(converter._status, 2 + status_offset) # test complex assert_equal(converter.upgrade('0j'), complex('0j')) assert_equal(converter._status, 3 + status_offset) # test str # note that the longdouble type has been skipped, so the # _status increases by 2. Everything should succeed with # unicode conversion (5). for s in ['a', u'a', b'a']: res = converter.upgrade(s) assert_(type(res) is unicode) assert_equal(res, u'a') assert_equal(converter._status, 5 + status_offset) def test_missing(self): "Tests the use of missing values." 
converter = StringConverter(missing_values=('missing', 'missed')) converter.upgrade('0') assert_equal(converter('0'), 0) assert_equal(converter(''), converter.default) assert_equal(converter('missing'), converter.default) assert_equal(converter('missed'), converter.default) try: converter('miss') except ValueError: pass def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) convert = StringConverter(dateparser, date(2000, 1, 1)) test = convert('2001-01-01') assert_equal(test, date(2001, 1, 1)) test = convert('2009-01-01') assert_equal(test, date(2009, 1, 1)) test = convert('') assert_equal(test, date(2000, 1, 1)) def test_string_to_object(self): "Make sure that string-to-object functions are properly recognized" old_mapper = StringConverter._mapper[:] # copy of list conv = StringConverter(_bytes_to_date) assert_equal(conv._mapper, old_mapper) assert_(hasattr(conv, 'default')) def test_keep_default(self): "Make sure we don't lose an explicit default" converter = StringConverter(None, missing_values='', default=-999) converter.upgrade('3.14159265') assert_equal(converter.default, -999) assert_equal(converter.type, np.dtype(float)) # converter = StringConverter( None, missing_values='', default=0) converter.upgrade('3.14159265') assert_equal(converter.default, 0) assert_equal(converter.type, np.dtype(float)) def test_keep_default_zero(self): "Check that we don't lose a default of 0" converter = StringConverter(int, default=0, missing_values="N/A") assert_equal(converter.default, 0) def test_keep_missing_values(self): "Check that we're not losing missing values" converter = StringConverter(int, default=0, missing_values="N/A") assert_equal( converter.missing_values, set(['', 'N/A'])) def test_int64_dtype(self): "Check that int64 integer types can be specified" converter = StringConverter(np.int64, default=0) val = "-9223372036854775807" assert_(converter(val) == -9223372036854775807) val = "9223372036854775807" assert_(converter(val) == 9223372036854775807) def test_uint64_dtype(self): "Check that uint64 integer types can be specified" converter = StringConverter(np.uint64, default=0) val = "9223372043271415339" assert_(converter(val) == 9223372043271415339) class TestMiscFunctions(object): def test_has_nested_dtype(self): "Test has_nested_dtype" ndtype = np.dtype(float) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', '|S3'), ('B', float)]) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) assert_equal(has_nested_fields(ndtype), True) def test_easy_dtype(self): "Test ndtype on dtypes" # Simple case ndtype = float assert_equal(easy_dtype(ndtype), np.dtype(float)) # As string w/o names ndtype = "i4, f8" assert_equal(easy_dtype(ndtype), np.dtype([('f0', "i4"), ('f1', "f8")])) # As string w/o names but different default format assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), np.dtype([('field_000', "i4"), ('field_001', "f8")])) # As string w/ names ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names="a, b"), np.dtype([('a', "i4"), ('b', "f8")])) # As string w/ names (too many) ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([('a', "i4"), ('b', "f8")])) # As string w/ names (not enough) ndtype = "i4, f8" assert_equal(easy_dtype(ndtype, names=", b"), np.dtype([('f0', "i4"), ('b', "f8")])) # ... 
(with different default format) assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), np.dtype([('a', "i4"), ('f00', "f8")])) # As list of tuples w/o names ndtype = [('A', int), ('B', float)] assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) # As list of tuples w/ names assert_equal(easy_dtype(ndtype, names="a,b"), np.dtype([('a', int), ('b', float)])) # As list of tuples w/ not enough names assert_equal(easy_dtype(ndtype, names="a"), np.dtype([('a', int), ('f0', float)])) # As list of tuples w/ too many names assert_equal(easy_dtype(ndtype, names="a,b,c"), np.dtype([('a', int), ('b', float)])) # As list of types w/o names ndtype = (int, float, float) assert_equal(easy_dtype(ndtype), np.dtype([('f0', int), ('f1', float), ('f2', float)])) # As list of types w names ndtype = (int, float, float) assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([('a', int), ('b', float), ('c', float)])) # As simple dtype w/ names ndtype = np.dtype(float) assert_equal(easy_dtype(ndtype, names="a, b, c"), np.dtype([(_, float) for _ in ('a', 'b', 'c')])) # As simple dtype w/o names (but multiple fields) ndtype = np.dtype(float) assert_equal( easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) def test_flatten_dtype(self): "Testing flatten_dtype" # Standard dtype dt = np.dtype([("a", "f8"), ("b", "f8")]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [float, float]) # Recursive dtype dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) # dtype with shaped fields dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [float, int]) dt_flat = flatten_dtype(dt, True) assert_equal(dt_flat, [float] * 2 + [int] * 3) # dtype w/ titles dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) dt_flat = flatten_dtype(dt) assert_equal(dt_flat, [float, float]) if __name__ == "__main__": run_module_suite()
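# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): typical use of
# the three helpers exercised above.  numpy.lib._iotools is a private module,
# so this mirrors the behaviour pinned down by the tests rather than a
# documented public contract.

splitter = LineSplitter(',')
assert splitter(" 1, 2,,3 # trailing comment") == ['1', '2', '', '3']

validator = NameValidator()
assert validator(('', 'b', 'c')) == ['f0', 'b', 'c']   # blanks get defaults

conv = StringConverter()             # starts strict, widens on demand
assert conv.upgrade('1') == 1        # int suffices for this string
assert conv.upgrade('1.5') == 1.5    # upgraded in place to float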
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/lib/tests/test_utils.py
from __future__ import division, absolute_import, print_function import sys from numpy.core import arange from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_raises_regex, dec ) from numpy.lib import deprecate import numpy.lib.utils as utils if sys.version_info[0] >= 3: from io import StringIO else: from StringIO import StringIO @dec.skipif(sys.flags.optimize == 2) def test_lookfor(): out = StringIO() utils.lookfor('eigenvalue', module='numpy', output=out, import_modules=False) out = out.getvalue() assert_('numpy.linalg.eig' in out) @deprecate def old_func(self, x): return x @deprecate(message="Rather use new_func2") def old_func2(self, x): return x def old_func3(self, x): return x new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") def test_deprecate_decorator(): assert_('deprecated' in old_func.__doc__) def test_deprecate_decorator_message(): assert_('Rather use new_func2' in old_func2.__doc__) def test_deprecate_fn(): assert_('old_func3' in new_func3.__doc__) assert_('new_func3' in new_func3.__doc__) def test_safe_eval_nameconstant(): # Test if safe_eval supports Python 3.4 _ast.NameConstant utils.safe_eval('None') def test_byte_bounds(): a = arange(12).reshape(3, 4) low, high = utils.byte_bounds(a) assert_equal(high - low, a.size * a.itemsize) def test_assert_raises_regex_context_manager(): with assert_raises_regex(ValueError, 'no deprecation warning'): raise ValueError('no deprecation warning') if __name__ == "__main__": run_module_suite()
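# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): what the
# deprecate helper tested above does to a wrapped function.  The function
# below is hypothetical; the warning emitted is a standard DeprecationWarning.

import warnings


@deprecate(message="Rather use something_new")
def something_old(x):
    """Return x unchanged."""
    return x


with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    assert something_old(3) == 3
assert issubclass(w[-1].category, DeprecationWarning)
assert 'deprecated' in something_old.__doc__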
1,656
22.013889
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/core.py
""" numpy.ma : a package to handle missing or invalid values. This package was initially written for numarray by Paul F. Dubois at Lawrence Livermore National Laboratory. In 2006, the package was completely rewritten by Pierre Gerard-Marchant (University of Georgia) to make the MaskedArray class a subclass of ndarray, and to improve support of structured arrays. Copyright 1999, 2000, 2001 Regents of the University of California. Released for unlimited redistribution. * Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. * Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant (pgmdevlist_AT_gmail_DOT_com) * Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) .. moduleauthor:: Pierre Gerard-Marchant """ # pylint: disable-msg=E1002 from __future__ import division, absolute_import, print_function import sys import operator import warnings import textwrap from functools import reduce if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins import numpy as np import numpy.core.umath as umath import numpy.core.numerictypes as ntypes from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue from numpy import array as narray from numpy.lib.function_base import angle from numpy.compat import ( getargspec, formatargspec, long, basestring, unicode, bytes ) from numpy import expand_dims as n_expand_dims from numpy.core.multiarray import normalize_axis_index from numpy.core.numeric import normalize_axis_tuple if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', 'masked_array', 'masked_equal', 'masked_greater', 'masked_greater_equal', 'masked_inside', 'masked_invalid', 'masked_less', 'masked_less_equal', 'masked_not_equal', 'masked_object', 'masked_outside', 'masked_print_option', 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', 
    'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
    'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
    'var', 'where', 'zeros',
    ]

MaskType = np.bool_
nomask = MaskType(0)


class MaskedArrayFutureWarning(FutureWarning):
    pass


def _deprecate_argsort_axis(arr):
    """
    Adjust the axis passed to argsort, warning if necessary

    Parameters
    ----------
    arr
        The array which argsort was called on

    np.ma.argsort has a long-term bug where the default of the axis argument
    is wrong (gh-8701), which now must be kept for backwards compatibility.
    Thankfully, this only makes a difference when arrays are 2- or more-
    dimensional, so we only need a warning then.
    """
    if arr.ndim <= 1:
        # no warning needed - but switch to -1 anyway, to avoid surprising
        # subclasses, which are more likely to implement scalar axes.
        return -1
    else:
        # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
        warnings.warn(
            "In the future the default for argsort will be axis=-1, not the "
            "current None, to match its documentation and np.argsort. "
            "Explicitly pass -1 or None to silence this warning.",
            MaskedArrayFutureWarning, stacklevel=3)
        return None


def doc_note(initialdoc, note):
    """
    Adds a Notes section to an existing docstring.
    """
    if initialdoc is None:
        return
    if note is None:
        return initialdoc

    # FIXME: disable this function for the moment until we figure out what to
    # do with it. Currently it may result in duplicate Notes sections or Notes
    # sections in the wrong place
    return initialdoc

    newdoc = """
%s

Notes
-----
%s
"""
    return newdoc % (initialdoc, note)


def get_object_signature(obj):
    """
    Get the signature from obj
    """
    try:
        sig = formatargspec(*getargspec(obj))
    except TypeError:
        sig = ''
    return sig


###############################################################################
#                               Exceptions                                    #
###############################################################################


class MAError(Exception):
    """
    Class for masked array related errors.

    """
    pass


class MaskError(MAError):
    """
    Class for mask related errors.

    """
    pass


###############################################################################
#                             Filling options                                 #
###############################################################################


# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
                  'c': 1.e20 + 0.0j,
                  'f': 1.e20,
                  'i': 999999,
                  'O': '?',
                  'S': b'N/A',
                  'u': 999999,
                  'V': b'???',
                  'U': u'N/A'
                  }

# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
          "fs", "as"]:
    default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
    default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)

max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])

if 'float128' in ntypes.typeDict:
    max_filler.update([(np.float128, -np.inf)])
    min_filler.update([(np.float128, +np.inf)])


def _recursive_fill_value(dtype, f):
    """
    Recursively produce a fill value for `dtype`, calling f on scalar dtypes
    """
    if dtype.names:
        vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
        return np.array(vals, dtype=dtype)[()]  # decay to void scalar from 0d
    elif dtype.subdtype:
        subtype, shape = dtype.subdtype
        subval = _recursive_fill_value(subtype, f)
        return np.full(shape, subval)
    else:
        return f(dtype)


def _get_dtype_of(obj):
    """ Convert the argument for *_fill_value into a dtype """
    if isinstance(obj, np.dtype):
        return obj
    elif hasattr(obj, 'dtype'):
        return obj.dtype
    else:
        return np.asanyarray(obj).dtype


def default_fill_value(obj):
    """
    Return the default fill value for the argument object.

    The default filling value depends on the datatype of the input
    array or the type of the input scalar:

       ========  ========
       datatype  default
       ========  ========
       bool      True
       int       999999
       float     1.e20
       complex   1.e20+0j
       object    '?'
       string    'N/A'
       ========  ========

    For structured types, a structured scalar is returned, with each field the
    default fill value for its type.

    For subarray types, the fill value is an array of the same size containing
    the default scalar fill value.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
    1e+20
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)

    """
    def _scalar_fill_value(dtype):
        if dtype.kind in 'Mm':
            return default_filler.get(dtype.str[1:], '?')
        else:
            return default_filler.get(dtype.kind, '?')

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)


def _extremum_fill_value(obj, extremum, extremum_name):

    def _scalar_fill_value(dtype):
        try:
            return extremum[dtype]
        except KeyError:
            raise TypeError(
                "Unsuitable type {} for calculating {}."
                .format(dtype, extremum_name)
            )

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)


def minimum_fill_value(obj):
    """
    Return the maximum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the minimum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.int32()
    >>> ma.minimum_fill_value(a)
    2147483647

    An array of numeric data can also be passed.

    >>> a = np.array([1, 2, 3], dtype=np.int8)
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.array([1, 2, 3], dtype=np.float32)
    >>> ma.minimum_fill_value(a)
    inf

    """
    return _extremum_fill_value(obj, min_filler, "minimum")


def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the maximum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.maximum_fill_value(a)
    -128
    >>> a = np.int32()
    >>> ma.maximum_fill_value(a)
    -2147483648

    An array of numeric data can also be passed.

    >>> a = np.array([1, 2, 3], dtype=np.int8)
    >>> ma.maximum_fill_value(a)
    -128
    >>> a = np.array([1, 2, 3], dtype=np.float32)
    >>> ma.maximum_fill_value(a)
    -inf

    """
    return _extremum_fill_value(obj, max_filler, "maximum")


def _recursive_set_fill_value(fillvalue, dt):
    """
    Create a fill value for a structured dtype.

    Parameters
    ----------
    fillvalue: scalar or array_like
        Scalar or array representing the fill value. If it is of shorter
        length than the number of fields in dt, it will be resized.
    dt: dtype
        The structured dtype for which to create the fill value.

    Returns
    -------
    val: tuple
        A tuple of values corresponding to the structured fill value.

    """
    fillvalue = np.resize(fillvalue, len(dt.names))
    output_value = []
    for (fval, name) in zip(fillvalue, dt.names):
        cdtype = dt[name]
        if cdtype.subdtype:
            cdtype = cdtype.subdtype[0]

        if cdtype.names:
            output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
        else:
            output_value.append(np.array(fval, dtype=cdtype).item())
    return tuple(output_value)


def _check_fill_value(fill_value, ndtype):
    """
    Private function validating the given `fill_value` for the given dtype.

    If fill_value is None, it is set to the default corresponding to the dtype.

    If fill_value is not None, its value is forced to the given dtype.

    The result is always a 0d array.

    """
    ndtype = np.dtype(ndtype)
    fields = ndtype.fields
    if fill_value is None:
        fill_value = default_fill_value(ndtype)
    elif fields:
        fdtype = [(_[0], _[1]) for _ in ndtype.descr]
        if isinstance(fill_value, (ndarray, np.void)):
            try:
                fill_value = np.array(fill_value, copy=False, dtype=fdtype)
            except ValueError:
                err_msg = "Unable to transform %s to dtype %s"
                raise ValueError(err_msg % (fill_value, fdtype))
        else:
            fill_value = np.asarray(fill_value, dtype=object)
            fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
                                  dtype=ndtype)
    else:
        if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
            err_msg = "Cannot set fill value of string with array of dtype %s"
            raise TypeError(err_msg % ndtype)
        else:
            # In case we want to convert 1e20 to int.
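            # (Illustrative note, not part of the original source: the
            # default float fill 1e20 does not fit in an int64, so
            # np.array(1e20, dtype=np.int64) raises OverflowError here,
            # which is re-raised below as a TypeError.)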
            try:
                fill_value = np.array(fill_value, copy=False, dtype=ndtype)
            except OverflowError:
                # Raise TypeError instead of OverflowError. OverflowError
                # is seldom used, and the real problem here is that the
                # passed fill_value is not compatible with the ndtype.
                err_msg = "Fill value %s overflows dtype %s"
                raise TypeError(err_msg % (fill_value, ndtype))
    return np.array(fill_value)


def set_fill_value(a, fill_value):
    """
    Set the filling value of a, if a is a masked array.

    This function changes the fill value of the masked array `a` in place.
    If `a` is not a masked array, the function returns silently, without
    doing anything.

    Parameters
    ----------
    a : array_like
        Input array.
    fill_value : dtype
        Filling value. A consistency test is performed to make sure
        the value is compatible with the dtype of `a`.

    Returns
    -------
    None
        Nothing returned by this function.

    See Also
    --------
    default_fill_value : Return the default fill value for a dtype.
    MaskedArray.fill_value : Return current fill value.
    MaskedArray.set_fill_value : Equivalent method.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(5)
    >>> a
    array([0, 1, 2, 3, 4])
    >>> a = ma.masked_where(a < 3, a)
    >>> a
    masked_array(data = [-- -- -- 3 4],
          mask = [ True  True  True False False],
          fill_value=999999)
    >>> ma.set_fill_value(a, -999)
    >>> a
    masked_array(data = [-- -- -- 3 4],
          mask = [ True  True  True False False],
          fill_value=-999)

    Nothing happens if `a` is not a masked array.

    >>> a = range(5)
    >>> a
    [0, 1, 2, 3, 4]
    >>> ma.set_fill_value(a, 100)
    >>> a
    [0, 1, 2, 3, 4]
    >>> a = np.arange(5)
    >>> a
    array([0, 1, 2, 3, 4])
    >>> ma.set_fill_value(a, 100)
    >>> a
    array([0, 1, 2, 3, 4])

    """
    if isinstance(a, MaskedArray):
        a.set_fill_value(fill_value)
    return


def get_fill_value(a):
    """
    Return the filling value of a, if any.  Otherwise, returns the
    default filling value for that type.

    """
    if isinstance(a, MaskedArray):
        result = a.fill_value
    else:
        result = default_fill_value(a)
    return result


def common_fill_value(a, b):
    """
    Return the common filling value of two masked arrays, if any.

    If ``a.fill_value == b.fill_value``, return the fill value,
    otherwise return None.

    Parameters
    ----------
    a, b : MaskedArray
        The masked arrays for which to compare fill values.

    Returns
    -------
    fill_value : scalar or None
        The common fill value, or None.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=3)
    >>> y = np.ma.array([0, 1.], fill_value=3)
    >>> np.ma.common_fill_value(x, y)
    3.0

    """
    t1 = get_fill_value(a)
    t2 = get_fill_value(b)
    if t1 == t2:
        return t1
    return None


def filled(a, fill_value=None):
    """
    Return input as an array with masked data replaced by a fill value.

    If `a` is not a `MaskedArray`, `a` itself is returned.
    If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
    ``a.fill_value``.

    Parameters
    ----------
    a : MaskedArray or array_like
        An input object.
    fill_value : scalar, optional
        Filling value. Default is None.

    Returns
    -------
    a : ndarray
        The filled array.

    See Also
    --------
    compressed

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> x.filled()
    array([[999999,      1,      2],
           [999999,      4,      5],
           [     6,      7,      8]])

    """
    if hasattr(a, 'filled'):
        return a.filled(fill_value)

    elif isinstance(a, ndarray):
        # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
        return a
    elif isinstance(a, dict):
        return np.array(a, 'O')
    else:
        return np.array(a)


def get_masked_subclass(*arrays):
    """
    Return the youngest subclass of MaskedArray from a list of (masked) arrays.

    In case of siblings, the first listed takes over.
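
    (Illustrative addition, not part of the original docstring.)

    Examples
    --------
    >>> get_masked_subclass(masked_array([1, 2, 3]))
    <class 'numpy.ma.core.MaskedArray'>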
""" if len(arrays) == 1: arr = arrays[0] if isinstance(arr, MaskedArray): rcls = type(arr) else: rcls = MaskedArray else: arrcls = [type(a) for a in arrays] rcls = arrcls[0] if not issubclass(rcls, MaskedArray): rcls = MaskedArray for cls in arrcls[1:]: if issubclass(cls, rcls): rcls = cls # Don't return MaskedConstant as result: revert to MaskedArray if rcls.__name__ == 'MaskedConstant': return MaskedArray return rcls def getdata(a, subok=True): """ Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, else return `a` as a ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). See Also -------- getmask : Return the mask of a masked array, or nomask. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array(data = [[1 --] [3 4]], mask = [[False True] [False False]], fill_value=999999) >>> ma.getdata(a) array([[1, 2], [3, 4]]) Equivalently use the ``MaskedArray`` `data` attribute. >>> a.data array([[1, 2], [3, 4]]) """ try: data = a._data except AttributeError: data = np.array(a, copy=False, subok=subok) if not subok: return data.view(ndarray) return data get_data = getdata def fix_invalid(a, mask=nomask, copy=True, fill_value=None): """ Return input with invalid data masked and replaced by a fill value. Invalid data means values of `nan`, `inf`, etc. Parameters ---------- a : array_like Input array, a (subclass of) ndarray. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as `data`. True indicates a masked (i.e. invalid) data. copy : bool, optional Whether to use a copy of `a` (True) or to fix `a` in place (False). Default is True. fill_value : scalar, optional Value used for fixing invalid data. Default is None, in which case the ``a.fill_value`` is used. Returns ------- b : MaskedArray The input array with invalid entries fixed. Notes ----- A copy is performed by default. Examples -------- >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data = [-- -1.0 nan inf], mask = [ True False False False], fill_value = 1e+20) >>> np.ma.fix_invalid(x) masked_array(data = [-- -1.0 -- --], mask = [ True False True True], fill_value = 1e+20) >>> fixed = np.ma.fix_invalid(x) >>> fixed.data array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20, 1.00000000e+20]) >>> x.data array([ 1., -1., NaN, Inf]) """ a = masked_array(a, copy=copy, mask=mask, subok=True) invalid = np.logical_not(np.isfinite(a._data)) if not invalid.any(): return a a._mask |= invalid if fill_value is None: fill_value = a.fill_value a._data[invalid] = fill_value return a ############################################################################### # Ufuncs # ############################################################################### ufunc_domain = {} ufunc_fills = {} class _DomainCheckInterval(object): """ Define a valid interval, so that : ``domain_check_interval(a,b)(x) == True`` where ``x < a`` or ``x > b``. """ def __init__(self, a, b): "domain_check_interval(a,b)(x) = true where x < a or y > b" if (a > b): (a, b) = (b, a) self.a = a self.b = b def __call__(self, x): "Execute the call behavior." 
        # nans at masked positions cause RuntimeWarnings, even though
        # they are masked. To avoid this we suppress warnings.
        with np.errstate(invalid='ignore'):
            return umath.logical_or(umath.greater(x, self.b),
                                    umath.less(x, self.a))


class _DomainTan(object):
    """
    Define a valid interval for the `tan` function, so that:

    ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``

    """

    def __init__(self, eps):
        "domain_tan(eps) = true where abs(cos(x)) < eps"
        self.eps = eps

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less(umath.absolute(umath.cos(x)), self.eps)


class _DomainSafeDivide(object):
    """
    Define a domain for safe division.

    """

    def __init__(self, tolerance=None):
        self.tolerance = tolerance

    def __call__(self, a, b):
        # Delay the selection of the tolerance to here in order to reduce numpy
        # import times. The calculation of these parameters is a substantial
        # component of numpy's import time.
        if self.tolerance is None:
            self.tolerance = np.finfo(float).tiny
        # don't call ma ufuncs from __array_wrap__ which would fail for scalars
        a, b = np.asarray(a), np.asarray(b)
        with np.errstate(invalid='ignore'):
            return umath.absolute(a) * self.tolerance >= umath.absolute(b)


class _DomainGreater(object):
    """
    DomainGreater(v)(x) is True where x <= v.

    """

    def __init__(self, critical_value):
        "DomainGreater(v)(x) = true where x <= v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less_equal(x, self.critical_value)


class _DomainGreaterEqual(object):
    """
    DomainGreaterEqual(v)(x) is True where x < v.

    """

    def __init__(self, critical_value):
        "DomainGreaterEqual(v)(x) = true where x < v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less(x, self.critical_value)


class _MaskedUFunc(object):
    def __init__(self, ufunc):
        self.f = ufunc
        self.__doc__ = ufunc.__doc__
        self.__name__ = ufunc.__name__

    def __str__(self):
        return "Masked version of {}".format(self.f)


class _MaskedUnaryOperation(_MaskedUFunc):
    """
    Defines masked version of unary operations, where invalid values are
    pre-masked.

    Parameters
    ----------
    mufunc : callable
        The function for which to define a masked version. Made available
        as ``_MaskedUnaryOperation.f``.
    fill : scalar, optional
        Filling value, default is 0.
    domain : class instance
        Domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.

    """

    def __init__(self, mufunc, fill=0, domain=None):
        super(_MaskedUnaryOperation, self).__init__(mufunc)
        self.fill = fill
        self.domain = domain
        ufunc_domain[mufunc] = domain
        ufunc_fills[mufunc] = fill

    def __call__(self, a, *args, **kwargs):
        """
        Execute the call behavior.

        """
        d = getdata(a)
        # Deal with domain
        if self.domain is not None:
            # Case 1.1. : Domained function
            # nans at masked positions cause RuntimeWarnings, even though
            # they are masked. To avoid this we suppress warnings.
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            # Make a mask
            m = ~umath.isfinite(result)
            m |= self.domain(d)
            m |= getmask(a)
        else:
            # Case 1.2. : Function without a domain
            # Get the result and the mask
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            m = getmask(a)

        if not result.ndim:
            # Case 2.1. : The result is scalar
            if m:
                return masked
            return result

        if m is not nomask:
            # Case 2.2. The result is an array
            # We need to fill the invalid data back w/ the input. Now,
            # that's plain silly: in C, we would just skip the element and
            # keep the original, but we do have to do it that way in Python

            # In case result has a lower dtype than the inputs (as in
            # equal)
            try:
                np.copyto(result, d, where=m)
            except TypeError:
                pass
        # Transform to
        masked_result = result.view(get_masked_subclass(a))
        masked_result._mask = m
        masked_result._update_from(a)
        return masked_result


class _MaskedBinaryOperation(_MaskedUFunc):
    """
    Define masked version of binary operations, where invalid
    values are pre-masked.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_MaskedBinaryOperation.f``.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """

    def __init__(self, mbfunc, fillx=0, filly=0):
        """
        abfunc(fillx, filly) must be defined.

        abfunc(x, filly) = x for all x to enable reduce.

        """
        super(_MaskedBinaryOperation, self).__init__(mbfunc)
        self.fillx = fillx
        self.filly = filly
        ufunc_domain[mbfunc] = None
        ufunc_fills[mbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        """
        Execute the call behavior.

        """
        # Get the data, as ndarray
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        with np.errstate():
            np.seterr(divide='ignore', invalid='ignore')
            result = self.f(da, db, *args, **kwargs)
        # Get the mask for the result
        (ma, mb) = (getmask(a), getmask(b))
        if ma is nomask:
            if mb is nomask:
                m = nomask
            else:
                m = umath.logical_or(getmaskarray(a), mb)
        elif mb is nomask:
            m = umath.logical_or(ma, getmaskarray(b))
        else:
            m = umath.logical_or(ma, mb)

        # Case 1. : scalar
        if not result.ndim:
            if m:
                return masked
            return result

        # Case 2. : array
        # Revert result to da where masked
        if m is not nomask and m.any():
            # any errors, just abort; impossible to guarantee masked values
            try:
                np.copyto(result, da, casting='unsafe', where=m)
            except Exception:
                pass

        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result

    def reduce(self, target, axis=0, dtype=None):
        """
        Reduce `target` along the given `axis`.

        """
        tclass = get_masked_subclass(target)
        m = getmask(target)
        t = filled(target, self.filly)
        if t.shape == ():
            t = t.reshape(1)
            if m is not nomask:
                m = make_mask(m, copy=1)
                m.shape = (1,)

        if m is nomask:
            tr = self.f.reduce(t, axis)
            mr = nomask
        else:
            tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
            mr = umath.logical_and.reduce(m, axis)

        if not tr.shape:
            if mr:
                return masked
            else:
                return tr
        masked_tr = tr.view(tclass)
        masked_tr._mask = mr
        return masked_tr

    def outer(self, a, b):
        """
        Return the function applied to the outer product of a and b.
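
        (Illustrative addition, not part of the original docstring.)

        Examples
        --------
        >>> import numpy.ma as ma
        >>> a = ma.array([1, 2], mask=[0, 1])
        >>> ma.add.outer(a, a).mask
        array([[False,  True],
               [ True,  True]])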
""" (da, db) = (getdata(a), getdata(b)) d = self.f.outer(da, db) ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = umath.logical_or.outer(ma, mb) if (not m.ndim) and m: return masked if m is not nomask: np.copyto(d, da, where=m) if not d.shape: return d masked_d = d.view(get_masked_subclass(a, b)) masked_d._mask = m return masked_d def accumulate(self, target, axis=0): """Accumulate `target` along `axis` after filling with y fill value. """ tclass = get_masked_subclass(target) t = filled(target, self.filly) result = self.f.accumulate(t, axis) masked_result = result.view(tclass) return masked_result class _DomainedBinaryOperation(_MaskedUFunc): """ Define binary operations that have a domain, like divide. They have no reduce, outer or accumulate. Parameters ---------- mbfunc : function The function for which to define a masked version. Made available as ``_DomainedBinaryOperation.f``. domain : class instance Default domain for the function. Should be one of the ``_Domain*`` classes. fillx : scalar, optional Filling value for the first argument, default is 0. filly : scalar, optional Filling value for the second argument, default is 0. """ def __init__(self, dbfunc, domain, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ super(_DomainedBinaryOperation, self).__init__(dbfunc) self.domain = domain self.fillx = fillx self.filly = filly ufunc_domain[dbfunc] = domain ufunc_fills[dbfunc] = (fillx, filly) def __call__(self, a, b, *args, **kwargs): "Execute the call behavior." # Get the data (da, db) = (getdata(a), getdata(b)) # Get the result with np.errstate(divide='ignore', invalid='ignore'): result = self.f(da, db, *args, **kwargs) # Get the mask as a combination of the source masks and invalid m = ~umath.isfinite(result) m |= getmask(a) m |= getmask(b) # Apply the domain domain = ufunc_domain.get(self.f, None) if domain is not None: m |= domain(da, db) # Take care of the scalar case first if (not m.ndim): if m: return masked else: return result # When the mask is True, put back da if possible # any errors, just abort; impossible to guarantee masked values try: np.copyto(result, 0, casting='unsafe', where=m) # avoid using "*" since this may be overlaid masked_da = umath.multiply(m, da) # only add back if it can be cast safely if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): result += masked_da except Exception: pass # Transforms to a (subclass of) MaskedArray masked_result = result.view(get_masked_subclass(a, b)) masked_result._mask = m if isinstance(a, MaskedArray): masked_result._update_from(a) elif isinstance(b, MaskedArray): masked_result._update_from(b) return masked_result # Unary ufuncs exp = _MaskedUnaryOperation(umath.exp) conjugate = _MaskedUnaryOperation(umath.conjugate) sin = _MaskedUnaryOperation(umath.sin) cos = _MaskedUnaryOperation(umath.cos) tan = _MaskedUnaryOperation(umath.tan) arctan = _MaskedUnaryOperation(umath.arctan) arcsinh = _MaskedUnaryOperation(umath.arcsinh) sinh = _MaskedUnaryOperation(umath.sinh) cosh = _MaskedUnaryOperation(umath.cosh) tanh = _MaskedUnaryOperation(umath.tanh) abs = absolute = _MaskedUnaryOperation(umath.absolute) angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base fabs = _MaskedUnaryOperation(umath.fabs) negative = _MaskedUnaryOperation(umath.negative) floor = _MaskedUnaryOperation(umath.floor) ceil = _MaskedUnaryOperation(umath.ceil) around = _MaskedUnaryOperation(np.round_) 
logical_not = _MaskedUnaryOperation(umath.logical_not) # Domained unary ufuncs sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, _DomainGreaterEqual(0.0)) log = _MaskedUnaryOperation(umath.log, 1.0, _DomainGreater(0.0)) log2 = _MaskedUnaryOperation(umath.log2, 1.0, _DomainGreater(0.0)) log10 = _MaskedUnaryOperation(umath.log10, 1.0, _DomainGreater(0.0)) tan = _MaskedUnaryOperation(umath.tan, 0.0, _DomainTan(1e-35)) arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, _DomainCheckInterval(-1.0, 1.0)) arccos = _MaskedUnaryOperation(umath.arccos, 0.0, _DomainCheckInterval(-1.0, 1.0)) arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, _DomainGreaterEqual(1.0)) arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) # Binary ufuncs add = _MaskedBinaryOperation(umath.add) subtract = _MaskedBinaryOperation(umath.subtract) multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) equal = _MaskedBinaryOperation(umath.equal) equal.reduce = None not_equal = _MaskedBinaryOperation(umath.not_equal) not_equal.reduce = None less_equal = _MaskedBinaryOperation(umath.less_equal) less_equal.reduce = None greater_equal = _MaskedBinaryOperation(umath.greater_equal) greater_equal.reduce = None less = _MaskedBinaryOperation(umath.less) less.reduce = None greater = _MaskedBinaryOperation(umath.greater) greater.reduce = None logical_and = _MaskedBinaryOperation(umath.logical_and) alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce logical_or = _MaskedBinaryOperation(umath.logical_or) sometrue = logical_or.reduce logical_xor = _MaskedBinaryOperation(umath.logical_xor) bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) hypot = _MaskedBinaryOperation(umath.hypot) # Domained binary ufuncs divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) true_divide = _DomainedBinaryOperation(umath.true_divide, _DomainSafeDivide(), 0, 1) floor_divide = _DomainedBinaryOperation(umath.floor_divide, _DomainSafeDivide(), 0, 1) remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) ############################################################################### # Mask creation functions # ############################################################################### def _replace_dtype_fields_recursive(dtype, primitive_dtype): "Private function allowing recursion in _replace_dtype_fields." _recurse = _replace_dtype_fields_recursive # Do we have some name fields ? if dtype.names: descr = [] for name in dtype.names: field = dtype.fields[name] if len(field) == 3: # Prepend the title to the name name = (field[-1], name) descr.append((name, _recurse(field[0], primitive_dtype))) new_dtype = np.dtype(descr) # Is this some kind of composite a la (float,2) elif dtype.subdtype: descr = list(dtype.subdtype) descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) new_dtype = np.dtype(tuple(descr)) # this is a primitive type, so do a direct replacement else: new_dtype = primitive_dtype # preserve identity of dtypes if new_dtype == dtype: new_dtype = dtype return new_dtype def _replace_dtype_fields(dtype, primitive_dtype): """ Construct a dtype description list from a given dtype. 

    Returns a new dtype object, with all fields and subtypes in the given
    type recursively replaced with `primitive_dtype`.

    Arguments are coerced to dtypes first.

    """
    dtype = np.dtype(dtype)
    primitive_dtype = np.dtype(primitive_dtype)
    return _replace_dtype_fields_recursive(dtype, primitive_dtype)


def make_mask_descr(ndtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with the type of all fields in `ndtype`
    converted to a boolean type. Field names are not altered.

    Parameters
    ----------
    ndtype : dtype
        The dtype to convert.

    Returns
    -------
    result : dtype
        A dtype that looks like `ndtype`, the type of all fields is boolean.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> dtype = np.dtype({'names':['foo', 'bar'], 'formats':[np.float32, int]})
    >>> dtype
    dtype([('foo', '<f4'), ('bar', '<i4')])
    >>> ma.make_mask_descr(dtype)
    dtype([('foo', '|b1'), ('bar', '|b1')])
    >>> ma.make_mask_descr(np.float32)
    dtype('bool')

    """
    return _replace_dtype_fields(ndtype, MaskType)


def getmask(a):
    """
    Return the mask of a masked array, or nomask.

    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
    mask is not `nomask`, else return `nomask`. To guarantee a full array
    of booleans of the same shape as a, use `getmaskarray`.

    Parameters
    ----------
    a : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getdata : Return the data of a masked array as an ndarray.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getmask(a)
    array([[False,  True],
           [False, False]])

    Equivalently use the `MaskedArray` `mask` attribute.

    >>> a.mask
    array([[False,  True],
           [False, False]])

    Result when mask == `nomask`

    >>> b = ma.masked_array([[1,2],[3,4]])
    >>> b
    masked_array(data =
     [[1 2]
     [3 4]],
          mask =
     False,
          fill_value=999999)
    >>> ma.nomask
    False
    >>> ma.getmask(b) == ma.nomask
    True
    >>> b.mask == ma.nomask
    True

    """
    return getattr(a, '_mask', nomask)


get_mask = getmask


def getmaskarray(arr):
    """
    Return the mask of a masked array, or full boolean array of False.

    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
    the mask is not `nomask`, else return a full boolean array of False of
    the same shape as `arr`.

    Parameters
    ----------
    arr : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getdata : Return the data of a masked array as an ndarray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getmaskarray(a)
    array([[False,  True],
           [False, False]])

    Result when mask == ``nomask``

    >>> b = ma.masked_array([[1,2],[3,4]])
    >>> b
    masked_array(data =
     [[1 2]
     [3 4]],
          mask =
     False,
          fill_value=999999)
    >>> ma.getmaskarray(b)
    array([[False, False],
           [False, False]])

    """
    mask = getmask(arr)
    if mask is nomask:
        mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
    return mask


def is_mask(m):
    """
    Return True if m is a valid, standard mask.

    This function does not check the contents of the input, only that the
    type is MaskType. In particular, this function returns False if the
    mask has a flexible dtype.

    Parameters
    ----------
    m : array_like
        Array to test.

    Returns
    -------
    result : bool
        True if `m.dtype.type` is MaskType, False otherwise.

    See Also
    --------
    isMaskedArray : Test whether input is an instance of MaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> m
    masked_array(data = [-- 1 -- 2 3],
          mask = [ True False  True False False],
          fill_value=999999)
    >>> ma.is_mask(m)
    False
    >>> ma.is_mask(m.mask)
    True

    Input must be an ndarray (or have similar attributes)
    for it to be considered a valid mask.

    >>> m = [False, True, False]
    >>> ma.is_mask(m)
    False
    >>> m = np.array([False, True, False])
    >>> m
    array([False,  True, False])
    >>> ma.is_mask(m)
    True

    Arrays with complex dtypes don't return True.

    >>> dtype = np.dtype({'names':['monty', 'pithon'], 'formats':[bool, bool]})
    >>> dtype
    dtype([('monty', '|b1'), ('pithon', '|b1')])
    >>> m = np.array([(True, False), (False, True), (True, False)], dtype=dtype)
    >>> m
    array([(True, False), (False, True), (True, False)],
          dtype=[('monty', '|b1'), ('pithon', '|b1')])
    >>> ma.is_mask(m)
    False

    """
    try:
        return m.dtype.type is MaskType
    except AttributeError:
        return False


def _shrink_mask(m):
    """
    Shrink a mask to nomask if possible
    """
    if not m.dtype.names and not m.any():
        return nomask
    else:
        return m


def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or requested.
    The function can accept any sequence that is convertible to integers,
    or ``nomask``. It does not require that contents must be 0s and 1s;
    values of 0 are interpreted as False, everything else as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has a
        dtype of MaskType (bool). If the dtype is flexible, each field has
        a boolean dtype. This is ignored when `m` is ``nomask``, in which
        case ``nomask`` is always returned.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m`.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = [True, False, True, True]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 1, 1]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 2, -3]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])

    Effect of the `shrink` parameter.

    >>> m = np.zeros(4)
    >>> m
    array([ 0.,  0.,  0.,  0.])
    >>> ma.make_mask(m)
    False
    >>> ma.make_mask(m, shrink=False)
    array([False, False, False, False])

    Using a flexible `dtype`.

    >>> m = [1, 0, 1, 1]
    >>> n = [0, 1, 0, 0]
    >>> arr = []
    >>> for man, mouse in zip(m, n):
    ...     arr.append((man, mouse))
    >>> arr
    [(1, 0), (0, 1), (1, 0), (1, 0)]
    >>> dtype = np.dtype({'names':['man', 'mouse'], 'formats':[int, int]})
    >>> arr = np.array(arr, dtype=dtype)
    >>> arr
    array([(1, 0), (0, 1), (1, 0), (1, 0)],
          dtype=[('man', '<i4'), ('mouse', '<i4')])
    >>> ma.make_mask(arr, dtype=dtype)
    array([(True, False), (False, True), (True, False), (True, False)],
          dtype=[('man', '|b1'), ('mouse', '|b1')])

    """
    if m is nomask:
        return nomask

    # Make sure the input dtype is valid.
    dtype = make_mask_descr(dtype)

    # legacy boolean special case: "existence of fields implies true"
    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
        return np.ones(m.shape, dtype=dtype)

    # Fill the mask in case there are missing data; turn it into an ndarray.
    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
    # "Bas les masques !" (down with the masks!)
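    # (Illustrative note, not part of the original source: with the default
    # shrink=True an all-False mask collapses to `nomask`, e.g.
    # make_mask(np.zeros(4)) returns False, as in the docstring above.)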
if shrink: result = _shrink_mask(result) return result def make_mask_none(newshape, dtype=None): """ Return a boolean mask of the given shape, filled with False. This function returns a boolean ndarray with all entries False, that can be used in common mask manipulations. If a complex dtype is specified, the type of each field is converted to a boolean type. Parameters ---------- newshape : tuple A tuple indicating the shape of the mask. dtype : {None, dtype}, optional If None, use a MaskType instance. Otherwise, use a new datatype with the same fields as `dtype`, converted to boolean types. Returns ------- result : ndarray An ndarray of appropriate shape and dtype, filled with False. See Also -------- make_mask : Create a boolean mask from an array. make_mask_descr : Construct a dtype description list from a given dtype. Examples -------- >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) Defining a more complex dtype. >>> dtype = np.dtype({'names':['foo', 'bar'], 'formats':[np.float32, int]}) >>> dtype dtype([('foo', '<f4'), ('bar', '<i4')]) >>> ma.make_mask_none((3,), dtype=dtype) array([(False, False), (False, False), (False, False)], dtype=[('foo', '|b1'), ('bar', '|b1')]) """ if dtype is None: result = np.zeros(newshape, dtype=MaskType) else: result = np.zeros(newshape, dtype=make_mask_descr(dtype)) return result def mask_or(m1, m2, copy=False, shrink=True): """ Combine two masks with the ``logical_or`` operator. The result may be a view on `m1` or `m2` if the other is `nomask` (i.e. False). Parameters ---------- m1, m2 : array_like Input masks. copy : bool, optional If copy is False and one of the inputs is `nomask`, return a view of the other input mask. Defaults to False. shrink : bool, optional Whether to shrink the output to `nomask` if all its values are False. Defaults to True. Returns ------- mask : output mask The result masks values that are masked in either `m1` or `m2`. Raises ------ ValueError If `m1` and `m2` have different flexible dtypes. Examples -------- >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) array([ True, True, True, False]) """ def _recursive_mask_or(m1, m2, newmask): names = m1.dtype.names for name in names: current1 = m1[name] if current1.dtype.names: _recursive_mask_or(current1, m2[name], newmask[name]) else: umath.logical_or(current1, m2[name], newmask[name]) return if (m1 is nomask) or (m1 is False): dtype = getattr(m2, 'dtype', MaskType) return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) if (m2 is nomask) or (m2 is False): dtype = getattr(m1, 'dtype', MaskType) return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): return m1 (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if (dtype1 != dtype2): raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) if dtype1.names: # Allocate an output mask array with the properly broadcast shape. newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) _recursive_mask_or(m1, m2, newmask) return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) def flatten_mask(mask): """ Returns a completely flattened version of the mask, where nested fields are collapsed. Parameters ---------- mask : array_like Input array, which will be interpreted as booleans. Returns ------- flattened_mask : ndarray of bools The flattened input. 
Examples -------- >>> mask = np.array([0, 0, 1]) >>> flatten_mask(mask) array([False, False, True]) >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) >>> flatten_mask(mask) array([False, False, False, True]) >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> flatten_mask(mask) array([False, False, False, False, False, True]) """ def _flatmask(mask): "Flatten the mask and returns a (maybe nested) sequence of booleans." mnames = mask.dtype.names if mnames: return [flatten_mask(mask[name]) for name in mnames] else: return mask def _flatsequence(sequence): "Generates a flattened version of the sequence." try: for element in sequence: if hasattr(element, '__iter__'): for f in _flatsequence(element): yield f else: yield element except TypeError: yield sequence mask = np.asarray(mask) flattened = _flatsequence(_flatmask(mask)) return np.array([_ for _ in flattened], dtype=bool) def _check_mask_axis(mask, axis, keepdims=np._NoValue): "Check whether there are masked values along the given axis" kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if mask is not nomask: return mask.all(axis=axis, **kwargs) return nomask ############################################################################### # Masking functions # ############################################################################### def masked_where(condition, a, copy=True): """ Mask an array where a condition is met. Return `a` as an array masked where `condition` is True. Any masked values of `a` or `condition` are also masked in the output. Parameters ---------- condition : array_like Masking condition. When `condition` tests floating point values for equality, consider using ``masked_values`` instead. a : array_like Array to mask. copy : bool If True (default) make a copy of `a` in the result. If False modify `a` in place and return a view. Returns ------- result : MaskedArray The result of masking `a` where `condition` is True. See Also -------- masked_values : Mask using floating point equality. masked_equal : Mask where equal to a given value. masked_not_equal : Mask where `not` equal to a given value. masked_less_equal : Mask where less than or equal to a given value. masked_greater_equal : Mask where greater than or equal to a given value. masked_less : Mask where less than a given value. masked_greater : Mask where greater than a given value. masked_inside : Mask inside a given interval. masked_outside : Mask outside a given interval. masked_invalid : Mask invalid values (NaNs or infs). Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_where(a <= 2, a) masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) Mask array `b` conditional on `a`. >>> b = ['a', 'b', 'c', 'd'] >>> ma.masked_where(a == 2, b) masked_array(data = [a b -- d], mask = [False False True False], fill_value=N/A) Effect of the `copy` argument. >>> c = ma.masked_where(a <= 2, a) >>> c masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) >>> c[0] = 99 >>> c masked_array(data = [99 -- -- 3], mask = [False True True False], fill_value=999999) >>> a array([0, 1, 2, 3]) >>> c = ma.masked_where(a <= 2, a, copy=False) >>> c[0] = 99 >>> c masked_array(data = [99 -- -- 3], mask = [False True True False], fill_value=999999) >>> a array([99, 1, 2, 3]) When `condition` or `a` contain masked values. 
>>> a = np.arange(4) >>> a = ma.masked_where(a == 2, a) >>> a masked_array(data = [0 1 -- 3], mask = [False False True False], fill_value=999999) >>> b = np.arange(4) >>> b = ma.masked_where(b == 0, b) >>> b masked_array(data = [-- 1 2 3], mask = [ True False False False], fill_value=999999) >>> ma.masked_where(a == 3, b) masked_array(data = [-- 1 -- --], mask = [ True False True True], fill_value=999999) """ # Make sure that condition is a valid standard-type mask. cond = make_mask(condition, shrink=False) a = np.array(a, copy=copy, subok=True) (cshape, ashape) = (cond.shape, a.shape) if cshape and cshape != ashape: raise IndexError("Inconsistent shape between the condition and the input" " (got %s and %s)" % (cshape, ashape)) if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) else: cls = MaskedArray result = a.view(cls) # Assign to *.mask so that structured masks are handled correctly. result.mask = _shrink_mask(cond) return result def masked_greater(x, value, copy=True): """ Mask an array where greater than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x > value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) masked_array(data = [0 1 2 --], mask = [False False False True], fill_value=999999) """ return masked_where(greater(x, value), x, copy=copy) def masked_greater_equal(x, value, copy=True): """ Mask an array where greater than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x >= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater_equal(a, 2) masked_array(data = [0 1 -- --], mask = [False False True True], fill_value=999999) """ return masked_where(greater_equal(x, value), x, copy=copy) def masked_less(x, value, copy=True): """ Mask an array where less than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x < value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less(a, 2) masked_array(data = [-- -- 2 3], mask = [ True True False False], fill_value=999999) """ return masked_where(less(x, value), x, copy=copy) def masked_less_equal(x, value, copy=True): """ Mask an array where less than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x <= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) masked_array(data = [-- -- -- 3], mask = [ True True True False], fill_value=999999) """ return masked_where(less_equal(x, value), x, copy=copy) def masked_not_equal(x, value, copy=True): """ Mask an array where `not` equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x != value). See Also -------- masked_where : Mask where a condition is met. 
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_not_equal(a, 2) masked_array(data = [-- -- 2 --], mask = [ True True False True], fill_value=999999) """ return masked_where(not_equal(x, value), x, copy=copy) def masked_equal(x, value, copy=True): """ Mask an array where equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x == value). For floating point arrays, consider using ``masked_values(x, value)``. See Also -------- masked_where : Mask where a condition is met. masked_values : Mask using floating point equality. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_equal(a, 2) masked_array(data = [0 1 -- 3], mask = [False False True False], fill_value=999999) """ output = masked_where(equal(x, value), x, copy=copy) output.fill_value = value return output def masked_inside(x, v1, v2, copy=True): """ Mask an array inside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` inside the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], mask = [False False True True False False], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy) def masked_outside(x, v1, v2, copy=True): """ Mask an array outside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` outside the interval [v1,v2] (x < v1)|(x > v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) masked_array(data = [-- -- 0.01 0.2 -- --], mask = [ True True False False True True], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_outside(x, 0.3, -0.3) masked_array(data = [-- -- 0.01 0.2 -- --], mask = [ True True False False True True], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf < v1) | (xf > v2) return masked_where(condition, x, copy=copy) def masked_object(x, value, copy=True, shrink=True): """ Mask the array `x` where the data are exactly equal to value. This function is similar to `masked_values`, but only suitable for object arrays: for floating point, use `masked_values` instead. Parameters ---------- x : array_like Array to mask value : object Comparison value copy : {True, False}, optional Whether to return a copy of `x`. shrink : {True, False}, optional Whether to collapse a mask full of False to nomask Returns ------- result : MaskedArray The result of masking `x` where equal to `value`. See Also -------- masked_where : Mask where a condition is met. masked_equal : Mask where equal to a given value (integers). masked_values : Mask using floating point equality. 

    Examples
    --------
    >>> import numpy.ma as ma
    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
    >>> # don't eat spoiled food
    >>> eat = ma.masked_object(food, 'green_eggs')
    >>> print(eat)
    [-- ham]
    >>> # plain ol' ham is boring
    >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
    >>> eat = ma.masked_object(fresh_food, 'green_eggs')
    >>> print(eat)
    [cheese ham pineapple]

    Note that `mask` is set to ``nomask`` if possible.

    >>> eat
    masked_array(data = [cheese ham pineapple],
          mask = False,
          fill_value=?)

    """
    if isMaskedArray(x):
        condition = umath.equal(x._data, value)
        mask = x._mask
    else:
        condition = umath.equal(np.asarray(x), value)
        mask = nomask
    mask = mask_or(mask, make_mask(condition, shrink=shrink))
    return masked_array(x, mask=mask, copy=copy, fill_value=value)


def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
    """
    Mask using floating point equality.

    Return a MaskedArray, masked where the data in array `x` are approximately
    equal to `value`, determined using `isclose`. The default tolerances for
    `masked_values` are the same as those for `isclose`.

    For integer types, exact equality is used, in the same way as
    `masked_equal`.

    The fill_value is set to `value` and the mask is set to ``nomask`` if
    possible.

    Parameters
    ----------
    x : array_like
        Array to mask.
    value : float
        Masking value.
    rtol, atol : float, optional
        Tolerance parameters passed on to `isclose`
    copy : bool, optional
        Whether to return a copy of `x`.
    shrink : bool, optional
        Whether to collapse a mask full of False to ``nomask``.

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where approximately equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = np.array([1, 1.1, 2, 1.1, 3])
    >>> ma.masked_values(x, 1.1)
    masked_array(data = [1.0 -- 2.0 -- 3.0],
          mask = [False  True False  True False],
          fill_value=1.1)

    Note that `mask` is set to ``nomask`` if possible.

    >>> ma.masked_values(x, 1.5)
    masked_array(data = [ 1.   1.1  2.   1.1  3. ],
          mask = False,
          fill_value=1.5)

    For integers, the fill value will be different in general to the
    result of ``masked_equal``.

    >>> x = np.arange(5)
    >>> x
    array([0, 1, 2, 3, 4])
    >>> ma.masked_values(x, 2)
    masked_array(data = [0 1 -- 3 4],
          mask = [False False  True False False],
          fill_value=2)
    >>> ma.masked_equal(x, 2)
    masked_array(data = [0 1 -- 3 4],
          mask = [False False  True False False],
          fill_value=999999)

    """
    xnew = filled(x, value)
    if np.issubdtype(xnew.dtype, np.floating):
        mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
    else:
        mask = umath.equal(xnew, value)
    return masked_array(
        xnew, mask=mask, copy=copy, fill_value=value, shrink=shrink)


def masked_invalid(a, copy=True):
    """
    Mask an array where invalid values occur (NaNs or infs).

    This function is a shortcut to ``masked_where``, with
    `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
    Only applies to arrays with a dtype where NaNs or infs make sense
    (i.e. floating point types), but accepts any array_like object.

    See Also
    --------
    masked_where : Mask where a condition is met.
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(5, dtype=float) >>> a[2] = np.NaN >>> a[3] = np.PINF >>> a array([ 0., 1., NaN, Inf, 4.]) >>> ma.masked_invalid(a) masked_array(data = [0.0 1.0 -- -- 4.0], mask = [False False True True False], fill_value=1e+20) """ a = np.array(a, copy=copy, subok=True) mask = getattr(a, '_mask', None) if mask is not None: condition = ~(np.isfinite(getdata(a))) if mask is not nomask: condition |= mask cls = type(a) else: condition = ~(np.isfinite(a)) cls = MaskedArray result = a.view(cls) result._mask = condition return result ############################################################################### # Printing options # ############################################################################### class _MaskedPrintOption(object): """ Handle the string used to represent missing data in a masked array. """ def __init__(self, display): """ Create the masked_print_option object. """ self._display = display self._enabled = True def display(self): """ Display the string to print for masked values. """ return self._display def set_display(self, s): """ Set the string to print for masked values. """ self._display = s def enabled(self): """ Is the use of the display value enabled? """ return self._enabled def enable(self, shrink=1): """ Set the enabling shrink to `shrink`. """ self._enabled = shrink def __str__(self): return str(self._display) __repr__ = __str__ # if you single index into a masked location you get this object. masked_print_option = _MaskedPrintOption('--') def _recursive_printoption(result, mask, printopt): """ Puts printoptions in result where mask is True. Private function allowing for recursion """ names = result.dtype.names if names: for name in names: curdata = result[name] curmask = mask[name] _recursive_printoption(curdata, curmask, printopt) else: np.copyto(result, printopt, where=mask) return # For better or worse, these end in a newline _legacy_print_templates = dict( long_std=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), long_flx=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """), short_std=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), short_flx=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """) ) ############################################################################### # MaskedArray class # ############################################################################### def _recursive_filled(a, mask, fill_value): """ Recursively fill `a` with `fill_value`. """ names = a.dtype.names for name in names: current = a[name] if current.dtype.names: _recursive_filled(current, mask[name], fill_value[name]) else: np.copyto(current, fill_value[name], where=mask[name]) def flatten_structured_array(a): """ Flatten a structured array. The data type of the output is chosen such that it can represent all of the (nested) fields. Parameters ---------- a : structured array Returns ------- output : masked array or ndarray A flattened masked array if the input is a masked array, otherwise a standard ndarray. 
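Notes ----- Nested fields are flattened recursively, and the output dtype is whatever NumPy promotes the collected field values to (in the example below, int and float fields yield a float array).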
Examples -------- >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> flatten_structured_array(a) array([[1., 1.], [2., 2.]]) """ def flatten_sequence(iterable): """ Flattens a compound of nested iterables. """ for elm in iter(iterable): if hasattr(elm, '__iter__'): for f in flatten_sequence(elm): yield f else: yield elm a = np.asanyarray(a) inishape = a.shape a = a.ravel() if isinstance(a, MaskedArray): out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) out = out.view(MaskedArray) out._mask = np.array([tuple(flatten_sequence(d.item())) for d in getmaskarray(a)]) else: out = np.array([tuple(flatten_sequence(d.item())) for d in a]) if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape out.shape = tuple(flatten_sequence(newshape)) return out def _arraymethod(funcname, onmask=True): """ Return a class method wrapper around a basic array method. Creates a class method which returns a masked array, where the new ``_data`` array is the output of the corresponding basic method called on the original ``_data``. If `onmask` is True, the new mask is the output of the method called on the initial mask. Otherwise, the new mask is just a reference to the initial mask. Parameters ---------- funcname : str Name of the function to apply on data. onmask : bool Whether the mask must be processed also (True) or left alone (False). Default is True. Make available as `_onmask` attribute. Returns ------- method : instancemethod Class method wrapper of the specified basic array method. """ def wrapped_method(self, *args, **params): result = getattr(self._data, funcname)(*args, **params) result = result.view(type(self)) result._update_from(self) mask = self._mask if not onmask: result.__setmask__(mask) elif mask is not nomask: # __setmask__ makes a copy, which we don't want result._mask = getattr(mask, funcname)(*args, **params) return result methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) if methdoc is not None: wrapped_method.__doc__ = methdoc.__doc__ wrapped_method.__name__ = funcname return wrapped_method class MaskedIterator(object): """ Flat iterator object to iterate over masked arrays. A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in C-contiguous style, with the last index varying the fastest. The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- MaskedArray.flat : Return a flat iterator over an array. MaskedArray.flatten : Returns a flattened copy of an array. Notes ----- `MaskedIterator` is not exported by the `ma` module. Instead of instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. Examples -------- >>> x = np.ma.array(np.arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) <class 'numpy.ma.core.MaskedIterator'> >>> for item in fl: ... print(item) ...
0 1 2 3 4 5 Extracting more than a single element by indexing the `MaskedIterator` returns a masked array: >>> fl[2:4] masked_array(data = [2 3], mask = False, fill_value = 999999) """ def __init__(self, ma): self.ma = ma self.dataiter = ma._data.flat if ma._mask is nomask: self.maskiter = None else: self.maskiter = ma._mask.flat def __iter__(self): return self def __getitem__(self, indx): result = self.dataiter.__getitem__(indx).view(type(self.ma)) if self.maskiter is not None: _mask = self.maskiter.__getitem__(indx) if isinstance(_mask, ndarray): # set shape to match that of data; this is needed for matrices _mask.shape = result.shape result._mask = _mask elif isinstance(_mask, np.void): return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) elif _mask: # Just a scalar, masked return masked return result # This won't work if ravel makes a copy def __setitem__(self, index, value): self.dataiter[index] = getdata(value) if self.maskiter is not None: self.maskiter[index] = getmaskarray(value) def __next__(self): """ Return the next value, or raise StopIteration. Examples -------- >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> fl.next() 3 >>> fl.next() masked_array(data = --, mask = True, fill_value = 1e+20) >>> fl.next() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next d = self.dataiter.next() StopIteration """ d = next(self.dataiter) if self.maskiter is not None: m = next(self.maskiter) if isinstance(m, np.void): return mvoid(d, mask=m, hardmask=self.ma._hardmask) elif m: # Just a scalar, masked return masked return d next = __next__ class MaskedArray(ndarray): """ An array class with possibly masked values. Masked values of True exclude the corresponding element from any computation. Construction:: x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None) Parameters ---------- data : array_like Input data. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as `data`. True indicates masked (i.e. invalid) data. dtype : dtype, optional Data type of the output. If `dtype` is None, the type of the data argument (``data.dtype``) is used. If `dtype` is not None and different from ``data.dtype``, a copy is performed. copy : bool, optional Whether to copy the input data (True), or to use a reference instead. Default is False. subok : bool, optional Whether to return a subclass of `MaskedArray` if possible (True) or a plain `MaskedArray`. Default is True. ndmin : int, optional Minimum number of dimensions. Default is 0. fill_value : scalar, optional Value used to fill in the masked values when necessary. If None, a default based on the data-type is used. keep_mask : bool, optional Whether to combine `mask` with the mask of the input data, if any (True), or to use only `mask` for the output (False). Default is True. hard_mask : bool, optional Whether to use a hard mask or not. With a hard mask, masked values cannot be unmasked. Default is False. shrink : bool, optional Whether to force compression of an empty mask. Default is True. order : {'C', 'F', 'A'}, optional Specify the order of the array. If order is 'C', then the array will be in C-contiguous order (last-index varies the fastest). If order is 'F', then the returned array will be in Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous), unless a copy is required, in which case it will be C-contiguous. """ __array_priority__ = 15 _defaultmask = nomask _defaulthardmask = False _baseclass = ndarray # Maximum number of elements per axis used when printing an array. The # 1d case is handled separately because we need more values in this case. _print_width = 100 _print_width_1d = 1500 def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None, **options): """ Create a new masked array from scratch. Notes ----- A masked array can also be created by taking a .view(MaskedArray). """ # Process data. _data = np.array(data, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we're not erasing the mask. if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Here, we copy the _view_, so that we can attach new properties to it # we must never do .view(MaskedConstant), as that would create a new # instance of np.ma.masked, which make identity comparison fail if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): _data = ndarray.view(_data, type(data)) else: _data = ndarray.view(_data, cls) # Backwards compatibility w/ numpy.core.ma. if hasattr(data, '_mask') and not isinstance(data, ndarray): _data._mask = data._mask # FIXME _sharedmask is never used. _sharedmask = True # Process mask. # Number of named fields (or zero if none) names_ = _data.dtype.names or () # Type of the mask if names_: mdtype = make_mask_descr(_data.dtype) else: mdtype = MaskType if mask is nomask: # Case 1. : no mask in input. # Erase the current mask ? if not keep_mask: # With a reduced version if shrink: _data._mask = nomask # With full version else: _data._mask = np.zeros(_data.shape, dtype=mdtype) # Check whether we missed something elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array mask = np.array([getmaskarray(m) for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask # Force shrinking of the mask if needed (and possible) if (mdtype == MaskType) and mask.any(): _data._mask = mask _data._sharedmask = False else: if copy: _data._mask = _data._mask.copy() _data._sharedmask = False # Reset the shape of the original mask if getmask(data) is not nomask: data._mask.shape = data.shape else: _data._sharedmask = True else: # Case 2. : With a mask in input. # If mask is boolean, create an array of True or False if mask is True and mdtype == MaskType: mask = np.ones(_data.shape, dtype=mdtype) elif mask is False and mdtype == MaskType: mask = np.zeros(_data.shape, dtype=mdtype) else: # Read the mask with the current mdtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Make sure the mask and the data have the same shape if mask.shape != _data.shape: (nd, nm) = (_data.size, mask.size) if nm == 1: mask = np.resize(mask, _data.shape) elif nm == nd: mask = np.reshape(mask, _data.shape) else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." 
raise MaskError(msg % (nd, nm)) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy else: if not keep_mask: _data._mask = mask _data._sharedmask = not copy else: if names_: def _recursive_or(a, b): "do a|=b on each field of a, recursively" for name in a.dtype.names: (af, bf) = (a[name], b[name]) if af.dtype.names: _recursive_or(af, bf) else: af |= bf return _recursive_or(_data._mask, mask) else: _data._mask = np.logical_or(mask, _data._mask) _data._sharedmask = False # Update fill_value. if fill_value is None: fill_value = getattr(data, '_fill_value', None) # But don't run the check unless we have something to check. if fill_value is not None: _data._fill_value = _check_fill_value(fill_value, _data.dtype) # Process extra options .. if hard_mask is None: _data._hardmask = getattr(data, '_hardmask', False) else: _data._hardmask = hard_mask _data._baseclass = _baseclass return _data def _update_from(self, obj): """ Copies some attributes of obj to self. """ if isinstance(obj, ndarray): _baseclass = type(obj) else: _baseclass = ndarray # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = dict(_fill_value=getattr(obj, '_fill_value', None), _hardmask=getattr(obj, '_hardmask', False), _sharedmask=getattr(obj, '_sharedmask', False), _isfield=getattr(obj, '_isfield', False), _baseclass=getattr(obj, '_baseclass', _baseclass), _optinfo=_optinfo, _basedict=_optinfo) self.__dict__.update(_dict) self.__dict__.update(_optinfo) return def __array_finalize__(self, obj): """ Finalizes the masked array. """ # Get main attributes. self._update_from(obj) # We have to decide how to initialize self.mask, based on # obj.mask. This is very difficult. There might be some # correspondence between the elements in the array we are being # created from (= obj) and us. Or there might not. This method can # be called in all kinds of places for all kinds of reasons -- could # be empty_like, could be slicing, could be a ufunc, could be a view. # The numpy subclassing interface simply doesn't give us any way # to know, which means that at best this method will be based on # guesswork and heuristics. To make things worse, there isn't even any # clear consensus about what the desired behavior is. For instance, # most users think that np.empty_like(marr) -- which goes via this # method -- should return a masked array with an empty mask (see # gh-3404 and linked discussions), but others disagree, and they have # existing code which depends on empty_like returning an array that # matches the input mask. # # Historically our algorithm was: if the template object mask had the # same *number of elements* as us, then we used *its mask object # itself* as our mask, so that writes to us would also write to the # original array. This is horribly broken in multiple ways. # # Now what we do instead is, if the template object mask has the same # number of elements as us, and we do not have the same base pointer # as the template object (b/c views like arr[...] should keep the same # mask), then we make a copy of the template object mask and use # that. This is also horribly broken but somewhat less so. Maybe. if isinstance(obj, ndarray): # XX: This looks like a bug -- shouldn't it check self.dtype # instead?
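# Structured dtypes need the full boolean-record mask array, while # plain dtypes may legitimately report nomask here.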
if obj.dtype.names: _mask = getmaskarray(obj) else: _mask = getmask(obj) # If self and obj point to exactly the same data, then probably # self is a simple view of obj (e.g., self = obj[...]), so they # should share the same mask. (This isn't 100% reliable, e.g. self # could be the first row of obj, or have strange strides, but as a # heuristic it's not bad.) In all other cases, we make a copy of # the mask, so that future modifications to 'self' do not end up # side-effecting 'obj' as well. if (obj.__array_interface__["data"][0] != self.__array_interface__["data"][0]): _mask = _mask.copy() else: _mask = nomask self._mask = _mask # Finalize the mask if self._mask is not nomask: try: self._mask.shape = self.shape except ValueError: self._mask = nomask except (TypeError, AttributeError): # When _mask.shape is not writable (because it's a void) pass # Finalize the fill_value for structured arrays if self.dtype.names: if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) return def __array_wrap__(self, obj, context=None): """ Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ if obj is self: # for in-place operations result = obj else: result = obj.view(type(self)) result._update_from(self) if context is not None: result._mask = result._mask.copy() func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func, None) if domain is not None: # Take the domain, and make sure it's a ndarray if len(input_args) > 2: with np.errstate(divide='ignore', invalid='ignore'): d = filled(reduce(domain, input_args), True) else: with np.errstate(divide='ignore', invalid='ignore'): d = filled(domain(*input_args), True) if d.any(): # Fill the result where the domain is wrong try: # Binary domain: take the last value fill_value = ufunc_fills[func][-1] except TypeError: # Unary domain: just use this one fill_value = ufunc_fills[func] except KeyError: # Domain not recognized, use fill_value instead fill_value = self.fill_value np.copyto(result, fill_value, where=d) # Update the mask if m is nomask: m = d else: # Don't modify inplace, we risk back-propagation m = (m | d) # Make sure the mask has the proper size if result is not self and result.shape == () and m: return masked else: result._mask = m result._sharedmask = False return result def view(self, dtype=None, type=None, fill_value=None): """ Return a view of the MaskedArray data Parameters ---------- dtype : data-type or ndarray sub-class, optional Data-type descriptor of the returned view, e.g., float32 or int16. The default, None, results in the view having the same data-type as `a`. As with ``ndarray.view``, dtype can also be specified as an ndarray sub-class, which then specifies the type of the returned object (this is equivalent to setting the ``type`` parameter). type : Python type, optional Type of the returned view, e.g., ndarray or matrix. Again, the default None results in type preservation. Notes ----- ``a.view()`` is used two different ways: ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view of the array's memory with a different data-type. This can cause a reinterpretation of the bytes of memory. ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just returns an instance of `ndarray_subclass` that looks at the same array (same shape, dtype, etc.) 
This does not cause a reinterpretation of the memory. If `fill_value` is not specified, but `dtype` is specified (and is not an ndarray sub-class), the `fill_value` of the MaskedArray will be reset. If neither `fill_value` nor `dtype` are specified (or if `dtype` is an ndarray sub-class), then the fill value is preserved. Finally, if `fill_value` is specified, but `dtype` is not, the fill value is set to the specified value. For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of bytes per entry than the previous dtype (for example, converting a regular array to a structured array), then the behavior of the view cannot be predicted just from the superficial appearance of ``a`` (shown by ``print(a)``). It also depends on exactly how ``a`` is stored in memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus defined as a slice or transpose, etc., the view may give different results. """ if dtype is None: if type is None: output = ndarray.view(self) else: output = ndarray.view(self, type) elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) except TypeError: output = ndarray.view(self, dtype) else: output = ndarray.view(self, dtype, type) # also make the mask be a view (so attr changes to the view's # mask do not affect original object's mask) # (especially important to avoid affecting np.masked singleton) if (getmask(output) is not nomask): output._mask = output._mask.view() # Make sure to reset the _fill_value if needed if getattr(output, '_fill_value', None) is not None: if fill_value is None: if dtype is None: pass # leave _fill_value as is else: output._fill_value = None else: output.fill_value = fill_value return output view.__doc__ = ndarray.view.__doc__ def astype(self, newtype): """ Returns a copy of the MaskedArray cast to given newtype. Returns ------- output : MaskedArray A copy of self cast to input newtype. The returned record shape matches self.shape. Examples -------- >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1.0 -- 3.1] [-- 5.0 --] [7.0 -- 9.0]] >>> print(x.astype(np.int32)) [[1 -- 3] [-- 5 --] [7 -- 9]] """ newtype = np.dtype(newtype) newmasktype = make_mask_descr(newtype) output = self._data.astype(newtype).view(type(self)) output._update_from(self) if self._mask is nomask: output._mask = nomask else: output._mask = self._mask.astype(newmasktype) # Don't check _fill_value if it's None, that'll speed things up if self._fill_value is not None: output._fill_value = _check_fill_value(self._fill_value, newtype) return output def __getitem__(self, indx): """ x.__getitem__(y) <==> x[y] Return the item described by i, as a masked array. """ # We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the # mask from being reshaped if it hasn't been set up properly yet # So it's easier to stick to the current version dout = self.data[indx] _mask = self._mask def _is_scalar(m): return not isinstance(m, np.ndarray) def _scalar_heuristic(arr, elem): """ Return whether `elem` is a scalar result of indexing `arr`, or None if undecidable without promoting nomask to a full mask """ # obviously a scalar if not isinstance(elem, np.ndarray): return True # object array scalar indexing can return anything elif arr.dtype.type is np.object_: if arr.dtype is not elem.dtype: # elem is an array, but dtypes do not match, so must be # an element return True # well-behaved subclass that only returns 0d arrays when # expected - this is not a scalar elif type(arr).__getitem__ == ndarray.__getitem__: return False return None if _mask is not nomask: # _mask cannot be a subclass, so it tells us whether we should # expect a scalar. It also cannot be of dtype object. mout = _mask[indx] scalar_expected = _is_scalar(mout) else: # attempt to apply the heuristic to avoid constructing a full mask mout = nomask scalar_expected = _scalar_heuristic(self.data, dout) if scalar_expected is None: # heuristics have failed # construct a full array, so we can be certain. This is costly. # we could also fall back on ndarray.__getitem__(self.data, indx) scalar_expected = _is_scalar(getmaskarray(self)[indx]) # Did we extract a single item? if scalar_expected: # A record if isinstance(dout, np.void): # We should always re-cast to mvoid, otherwise users can # change masks on rows that already have masked values, but not # on rows that have no masked values, which is inconsistent. return mvoid(dout, mask=mout, hardmask=self._hardmask) # special case introduced in gh-5962 elif (self.dtype.type is np.object_ and isinstance(dout, np.ndarray) and dout is not masked): # If masked, turn into a MaskedArray, with everything masked. if mout: return MaskedArray(dout, mask=True) else: return dout # Just a scalar else: if mout: return masked else: return dout else: # Force dout to MA dout = dout.view(type(self)) # Inherit attributes from self dout._update_from(self) # Check the fill_value if isinstance(indx, basestring): if self._fill_value is not None: dout._fill_value = self._fill_value[indx] # If we're indexing a multidimensional field in a # structured array (such as dtype("(2,)i2,(2,)i1")), # dimensionality goes up (M[field].ndim == M.ndim + # M.dtype[field].ndim). That's fine for # M[field] but problematic for M[field].fill_value # which should have shape () to avoid breaking several # methods. There is no great way out, so set to # first element. See issue #6723. if dout._fill_value.ndim > 0: if not (dout._fill_value == dout._fill_value.flat[0]).all(): warnings.warn( "Upon accessing multidimensional field " "{indx:s}, need to keep dimensionality " "of fill_value at 0. Discarding " "heterogeneous fill_value and setting " "all to {fv!s}.".format(indx=indx, fv=dout._fill_value[0]), stacklevel=2) dout._fill_value = dout._fill_value.flat[0] dout._isfield = True # Update the mask if needed if mout is not nomask: # set shape to match that of data; this is needed for matrices dout._mask = reshape(mout, dout.shape) dout._sharedmask = True # Note: Don't try to check for m.any(), that'll take too long return dout def __setitem__(self, indx, value): """ x.__setitem__(i, y) <==> x[i]=y Set item described by index. If value is masked, masks those locations.
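Examples -------- A minimal sketch of the usual soft-mask behaviour (output shown with ``print``, since the exact ``repr`` layout depends on the print options in effect): >>> x = np.ma.array([1, 2, 3]) >>> x[1] = np.ma.masked >>> print(x) [1 -- 3] >>> x[1] = 5 >>> print(x) [1 5 3]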
""" if self is masked: raise MaskError('Cannot alter the masked element.') _data = self._data _mask = self._mask if isinstance(indx, basestring): _data[indx] = value if _mask is nomask: self._mask = _mask = make_mask_none(self.shape, self.dtype) _mask[indx] = getmask(value) return _dtype = _data.dtype nbfields = len(_dtype.names or ()) if value is masked: # The mask wasn't set: create a full version. if _mask is nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. if nbfields: _mask[indx] = tuple([True] * nbfields) else: _mask[indx] = True return # Get the _data part of the new value dval = getattr(value, '_data', value) # Get the _mask part of the new value mval = getmask(value) if nbfields and mval is nomask: mval = tuple([False] * nbfields) if _mask is nomask: # Set the data, then the mask _data[indx] = dval if mval is not nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) _mask[indx] = mval elif not self._hardmask: # Set the data, then the mask _data[indx] = dval _mask[indx] = mval elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): indx = indx * umath.logical_not(_mask) _data[indx] = dval else: if nbfields: err_msg = "Flexible 'hard' masks are not yet supported." raise NotImplementedError(err_msg) mindx = mask_or(_mask[indx], mval, copy=True) dindx = self._data[indx] if dindx.size > 1: np.copyto(dindx, dval, where=~mindx) elif mindx is nomask: dindx = dval _data[indx] = dindx _mask[indx] = mindx return def __setattr__(self, attr, value): super(MaskedArray, self).__setattr__(attr, value) if attr == 'dtype' and self._mask is not nomask: self._mask = self._mask.view(make_mask_descr(value), ndarray) # Try to reset the shape of the mask (if we don't have a void) # This raises a ValueError if the dtype change won't work try: self._mask.shape = self.shape except (AttributeError, TypeError): pass def __setmask__(self, mask, copy=False): """ Set the mask. """ idtype = self.dtype current_mask = self._mask if mask is masked: mask = True if (current_mask is nomask): # Make sure the mask is set # Just don't do anything if there's nothing to do. if mask is nomask: return current_mask = self._mask = make_mask_none(self.shape, idtype) if idtype.names is None: # No named fields. # Hardmask: don't unmask the data if self._hardmask: current_mask |= mask # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool_, np.number)): current_mask[...] = mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask else: # Named fields w/ mdtype = current_mask.dtype mask = np.array(mask, copy=False) # Mask is a singleton if not mask.ndim: # It's a boolean : make a record if mask.dtype.kind == 'b': mask = np.array(tuple([mask.item()] * len(mdtype)), dtype=mdtype) # It's a record: make sure the dtype is correct else: mask = mask.astype(mdtype) # Mask is a sequence else: # Make sure the new mask is a ndarray with the proper dtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Hardmask: don't unmask the data if self._hardmask: for n in idtype.names: current_mask[n] |= mask[n] # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool_, np.number)): current_mask[...] 
= mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask # Reshape if needed if current_mask.shape: current_mask.shape = self.shape return _set_mask = __setmask__ def _get_mask(self): """Return the current mask. """ # We could try to force a reshape, but that wouldn't work in some # cases. return self._mask mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") def _get_recordmask(self): """ Return the mask of the records. A record is masked when all the fields are masked. """ _mask = self._mask.view(ndarray) if _mask.dtype.names is None: return _mask return np.all(flatten_structured_array(_mask), axis=-1) def _set_recordmask(self): """ Set the mask of the records (not yet implemented). A record is masked when all the fields are masked. """ raise NotImplementedError("Coming soon: setting the mask per records!") recordmask = property(fget=_get_recordmask) def harden_mask(self): """ Force the mask to hard. Whether the mask of a masked array is hard or soft is determined by its `hardmask` property. `harden_mask` sets `hardmask` to True. See Also -------- hardmask """ self._hardmask = True return self def soften_mask(self): """ Force the mask to soft. Whether the mask of a masked array is hard or soft is determined by its `hardmask` property. `soften_mask` sets `hardmask` to False. See Also -------- hardmask """ self._hardmask = False return self hardmask = property(fget=lambda self: self._hardmask, doc="Hardness of the mask") def unshare_mask(self): """ Copy the mask and set the sharedmask flag to False. Whether the mask is shared between masked arrays can be seen from the `sharedmask` property. `unshare_mask` ensures the mask is not shared. A copy of the mask is only made if it was shared. See Also -------- sharedmask """ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False return self sharedmask = property(fget=lambda self: self._sharedmask, doc="Share status of the mask (read-only).") def shrink_mask(self): """ Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- None Examples -------- >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]]) >>> x.shrink_mask() >>> x.mask False """ self._mask = _shrink_mask(self._mask) return self baseclass = property(fget=lambda self: self._baseclass, doc="Class of the underlying data (read-only).") def _get_data(self): """Return the current data, as a view of the original underlying data. """ return ndarray.view(self, self._baseclass) _data = property(fget=_get_data) data = property(fget=_get_data) def _get_flat(self): "Return a flat iterator." return MaskedIterator(self) def _set_flat(self, value): "Set a flattened version of self to value." y = self.ravel() y[:] = value flat = property(fget=_get_flat, fset=_set_flat, doc="Flat version of the array.") def get_fill_value(self): """ Return the filling value of the masked array. Returns ------- fill_value : scalar The filling value. Examples -------- >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... 999999 999999 1e+20 (1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.get_fill_value() -inf """ if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) # Temporary workaround to account for the fact that str and bytes # scalars cannot be indexed with (), whereas all other numpy # scalars can. See issues #7259 and #7267. # The if-block can be removed after #7267 has been fixed.
if isinstance(self._fill_value, ndarray): return self._fill_value[()] return self._fill_value def set_fill_value(self, value=None): """ Set the filling value of the masked array. Parameters ---------- value : scalar, optional The new filling value. Default is None, in which case a default based on the data type is used. See Also -------- ma.set_fill_value : Equivalent function. Examples -------- >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value -inf >>> x.set_fill_value(np.pi) >>> x.fill_value 3.1415926535897931 Reset to default: >>> x.set_fill_value() >>> x.fill_value 1e+20 """ target = _check_fill_value(value, self.dtype) _fill_value = self._fill_value if _fill_value is None: # Create the attribute if it was undefined self._fill_value = target else: # Don't overwrite the attribute, just fill it (for propagation) _fill_value[()] = target fill_value = property(fget=get_fill_value, fset=set_fill_value, doc="Filling value.") def filled(self, fill_value=None): """ Return a copy of self, with masked values filled with a given value. **However**, if there are no masked values to fill, self will be returned instead as an ndarray. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries (None by default). If None, the `fill_value` attribute of the array is used instead. Returns ------- filled_array : ndarray A copy of ``self`` with invalid entries replaced by *fill_value* (be it the function argument or the attribute of ``self``), or ``self`` itself as an ndarray if there are no invalid entries to be replaced. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([1, 2, -999, 4, -999]) >>> type(x.filled()) <type 'numpy.ndarray'> Subclassing is preserved. This means that if the data part of the masked array is a matrix, `filled` returns a matrix: >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.filled() matrix([[ 1, 999999], [999999, 4]]) """ m = self._mask if m is nomask: return self._data if fill_value is None: fill_value = self.fill_value else: fill_value = _check_fill_value(fill_value, self.dtype) if self is masked_singleton: return np.asanyarray(fill_value) if m.dtype.names: result = self._data.copy('K') _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: result = self._data.copy('K') try: np.copyto(result, fill_value, where=m) except (TypeError, AttributeError): fill_value = narray(fill_value, dtype=object) d = result.astype(object) result = np.choose(m, (d, fill_value)) except IndexError: # ok, if scalar if self._data.shape: raise elif m: result = np.array(fill_value, dtype=self.dtype) else: result = self._data return result def compressed(self): """ Return all the non-masked data as a 1-D array. Returns ------- data : ndarray A new `ndarray` holding the non-masked data is returned. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) >>> type(x.compressed()) <type 'numpy.ndarray'> """ data = ndarray.ravel(self._data) if self._mask is not nomask: data = data.compress(np.logical_not(ndarray.ravel(self._mask))) return data def compress(self, condition, axis=None, out=None): """ Return `a` where condition is ``True``. If condition is a `MaskedArray`, missing values are considered as ``False``. 
Parameters ---------- condition : var Boolean 1-d array selecting which entries to return. If len(condition) is less than the size of a along the axis, then output is truncated to length of condition array. axis : {None, int}, optional Axis along which the operation must be performed. out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- result : MaskedArray A :class:`MaskedArray` object. Notes ----- Please note the difference with :meth:`compressed` ! The output of :meth:`compress` has a mask, the output of :meth:`compressed` does not. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> x.compress([1, 0, 1]) masked_array(data = [1 3], mask = [False False], fill_value=999999) >>> x.compress([1, 0, 1], axis=1) masked_array(data = [[1 3] [-- --] [7 9]], mask = [[False False] [ True True] [False False]], fill_value=999999) """ # Get the basic components (_data, _mask) = (self._data, self._mask) # Force the condition to a regular ndarray and forget the missing # values. condition = np.array(condition, copy=False, subok=False) _new = _data.compress(condition, axis=axis, out=out).view(type(self)) _new._update_from(self) if _mask is not nomask: _new._mask = _mask.compress(condition, axis=axis) return _new def _insert_masked_print(self): """ Replace masked values with masked_print_option, casting all innermost dtypes to object. """ if masked_print_option.enabled(): mask = self._mask if mask is nomask: res = self._data else: # convert to object array to make filled work data = self._data # For big arrays, to avoid a costly conversion to the # object dtype, extract the corners before the conversion. print_width = (self._print_width if self.ndim > 1 else self._print_width_1d) for axis in range(self.ndim): if data.shape[axis] > print_width: ind = print_width // 2 arr = np.split(data, (ind, -ind), axis=axis) data = np.concatenate((arr[0], arr[2]), axis=axis) arr = np.split(mask, (ind, -ind), axis=axis) mask = np.concatenate((arr[0], arr[2]), axis=axis) rdtype = _replace_dtype_fields(self.dtype, "O") res = data.astype(rdtype) _recursive_printoption(res, mask, masked_print_option) else: res = self.filled(self.fill_value) return res def __str__(self): return str(self._insert_masked_print()) if sys.version_info.major < 3: def __unicode__(self): return unicode(self._insert_masked_print()) def __repr__(self): """ Literal string representation. 
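As the code below shows, the pre-1.14 templates are used when ``np.get_printoptions()['legacy'] == '1.13'``; otherwise a ``masked_array(data=..., mask=..., fill_value=...)`` style string is built, with ``dtype`` appended only when it cannot be implied.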
""" if self._baseclass is np.ndarray: name = 'array' else: name = self._baseclass.__name__ # 2016-11-19: Demoted to legacy format if np.get_printoptions()['legacy'] == '1.13': is_long = self.ndim > 1 parameters = dict( name=name, nlen=" " * len(name), data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype) ) is_structured = bool(self.dtype.names) key = '{}_{}'.format( 'long' if is_long else 'short', 'flx' if is_structured else 'std' ) return _legacy_print_templates[key] % parameters prefix = 'masked_{}('.format(name) dtype_needed = ( not np.core.arrayprint.dtype_is_implied(self.dtype) or np.all(self.mask) or self.size == 0 ) # determine which keyword args need to be shown keys = ['data', 'mask', 'fill_value'] if dtype_needed: keys.append('dtype') # array has only one row (non-column) is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) # choose what to indent each keyword with min_indent = 2 if is_one_row: # first key on the same line as the type, remaining keys # aligned by equals indents = {} indents[keys[0]] = prefix for k in keys[1:]: n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) indents[k] = ' ' * n prefix = '' # absorbed into the first indent else: # each key on its own line, indented by two spaces indents = {k: ' ' * min_indent for k in keys} prefix = prefix + '\n' # first key on the next line # format the field values reprs = {} reprs['data'] = np.array2string( self._insert_masked_print(), separator=", ", prefix=indents['data'] + 'data=', suffix=',') reprs['mask'] = np.array2string( self._mask, separator=", ", prefix=indents['mask'] + 'mask=', suffix=',') reprs['fill_value'] = repr(self.fill_value) if dtype_needed: reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) # join keys with values and indentations result = ',\n'.join( '{}{}={}'.format(indents[k], k, reprs[k]) for k in keys ) return prefix + result + ')' def _delegate_binop(self, other): # This emulates the logic in # private/binop_override.h:forward_binop_should_defer if isinstance(other, type(self)): return False array_ufunc = getattr(other, "__array_ufunc__", False) if array_ufunc is False: other_priority = getattr(other, "__array_priority__", -1000000) return self.__array_priority__ < other_priority else: # If array_ufunc is not None, it will be called inside the ufunc; # None explicitly tells us to not call the ufunc, i.e., defer. return array_ufunc is None def _comparison(self, other, compare): """Compare self with other using operator.eq or operator.ne. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ omask = getmask(other) smask = self.mask mask = mask_or(smask, omask, copy=True) odata = getdata(other) if mask.dtype.names: # For possibly masked structured arrays we need to be careful, # since the standard structured array comparison will use all # fields, masked or not. To avoid masked fields influencing the # outcome, we set all masked fields in self to other, so they'll # count as equal. To prepare, we ensure we have the right shape. 
broadcast_shape = np.broadcast(self, odata).shape sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) sbroadcast._mask = mask sdata = sbroadcast.filled(odata) # Now take care of the mask; the merged mask should have an item # masked if all fields were masked (in one and/or other). mask = (mask == np.ones((), mask.dtype)) else: # For regular arrays, just use the data as they come. sdata = self.data check = compare(sdata, odata) if isinstance(check, (np.bool_, bool)): return masked if mask else check if mask is not nomask: # Adjust elements that were masked, which should be treated # as equal if masked in both, unequal if masked in one. # Note that this works automatically for structured arrays too. check = np.where(mask, compare(smask, omask), check) if mask.shape != check.shape: # Guarantee consistency of the shape, making a copy since the # mask may need to get written to later. mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) check._update_from(self) check._mask = mask return check def __eq__(self, other): """Check whether other equals self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.eq) def __ne__(self, other): """Check whether other does not equal self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.ne) def __add__(self, other): """ Add self to other, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return add(self, other) def __radd__(self, other): """ Add other to self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other + self`. return add(other, self) def __sub__(self, other): """ Subtract other from self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return subtract(self, other) def __rsub__(self, other): """ Subtract self from other, and return a new masked array. """ return subtract(other, self) def __mul__(self, other): "Multiply self by other, and return a new masked array." if self._delegate_binop(other): return NotImplemented return multiply(self, other) def __rmul__(self, other): """ Multiply other by self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other * self`. return multiply(other, self) def __div__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return divide(self, other) def __truediv__(self, other): """ Divide other into self, and return a new masked array.
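A minimal sketch (masked entries simply propagate; shown with ``print`` as the exact ``repr`` layout varies): >>> print(np.ma.array([1., 2.], mask=[0, 1]) / 2) [0.5 --]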
""" if self._delegate_binop(other): return NotImplemented return true_divide(self, other) def __rtruediv__(self, other): """ Divide self into other, and return a new masked array. """ return true_divide(other, self) def __floordiv__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return floor_divide(self, other) def __rfloordiv__(self, other): """ Divide self into other, and return a new masked array. """ return floor_divide(other, self) def __pow__(self, other): """ Raise self to the power other, masking the potential NaNs/Infs """ if self._delegate_binop(other): return NotImplemented return power(self, other) def __rpow__(self, other): """ Raise other to the power self, masking the potential NaNs/Infs """ return power(other, self) def __iadd__(self, other): """ Add other to self in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m else: if m is not nomask: self._mask += m self._data.__iadd__(np.where(self._mask, self.dtype.type(0), getdata(other))) return self def __isub__(self, other): """ Subtract other from self in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m self._data.__isub__(np.where(self._mask, self.dtype.type(0), getdata(other))) return self def __imul__(self, other): """ Multiply self by other in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m self._data.__imul__(np.where(self._mask, self.dtype.type(1), getdata(other))) return self def __idiv__(self, other): """ Divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__idiv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __ifloordiv__(self, other): """ Floor divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.floor_divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __itruediv__(self, other): """ True divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.true_divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__itruediv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __ipow__(self, other): """ Raise self to the power other, in place. 
""" other_data = getdata(other) other_mask = getmask(other) with np.errstate(divide='ignore', invalid='ignore'): self._data.__ipow__(np.where(self._mask, self.dtype.type(1), other_data)) invalid = np.logical_not(np.isfinite(self._data)) if invalid.any(): if self._mask is not nomask: self._mask |= invalid else: self._mask = invalid np.copyto(self._data, self.fill_value, where=invalid) new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) return self def __float__(self): """ Convert to float. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) return np.nan return float(self.item()) def __int__(self): """ Convert to int. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: raise MaskError('Cannot convert masked element to a Python int.') return int(self.item()) def __long__(self): """ Convert to long. """ if self.size > 1: raise TypeError("Only length-1 arrays can be conveted " "to Python scalars") elif self._mask: raise MaskError('Cannot convert masked element to a Python long.') return long(self.item()) def get_imag(self): """ Return the imaginary part of the masked array. The returned array is a view on the imaginary part of the `MaskedArray` whose `get_imag` method is called. Parameters ---------- None Returns ------- result : MaskedArray The imaginary part of the masked array. See Also -------- get_real, real, imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_imag() masked_array(data = [1.0 -- 1.6], mask = [False True False], fill_value = 1e+20) """ result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result imag = property(fget=get_imag, doc="Imaginary part.") def get_real(self): """ Return the real part of the masked array. The returned array is a view on the real part of the `MaskedArray` whose `get_real` method is called. Parameters ---------- None Returns ------- result : MaskedArray The real part of the masked array. See Also -------- get_imag, real, imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_real() masked_array(data = [1.0 -- 3.45], mask = [False True False], fill_value = 1e+20) """ result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result real = property(fget=get_real, doc="Real part") def count(self, axis=None, keepdims=np._NoValue): """ Count the non-masked elements of the array along the given axis. Parameters ---------- axis : None or int or tuple of ints, optional Axis or axes along which the count is performed. The default (`axis` = `None`) performs the count over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.10.0 If this is a tuple of ints, the count is performed on multiple axes, instead of a single axis or all the axes as before. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- result : ndarray or scalar An array with the same shape as the input array, with the specified axis removed. If the array is a 0-d array, or if `axis` is None, a scalar is returned. 
See Also -------- count_masked : Count masked elements in array or along a given axis. Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(6).reshape((2, 3)) >>> a[1, :] = ma.masked >>> a masked_array(data = [[0 1 2] [-- -- --]], mask = [[False False False] [ True True True]], fill_value = 999999) >>> a.count() 3 When the `axis` keyword is specified an array of appropriate size is returned. >>> a.count(axis=0) array([1, 1, 1]) >>> a.count(axis=1) array([3, 0]) """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} m = self._mask # special case for matrices (we assume no other subclasses modify # their dimensions) if isinstance(self.data, np.matrix): if m is nomask: m = np.zeros(self.shape, dtype=np.bool_) m = m.view(type(self.data)) if m is nomask: # compare to _count_reduce_items in _methods.py if self.shape == (): if axis not in (None, 0): raise np.AxisError(axis=axis, ndim=self.ndim) return 1 elif axis is None: if kwargs.get('keepdims', False): return np.array(self.size, dtype=np.intp, ndmin=self.ndim) return self.size axes = normalize_axis_tuple(axis, self.ndim) items = 1 for ax in axes: items *= self.shape[ax] if kwargs.get('keepdims', False): out_dims = list(self.shape) for a in axes: out_dims[a] = 1 else: out_dims = [d for n, d in enumerate(self.shape) if n not in axes] # make sure to return a 0-d array if axis is supplied return np.full(out_dims, items, dtype=np.intp) # take care of the masked singleton if self is masked: return 0 return (~m).sum(axis=axis, dtype=np.intp, **kwargs) def ravel(self, order='C'): """ Returns a 1D version of self, as a view. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional The elements of `a` are read using this index order. 'C' means to index the elements in C-like order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to index the elements in Fortran-like index order, with the first index changing fastest, and the last index changing slowest. Note that the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of axis indexing. 'A' means to read the elements in Fortran-like index order if `m` is Fortran *contiguous* in memory, C-like order otherwise. 'K' means to read the elements in the order they occur in memory, except for reversing the data when strides are negative. By default, 'C' index order is used. Returns ------- MaskedArray Output view is of shape ``(self.size,)`` (or ``(np.ma.product(self.shape),)``). Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print(x.ravel()) [1 -- 3 -- 5 -- 7 -- 9] """ r = ndarray.ravel(self._data, order=order).view(type(self)) r._update_from(self) if self._mask is not nomask: r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) else: r._mask = nomask return r def reshape(self, *s, **kwargs): """ Give a new shape to the array without changing its data. Returns a masked array containing the same data, but with a new shape. The result is a view on the original array; if this is not possible, a ValueError is raised. Parameters ---------- shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer is supplied, then the result will be a 1-D array of that length. order : {'C', 'F'}, optional Determines whether the array data should be viewed as in C (row-major) or FORTRAN (column-major) order.
Returns ------- reshaped_array : array A new view on the array. See Also -------- reshape : Equivalent function in the masked array module. numpy.ndarray.reshape : Equivalent method on ndarray object. numpy.reshape : Equivalent function in the NumPy module. Notes ----- The reshaping operation cannot guarantee that a copy will not be made, to modify the shape in place, use ``a.shape = s`` Examples -------- >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> print(x) [[-- 2] [3 --]] >>> x = x.reshape((4,1)) >>> print(x) [[--] [2] [3] [--]] """ kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask if mask is not nomask: result._mask = mask.reshape(*s, **kwargs) return result def resize(self, newshape, refcheck=True, order=False): """ .. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the `numpy.ma.resize` function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy. """ # Note : the 'order' keyword looks broken, let's just drop it errmsg = "A masked array does not own its data "\ "and therefore cannot be resized.\n" \ "Use the numpy.ma.resize function instead." raise ValueError(errmsg) def put(self, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If `values` is shorter than `indices` then it will repeat. If `values` has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. Notes ----- `values` can be a scalar or length 1 array. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> x.put([0,4,8],[10,20,30]) >>> print(x) [[10 -- 3] [-- 20 --] [7 -- 30]] >>> x.put(4,999) >>> print(x) [[10 -- 3] [-- 999 --] [7 -- 30]] """ # Hard mask: Get rid of the values/indices that fall on masked data if self._hardmask and self._mask is not nomask: mask = self._mask[indices] indices = narray(indices, copy=False) values = narray(values, copy=False, subok=True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] self._data.put(indices, values, mode=mode) # short circuit if neither self nor values are masked if self._mask is nomask and getmask(values) is nomask: return m = getmaskarray(self) if getmask(values) is nomask: m.put(indices, False, mode=mode) else: m.put(indices, values._mask, mode=mode) m = make_mask(m, copy=False, shrink=True) self._mask = m return def ids(self): """ Return the addresses of the data and mask areas. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) If the array has no mask, the address of `nomask` is returned. 
This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() (166691080, 3083169284L) """ if self._mask is nomask: return (self.ctypes.data, id(nomask)) return (self.ctypes.data, self._mask.ctypes.data) def iscontiguous(self): """ Return a boolean indicating whether the data is contiguous. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True `iscontiguous` returns one of the flags of the masked array: >>> x.flags C_CONTIGUOUS : True F_CONTIGUOUS : True OWNDATA : False WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ return self.flags['CONTIGUOUS'] def all(self, axis=None, out=None, keepdims=np._NoValue): """ Returns True if all elements evaluate to True. The output array is masked where all the values along the given axis are masked: if the output would have been a scalar and that all the values are masked, then the output is `masked`. Refer to `numpy.all` for full documentation. See Also -------- ndarray.all : corresponding function for ndarrays numpy.all : equivalent function Examples -------- >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) >>> (a.all() is np.ma.masked) True """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} mask = _check_mask_axis(self._mask, axis, **kwargs) if out is None: d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: return masked return d self.filled(True).all(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def any(self, axis=None, out=None, keepdims=np._NoValue): """ Returns True if any of the elements of `a` evaluate to True. Masked values are considered as False during computation. Refer to `numpy.any` for full documentation. See Also -------- ndarray.any : corresponding function for ndarrays numpy.any : equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} mask = _check_mask_axis(self._mask, axis, **kwargs) if out is None: d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: d = masked return d self.filled(False).any(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def nonzero(self): """ Return the indices of unmasked elements that are not zero. Returns a tuple of arrays, one for each dimension, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: a[a.nonzero()] To group the indices by element, rather than dimension, use instead:: np.transpose(a.nonzero()) The result of this is always a 2d array, with a row for each non-zero element. Parameters ---------- None Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array(data = [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.]], mask = False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. 
>>> x[1, 1] = ma.masked >>> x masked_array(data = [[1.0 0.0 0.0] [0.0 -- 0.0] [0.0 0.0 1.0]], mask = [[False False False] [False True False] [False False False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array(data = [[False False False] [ True True True] [ True True True]], mask = False, fill_value=999999) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ return narray(self.filled(0), copy=False).nonzero() def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ #!!!: implement out + test! m = self._mask if m is nomask: result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, axis2=axis2, out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=-1, out=out) trace.__doc__ = ndarray.trace.__doc__ def dot(self, b, out=None, strict=False): """ a.dot(b, out=None) Masked dot product of two arrays. Note that `out` and `strict` are located in different positions than in `ma.dot`. In order to maintain compatibility with the functional version, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. .. versionadded:: 1.10.0 Parameters ---------- b : masked_array_like Inputs array. out : masked_array, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `ma.dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. strict : bool, optional Whether masked data are propagated (True) or set to 0 (False) for the computation. Default is False. Propagating the mask means that if a masked value appears in a row or column, the whole row or column is considered masked. .. versionadded:: 1.10.2 See Also -------- numpy.ma.dot : equivalent function """ return dot(self, b, out=out, strict=strict) def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the sum of the array elements over the given axis. Masked elements are set to 0 internally. Refer to `numpy.sum` for full documentation. 
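        As a small sketch (values assumed for illustration): when every
        element is masked and no axis is given, the result is the `masked`
        singleton rather than 0.

        >>> a = np.ma.array([1, 2, 3], mask=[1, 1, 1])
        >>> a.sum() is np.ma.masked
        True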
See Also -------- ndarray.sum : corresponding function for ndarrays numpy.sum : equivalent function Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print(x.sum()) 25 >>> print(x.sum(axis=1)) [4 5 16] >>> print(x.sum(axis=0)) [8 5 12] >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) <type 'numpy.int64'> """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) # No explicit output if out is None: result = self.filled(0).sum(axis, dtype=dtype, **kwargs) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out def cumsum(self, axis=None, dtype=None, out=None): """ Return the cumulative sum of the array elements over the given axis. Masked values are set to 0 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to `numpy.cumsum` for full documentation. Notes ----- The mask is lost if `out` is not a valid :class:`MaskedArray` ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- ndarray.cumsum : corresponding function for ndarrays numpy.cumsum : equivalent function Examples -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> print(marr.cumsum()) [0 1 3 -- -- -- 9 16 24 33] """ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self.mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of the array elements over the given axis. Masked elements are set to 1 internally for computation. Refer to `numpy.prod` for full documentation. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- ndarray.prod : corresponding function for ndarrays numpy.prod : equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) # No explicit output if out is None: result = self.filled(1).prod(axis, dtype=dtype, **kwargs) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out product = prod def cumprod(self, axis=None, dtype=None, out=None): """ Return the cumulative product of the array elements over the given axis. Masked values are set to 1 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to `numpy.cumprod` for full documentation. Notes ----- The mask is lost if `out` is not a valid MaskedArray ! Arithmetic is modular when using integer types, and no error is raised on overflow. 
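        As an illustrative sketch (values assumed): masked entries contribute
        a factor of 1 to the running product but stay masked in the output.

        >>> marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
        >>> print(marr.cumprod())
        [1 -- 3 12]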
See Also -------- ndarray.cumprod : corresponding function for ndarrays numpy.cumprod : equivalent function """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Returns the average of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.mean` for full documentation. See Also -------- ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average: Weighted average. Examples -------- >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data = [1 2 --], mask = [False False True], fill_value = 999999) >>> a.mean() 1.5 """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if self._mask is nomask: result = super(MaskedArray, self).mean(axis=axis, dtype=dtype, **kwargs)[()] else: dsum = self.sum(axis=axis, dtype=dtype, **kwargs) cnt = self.count(axis=axis, **kwargs) if cnt.shape == () and (cnt == 0): result = masked else: result = dsum * 1. / cnt if out is not None: out.flat = result if isinstance(out, MaskedArray): outmask = getmask(out) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = getmask(result) return out return result def anom(self, axis=None, dtype=None): """ Compute the anomalies (deviations from the arithmetic mean) along the given axis. Returns an array of anomalies, with the same shape as the input and where the arithmetic mean is computed along the given axis. Parameters ---------- axis : int, optional Axis over which the anomalies are taken. The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type the default is float32; for arrays of float types it is the same as the array type. See Also -------- mean : Compute the mean of the array. Examples -------- >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data = [-1. 0. 1.], mask = False, fill_value = 1e+20) """ m = self.mean(axis, dtype) if m is masked: return m if not axis: return (self - m) else: return (self - expand_dims(m, axis)) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Returns the variance of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.var` for full documentation. See Also -------- ndarray.var : corresponding function for ndarrays numpy.var : Equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} # Easy case: nomask, business as usual if self._mask is nomask: ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)[()] if out is not None: if isinstance(out, MaskedArray): out.__setmask__(nomask) return out return ret # Some data are masked, yay! 
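        # Illustrative note (not part of the original source): with a mask,
        # the variance is effectively sum(|x - mean|**2) / (count(axis) - ddof)
        # over the unmasked entries only.  For example (assumed values),
        # np.ma.array([1., 2., 3., 10.], mask=[0, 0, 0, 1]).var() == 2.0 / 3.0.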
        cnt = self.count(axis=axis, **kwargs) - ddof
        danom = self - self.mean(axis, dtype, keepdims=True)
        if iscomplexobj(self):
            danom = umath.absolute(danom) ** 2
        else:
            danom *= danom
        dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
        # Apply the mask if it's not a scalar
        if dvar.ndim:
            dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
            dvar._update_from(self)
        elif getmask(dvar):
            # Make sure that masked is returned when the scalar is masked.
            dvar = masked
            if out is not None:
                if isinstance(out, MaskedArray):
                    out.flat = 0
                    out.__setmask__(True)
                elif out.dtype.kind in 'biu':
                    errmsg = "Masked data information would be lost in one or "\
                             "more location."
                    raise MaskError(errmsg)
                else:
                    out.flat = np.nan
                return out
        # In case we have an explicit output
        if out is not None:
            # Set the data
            out.flat = dvar
            # Set the mask if needed
            if isinstance(out, MaskedArray):
                out.__setmask__(dvar.mask)
            return out
        return dvar
    var.__doc__ = np.var.__doc__

    def std(self, axis=None, dtype=None, out=None, ddof=0,
            keepdims=np._NoValue):
        """
        Returns the standard deviation of the array elements along given axis.

        Masked entries are ignored.

        Refer to `numpy.std` for full documentation.

        See Also
        --------
        ndarray.std : corresponding function for ndarrays
        numpy.std : Equivalent function
        """
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

        dvar = self.var(axis, dtype, out, ddof, **kwargs)
        if dvar is not masked:
            if out is not None:
                np.power(out, 0.5, out=out, casting='unsafe')
                return out
            dvar = sqrt(dvar)
        return dvar

    def round(self, decimals=0, out=None):
        """
        Return each element rounded to the given number of decimals.

        Refer to `numpy.around` for full documentation.

        See Also
        --------
        ndarray.around : corresponding function for ndarrays
        numpy.around : equivalent function
        """
        result = self._data.round(decimals=decimals, out=out).view(type(self))
        if result.ndim > 0:
            result._mask = self._mask
            result._update_from(self)
        elif self._mask:
            # Return masked when the scalar is masked
            result = masked
        # No explicit output: we're done
        if out is None:
            return result
        if isinstance(out, MaskedArray):
            out.__setmask__(self._mask)
        return out

    def argsort(self, axis=np._NoValue, kind='quicksort', order=None,
                endwith=True, fill_value=None):
        """
        Return an ndarray of indices that sort the array along the
        specified axis.  Masked values are filled beforehand to
        `fill_value`.

        Parameters
        ----------
        axis : int, optional
            Axis along which to sort. If None, the default, the flattened
            array is used.

            .. versionchanged:: 1.13.0
                Previously, the default was documented to be -1, but that was
                in error. At some future date, the default will change to -1,
                as originally intended.
                Until then, the axis should be given explicitly when
                ``arr.ndim > 1``, to avoid a FutureWarning.
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            Sorting algorithm.
        order : list, optional
            When `a` is an array with fields defined, this argument specifies
            which fields to compare first, second, etc.  Not all fields need
            be specified.
        endwith : {True, False}, optional
            Whether missing values (if any) should be treated as the largest
            values (True) or the smallest values (False).
            When the array contains unmasked values at the same extremes of
            the datatype, the ordering of these values and the masked values
            is undefined.
        fill_value : {var}, optional
            Value used internally for the masked values.
            If ``fill_value`` is not None, it supersedes ``endwith``.

        Returns
        -------
        index_array : ndarray, int
            Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`. See Also -------- MaskedArray.sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. ndarray.sort : Inplace sort. Notes ----- See `sort` for notes on the different sorting algorithms. Examples -------- >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data = [3 2 --], mask = [False False True], fill_value = 999999) >>> a.argsort() array([1, 0, 2]) """ # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default if axis is np._NoValue: axis = _deprecate_argsort_axis(self) if fill_value is None: if endwith: # nan > inf if np.issubdtype(self.dtype, np.floating): fill_value = np.nan else: fill_value = minimum_fill_value(self) else: fill_value = maximum_fill_value(self) filled = self.filled(fill_value) return filled.argsort(axis=axis, kind=kind, order=order) def argmin(self, axis=None, fill_value=None, out=None): """ Return array of indices to the minimum values along the given axis. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. If None, the output of minimum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- ndarray or scalar If multi-dimension input, returns a new ndarray of indices to the minimum values along the given axis. Otherwise, returns a scalar of index to the minimum values along the given axis. Examples -------- >>> x = np.ma.array(arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> print(x) [[-- --] [2 3]] >>> print(x.argmin(axis=0, fill_value=-1)) [0 0] >>> print(x.argmin(axis=0, fill_value=9)) [1 1] """ if fill_value is None: fill_value = minimum_fill_value(self) d = self.filled(fill_value).view(ndarray) return d.argmin(axis, out=out) def argmax(self, axis=None, fill_value=None, out=None): """ Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- index_array : {integer_array} Examples -------- >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2]) """ if fill_value is None: fill_value = maximum_fill_value(self._data) d = self.filled(fill_value).view(ndarray) return d.argmax(axis, out=out) def sort(self, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): """ Sort the array, in-place Parameters ---------- a : array_like Array to be sorted. axis : int, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. Default is 'quicksort'. order : list, optional When `a` is a structured array, this argument specifies which fields to compare first, second, and so on. This list does not need to include all of the fields. 
endwith : {True, False}, optional Whether missing values (if any) should be treated as the largest values (True) or the smallest values (False) When the array contains unmasked values at the same extremes of the datatype, the ordering of these values and the masked values is undefined. fill_value : {var}, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in a sorted array. Notes ----- See ``sort`` for notes on the different sorting algorithms. Examples -------- >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() >>> print(a) [1 3 5 -- --] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) >>> print(a) [-- -- 1 3 5] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) >>> print(a) [1 -- -- 3 5] """ if self._mask is nomask: ndarray.sort(self, axis=axis, kind=kind, order=order) return if self is masked: return sidx = self.argsort(axis=axis, kind=kind, order=order, fill_value=fill_value, endwith=endwith) # save memory for 1d arrays if self.ndim == 1: idx = sidx else: idx = list(np.ix_(*[np.arange(x) for x in self.shape])) idx[axis] = sidx self[...] = self[idx] def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the minimum along a given axis. Parameters ---------- axis : {None, int}, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : {var}, optional Value used to fill in the masked values. If None, use the output of `minimum_fill_value`. Returns ------- amin : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- minimum_fill_value Returns the minimum filling value for a given datatype. """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = minimum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).min( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out # unique to masked arrays def mini(self, axis=None): """ Return the array minimum along the specified axis. .. 
deprecated:: 1.13.0 This function is identical to both: * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)`` * ``np.ma.minimum.reduce(self, axis=axis)`` Typically though, ``self.min(axis=axis)`` is sufficient. Parameters ---------- axis : int, optional The axis along which to find the minima. Default is None, in which case the minimum value in the whole array is returned. Returns ------- min : scalar or MaskedArray If `axis` is None, the result is a scalar. Otherwise, if `axis` is given and the array is at least 2-D, the result is a masked array with dimension one smaller than the array on which `mini` is called. Examples -------- >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) >>> print(x) [[0 --] [2 3] [4 --]] >>> x.mini() 0 >>> x.mini(axis=0) masked_array(data = [0 3], mask = [False False], fill_value = 999999) >>> print(x.mini(axis=1)) [0 2 4] There is a small difference between `mini` and `min`: >>> x[:,1].mini(axis=0) masked_array(data = --, mask = True, fill_value = 999999) >>> x[:,1].min(axis=0) masked """ # 2016-04-13, 1.13.0, gh-8764 warnings.warn( "`mini` is deprecated; use the `min` method or " "`np.ma.minimum.reduce instead.", DeprecationWarning, stacklevel=2) return minimum.reduce(self, axis) def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the maximum along a given axis. Parameters ---------- axis : {None, int}, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : {var}, optional Value used to fill in the masked values. If None, use the output of maximum_fill_value(). Returns ------- amax : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- maximum_fill_value Returns the maximum filling value for a given datatype. """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = maximum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).max( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out def ptp(self, axis=None, out=None, fill_value=None): """ Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). Parameters ---------- axis : {None, int}, optional Axis along which to find the peaks. If None (default) the flattened array is used. out : {None, array_like}, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. fill_value : {var}, optional Value used to fill in the masked values. Returns ------- ptp : ndarray. 
A new array holding the result, unless ``out`` was specified, in which case a reference to ``out`` is returned. """ if out is None: result = self.max(axis=axis, fill_value=fill_value) result -= self.min(axis=axis, fill_value=fill_value) return result out.flat = self.max(axis=axis, out=out, fill_value=fill_value) min_value = self.min(axis=axis, fill_value=fill_value) np.subtract(out, min_value, out=out, casting='unsafe') return out def partition(self, *args, **kwargs): warnings.warn("Warning: 'partition' will ignore the 'mask' " "of the {}.".format(self.__class__.__name__), stacklevel=2) return super(MaskedArray, self).partition(*args, **kwargs) def argpartition(self, *args, **kwargs): warnings.warn("Warning: 'argpartition' will ignore the 'mask' " "of the {}.".format(self.__class__.__name__), stacklevel=2) return super(MaskedArray, self).argpartition(*args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """ """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked maskindices = getmask(indices) if maskindices is not nomask: indices = indices.filled(0) # Get the data, promoting scalars to 0d arrays with [...] so that # .view works correctly if out is None: out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) else: np.take(_data, indices, axis=axis, mode=mode, out=out) # Get the mask if isinstance(out, MaskedArray): if _mask is nomask: outmask = maskindices else: outmask = _mask.take(indices, axis=axis, mode=mode) outmask |= maskindices out.__setmask__(outmask) # demote 0d arrays back to scalars, for consistency with ndarray.take return out[()] # Array methods clip = _arraymethod('clip', onmask=False) copy = _arraymethod('copy') diagonal = _arraymethod('diagonal') flatten = _arraymethod('flatten') repeat = _arraymethod('repeat') squeeze = _arraymethod('squeeze') swapaxes = _arraymethod('swapaxes') T = property(fget=lambda self: self.transpose()) transpose = _arraymethod('transpose') def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] """ _mask = self._mask # No mask ? Just return .data.tolist ? if _mask is nomask: return self._data.tolist() # Explicit fill_value: fill the array and get the list if fill_value is not None: return self.filled(fill_value).tolist() # Structured array. names = self.dtype.names if names: result = self._data.astype([(_, object) for _ in names]) for n in names: result[n][_mask[n]] = None return result.tolist() # Standard arrays. if _mask is nomask: return [None] # Set temps to save time when dealing w/ marrays. inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None result.shape = inishape return result.tolist() def tostring(self, fill_value=None, order='C'): """ This function is a compatibility alias for tobytes. Despite its name it returns bytes not strings. 
""" return self.tobytes(fill_value, order='C') def tobytes(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. The array is filled with a fill value before the string conversion. .. versionadded:: 1.9.0 Parameters ---------- fill_value : scalar, optional Value used to fill in the masked values. Default is None, in which case `MaskedArray.fill_value` is used. order : {'C','F','A'}, optional Order of the data item in the copy. Default is 'C'. - 'C' -- C order (row major). - 'F' -- Fortran order (column major). - 'A' -- Any, current order of array. - None -- Same as 'A'. See Also -------- ndarray.tobytes tolist, tofile Notes ----- As for `ndarray.tobytes`, information about the shape, dtype, etc., but also about `fill_value`, will be lost. Examples -------- >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00' """ return self.filled(fill_value).tobytes(order=order) def tofile(self, fid, sep="", format="%s"): """ Save a masked array to a file in binary format. .. warning:: This function is not implemented yet. Raises ------ NotImplementedError When `tofile` is called. """ raise NotImplementedError("MaskedArray.tofile() not implemented yet.") def toflex(self): """ Transforms a masked array into a flexible-type array. The flexible type array that is returned will have two fields: * the ``_data`` field stores the ``_data`` part of the array. * the ``_mask`` field stores the ``_mask`` part of the array. Parameters ---------- None Returns ------- record : ndarray A new flexible-type `ndarray` with two fields: the first element containing a value, the second element containing the corresponding mask boolean. The returned record shape matches self.shape. Notes ----- A side-effect of transforming a masked array into a flexible `ndarray` is that meta information (``fill_value``, ...) will be lost. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> print(x.toflex()) [[(1, False) (2, True) (3, False)] [(4, True) (5, False) (6, True)] [(7, False) (8, True) (9, False)]] """ # Get the basic dtype. ddtype = self.dtype # Make sure we have a mask _mask = self._mask if _mask is None: _mask = make_mask_none(self.shape, ddtype) # And get its dtype mdtype = self._mask.dtype record = np.ndarray(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)]) record['_data'] = self._data record['_mask'] = self._mask return record torecords = toflex # Pickling def __getstate__(self): """Return the internal state of the masked array, for pickling purposes. """ cf = 'CF'[self.flags.fnc] data_state = super(MaskedArray, self).__reduce__()[2] return data_state + (getmaskarray(self).tobytes(cf), self._fill_value) def __setstate__(self, state): """Restore the internal state of the masked array, for pickling purposes. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (_, shp, typ, isf, raw, msk, flv) = state super(MaskedArray, self).__setstate__((shp, typ, isf, raw)) self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv def __reduce__(self): """Return a 3-tuple for pickling a MaskedArray. 
""" return (_mareconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) if memo is None: memo = {} memo[id(self)] = copied for (k, v) in self.__dict__.items(): copied.__dict__[k] = deepcopy(v, memo) return copied def _mareconstruct(subtype, baseclass, baseshape, basetype,): """Internal function that builds a new MaskedArray from the information stored in a pickle. """ _data = ndarray.__new__(baseclass, baseshape, basetype) _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) class mvoid(MaskedArray): """ Fake a 'void' object to use for masked array with structured dtypes. """ def __new__(self, data, mask=nomask, dtype=None, fill_value=None, hardmask=False, copy=False, subok=True): _data = np.array(data, copy=copy, subok=subok, dtype=dtype) _data = _data.view(self) _data._hardmask = hardmask if mask is not nomask: if isinstance(mask, np.void): _data._mask = mask else: try: # Mask is already a 0D array _data._mask = np.void(mask) except TypeError: # Transform the mask to a void mdtype = make_mask_descr(dtype) _data._mask = np.array(mask, dtype=mdtype)[()] if fill_value is not None: _data.fill_value = fill_value return _data def _get_data(self): # Make sure that the _data part is a np.void return super(mvoid, self)._data[()] _data = property(fget=_get_data) def __getitem__(self, indx): """ Get the index. """ m = self._mask if isinstance(m[indx], ndarray): # Can happen when indx is a multi-dimensional field: # A = ma.masked_array(data=[([0,1],)], mask=[([True, # False],)], dtype=[("A", ">i2", (2,))]) # x = A[0]; y = x["A"]; then y.mask["A"].size==2 # and we can not say masked/unmasked. # The result is no longer mvoid! # See also issue #6724. return masked_array( data=self._data[indx], mask=m[indx], fill_value=self._fill_value[indx], hard_mask=self._hardmask) if m is not nomask and m[indx]: return masked return self._data[indx] def __setitem__(self, indx, value): self._data[indx] = value if self._hardmask: self._mask[indx] |= getattr(value, "_mask", False) else: self._mask[indx] = getattr(value, "_mask", False) def __str__(self): m = self._mask if m is nomask: return str(self._data) rdtype = _replace_dtype_fields(self._data.dtype, "O") data_arr = super(mvoid, self)._data res = data_arr.astype(rdtype) _recursive_printoption(res, self._mask, masked_print_option) return str(res) __repr__ = __str__ def __iter__(self): "Defines an iterator for mvoid" (_data, _mask) = (self._data, self._mask) if _mask is nomask: for d in _data: yield d else: for (d, m) in zip(_data, _mask): if m: yield masked else: yield d def __len__(self): return self._data.__len__() def filled(self, fill_value=None): """ Return a copy with masked fields filled with a given value. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries (None by default). If None, the `fill_value` attribute is used instead. Returns ------- filled_void A `np.void` object See Also -------- MaskedArray.filled """ return asarray(self).filled(fill_value)[()] def tolist(self): """ Transforms the mvoid object into a tuple. Masked fields are replaced by None. 
Returns ------- returned_tuple Tuple of fields """ _mask = self._mask if _mask is nomask: return self._data.tolist() result = [] for (d, m) in zip(self._data, self._mask): if m: result.append(None) else: # .item() makes sure we return a standard Python object result.append(d.item()) return tuple(result) ############################################################################## # Shortcuts # ############################################################################## def isMaskedArray(x): """ Test whether input is an instance of MaskedArray. This function returns True if `x` is an instance of MaskedArray and returns False otherwise. Any object is accepted as input. Parameters ---------- x : object Object to test. Returns ------- result : bool True if `x` is a MaskedArray. See Also -------- isMA : Alias to isMaskedArray. isarray : Alias to isMaskedArray. Examples -------- >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> m = ma.masked_values(a, 0) >>> m masked_array(data = [[1.0 -- --] [-- 1.0 --] [-- -- 1.0]], mask = [[False True True] [ True False True] [ True True False]], fill_value=0.0) >>> ma.isMaskedArray(a) False >>> ma.isMaskedArray(m) True >>> ma.isMaskedArray([0, 1, 2]) False """ return isinstance(x, MaskedArray) isarray = isMaskedArray isMA = isMaskedArray # backward compatibility class MaskedConstant(MaskedArray): # the lone np.ma.masked instance __singleton = None @classmethod def __has_singleton(cls): # second case ensures `cls.__singleton` is not just a view on the # superclass singleton return cls.__singleton is not None and type(cls.__singleton) is cls def __new__(cls): if not cls.__has_singleton(): # We define the masked singleton as a float for higher precedence. # Note that it can be tricky sometimes w/ type comparison data = np.array(0.) mask = np.array(True) # prevent any modifications data.flags.writeable = False mask.flags.writeable = False # don't fall back on MaskedArray.__new__(MaskedConstant), since # that might confuse it - this way, the construction is entirely # within our control cls.__singleton = MaskedArray(data, mask=mask).view(cls) return cls.__singleton def __array_finalize__(self, obj): if not self.__has_singleton(): # this handles the `.view` in __new__, which we want to copy across # properties normally return super(MaskedConstant, self).__array_finalize__(obj) elif self is self.__singleton: # not clear how this can happen, play it safe pass else: # everywhere else, we want to downcast to MaskedArray, to prevent a # duplicate maskedconstant. self.__class__ = MaskedArray MaskedArray.__array_finalize__(self, obj) def __array_prepare__(self, obj, context=None): return self.view(MaskedArray).__array_prepare__(obj, context) def __array_wrap__(self, obj, context=None): return self.view(MaskedArray).__array_wrap__(obj, context) def __str__(self): return str(masked_print_option._display) if sys.version_info.major < 3: def __unicode__(self): return unicode(masked_print_option._display) def __repr__(self): if self is MaskedConstant.__singleton: return 'masked' else: # it's a subclass, or something is wrong, make it obvious return object.__repr__(self) def __reduce__(self): """Override of MaskedArray's __reduce__. """ return (self.__class__, ()) # inplace operations have no effect. 
We have to override them to avoid # trying to modify the readonly data and mask arrays def __iop__(self, other): return self __iadd__ = \ __isub__ = \ __imul__ = \ __ifloordiv__ = \ __itruediv__ = \ __ipow__ = \ __iop__ del __iop__ # don't leave this around def copy(self, *args, **kwargs): """ Copy is a no-op on the maskedconstant, as it is a scalar """ # maskedconstant is a scalar, so copy doesn't need to copy. There's # precedent for this with `np.bool_` scalars. return self masked = masked_singleton = MaskedConstant() masked_array = MaskedArray def array(data, dtype=None, copy=False, order=None, mask=nomask, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0): """ Shortcut to MaskedArray. The options are in a different order for convenience and backwards compatibility. """ return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink, order=order) array.__doc__ = masked_array.__doc__ def is_masked(x): """ Determine whether input has masked values. Accepts any object as input, but always returns False unless the input is a MaskedArray containing masked values. Parameters ---------- x : array_like Array to check for masked values. Returns ------- result : bool True if `x` is a MaskedArray with masked values, False otherwise. Examples -------- >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x masked_array(data = [-- 1 -- 2 3], mask = [ True False True False False], fill_value=999999) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x masked_array(data = [0 1 0 2 3], mask = False, fill_value=999999) >>> ma.is_masked(x) False Always returns False if `x` isn't a MaskedArray. >>> x = [False, True, False] >>> ma.is_masked(x) False >>> x = 'a string' >>> ma.is_masked(x) False """ m = getmask(x) if m is nomask: return False elif m.any(): return True return False ############################################################################## # Extrema functions # ############################################################################## class _extrema_operation(_MaskedUFunc): """ Generic class for maximum/minimum functions. .. note:: This is the base class for `_maximum_operation` and `_minimum_operation`. """ def __init__(self, ufunc, compare, fill_value): super(_extrema_operation, self).__init__(ufunc) self.compare = compare self.fill_value_func = fill_value def __call__(self, a, b=None): "Executes the call behavior." if b is None: # 2016-04-13, 1.13.0 warnings.warn( "Single-argument form of np.ma.{0} is deprecated. Use " "np.ma.{0}.reduce instead.".format(self.__name__), DeprecationWarning, stacklevel=2) return self.reduce(a) return where(self.compare(a, b), a, b) def reduce(self, target, axis=np._NoValue): "Reduce target along the given axis." target = narray(target, copy=False, subok=True) m = getmask(target) if axis is np._NoValue and target.ndim > 1: # 2017-05-06, Numpy 1.13.0: warn on axis default warnings.warn( "In the future the default for ma.{0}.reduce will be axis=0, " "not the current None, to match np.{0}.reduce. 
" "Explicitly pass 0 or None to silence this warning.".format( self.__name__ ), MaskedArrayFutureWarning, stacklevel=2) axis = None if axis is not np._NoValue: kwargs = dict(axis=axis) else: kwargs = dict() if m is nomask: t = self.f.reduce(target, **kwargs) else: target = target.filled( self.fill_value_func(target)).view(type(target)) t = self.f.reduce(target, **kwargs) m = umath.logical_and.reduce(m, **kwargs) if hasattr(t, '_mask'): t._mask = m elif m: t = masked return t def outer(self, a, b): "Return the function applied to the outer product of a and b." ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = logical_or.outer(ma, mb) result = self.f.outer(filled(a), filled(b)) if not isinstance(result, MaskedArray): result = result.view(MaskedArray) result._mask = m return result def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} try: return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) except (AttributeError, TypeError): # If obj doesn't have a min method, or if the method doesn't accept a # fill_value argument return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out, **kwargs) min.__doc__ = MaskedArray.min.__doc__ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} try: return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) except (AttributeError, TypeError): # If obj doesn't have a max method, or if the method doesn't accept a # fill_value argument return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out, **kwargs) max.__doc__ = MaskedArray.max.__doc__ def ptp(obj, axis=None, out=None, fill_value=None): """ a.ptp(axis=None) = a.max(axis) - a.min(axis) """ try: return obj.ptp(axis, out=out, fill_value=fill_value) except (AttributeError, TypeError): # If obj doesn't have a ptp method or if the method doesn't accept # a fill_value argument return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out) ptp.__doc__ = MaskedArray.ptp.__doc__ ############################################################################## # Definition of functions from the corresponding methods # ############################################################################## class _frommethod(object): """ Define functions from existing MaskedArray methods. Parameters ---------- methodname : str Name of the method to transform. """ def __init__(self, methodname, reversed=False): self.__name__ = methodname self.__doc__ = self.getdoc() self.reversed = reversed def getdoc(self): "Return the doc of the function (from the doc of the method)." 
meth = getattr(MaskedArray, self.__name__, None) or\ getattr(np, self.__name__, None) signature = self.__name__ + get_object_signature(meth) if meth is not None: doc = """ %s\n%s""" % ( signature, getattr(meth, '__doc__', None)) return doc def __call__(self, a, *args, **params): if self.reversed: args = list(args) a, args[0] = args[0], a marr = asanyarray(a) method_name = self.__name__ method = getattr(type(marr), method_name, None) if method is None: # use the corresponding np function method = getattr(np, method_name) return method(marr, *args, **params) all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') compress = _frommethod('compress', reversed=True) cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') diagonal = _frommethod('diagonal') harden_mask = _frommethod('harden_mask') ids = _frommethod('ids') maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) mean = _frommethod('mean') minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) nonzero = _frommethod('nonzero') prod = _frommethod('prod') product = _frommethod('prod') ravel = _frommethod('ravel') repeat = _frommethod('repeat') shrink_mask = _frommethod('shrink_mask') soften_mask = _frommethod('soften_mask') std = _frommethod('std') sum = _frommethod('sum') swapaxes = _frommethod('swapaxes') #take = _frommethod('take') trace = _frommethod('trace') var = _frommethod('var') count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ """ a = masked_array(a) return a.take(indices, axis=axis, out=out, mode=mode) def power(a, b, third=None): """ Returns element-wise base array raised to power from second array. This is the masked array version of `numpy.power`. For details see `numpy.power`. See Also -------- numpy.power Notes ----- The *out* argument to `numpy.power` is not supported, `third` has to be None. """ if third is not None: raise MaskError("3-argument power not supported.") # Get the masks ma = getmask(a) mb = getmask(b) m = mask_or(ma, mb) # Get the rawdata fa = getdata(a) fb = getdata(b) # Get the type of the result (so that we preserve subclasses) if isinstance(a, MaskedArray): basetype = type(a) else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray with np.errstate(divide='ignore', invalid='ignore'): result = np.where(m, fa, umath.power(fa, fb)).view(basetype) result._update_from(a) # Find where we're in trouble w/ NaNs and Infs invalid = np.logical_not(np.isfinite(result.view(ndarray))) # Add the initial mask if m is not nomask: if not (result.ndim): return masked result._mask = np.logical_or(m, invalid) # Fix the invalid parts if invalid.any(): if not result.ndim: return masked elif result._mask is nomask: result._mask = invalid result._data[invalid] = result.fill_value return result argmin = _frommethod('argmin') argmax = _frommethod('argmax') def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fill_value=None): "Function version of the eponymous method." 
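    # Illustrative note (not part of the original source): for a MaskedArray
    # this simply defers to the method above; e.g. (values assumed)
    # np.ma.argsort(np.ma.array([3, 1, 2], mask=[0, 1, 0])) -> array([2, 0, 1]),
    # since the masked entry is filled so that it sorts to the end.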
a = np.asanyarray(a) # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default if axis is np._NoValue: axis = _deprecate_argsort_axis(a) if isinstance(a, MaskedArray): return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value) else: return a.argsort(axis=axis, kind=kind, order=order) argsort.__doc__ = MaskedArray.argsort.__doc__ def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): "Function version of the eponymous method." a = np.array(a, copy=True, subok=True) if axis is None: a = a.flatten() axis = 0 if isinstance(a, MaskedArray): a.sort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value) else: a.sort(axis=axis, kind=kind, order=order) return a sort.__doc__ = MaskedArray.sort.__doc__ def compressed(x): """ Return all the non-masked data as a 1-D array. This function is equivalent to calling the "compressed" method of a `MaskedArray`, see `MaskedArray.compressed` for details. See Also -------- MaskedArray.compressed Equivalent method. """ return asanyarray(x).compressed() def concatenate(arrays, axis=0): """ Concatenate a sequence of arrays along the given axis. Parameters ---------- arrays : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- result : MaskedArray The concatenated array with any masked entries preserved. See Also -------- numpy.concatenate : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a masked_array(data = [0 -- 2], mask = [False True False], fill_value = 999999) >>> b masked_array(data = [2 3 4], mask = False, fill_value = 999999) >>> ma.concatenate([a, b]) masked_array(data = [0 -- 2 2 3 4], mask = [False True False False False False], fill_value = 999999) """ d = np.concatenate([getdata(a) for a in arrays], axis) rcls = get_masked_subclass(*arrays) data = d.view(rcls) # Check whether one of the arrays has a non-empty mask. for x in arrays: if getmask(x) is not nomask: break else: return data # OK, so we have to concatenate the masks dm = np.concatenate([getmaskarray(a) for a in arrays], axis) dm = dm.reshape(d.shape) # If we decide to keep a '_shrinkmask' option, we want to check that # all of them are True, and then check for dm.any() data._mask = _shrink_mask(dm) return data def diag(v, k=0): """ Extract a diagonal or construct a diagonal array. This function is the equivalent of `numpy.diag` that takes masked values into account, see `numpy.diag` for details. See Also -------- numpy.diag : Equivalent function for ndarrays. """ output = np.diag(v, k).view(MaskedArray) if getmask(v) is not nomask: output._mask = np.diag(v._mask, k) return output def expand_dims(x, axis): """ Expand the shape of an array. Expands the shape of the array by including a new axis before the one specified by the `axis` parameter. This function behaves the same as `numpy.expand_dims` but preserves masked elements. See Also -------- numpy.expand_dims : Equivalent function in top-level NumPy module. 
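    A one-line sketch (input assumed): the new axis can also be appended at
    the end, just as with `numpy.expand_dims`.

    >>> np.ma.expand_dims(np.ma.array([1, 2]), axis=1).shape
    (2, 1)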
Examples -------- >>> import numpy.ma as ma >>> x = ma.array([1, 2, 4]) >>> x[1] = ma.masked >>> x masked_array(data = [1 -- 4], mask = [False True False], fill_value = 999999) >>> np.expand_dims(x, axis=0) array([[1, 2, 4]]) >>> ma.expand_dims(x, axis=0) masked_array(data = [[1 -- 4]], mask = [[False True False]], fill_value = 999999) The same result can be achieved using slicing syntax with `np.newaxis`. >>> x[np.newaxis, :] masked_array(data = [[1 -- 4]], mask = [[False True False]], fill_value = 999999) """ result = n_expand_dims(x, axis) if isinstance(x, MaskedArray): new_shape = result.shape result = x.view() result.shape = new_shape if result._mask is not nomask: result._mask.shape = new_shape return result def left_shift(a, n): """ Shift the bits of an integer to the left. This is the masked array version of `numpy.left_shift`, for details see that function. See Also -------- numpy.left_shift """ m = getmask(a) if m is nomask: d = umath.left_shift(filled(a), n) return masked_array(d) else: d = umath.left_shift(filled(a, 0), n) return masked_array(d, mask=m) def right_shift(a, n): """ Shift the bits of an integer to the right. This is the masked array version of `numpy.right_shift`, for details see that function. See Also -------- numpy.right_shift """ m = getmask(a) if m is nomask: d = umath.right_shift(filled(a), n) return masked_array(d) else: d = umath.right_shift(filled(a, 0), n) return masked_array(d, mask=m) def put(a, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. This function is equivalent to `MaskedArray.put`, see that method for details. See Also -------- MaskedArray.put """ # We can't use 'frommethod', the order of arguments is different try: return a.put(indices, values, mode=mode) except AttributeError: return narray(a, copy=False).put(indices, values, mode=mode) def putmask(a, mask, values): # , mode='raise'): """ Changes elements of an array based on conditional and input values. This is the masked array version of `numpy.putmask`, for details see `numpy.putmask`. See Also -------- numpy.putmask Notes ----- Using a masked array as `values` will **not** transform a `ndarray` into a `MaskedArray`. """ # We can't use 'frommethod', the order of arguments is different if not isinstance(a, MaskedArray): a = a.view(MaskedArray) (valdata, valmask) = (getdata(values), getmask(values)) if getmask(a) is nomask: if valmask is not nomask: a._sharedmask = True a._mask = make_mask_none(a.shape, a.dtype) np.copyto(a._mask, valmask, where=mask) elif a._hardmask: if valmask is not nomask: m = a._mask.copy() np.copyto(m, valmask, where=mask) a.mask |= m else: if valmask is nomask: valmask = getmaskarray(values) np.copyto(a._mask, valmask, where=mask) np.copyto(a._data, valdata, where=mask) return def transpose(a, axes=None): """ Permute the dimensions of an array. This function is exactly equivalent to `numpy.transpose`. See Also -------- numpy.transpose : Equivalent function in top-level NumPy module. 
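    A short sketch with an explicit ``axes`` argument (values assumed):

    >>> a = np.ma.arange(6).reshape(2, 3)
    >>> np.ma.transpose(a, axes=(1, 0)).shape
    (3, 2)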
    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.arange(4).reshape((2,2))
    >>> x[1, 1] = ma.masked
    >>> x
    masked_array(data =
     [[0 1]
     [2 --]],
                 mask =
     [[False False]
     [False  True]],
           fill_value = 999999)
    >>> ma.transpose(x)
    masked_array(data =
     [[0 2]
     [1 --]],
                 mask =
     [[False False]
     [False  True]],
           fill_value = 999999)

    """
    # We can't use 'frommethod', as 'transpose' doesn't take keywords
    try:
        return a.transpose(axes)
    except AttributeError:
        return narray(a, copy=False).transpose(axes).view(MaskedArray)


def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function

    """
    # We can't use 'frommethod': it whines about extra parameters.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
        return _tmp.view(MaskedArray)


def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.

    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.

    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.array([[1, 2] ,[3, 4]])
    >>> a[0, 1] = ma.masked
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
                 mask =
     [[False  True]
     [False False]],
           fill_value = 999999)
    >>> np.resize(a, (3, 3))
    array([[1, 2, 3],
           [4, 1, 2],
           [3, 4, 1]])
    >>> ma.resize(a, (3, 3))
    masked_array(data =
     [[1 -- 3]
     [4 1 --]
     [3 4 1]],
                 mask =
     [[False  True False]
     [False False  True]
     [False False False]],
           fill_value = 999999)

    A MaskedArray is always returned, regardless of the input type.

    >>> a = np.array([[1, 2] ,[3, 4]])
    >>> ma.resize(a, (3, 3))
    masked_array(data =
     [[1 2 3]
     [4 1 2]
     [3 4 1]],
                 mask =
     False,
           fill_value = 999999)

    """
    # We can't use _frommethods here, as N.resize is notoriously whiny.
    m = getmask(x)
    if m is not nomask:
        m = np.resize(m, new_shape)
    result = np.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        result._mask = m
    return result


def rank(obj):
    """
    maskedarray version of the numpy function.

    .. note::
        Deprecated since 1.10.0

    """
    # 2015-04-12, 1.10.0
    warnings.warn(
        "`rank` is deprecated; use the `ndim` function instead. ",
        np.VisibleDeprecationWarning, stacklevel=2)
    return np.ndim(getdata(obj))

rank.__doc__ = np.rank.__doc__


def ndim(obj):
    """
    maskedarray version of the numpy function.

    """
    return np.ndim(getdata(obj))

ndim.__doc__ = np.ndim.__doc__


def shape(obj):
    "maskedarray version of the numpy function."
    return np.shape(getdata(obj))

shape.__doc__ = np.shape.__doc__


def size(obj, axis=None):
    "maskedarray version of the numpy function."
    return np.size(getdata(obj), axis)

size.__doc__ = np.size.__doc__


##############################################################################
#                            Extra functions                                 #
##############################################################################


def where(condition, x=_NoValue, y=_NoValue):
    """
    Return a masked array with elements from x or y, depending on condition.

    Returns a masked array, shaped like condition, where the elements
    are from `x` when `condition` is True, and from `y` otherwise.
    If neither `x` nor `y` are given, the function returns a tuple of
    indices where `condition` is True (the result of
    ``condition.nonzero()``).
Parameters ---------- condition : array_like, bool The condition to meet. For each True element, yield the corresponding element from `x`, otherwise from `y`. x, y : array_like, optional Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. Returns ------- out : MaskedArray or tuple of ndarrays The resulting masked array if `x` and `y` were given, otherwise the result of ``condition.nonzero()``. See Also -------- numpy.where : Equivalent function in the top-level NumPy module. Examples -------- >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) >>> print(x) [[0.0 -- 2.0] [-- 4.0 --] [6.0 -- 8.0]] >>> np.ma.where(x > 5) # return the indices where x > 5 (array([2, 2]), array([0, 2])) >>> print(np.ma.where(x > 5, x, -3.1416)) [[-3.1416 -- -3.1416] [-- -3.1416 --] [6.0 -- 8.0]] """ # handle the single-argument case missing = (x is _NoValue, y is _NoValue).count(True) if missing == 1: raise ValueError("Must provide both 'x' and 'y' or neither.") if missing == 2: return nonzero(condition) # we only care if the condition is true - false or masked pick y cf = filled(condition, False) xd = getdata(x) yd = getdata(y) # we need the full arrays here for correct final dimensions cm = getmaskarray(condition) xm = getmaskarray(x) ym = getmaskarray(y) # deal with the fact that masked.dtype == float64, but we don't actually # want to treat it as that. if x is masked and y is not masked: xd = np.zeros((), dtype=yd.dtype) xm = np.ones((), dtype=ym.dtype) elif y is masked and x is not masked: yd = np.zeros((), dtype=xd.dtype) ym = np.ones((), dtype=xm.dtype) data = np.where(cf, xd, yd) mask = np.where(cf, xm, ym) mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) # collapse the mask, for backwards compatibility mask = _shrink_mask(mask) return masked_array(data, mask=mask) def choose(indices, choices, out=None, mode='raise'): """ Use an index array to construct a new array from a set of choices. Given an array of integers and a set of n choice arrays, this method will create a new array that merges each of the choice arrays. Where a value in `a` is i, the new array will have the value that choices[i] contains in the same place. Parameters ---------- a : ndarray of ints This array must contain integers in ``[0, n-1]``, where n is the number of choices. choices : sequence of arrays Choice arrays. The index array and all of the choices should be broadcastable to the same shape. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and `dtype`. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' : raise an error * 'wrap' : wrap around * 'clip' : clip to the range Returns ------- merged_array : array See Also -------- choose : equivalent function Examples -------- >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) masked_array(data = [3 2 1], mask = False, fill_value=999999) """ def fmask(x): "Returns the filled array, or True if masked." if x is masked: return True return filled(x) def nmask(x): "Returns the mask, True if ``masked``, False if ``nomask``." if x is masked: return True return getmask(x) # Get the indices. c = filled(indices, 0) # Get the masks. 
    masks = [nmask(x) for x in choices]
    data = [fmask(x) for x in choices]
    # Construct the mask
    outputmask = np.choose(c, masks, mode=mode)
    outputmask = make_mask(mask_or(outputmask, getmask(indices)),
                           copy=0, shrink=True)
    # Get the choices.
    d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(outputmask)
        return out
    d.__setmask__(outputmask)
    return d


def round_(a, decimals=0, out=None):
    """
    Return a copy of a, rounded to 'decimals' places.

    When 'decimals' is negative, it specifies the number of positions
    to the left of the decimal point. The real and imaginary parts of
    complex numbers are rounded separately. Nothing is done if the
    array is not of float type and 'decimals' is greater than or equal
    to 0.

    Parameters
    ----------
    decimals : int
        Number of decimals to round to. May be negative.
    out : array_like
        Existing array to use for output.
        If not given, returns a default copy of a.

    Notes
    -----
    If out is given and does not have a mask attribute, the mask of a
    is lost!

    """
    if out is None:
        return np.round_(a, decimals, out)
    else:
        np.round_(getdata(a), decimals, out)
        if hasattr(out, '_mask'):
            out._mask = getmask(a)
        return out
round = round_


# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    Mask whole rows and/or columns of a 2D array that contain
    masked values.  The masking behavior is selected using the
    `axis` parameter.

      - If `axis` is None, rows *and* columns are masked.
      - If `axis` is 0, only rows are masked.
      - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The array to mask.  If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False). Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. If None, applies to
        both rows and columns.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the value
        of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.zeros((3, 3), dtype=int)
    >>> a[1, 1] = 1
    >>> a
    array([[0, 0, 0],
           [0, 1, 0],
           [0, 0, 0]])
    >>> a = ma.masked_equal(a, 1)
    >>> a
    masked_array(data =
     [[0 0 0]
     [0 -- 0]
     [0 0 0]],
          mask =
     [[False False False]
     [False  True False]
     [False False False]],
          fill_value=999999)
    >>> ma.mask_rowcols(a)
    masked_array(data =
     [[0 -- 0]
     [-- -- --]
     [0 -- 0]],
          mask =
     [[False  True False]
     [ True  True  True]
     [False  True False]],
          fill_value=999999)

    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    # Nothing is masked: return a
    if m is nomask or not m.any():
        return a
    maskedval = m.nonzero()
    a._mask = a._mask.copy()
    if not axis:
        a[np.unique(maskedval[0])] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(maskedval[1])] = masked
    return a


# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None): """ Return the dot product of two arrays. This function is the equivalent of `numpy.dot` that takes masked values into account. Note that `strict` and `out` are in different position than in the method version. In order to maintain compatibility with the corresponding method, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. .. note:: Works only with 2-D arrays at the moment. Parameters ---------- a, b : masked_array_like Inputs arrays. strict : bool, optional Whether masked data are propagated (True) or set to 0 (False) for the computation. Default is False. Propagating the mask means that if a masked value appears in a row or column, the whole row or column is considered masked. out : masked_array, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. .. versionadded:: 1.10.2 See Also -------- numpy.dot : Equivalent function for ndarrays. Examples -------- >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) >>> np.ma.dot(a, b) masked_array(data = [[21 26] [45 64]], mask = [[False False] [False False]], fill_value = 999999) >>> np.ma.dot(a, b, strict=True) masked_array(data = [[-- --] [-- 64]], mask = [[ True True] [ True False]], fill_value = 999999) """ # !!!: Works only with 2D arrays. There should be a way to get it to run # with higher dimension if strict and (a.ndim == 2) and (b.ndim == 2): a = mask_rowcols(a, 0) b = mask_rowcols(b, 1) am = ~getmaskarray(a) bm = ~getmaskarray(b) if out is None: d = np.dot(filled(a, 0), filled(b, 0)) m = ~np.dot(am, bm) if d.ndim == 0: d = np.asarray(d) r = d.view(get_masked_subclass(a, b)) r.__setmask__(m) return r else: d = np.dot(filled(a, 0), filled(b, 0), out._data) if out.mask.shape != d.shape: out._mask = np.empty(d.shape, MaskType) np.dot(am, bm, out._mask) np.logical_not(out._mask, out._mask) return out def inner(a, b): """ Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension of a and b. The first argument is not conjugated. """ fa = filled(a, 0) fb = filled(b, 0) if fa.ndim == 0: fa.shape = (1,) if fb.ndim == 0: fb.shape = (1,) return np.inner(fa, fb).view(MaskedArray) inner.__doc__ = doc_note(np.inner.__doc__, "Masked values are replaced by 0.") innerproduct = inner def outer(a, b): "maskedarray version of the numpy function." 
    fa = filled(a, 0).ravel()
    fb = filled(b, 0).ravel()
    d = np.outer(fa, fb)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        return masked_array(d)
    ma = getmaskarray(a)
    mb = getmaskarray(b)
    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
    return masked_array(d, mask=m)

outer.__doc__ = doc_note(np.outer.__doc__, "Masked values are replaced by 0.")
outerproduct = outer


def _convolve_or_correlate(f, a, v, mode, propagate_mask):
    """
    Helper function for ma.correlate and ma.convolve
    """
    if propagate_mask:
        # results which are contributed to by either item in any pair being invalid
        mask = (
            f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
          | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
        )
        data = f(getdata(a), getdata(v), mode=mode)
    else:
        # results which are not contributed to by any pair of valid elements;
        # the mask computation must use the same mode as the data computation.
        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
        data = f(filled(a, 0), filled(v, 0), mode=mode)

    return masked_array(data, mask=mask)


def correlate(a, v, mode='valid', propagate_mask=True):
    """
    Cross-correlation of two 1-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.  Note that the default
        is 'valid', unlike `convolve`, which uses 'full'.
    propagate_mask : bool
        If True, then a result element is masked if any masked element
        contributes towards it.
        If False, then a result element is only masked if no non-masked
        element contributes towards it.

    Returns
    -------
    out : MaskedArray
        Discrete cross-correlation of `a` and `v`.

    See Also
    --------
    numpy.correlate : Equivalent function in the top-level NumPy module.
    """
    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)


def convolve(a, v, mode='full', propagate_mask=True):
    """
    Returns the discrete, linear convolution of two one-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.
    propagate_mask : bool
        If True, then if any masked element is included in the sum for a result
        element, then the result is masked.
        If False, then the result element is only masked if no non-masked
        element contributes towards it.

    Returns
    -------
    out : MaskedArray
        Discrete, linear convolution of `a` and `v`.

    See Also
    --------
    numpy.convolve : Equivalent function in the top-level NumPy module.
    """
    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)


def allequal(a, b, fill_value=True):
    """
    Return True if all entries of a and b are equal, using
    fill_value as a truth value where either or both are masked.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    fill_value : bool, optional
        Whether masked values in a or b are considered equal (True) or not
        (False).

    Returns
    -------
    y : bool
        Returns True if all entries of `a` and `b` are equal (with masked
        entries compared according to `fill_value`), False otherwise.
See Also -------- all, any numpy.ma.allclose Examples -------- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data = [10000000000.0 1e-07 --], mask = [False False True], fill_value=1e+20) >>> b = array([1e10, 1e-7, -42.0]) >>> b array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) >>> ma.allequal(a, b, fill_value=False) False >>> ma.allequal(a, b) True """ m = mask_or(getmask(a), getmask(b)) if m is nomask: x = getdata(a) y = getdata(b) d = umath.equal(x, y) return d.all() elif fill_value: x = getdata(a) y = getdata(b) d = umath.equal(x, y) dm = array(d, mask=m, copy=False) return dm.filled(True).all(None) else: return False def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): """ Returns True if two arrays are element-wise equal within a tolerance. This function is equivalent to `allclose` except that masked values are treated as equal (default) or unequal, depending on the `masked_equal` argument. Parameters ---------- a, b : array_like Input arrays to compare. masked_equal : bool, optional Whether masked values in `a` and `b` are considered equal (True) or not (False). They are considered equal by default. rtol : float, optional Relative tolerance. The relative difference is equal to ``rtol * b``. Default is 1e-5. atol : float, optional Absolute tolerance. The absolute difference is equal to `atol`. Default is 1e-8. Returns ------- y : bool Returns True if the two arrays are equal within the given tolerance, False otherwise. If either array contains NaN, then False is returned. See Also -------- all, any numpy.allclose : the non-masked `allclose`. Notes ----- If the following equation is element-wise True, then `allclose` returns True:: absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) Return True if all elements of `a` and `b` are equal subject to given tolerances. Examples -------- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data = [10000000000.0 1e-07 --], mask = [False False True], fill_value = 1e+20) >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) False >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) True >>> ma.allclose(a, b, masked_equal=False) False Masked values are not compared directly. >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) >>> ma.allclose(a, b) True >>> ma.allclose(a, b, masked_equal=False) False """ x = masked_array(a, copy=False) y = masked_array(b, copy=False) # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = np.result_type(y, 1.) if y.dtype != dtype: y = masked_array(y, dtype=dtype, copy=False) m = mask_or(getmask(x), getmask(y)) xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) # If we have some infs, they should fall at the same place. if not np.all(xinf == filled(np.isinf(y), False)): return False # No infs at all if not np.any(xinf): d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), masked_equal) return np.all(d) if not np.all(filled(x[xinf] == y[xinf], masked_equal)): return False x = x[~xinf] y = y[~xinf] d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), masked_equal) return np.all(d) def asarray(a, dtype=None, order=None): """ Convert the input to a masked array of the given data-type. No copy is performed if the input is already an `ndarray`. 
    If `a` is a subclass of `MaskedArray`, a base class `MaskedArray` is
    returned.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to a masked array. This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists, ndarrays and masked arrays.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation.  Default is 'C'.

    Returns
    -------
    out : MaskedArray
        Masked array interpretation of `a`.

    See Also
    --------
    asanyarray : Similar to `asarray`, but conserves subclasses.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> x
    array([[ 0.,  1.,  2.,  3.,  4.],
           [ 5.,  6.,  7.,  8.,  9.]])
    >>> np.ma.asarray(x)
    masked_array(data =
     [[ 0.  1.  2.  3.  4.]
     [ 5.  6.  7.  8.  9.]],
                 mask =
     False,
           fill_value = 1e+20)
    >>> type(np.ma.asarray(x))
    <class 'numpy.ma.core.MaskedArray'>

    """
    order = order or 'C'
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=False, order=order)


def asanyarray(a, dtype=None):
    """
    Convert the input to a masked array, conserving subclasses.

    If `a` is a subclass of `MaskedArray`, its class is conserved.
    No copy is performed if the input is already an `ndarray`.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : MaskedArray
        MaskedArray interpretation of `a`.

    See Also
    --------
    asarray : Similar to `asanyarray`, but does not conserve subclass.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> x
    array([[ 0.,  1.,  2.,  3.,  4.],
           [ 5.,  6.,  7.,  8.,  9.]])
    >>> np.ma.asanyarray(x)
    masked_array(data =
     [[ 0.  1.  2.  3.  4.]
     [ 5.  6.  7.  8.  9.]],
                 mask =
     False,
           fill_value = 1e+20)
    >>> type(np.ma.asanyarray(x))
    <class 'numpy.ma.core.MaskedArray'>

    """
    # workaround for #8666, to preserve identity. Ideally the bottom line
    # would handle this for us.
    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
        return a
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)


##############################################################################
#                               Pickling                                     #
##############################################################################

def dump(a, F):
    """
    Pickle a masked array to a file.

    This is a wrapper around ``cPickle.dump``.

    Parameters
    ----------
    a : MaskedArray
        The array to be pickled.
    F : str or file-like object
        The file to pickle `a` to. If a string, the full path to the file.

    """
    if not hasattr(F, 'readline'):
        # Pickling requires a binary-mode file object in Python 3.
        with open(F, 'wb') as F:
            pickle.dump(a, F)
    else:
        pickle.dump(a, F)


def dumps(a):
    """
    Return a string corresponding to the pickling of a masked array.

    This is a wrapper around ``cPickle.dumps``.

    Parameters
    ----------
    a : MaskedArray
        The array for which the string representation of the pickle is
        returned.

    """
    return pickle.dumps(a)


def load(F):
    """
    Wrapper around ``cPickle.load`` which accepts either a file-like object
    or a filename.

    Parameters
    ----------
    F : str or file
        The file or file name to load.

    See Also
    --------
    dump : Pickle an array

    Notes
    -----
    This is different from `numpy.load`, which does not use cPickle but loads
    the NumPy binary .npy format.
""" if not hasattr(F, 'readline'): with open(F, 'r') as F: return pickle.load(F) else: return pickle.load(F) def loads(strg): """ Load a pickle from the current string. The result of ``cPickle.loads(strg)`` is returned. Parameters ---------- strg : str The string to load. See Also -------- dumps : Return a string corresponding to the pickling of a masked array. """ return pickle.loads(strg) def fromfile(file, dtype=float, count=-1, sep=''): raise NotImplementedError( "fromfile() not yet implemented for a MaskedArray.") def fromflex(fxarray): """ Build a masked array from a suitable flexible-type array. The input array has to have a data-type with ``_data`` and ``_mask`` fields. This type of array is output by `MaskedArray.toflex`. Parameters ---------- fxarray : ndarray The structured input array, containing ``_data`` and ``_mask`` fields. If present, other fields are discarded. Returns ------- result : MaskedArray The constructed masked array. See Also -------- MaskedArray.toflex : Build a flexible-type array from a masked array. Examples -------- >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec array([[(0, False), (1, True), (2, False)], [(3, True), (4, False), (5, True)], [(6, False), (7, True), (8, False)]], dtype=[('_data', '<i4'), ('_mask', '|b1')]) >>> x2 = np.ma.fromflex(rec) >>> x2 masked_array(data = [[0 -- 2] [-- 4 --] [6 -- 8]], mask = [[False True False] [ True False True] [False True False]], fill_value = 999999) Extra fields can be present in the structured array but are discarded: >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')] >>> rec2 = np.zeros((2, 2), dtype=dt) >>> rec2 array([[(0, False, 0.0), (0, False, 0.0)], [(0, False, 0.0), (0, False, 0.0)]], dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]) >>> y = np.ma.fromflex(rec2) >>> y masked_array(data = [[0 0] [0 0]], mask = [[False False] [False False]], fill_value = 999999) """ return masked_array(fxarray['_data'], mask=fxarray['_mask']) class _convert2ma(object): """ Convert functions from numpy to numpy.ma. Parameters ---------- _methodname : string Name of the method to transform. """ __doc__ = None def __init__(self, funcname, params=None): self._func = getattr(np, funcname) self.__doc__ = self.getdoc() self._extras = params or {} def getdoc(self): "Return the doc of the function (from the doc of the method)." 
        doc = getattr(self._func, '__doc__', None)
        sig = get_object_signature(self._func)
        if doc:
            # Add the signature of the function at the beginning of the doc
            if sig:
                sig = "%s%s\n" % (self._func.__name__, sig)
            doc = sig + doc
        return doc

    def __call__(self, *args, **params):
        # Find the common parameters to the call and the definition
        _extras = self._extras
        common_params = set(params).intersection(_extras)
        # Drop the common parameters from the call
        for p in common_params:
            _extras[p] = params.pop(p)
        # Get the result
        result = self._func.__call__(*args, **params).view(MaskedArray)
        if "fill_value" in common_params:
            result.fill_value = _extras.get("fill_value", None)
        if "hardmask" in common_params:
            # Look up the same key that was stored above ("hardmask", not
            # "hard_mask"), so the requested value is actually honored.
            result._hardmask = bool(_extras.get("hardmask", False))
        return result

arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
    'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like


def append(a, b, axis=None):
    """Append values to the end of an array.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Values are appended to a copy of this array.
    b : array_like
        These values are appended to a copy of `a`.  It must be of the
        correct shape (the same shape as `a`, excluding `axis`).  If `axis`
        is not specified, `b` can be any shape and will be flattened
        before use.
    axis : int, optional
        The axis along which the values of `b` are appended.  If `axis` is
        not given, both `a` and `b` are flattened before use.

    Returns
    -------
    append : MaskedArray
        A copy of `a` with `b` appended to `axis`.  Note that `append`
        does not occur in-place: a new array is allocated and filled.  If
        `axis` is None, the result is a flattened array.

    See Also
    --------
    numpy.append : Equivalent function in the top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_values([1, 2, 3], 2)
    >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
    >>> print(ma.append(a, b))
    [1 -- 3 4 5 6 -- 8 9]

    """
    return concatenate([a, b], axis)
255,843
30.531181
94
py
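# Illustrative usage sketch (not part of the original numpy/ma/core.py file):
# a minimal, self-contained demonstration of a few of the helpers defined in
# the core.py source above -- concatenate, where, dot and allclose. All
# variable names and values here are made up for the example; only a standard
# NumPy installation is assumed.
import numpy as np
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0])
b = ma.array([4.0, 5.0], mask=[0, 0])

# concatenate() preserves the masks of both inputs.
c = ma.concatenate([a, b])           # [1.0 -- 3.0 4.0 5.0]

# where() picks elements from x or y; masked condition entries stay masked.
flipped = ma.where(c > 3, -c, c)     # [1.0 -- 3.0 -4.0 -5.0]

# dot() with strict=True propagates a mask over whole rows/columns.
m = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
n = ma.array([[1, 0], [0, 1]])
print(ma.dot(m, n, strict=True))     # first result row is fully masked

# allclose() treats masked entries as equal by default (masked_equal=True).
assert ma.allclose(a, ma.array([1.0, 99.0, 3.0], mask=[0, 1, 0]))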
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/setup.py
#!/usr/bin/env python from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('ma', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == "__main__": from numpy.distutils.core import setup config = configuration(top_path='').todict() setup(**config)
429
29.714286
58
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/mrecords.py
""":mod:`numpy.ma..mrecords` Defines the equivalent of :class:`numpy.recarrays` for masked arrays, where fields can be accessed as attributes. Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes and the masking of individual fields. .. moduleauthor:: Pierre Gerard-Marchant """ from __future__ import division, absolute_import, print_function # We should make sure that no field is called '_mask','mask','_fieldmask', # or whatever restricted keywords. An idea would be to no bother in the # first place, and then rename the invalid fields with a trailing # underscore. Maybe we could just overload the parser function ? import sys import warnings import numpy as np import numpy.core.numerictypes as ntypes from numpy.compat import basestring from numpy import ( bool_, dtype, ndarray, recarray, array as narray ) from numpy.core.records import ( fromarrays as recfromarrays, fromrecords as recfromrecords ) _byteorderconv = np.core.records._byteorderconv _typestr = ntypes._typestr import numpy.ma as ma from numpy.ma import ( MAError, MaskedArray, masked, nomask, masked_array, getdata, getmaskarray, filled ) _check_fill_value = ma.core._check_fill_value __all__ = [ 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', ] reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] def _getformats(data): """ Returns the formats of arrays in arraylist as a comma-separated string. """ if hasattr(data, 'dtype'): return ",".join([desc[1] for desc in data.dtype.descr]) formats = '' for obj in data: obj = np.asarray(obj) formats += _typestr[obj.dtype.type] if issubclass(obj.dtype.type, ntypes.flexible): formats += repr(obj.itemsize) formats += ',' return formats[:-1] def _checknames(descr, names=None): """ Checks that field names ``descr`` are not reserved keywords. If this is the case, a default 'f%i' is substituted. If the argument `names` is not None, updates the field names to valid names. """ ndescr = len(descr) default_names = ['f%i' % i for i in range(ndescr)] if names is None: new_names = default_names else: if isinstance(names, (tuple, list)): new_names = names elif isinstance(names, str): new_names = names.split(',') else: raise NameError("illegal input names %s" % repr(names)) nnames = len(new_names) if nnames < ndescr: new_names += default_names[nnames:] ndescr = [] for (n, d, t) in zip(new_names, default_names, descr.descr): if n in reserved_fields: if t[0] in reserved_fields: ndescr.append((d, t[1])) else: ndescr.append(t) else: ndescr.append((n, t[1])) return np.dtype(ndescr) def _get_fieldmask(self): mdescr = [(n, '|b1') for n in self.dtype.names] fdmask = np.empty(self.shape, dtype=mdescr) fdmask.flat = tuple([False] * len(mdescr)) return fdmask class MaskedRecords(MaskedArray, object): """ Attributes ---------- _data : recarray Underlying data, as a record array. _mask : boolean array Mask of the records. A record is masked when all its fields are masked. _fieldmask : boolean recarray Record array of booleans, setting the mask of each individual field of each record. _fill_value : record Filling values for each field. 
""" def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, copy=False, **options): self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, strides=strides, formats=formats, names=names, titles=titles, byteorder=byteorder, aligned=aligned,) mdtype = ma.make_mask_descr(self.dtype) if mask is nomask or not np.size(mask): if not keep_mask: self._mask = tuple([False] * len(mdtype)) else: mask = np.array(mask, copy=copy) if mask.shape != self.shape: (nd, nm) = (self.size, mask.size) if nm == 1: mask = np.resize(mask, self.shape) elif nm == nd: mask = np.reshape(mask, self.shape) else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." raise MAError(msg % (nd, nm)) copy = True if not keep_mask: self.__setmask__(mask) self._sharedmask = True else: if mask.dtype == mdtype: _mask = mask else: _mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) self._mask = _mask return self def __array_finalize__(self, obj): # Make sure we have a _fieldmask by default _mask = getattr(obj, '_mask', None) if _mask is None: objmask = getattr(obj, '_mask', nomask) _dtype = ndarray.__getattribute__(self, 'dtype') if objmask is nomask: _mask = ma.make_mask_none(self.shape, dtype=_dtype) else: mdescr = ma.make_mask_descr(_dtype) _mask = narray([tuple([m] * len(mdescr)) for m in objmask], dtype=mdescr).view(recarray) # Update some of the attributes _dict = self.__dict__ _dict.update(_mask=_mask) self._update_from(obj) if _dict['_baseclass'] == ndarray: _dict['_baseclass'] = recarray return def _getdata(self): """ Returns the data as a recarray. """ return ndarray.view(self, recarray) _data = property(fget=_getdata) def _getfieldmask(self): """ Alias to mask. """ return self._mask _fieldmask = property(fget=_getfieldmask) def __len__(self): """ Returns the length """ # We have more than one record if self.ndim: return len(self._data) # We have only one record: return the nb of fields return len(self.dtype) def __getattribute__(self, attr): try: return object.__getattribute__(self, attr) except AttributeError: # attr must be a fieldname pass fielddict = ndarray.__getattribute__(self, 'dtype').fields try: res = fielddict[attr][:2] except (TypeError, KeyError): raise AttributeError("record array has no attribute %s" % attr) # So far, so good _localdict = ndarray.__getattribute__(self, '__dict__') _data = ndarray.view(self, _localdict['_baseclass']) obj = _data.getfield(*res) if obj.dtype.fields: raise NotImplementedError("MaskedRecords is currently limited to" "simple records.") # Get some special attributes # Reset the object's mask hasmasked = False _mask = _localdict.get('_mask', None) if _mask is not None: try: _mask = _mask[attr] except IndexError: # Couldn't find a mask: use the default (nomask) pass hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any() if (obj.shape or hasmasked): obj = obj.view(MaskedArray) obj._baseclass = ndarray obj._isfield = True obj._mask = _mask # Reset the field values _fill_value = _localdict.get('_fill_value', None) if _fill_value is not None: try: obj._fill_value = _fill_value[attr] except ValueError: obj._fill_value = None else: obj = obj.item() return obj def __setattr__(self, attr, val): """ Sets the attribute attr to the value val. """ # Should we call __setmask__ first ? 
        if attr in ['mask', 'fieldmask']:
            self.__setmask__(val)
            return
        # Create a shortcut (so that we don't have to call getattr all the time)
        _localdict = object.__getattribute__(self, '__dict__')
        # Check whether we're creating a new field
        newattr = attr not in _localdict
        try:
            # Is attr a generic attribute?
            ret = object.__setattr__(self, attr, val)
        except Exception:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
            if not (attr in fielddict or attr in optinfo):
                exctype, value = sys.exc_info()[:2]
                raise exctype(value)
        else:
            # Get the list of names
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            # Check the attribute
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one or this setattr worked on an
                # internal attribute.
                try:
                    object.__delattr__(self, attr)
                except Exception:
                    return ret
        # Let's try to set the field
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("record array has no attribute %s" % attr)

        if val is masked:
            _fill_value = _localdict['_fill_value']
            if _fill_value is not None:
                dval = _localdict['_fill_value'][attr]
            else:
                dval = val
            mval = True
        else:
            dval = filled(val)
            mval = getmaskarray(val)
        obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
        _localdict['_mask'].__setitem__(attr, mval)
        return obj

    def __getitem__(self, indx):
        """
        Returns all the fields sharing the same fieldname base.

        The fieldname base is either `_data` or `_mask`.

        """
        _localdict = self.__dict__
        _mask = ndarray.__getattribute__(self, '_mask')
        _data = ndarray.view(self, _localdict['_baseclass'])
        # We want a field
        if isinstance(indx, basestring):
            # Make sure _sharedmask is True to propagate back to _fieldmask.
            # Don't use _set_mask, there are some copies being made that
            # break propagation.  Don't force the mask to nomask, that wrecks
            # easy masking.
            obj = _data[indx].view(MaskedArray)
            obj._mask = _mask[indx]
            obj._sharedmask = True
            fval = _localdict['_fill_value']
            if fval is not None:
                obj._fill_value = fval[indx]
            # Force to masked if the mask is True
            if not obj.ndim and obj._mask:
                return masked
            return obj
        # We want some elements.
        # First, the data.
        obj = np.array(_data[indx], copy=False).view(mrecarray)
        obj._mask = np.array(_mask[indx], copy=False).view(recarray)
        return obj

    def __setitem__(self, indx, value):
        """
        Sets the given record to value.

        """
        MaskedArray.__setitem__(self, indx, value)
        if isinstance(indx, basestring):
            self._mask[indx] = ma.getmaskarray(value)

    def __str__(self):
        """
        Calculates the string representation.

        """
        if self.size > 1:
            mstr = ["(%s)" % ",".join([str(i) for i in s])
                    for s in zip(*[getattr(self, f) for f in self.dtype.names])]
            return "[%s]" % ", ".join(mstr)
        else:
            mstr = ["%s" % ",".join([str(i) for i in s])
                    for s in zip([getattr(self, f) for f in self.dtype.names])]
            return "(%s)" % ", ".join(mstr)

    def __repr__(self):
        """
        Calculates the repr representation.

        """
        _names = self.dtype.names
        fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
        reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
        reprstr.insert(0, 'masked_records(')
        reprstr.extend([fmt % ('    fill_value', self.fill_value),
                        '              )'])
        return str("\n".join(reprstr))

    def view(self, dtype=None, type=None):
        """
        Returns a view of the mrecarray.

        """
        # OK, basic copy-paste from MaskedArray.view.
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        # Here again.
elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) # OK, there's the change except TypeError: dtype = np.dtype(dtype) # we need to revert to MaskedArray, but keeping the possibility # of subclasses (eg, TimeSeriesRecords), so we'll force a type # set to the first parent if dtype.fields is None: basetype = self.__class__.__bases__[0] output = self.__array__().view(dtype, basetype) output._update_from(self) else: output = ndarray.view(self, dtype) output._fill_value = None else: output = ndarray.view(self, dtype, type) # Update the mask, just like in MaskedArray.view if (getattr(output, '_mask', nomask) is not nomask): mdtype = ma.make_mask_descr(output.dtype) output._mask = self._mask.view(mdtype, ndarray) output._mask.shape = output.shape return output def harden_mask(self): """ Forces the mask to hard. """ self._hardmask = True def soften_mask(self): """ Forces the mask to soft """ self._hardmask = False def copy(self): """ Returns a copy of the masked record. """ copied = self._data.copy().view(type(self)) copied._mask = self._mask.copy() return copied def tolist(self, fill_value=None): """ Return the data portion of the array as a list. Data items are converted to the nearest compatible Python type. Masked values are converted to fill_value. If fill_value is None, the corresponding entries in the output list will be ``None``. """ if fill_value is not None: return self.filled(fill_value).tolist() result = narray(self.filled().tolist(), dtype=object) mask = narray(self._mask.tolist()) result[mask] = None return result.tolist() def __getstate__(self): """Return the internal state of the masked array. This is for pickling. """ state = (1, self.shape, self.dtype, self.flags.fnc, self._data.tobytes(), self._mask.tobytes(), self._fill_value, ) return state def __setstate__(self, state): """ Restore the internal state of the masked array. This is for pickling. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (ver, shp, typ, isf, raw, msk, flv) = state ndarray.__setstate__(self, (shp, typ, isf, raw)) mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) self.fill_value = flv def __reduce__(self): """ Return a 3-tuple for pickling a MaskedArray. """ return (_mrreconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) def _mrreconstruct(subtype, baseclass, baseshape, basetype,): """ Build a new MaskedArray from the information stored in a pickle. """ _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) _mask = ndarray.__new__(ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) mrecarray = MaskedRecords ############################################################################### # Constructors # ############################################################################### def fromarrays(arraylist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None): """ Creates a mrecarray from a (flat) list of masked arrays. Parameters ---------- arraylist : sequence A list of (masked) arrays. Each element of the sequence is first converted to a masked array if needed. 
If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing. """ datalist = [getdata(x) for x in arraylist] masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] _array = recfromarrays(datalist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) _array._mask.flat = list(zip(*masklist)) if fill_value is not None: _array.fill_value = fill_value return _array def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None, mask=nomask): """ Creates a MaskedRecords from a list of records. Parameters ---------- reclist : sequence A list of records. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None,int}, optional Number of records. If None, ``shape`` is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. mask : {nomask, sequence}, optional. External mask to apply on the data. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing. """ # Grab the initial _fieldmask, if needed: _mask = getattr(reclist, '_mask', None) # Get the list of records. if isinstance(reclist, ndarray): # Make sure we don't have some hidden mask if isinstance(reclist, MaskedArray): reclist = reclist.filled().view(ndarray) # Grab the initial dtype, just in case if dtype is None: dtype = reclist.dtype reclist = reclist.tolist() mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) # Set the fill_value if needed if fill_value is not None: mrec.fill_value = fill_value # Now, let's deal w/ the mask if mask is not nomask: mask = np.array(mask, copy=False) maskrecordlength = len(mask.dtype) if maskrecordlength: mrec._mask.flat = mask elif mask.ndim == 2: mrec._mask.flat = [tuple(m) for m in mask] else: mrec.__setmask__(mask) if _mask is not None: mrec._mask[:] = _mask return mrec def _guessvartypes(arr): """ Tries to guess the dtypes of the str_ ndarray `arr`. Guesses by testing element-wise conversion. Returns a list of dtypes. The array is first converted to ndarray. If the array is 2D, the test is performed on the first line. An exception is raised if the file is 3D or more. 
""" vartypes = [] arr = np.asarray(arr) if arr.ndim == 2: arr = arr[0] elif arr.ndim > 2: raise ValueError("The array should be 2D at most!") # Start the conversion loop. for f in arr: try: int(f) except (ValueError, TypeError): try: float(f) except (ValueError, TypeError): try: complex(f) except (ValueError, TypeError): vartypes.append(arr.dtype) else: vartypes.append(np.dtype(complex)) else: vartypes.append(np.dtype(float)) else: vartypes.append(np.dtype(int)) return vartypes def openfile(fname): """ Opens the file handle of file `fname`. """ # A file handle if hasattr(fname, 'readline'): return fname # Try to open the file and guess its type try: f = open(fname) except IOError: raise IOError("No such file: '%s'" % fname) if f.readline()[:2] != "\\x": f.seek(0, 0) return f f.close() raise NotImplementedError("Wow, binary file") def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. Parameters ---------- fname : {file name/handle} Handle of an opened file. delimitor : {None, string}, optional Alphanumeric character used to separate columns in the file. If None, any (group of) white spacestring(s) will be used. commentchar : {'#', string}, optional Alphanumeric character used to mark the start of a comment. missingchar : {'', string}, optional String indicating missing data, and used to create the masks. varnames : {None, sequence}, optional Sequence of the variable names. If None, a list will be created from the first non empty line of the file. vartypes : {None, sequence}, optional Sequence of the variables dtypes. If None, it will be estimated from the first non-commented line. Ultra simple: the varnames are in the header, one line""" # Try to open the file. ftext = openfile(fname) # Get the first non-empty line as the varnames while True: line = ftext.readline() firstline = line[:line.find(commentchar)].strip() _varnames = firstline.split(delimitor) if len(_varnames) > 1: break if varnames is None: varnames = _varnames # Get the data. _variables = masked_array([line.strip().split(delimitor) for line in ftext if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape ftext.close() # Try to guess the dtype. if vartypes is None: vartypes = _guessvartypes(_variables[0]) else: vartypes = [np.dtype(v) for v in vartypes] if len(vartypes) != nfields: msg = "Attempting to %i dtypes for %i fields!" msg += " Reverting to default." warnings.warn(msg % (len(vartypes), nfields), stacklevel=2) vartypes = _guessvartypes(_variables[0]) # Construct the descriptor. mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] mfillv = [ma.default_fill_value(f) for f in vartypes] # Get the data and the mask. # We just need a list of masked_arrays. It's easier to create it like that: _mask = (_variables.T == missingchar) _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] return fromarrays(_datalist, dtype=mdescr) def addfield(mrecord, newfield, newfieldname=None): """Adds a new field to the masked record array Uses `newfield` as data and `newfieldname` as name. If `newfieldname` is None, the new field name is set to 'fi', where `i` is the number of existing fields. """ _data = mrecord._data _mask = mrecord._mask if newfieldname is None or newfieldname in reserved_fields: newfieldname = 'f%i' % len(_data.dtype) newfield = ma.array(newfield) # Get the new data. 
# Create a new empty recarray newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) newdata = recarray(_data.shape, newdtype) # Add the existing field [newdata.setfield(_data.getfield(*f), *f) for f in _data.dtype.fields.values()] # Add the new field newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) newdata = newdata.view(MaskedRecords) # Get the new mask # Create a new empty recarray newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) newmask = recarray(_data.shape, newmdtype) # Add the old masks [newmask.setfield(_mask.getfield(*f), *f) for f in _mask.dtype.fields.values()] # Add the mask of the new field newmask.setfield(getmaskarray(newfield), *newmask.dtype.fields[newfieldname]) newdata._mask = newmask return newdata
27,435
33.42409
82
py
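# Illustrative usage sketch (not part of the original numpy/ma/mrecords.py
# file): a minimal example of building a MaskedRecords array with
# fromarrays() and extending it with addfield(), as defined in the source
# above. The field names and values are made up for demonstration.
import numpy.ma as ma
from numpy.ma.mrecords import fromarrays, addfield

ages = ma.array([25, 45, 60], mask=[0, 1, 0])
weights = ma.array([55.0, 70.0, 82.5], mask=[0, 0, 1])

# Each input array becomes one field; per-field masks are preserved.
rec = fromarrays([ages, weights], names='age,weight')
print(rec.age)            # [25 -- 60]
print(rec[1])             # the whole second record, with 'age' masked

# addfield() returns a new MaskedRecords with one extra field appended.
rec2 = addfield(rec, ma.array([1, 2, 3]), newfieldname='id')
print(rec2.dtype.names)   # ('age', 'weight', 'id')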
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/extras.py
""" Masked arrays add-ons. A collection of utilities for `numpy.ma`. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu :version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ from __future__ import division, absolute_import, print_function __all__ = [ 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', 'setdiff1d', 'setxor1d', 'unique', 'union1d', 'vander', 'vstack', ] import itertools import warnings from . import core as ma from .core import ( MaskedArray, MAError, add, array, asarray, concatenate, filled, count, getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, mask_rowcols ) import numpy as np from numpy import ndarray, array as nxarray import numpy.core.umath as umath from numpy.core.multiarray import normalize_axis_index from numpy.core.numeric import normalize_axis_tuple from numpy.lib.function_base import _ureduce from numpy.lib.index_tricks import AxisConcatenator def issequence(seq): """ Is seq a sequence (ndarray, list or tuple)? """ return isinstance(seq, (ndarray, tuple, list)) def count_masked(arr, axis=None): """ Count the number of masked elements along the given axis. Parameters ---------- arr : array_like An array with (possibly) masked elements. axis : int, optional Axis along which to count. If None (default), a flattened version of the array is used. Returns ------- count : int, ndarray The total number of masked elements (axis=None) or the number of masked elements along each slice of the given axis. See Also -------- MaskedArray.count : Count non-masked elements. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(9).reshape((3,3)) >>> a = ma.array(a) >>> a[1, 0] = ma.masked >>> a[1, 2] = ma.masked >>> a[2, 1] = ma.masked >>> a masked_array(data = [[0 1 2] [-- 4 --] [6 -- 8]], mask = [[False False False] [ True False True] [False True False]], fill_value=999999) >>> ma.count_masked(a) 3 When the `axis` keyword is used an array is returned. >>> ma.count_masked(a, axis=0) array([1, 1, 1]) >>> ma.count_masked(a, axis=1) array([0, 2, 1]) """ m = getmaskarray(arr) return m.sum(axis) def masked_all(shape, dtype=float): """ Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. Parameters ---------- shape : tuple Shape of the required MaskedArray. dtype : dtype, optional Data type of the output. Returns ------- a : MaskedArray A masked array with all data masked. See Also -------- masked_all_like : Empty masked array modelled on an existing array. Examples -------- >>> import numpy.ma as ma >>> ma.masked_all((3, 3)) masked_array(data = [[-- -- --] [-- -- --] [-- -- --]], mask = [[ True True True] [ True True True] [ True True True]], fill_value=1e+20) The `dtype` parameter defines the underlying data type. 
>>> a = ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32') """ a = masked_array(np.empty(shape, dtype), mask=np.ones(shape, make_mask_descr(dtype))) return a def masked_all_like(arr): """ Empty masked array with the properties of an existing array. Return an empty masked array of the same shape and dtype as the array `arr`, where all the data are masked. Parameters ---------- arr : ndarray An array describing the shape and dtype of the required MaskedArray. Returns ------- a : MaskedArray A masked array with all data masked. Raises ------ AttributeError If `arr` doesn't have a shape attribute (i.e. not an ndarray) See Also -------- masked_all : Empty masked array with all elements masked. Examples -------- >>> import numpy.ma as ma >>> arr = np.zeros((2, 3), dtype=np.float32) >>> arr array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) >>> ma.masked_all_like(arr) masked_array(data = [[-- -- --] [-- -- --]], mask = [[ True True True] [ True True True]], fill_value=1e+20) The dtype of the masked array matches the dtype of `arr`. >>> arr.dtype dtype('float32') >>> ma.masked_all_like(arr).dtype dtype('float32') """ a = np.empty_like(arr).view(MaskedArray) a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) return a #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- class _fromnxfunction(object): """ Defines a wrapper to adapt NumPy functions to masked arrays. An instance of `_fromnxfunction` can be called with the same parameters as the wrapped NumPy function. The docstring of `newfunc` is adapted from the wrapped function as well, see `getdoc`. This class should not be used directly. Instead, one of its extensions that provides support for a specific type of input should be used. Parameters ---------- funcname : str The name of the function to be adapted. The function should be in the NumPy namespace (i.e. ``np.funcname``). """ def __init__(self, funcname): self.__name__ = funcname self.__doc__ = self.getdoc() def getdoc(self): """ Retrieve the docstring and signature from the function. The ``__doc__`` attribute of the function is used as the docstring for the new masked array version of the function. A note on application of the function to the mask is appended. .. warning:: If the function docstring already contained a Notes section, the new docstring will have two Notes sections instead of appending a note to the existing section. Parameters ---------- None """ npfunc = getattr(np, self.__name__, None) doc = getattr(npfunc, '__doc__', None) if doc: sig = self.__name__ + ma.get_object_signature(npfunc) locdoc = "Notes\n-----\nThe function is applied to both the _data"\ " and the _mask, if any." return '\n'.join((sig, doc, locdoc)) return def __call__(self, *args, **params): pass class _fromnxfunction_single(_fromnxfunction): """ A version of `_fromnxfunction` that is called with a single array argument followed by auxiliary args that are passed verbatim for both the data and mask calls. 
""" def __call__(self, x, *args, **params): func = getattr(np, self.__name__) if isinstance(x, ndarray): _d = func(x.__array__(), *args, **params) _m = func(getmaskarray(x), *args, **params) return masked_array(_d, mask=_m) else: _d = func(np.asarray(x), *args, **params) _m = func(getmaskarray(x), *args, **params) return masked_array(_d, mask=_m) class _fromnxfunction_seq(_fromnxfunction): """ A version of `_fromnxfunction` that is called with a single sequence of arrays followed by auxiliary args that are passed verbatim for both the data and mask calls. """ def __call__(self, x, *args, **params): func = getattr(np, self.__name__) _d = func(tuple([np.asarray(a) for a in x]), *args, **params) _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) return masked_array(_d, mask=_m) class _fromnxfunction_args(_fromnxfunction): """ A version of `_fromnxfunction` that is called with multiple array arguments. The first non-array-like input marks the beginning of the arguments that are passed verbatim for both the data and mask calls. Array arguments are processed independently and the results are returned in a list. If only one array is found, the return value is just the processed array instead of a list. """ def __call__(self, *args, **params): func = getattr(np, self.__name__) arrays = [] args = list(args) while len(args) > 0 and issequence(args[0]): arrays.append(args.pop(0)) res = [] for x in arrays: _d = func(np.asarray(x), *args, **params) _m = func(getmaskarray(x), *args, **params) res.append(masked_array(_d, mask=_m)) if len(arrays) == 1: return res[0] return res class _fromnxfunction_allargs(_fromnxfunction): """ A version of `_fromnxfunction` that is called with multiple array arguments. Similar to `_fromnxfunction_args` except that all args are converted to arrays even if they are not so already. This makes it possible to process scalars as 1-D arrays. Only keyword arguments are passed through verbatim for the data and mask calls. Arrays arguments are processed independently and the results are returned in a list. If only one arg is present, the return value is just the processed array instead of a list. 
""" def __call__(self, *args, **params): func = getattr(np, self.__name__) res = [] for x in args: _d = func(np.asarray(x), **params) _m = func(getmaskarray(x), **params) res.append(masked_array(_d, mask=_m)) if len(args) == 1: return res[0] return res atleast_1d = _fromnxfunction_allargs('atleast_1d') atleast_2d = _fromnxfunction_allargs('atleast_2d') atleast_3d = _fromnxfunction_allargs('atleast_3d') vstack = row_stack = _fromnxfunction_seq('vstack') hstack = _fromnxfunction_seq('hstack') column_stack = _fromnxfunction_seq('column_stack') dstack = _fromnxfunction_seq('dstack') hsplit = _fromnxfunction_single('hsplit') diagflat = _fromnxfunction_single('diagflat') #####-------------------------------------------------------------------------- #---- #####-------------------------------------------------------------------------- def flatten_inplace(seq): """Flatten a sequence in place.""" k = 0 while (k != len(seq)): while hasattr(seq[k], '__iter__'): seq[k:(k + 1)] = seq[k] k += 1 return seq def apply_along_axis(func1d, axis, arr, *args, **kwargs): """ (This docstring should be overwritten) """ arr = array(arr, copy=False, subok=True) nd = arr.ndim axis = normalize_axis_index(axis, nd) ind = [0] * (nd - 1) i = np.zeros(nd, 'O') indlist = list(range(nd)) indlist.remove(axis) i[axis] = slice(None, None) outshape = np.asarray(arr.shape).take(indlist) i.put(indlist, ind) j = i.copy() res = func1d(arr[tuple(i.tolist())], *args, **kwargs) # if res is a number, then we have a smaller output array asscalar = np.isscalar(res) if not asscalar: try: len(res) except TypeError: asscalar = True # Note: we shouldn't set the dtype of the output from the first result # so we force the type to object, and build a list of dtypes. We'll # just take the largest, to avoid some downcasting dtypes = [] if asscalar: dtypes.append(np.asarray(res).dtype) outarr = zeros(outshape, object) outarr[tuple(ind)] = res Ntot = np.product(outshape) k = 1 while k < Ntot: # increment the index ind[-1] += 1 n = -1 while (ind[n] >= outshape[n]) and (n > (1 - nd)): ind[n - 1] += 1 ind[n] = 0 n -= 1 i.put(indlist, ind) res = func1d(arr[tuple(i.tolist())], *args, **kwargs) outarr[tuple(ind)] = res dtypes.append(asarray(res).dtype) k += 1 else: res = array(res, copy=False, subok=True) j = i.copy() j[axis] = ([slice(None, None)] * res.ndim) j.put(indlist, ind) Ntot = np.product(outshape) holdshape = outshape outshape = list(arr.shape) outshape[axis] = res.shape dtypes.append(asarray(res).dtype) outshape = flatten_inplace(outshape) outarr = zeros(outshape, object) outarr[tuple(flatten_inplace(j.tolist()))] = res k = 1 while k < Ntot: # increment the index ind[-1] += 1 n = -1 while (ind[n] >= holdshape[n]) and (n > (1 - nd)): ind[n - 1] += 1 ind[n] = 0 n -= 1 i.put(indlist, ind) j.put(indlist, ind) res = func1d(arr[tuple(i.tolist())], *args, **kwargs) outarr[tuple(flatten_inplace(j.tolist()))] = res dtypes.append(asarray(res).dtype) k += 1 max_dtypes = np.dtype(np.asarray(dtypes).max()) if not hasattr(arr, '_mask'): result = np.asarray(outarr, dtype=max_dtypes) else: result = asarray(outarr, dtype=max_dtypes) result.fill_value = ma.default_fill_value(result) return result apply_along_axis.__doc__ = np.apply_along_axis.__doc__ def apply_over_axes(func, a, axes): """ (This docstring will be overwritten) """ val = asarray(a) N = a.ndim if array(axes).ndim == 0: axes = (axes,) for axis in axes: if axis < 0: axis = N + axis args = (val, axis) res = func(*args) if res.ndim == val.ndim: val = res else: res = ma.expand_dims(res, axis) if 
res.ndim == val.ndim: val = res else: raise ValueError("function is not returning " "an array of the correct shape") return val if apply_over_axes.__doc__ is not None: apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ """ Examples -------- >>> a = ma.arange(24).reshape(2,3,4) >>> a[:,0,1] = ma.masked >>> a[:,1,:] = ma.masked >>> print(a) [[[0 -- 2 3] [-- -- -- --] [8 9 10 11]] [[12 -- 14 15] [-- -- -- --] [20 21 22 23]]] >>> print(ma.apply_over_axes(ma.sum, a, [0,2])) [[[46] [--] [124]]] Tuple axis arguments to ufuncs are equivalent: >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1))) [[[46] [--] [124]]] """ def average(a, axis=None, weights=None, returned=False): """ Return the weighted average of array over the given axis. Parameters ---------- a : array_like Data to be averaged. Masked entries are not taken into account in the computation. axis : int, optional Axis along which to average `a`. If `None`, averaging is done over the flattened array. weights : array_like, optional The importance that each element has in the computation of the average. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If ``weights=None``, then all data in `a` are assumed to have a weight equal to one. If `weights` is complex, the imaginary parts are ignored. returned : bool, optional Flag indicating whether a tuple ``(result, sum of weights)`` should be returned as output (True), or just the result (False). Default is False. Returns ------- average, [sum_of_weights] : (tuple of) scalar or MaskedArray The average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `np.float64` if `a` is of integer type and floats smaller than `float64`, or the input data-type, otherwise. If returned, `sum_of_weights` is always `float64`. Examples -------- >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) >>> np.ma.average(a, weights=[3, 1, 0, 0]) 1.25 >>> x = np.ma.arange(6.).reshape(3, 2) >>> print(x) [[ 0. 1.] [ 2. 3.] [ 4. 5.]] >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], ... 
returned=True) >>> print(avg) [2.66666666667 3.66666666667] """ a = asarray(a) m = getmask(a) # inspired by 'average' in numpy/lib/function_base.py if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.count(axis)) else: wgt = np.asanyarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') else: result_dtype = np.result_type(a.dtype, wgt.dtype) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) wgt = wgt.swapaxes(-1, axis) if m is not nomask: wgt = wgt*(~a.mask) scl = wgt.sum(axis=axis, dtype=result_dtype) avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl if returned: if scl.shape != avg.shape: scl = np.broadcast_to(scl, avg.shape).copy() return avg, scl else: return avg def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int, optional Axis along which the medians are computed. The default (None) is to compute the median along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if `overwrite_input` is True, and the input is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. .. versionadded:: 1.10.0 Returns ------- median : ndarray A new array holding the result is returned unless out is specified, in which case a reference to out is returned. Return data-type is `float64` for integers and floats smaller than `float64`, or the input data-type, otherwise. See Also -------- mean Notes ----- Given a vector ``V`` with ``N`` non masked values, the median of ``V`` is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` when ``N`` is even. Examples -------- >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) >>> np.ma.median(x) 1.5 >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) >>> np.ma.median(x) 2.5 >>> np.ma.median(x, axis=-1, overwrite_input=True) masked_array(data = [ 2. 
5.], mask = False, fill_value = 1e+20) """ if not hasattr(a, 'mask'): m = np.median(getdata(a, subok=True), axis=axis, out=out, overwrite_input=overwrite_input, keepdims=keepdims) if isinstance(m, np.ndarray) and 1 <= m.ndim: return masked_array(m, copy=False) else: return m r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # when an unmasked NaN is present return it, so we need to sort the NaN # values behind the mask if np.issubdtype(a.dtype, np.inexact): fill_value = np.inf else: fill_value = None if overwrite_input: if axis is None: asorted = a.ravel() asorted.sort(fill_value=fill_value) else: a.sort(axis=axis, fill_value=fill_value) asorted = a else: asorted = sort(a, axis=axis, fill_value=fill_value) if axis is None: axis = 0 else: axis = normalize_axis_index(axis, asorted.ndim) if asorted.shape[axis] == 0: # for an empty axis, integer indices fail, so use slicing to get the # same result as median (which is the mean of an empty slice = nan) indexer = [slice(None)] * asorted.ndim indexer[axis] = slice(0, 0) return np.ma.mean(asorted[indexer], axis=axis, out=out) if asorted.ndim == 1: counts = count(asorted) idx, odd = divmod(count(asorted), 2) mid = asorted[idx + odd - 1:idx + 1] if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: # avoid inf / x = masked s = mid.sum(out=out) if not odd: s = np.true_divide(s, 2., casting='safe', out=out) s = np.lib.utils._median_nancheck(asorted, s, axis, out) else: s = mid.mean(out=out) # If the result is masked, either the input contained enough # minimum_fill_value entries for one to be the median, or all # values are masked if np.ma.is_masked(s) and not np.all(asorted.mask): return np.ma.minimum_fill_value(asorted) return s counts = count(asorted, axis=axis) h = counts // 2 # create indexing mesh grid for all but reduced axis axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape) if i != axis] ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij') # insert indices of low and high median ind.insert(axis, h - 1) low = asorted[tuple(ind)] ind[axis] = np.minimum(h, asorted.shape[axis] - 1) high = asorted[tuple(ind)] def replace_masked(s): # Replace masked entries with minimum_fill_value unless all values # are masked. This is required because the sort order of values equal # to or larger than the fill value is undefined, so a valid value may # end up placed elsewhere, e.g. [4, --, inf]. if np.ma.is_masked(s): rep = (~np.all(asorted.mask, axis=axis)) & s.mask s.data[rep] = np.ma.minimum_fill_value(asorted) s.mask[rep] = False replace_masked(low) replace_masked(high) # duplicate high if odd number of elements so mean does nothing odd = counts % 2 == 1 np.copyto(low, high, where=odd) # not necessary for scalar True/False masks try: np.copyto(low.mask, high.mask, where=odd) except Exception: pass if np.issubdtype(asorted.dtype, np.inexact): # avoid inf / x = masked s = np.ma.sum([low, high], axis=0, out=out) np.true_divide(s.data, 2., casting='unsafe', out=s.data) s = np.lib.utils._median_nancheck(asorted, s, axis, out) else: s = np.ma.mean([low, high], axis=0, out=out) return s def compress_nd(x, axis=None): """Suppress slices from multiple dimensions which contain masked values. Parameters ---------- x : array_like, MaskedArray The array to operate on. If not a MaskedArray instance (or if no array elements are masked), `x` is interpreted as a MaskedArray with `mask` set to `nomask`.
axis : tuple of ints or int, optional Which dimensions to suppress slices from can be configured with this parameter. - If axis is a tuple of ints, those are the axes to suppress slices from. - If axis is an int, then that is the only axis to suppress slices from. - If axis is None, all axis are selected. Returns ------- compress_array : ndarray The compressed array. """ x = asarray(x) m = getmask(x) # Set axis to tuple of ints if axis is None: axis = tuple(range(x.ndim)) else: axis = normalize_axis_tuple(axis, x.ndim) # Nothing is masked: return x if m is nomask or not m.any(): return x._data # All is masked: return empty if m.all(): return nxarray([]) # Filter elements through boolean indexing data = x._data for ax in axis: axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) data = data[(slice(None),)*ax + (~m.any(axis=axes),)] return data def compress_rowcols(x, axis=None): """ Suppress the rows and/or columns of a 2-D array that contain masked values. The suppression behavior is selected with the `axis` parameter. - If axis is None, both rows and columns are suppressed. - If axis is 0, only rows are suppressed. - If axis is 1 or -1, only columns are suppressed. Parameters ---------- x : array_like, MaskedArray The array to operate on. If not a MaskedArray instance (or if no array elements are masked), `x` is interpreted as a MaskedArray with `mask` set to `nomask`. Must be a 2D array. axis : int, optional Axis along which to perform the operation. Default is None. Returns ------- compressed_array : ndarray The compressed array. Examples -------- >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> x masked_array(data = [[-- 1 2] [-- 4 5] [6 7 8]], mask = [[ True False False] [ True False False] [False False False]], fill_value = 999999) >>> np.ma.compress_rowcols(x) array([[7, 8]]) >>> np.ma.compress_rowcols(x, 0) array([[6, 7, 8]]) >>> np.ma.compress_rowcols(x, 1) array([[1, 2], [4, 5], [7, 8]]) """ if asarray(x).ndim != 2: raise NotImplementedError("compress_rowcols works for 2D arrays only.") return compress_nd(x, axis=axis) def compress_rows(a): """ Suppress whole rows of a 2-D array that contain masked values. This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see `extras.compress_rowcols` for details. See Also -------- extras.compress_rowcols """ a = asarray(a) if a.ndim != 2: raise NotImplementedError("compress_rows works for 2D arrays only.") return compress_rowcols(a, 0) def compress_cols(a): """ Suppress whole columns of a 2-D array that contain masked values. This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see `extras.compress_rowcols` for details. See Also -------- extras.compress_rowcols """ a = asarray(a) if a.ndim != 2: raise NotImplementedError("compress_cols works for 2D arrays only.") return compress_rowcols(a, 1) def mask_rows(a, axis=None): """ Mask rows of a 2D array that contain masked values. This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. See Also -------- mask_rowcols : Mask rows and/or columns of a 2D array. masked_where : Mask where a condition is met. 
Examples -------- >>> import numpy.ma as ma >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) >>> a = ma.masked_equal(a, 1) >>> a masked_array(data = [[0 0 0] [0 -- 0] [0 0 0]], mask = [[False False False] [False True False] [False False False]], fill_value=999999) >>> ma.mask_rows(a) masked_array(data = [[0 0 0] [-- -- --] [0 0 0]], mask = [[False False False] [ True True True] [False False False]], fill_value=999999) """ return mask_rowcols(a, 0) def mask_cols(a, axis=None): """ Mask columns of a 2D array that contain masked values. This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. See Also -------- mask_rowcols : Mask rows and/or columns of a 2D array. masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) >>> a = ma.masked_equal(a, 1) >>> a masked_array(data = [[0 0 0] [0 -- 0] [0 0 0]], mask = [[False False False] [False True False] [False False False]], fill_value=999999) >>> ma.mask_cols(a) masked_array(data = [[0 -- 0] [0 -- 0] [0 -- 0]], mask = [[False True False] [False True False] [False True False]], fill_value=999999) """ return mask_rowcols(a, 1) #####-------------------------------------------------------------------------- #---- --- arraysetops --- #####-------------------------------------------------------------------------- def ediff1d(arr, to_end=None, to_begin=None): """ Compute the differences between consecutive elements of an array. This function is the equivalent of `numpy.ediff1d` that takes masked values into account, see `numpy.ediff1d` for details. See Also -------- numpy.ediff1d : Equivalent function for ndarrays. """ arr = ma.asanyarray(arr).flat ed = arr[1:] - arr[:-1] arrays = [ed] # if to_begin is not None: arrays.insert(0, to_begin) if to_end is not None: arrays.append(to_end) # if len(arrays) != 1: # We'll save ourselves a copy of a potentially large array in the common # case where neither to_begin or to_end was given. ed = hstack(arrays) # return ed def unique(ar1, return_index=False, return_inverse=False): """ Finds the unique elements of an array. Masked values are considered the same element (masked). The output array is always a masked array. See `numpy.unique` for more details. See Also -------- numpy.unique : Equivalent function for ndarrays. """ output = np.unique(ar1, return_index=return_index, return_inverse=return_inverse) if isinstance(output, tuple): output = list(output) output[0] = output[0].view(MaskedArray) output = tuple(output) else: output = output.view(MaskedArray) return output def intersect1d(ar1, ar2, assume_unique=False): """ Returns the unique elements common to both arrays. Masked values are considered equal one to the other. The output is always a masked array. See `numpy.intersect1d` for more details. See Also -------- numpy.intersect1d : Equivalent function for ndarrays. Examples -------- >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) >>> intersect1d(x, y) masked_array(data = [1 3 --], mask = [False False True], fill_value = 999999) """ if assume_unique: aux = ma.concatenate((ar1, ar2)) else: # Might be faster than unique( intersect1d( ar1, ar2 ) )? aux = ma.concatenate((unique(ar1), unique(ar2))) aux.sort() return aux[:-1][aux[1:] == aux[:-1]] def setxor1d(ar1, ar2, assume_unique=False): """ Set exclusive-or of 1-D arrays with unique elements. 
The output is always a masked array. See `numpy.setxor1d` for more details. See Also -------- numpy.setxor1d : Equivalent function for ndarrays. """ if not assume_unique: ar1 = unique(ar1) ar2 = unique(ar2) aux = ma.concatenate((ar1, ar2)) if aux.size == 0: return aux aux.sort() auxf = aux.filled() # flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) # flag2 = ediff1d( flag ) == 0 flag2 = (flag[1:] == flag[:-1]) return aux[flag2] def in1d(ar1, ar2, assume_unique=False, invert=False): """ Test whether each element of an array is also present in a second array. The output is always a masked array. See `numpy.in1d` for more details. We recommend using :func:`isin` instead of `in1d` for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.in1d : Equivalent function for ndarrays. Notes ----- .. versionadded:: 1.4.0 """ if not assume_unique: ar1, rev_idx = unique(ar1, return_inverse=True) ar2 = unique(ar2) ar = ma.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = ma.concatenate((bool_ar, [invert])) indx = order.argsort(kind='mergesort')[:len(ar1)] if assume_unique: return flag[indx] else: return flag[indx][rev_idx] def isin(element, test_elements, assume_unique=False, invert=False): """ Calculates `element in test_elements`, broadcasting over `element` only. The output is always a masked array of the same shape as `element`. See `numpy.isin` for more details. See Also -------- in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. Notes ----- .. versionadded:: 1.13.0 """ element = ma.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, invert=invert).reshape(element.shape) def union1d(ar1, ar2): """ Union of two arrays. The output is always a masked array. See `numpy.union1d` for more details. See also -------- numpy.union1d : Equivalent function for ndarrays. """ return unique(ma.concatenate((ar1, ar2), axis=None)) def setdiff1d(ar1, ar2, assume_unique=False): """ Set difference of 1D arrays with unique elements. The output is always a masked array. See `numpy.setdiff1d` for more details. See Also -------- numpy.setdiff1d : Equivalent function for ndarrays. Examples -------- >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) masked_array(data = [3 --], mask = [False True], fill_value = 999999) """ if assume_unique: ar1 = ma.asarray(ar1).ravel() else: ar1 = unique(ar1) ar2 = unique(ar2) return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] ############################################################################### # Covariance # ############################################################################### def _covhelper(x, y=None, rowvar=True, allow_masked=True): """ Private function for the computation of covariance and correlation coefficients. 
""" x = ma.array(x, ndmin=2, copy=True, dtype=float) xmask = ma.getmaskarray(x) # Quick exit if we can't process masked data if not allow_masked and xmask.any(): raise ValueError("Cannot process masked data.") # if x.shape[0] == 1: rowvar = True # Make sure that rowvar is either 0 or 1 rowvar = int(bool(rowvar)) axis = 1 - rowvar if rowvar: tup = (slice(None), None) else: tup = (None, slice(None)) # if y is None: xnotmask = np.logical_not(xmask).astype(int) else: y = array(y, copy=False, ndmin=2, dtype=float) ymask = ma.getmaskarray(y) if not allow_masked and ymask.any(): raise ValueError("Cannot process masked data.") if xmask.any() or ymask.any(): if y.shape == x.shape: # Define some common mask common_mask = np.logical_or(xmask, ymask) if common_mask is not nomask: xmask = x._mask = y._mask = ymask = common_mask x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): """ Estimate the covariance matrix. Except for the handling of missing data this function does the same as `numpy.cov`. For more details and examples, see `numpy.cov`. By default, masked values are recognized as such. If `x` and `y` have the same shape, a common mask is allocated: if ``x[i,j]`` is masked, then ``y[i,j]`` will also be masked. Setting `allow_masked` to False will raise an exception if values are missing in either of the input arrays. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : bool, optional Default normalization (False) is by ``(N-1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is True, then normalization is by ``N``. This keyword can be overridden by the keyword ``ddof`` in numpy versions >= 1.5. allow_masked : bool, optional If True, masked values are propagated pair-wise: if a value is masked in `x`, the corresponding value is masked in `y`. If False, raises a `ValueError` exception when some values are missing. ddof : {None, int}, optional If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is the number of observations; this overrides the value implied by ``bias``. The default value is ``None``. .. versionadded:: 1.5 Raises ------ ValueError Raised if some values are missing and `allow_masked` is False. See Also -------- numpy.cov """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError("ddof must be an integer") # Set up ddof if ddof is None: if bias: ddof = 0 else: ddof = 1 (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() else: fact = np.dot(xnotmask, xnotmask.T) * 1. 
- ddof result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() return result def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Except for the handling of missing data this function does the same as `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 allow_masked : bool, optional If True, masked values are propagated pair-wise: if a value is masked in `x`, the corresponding value is masked in `y`. If False, raises an exception. Because `bias` is deprecated, this argument needs to be treated as keyword only to avoid a warning. ddof : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 See Also -------- numpy.corrcoef : Equivalent function in top-level NumPy module. cov : Estimate the covariance matrix. Notes ----- This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ msg = 'bias and ddof have no effect and are deprecated' if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn(msg, DeprecationWarning, stacklevel=2) # Get the data (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) # Compute the covariance matrix if not rowvar: fact = np.dot(xnotmask.T, xnotmask) * 1. c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() else: fact = np.dot(xnotmask, xnotmask.T) * 1. c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() # Check whether we have a scalar try: diag = ma.diagonal(c) except ValueError: return 1 # if xnotmask.all(): _denom = ma.sqrt(ma.multiply.outer(diag, diag)) else: _denom = diagflat(diag) _denom._sharedmask = False # We know return is always a copy n = x.shape[1 - rowvar] if rowvar: for i in range(n - 1): for j in range(i + 1, n): _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) else: for i in range(n - 1): for j in range(i + 1, n): _x = mask_cols( vstack((x[:, i], x[:, j]))).var(axis=1) _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) return c / _denom #####-------------------------------------------------------------------------- #---- --- Concatenation helpers --- #####-------------------------------------------------------------------------- class MAxisConcatenator(AxisConcatenator): """ Translate slice objects to concatenation along an axis. For documentation on usage, see `mr_class`. 
See Also -------- mr_class """ concatenate = staticmethod(concatenate) @staticmethod def makemat(arr): return array(arr.data.view(np.matrix), mask=arr.mask) def __getitem__(self, key): # matrix builder syntax, like 'a, b; c, d' if isinstance(key, str): raise MAError("Unavailable for masked array.") return super(MAxisConcatenator, self).__getitem__(key) class mr_class(MAxisConcatenator): """ Translate slice objects to concatenation along the first axis. This is the masked array version of `lib.index_tricks.RClass`. See Also -------- lib.index_tricks.RClass Examples -------- >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6]) """ def __init__(self): MAxisConcatenator.__init__(self, 0) mr_ = mr_class() #####-------------------------------------------------------------------------- #---- Find unmasked data --- #####-------------------------------------------------------------------------- def flatnotmasked_edges(a): """ Find the indices of the first and last unmasked values. Expects a 1-D `MaskedArray`, returns None if all values are masked. Parameters ---------- a : array_like Input 1-D `MaskedArray` Returns ------- edges : ndarray or None The indices of the first and last non-masked values in the array. Returns None if all values are masked. See Also -------- flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, clump_masked, clump_unmasked Notes ----- Only accepts 1-D arrays. Examples -------- >>> a = np.ma.arange(10) >>> flatnotmasked_edges(a) array([0, 9]) >>> mask = (a < 3) | (a > 8) | (a == 5) >>> a[mask] = np.ma.masked >>> np.array(a[~a.mask]) array([3, 4, 6, 7, 8]) >>> flatnotmasked_edges(a) array([3, 8]) >>> a[:] = np.ma.masked >>> print(flatnotmasked_edges(a)) None """ m = getmask(a) if m is nomask or not np.any(m): return np.array([0, a.size - 1]) unmasked = np.flatnonzero(~m) if len(unmasked) > 0: return unmasked[[0, -1]] else: return None def notmasked_edges(a, axis=None): """ Find the indices of the first and last unmasked values along an axis. If all values are masked, return None. Otherwise, return a list of two tuples, corresponding to the indices of the first and last unmasked values respectively. Parameters ---------- a : array_like The input array. axis : int, optional Axis along which to perform the operation. If None (default), applies to a flattened version of the array. Returns ------- edges : ndarray or list An array of start and end indexes if there are any masked data in the array. If there are no masked data in the array, `edges` is a list of the first and last index. See Also -------- flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous, clump_masked, clump_unmasked Examples -------- >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 >>> am = np.ma.array(a, mask=m) >>> np.array(am[~am.mask]) array([0, 1, 2, 3, 6]) >>> np.ma.notmasked_edges(am) array([0, 6]) """ a = asarray(a) if axis is None or a.ndim == 1: return flatnotmasked_edges(a) m = getmaskarray(a) idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] def flatnotmasked_contiguous(a): """ Find contiguous unmasked data in a masked array. Parameters ---------- a : ndarray The input array. Returns ------- slice_list : list A sorted sequence of slices (start index, end index).
See Also -------- flatnotmasked_edges, notmasked_contiguous, notmasked_edges, clump_masked, clump_unmasked Notes ----- The search is performed on the flattened version of the array. Examples -------- >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_contiguous(a) slice(0, 10, None) >>> mask = (a < 3) | (a > 8) | (a == 5) >>> a[mask] = np.ma.masked >>> np.array(a[~a.mask]) array([3, 4, 6, 7, 8]) >>> np.ma.flatnotmasked_contiguous(a) [slice(3, 5, None), slice(6, 9, None)] >>> a[:] = np.ma.masked >>> print(np.ma.flatnotmasked_contiguous(a)) None """ m = getmask(a) if m is nomask: return slice(0, a.size, None) i = 0 result = [] for (k, g) in itertools.groupby(m.ravel()): n = len(list(g)) if not k: result.append(slice(i, i + n)) i += n return result or None def notmasked_contiguous(a, axis=None): """ Find contiguous unmasked data in a masked array along the given axis. Parameters ---------- a : array_like The input array. axis : int, optional Axis along which to perform the operation. If None (default), applies to a flattened version of the array. Returns ------- endpoints : list A list of slices (start and end indexes) of unmasked indexes in the array. See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, clump_masked, clump_unmasked Notes ----- Only accepts 2-D arrays at most. Examples -------- >>> a = np.arange(9).reshape((3, 3)) >>> mask = np.zeros_like(a) >>> mask[1:, 1:] = 1 >>> am = np.ma.array(a, mask=mask) >>> np.array(am[~am.mask]) array([0, 1, 2, 3, 6]) >>> np.ma.notmasked_contiguous(am) [slice(0, 4, None), slice(6, 7, None)] """ a = asarray(a) nd = a.ndim if nd > 2: raise NotImplementedError("Currently limited to at most 2D arrays.") if axis is None or nd == 1: return flatnotmasked_contiguous(a) # result = [] # other = (axis + 1) % 2 idx = [0, 0] idx[axis] = slice(None, None) # for i in range(a.shape[other]): idx[other] = i result.append(flatnotmasked_contiguous(a[idx]) or None) return result def _ezclump(mask): """ Finds the clumps (groups of data with the same values) for a 1D bool array. Returns a series of slices. """ if mask.ndim > 1: mask = mask.ravel() idx = (mask[1:] ^ mask[:-1]).nonzero() idx = idx[0] + 1 if mask[0]: if len(idx) == 0: return [slice(0, mask.size)] r = [slice(0, idx[0])] r.extend((slice(left, right) for left, right in zip(idx[1:-1:2], idx[2::2]))) else: if len(idx) == 0: return [] r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] if mask[-1]: r.append(slice(idx[-1], mask.size)) return r def clump_unmasked(a): """ Return list of slices corresponding to the unmasked clumps of a 1-D array. (A "clump" is defined as a contiguous region of the array). Parameters ---------- a : ndarray A one-dimensional masked array. Returns ------- slices : list of slice The list of slices, one for each continuous region of unmasked elements in `a`. Notes ----- .. versionadded:: 1.4.0 See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, notmasked_contiguous, clump_masked Examples -------- >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_unmasked(a) [slice(3, 6, None), slice(7, 8, None)] """ mask = getattr(a, '_mask', nomask) if mask is nomask: return [slice(0, a.size)] return _ezclump(~mask) def clump_masked(a): """ Returns a list of slices corresponding to the masked clumps of a 1-D array. (A "clump" is defined as a contiguous region of the array). Parameters ---------- a : ndarray A one-dimensional masked array.
Returns ------- slices : list of slice The list of slices, one for each continuous region of masked elements in `a`. Notes ----- .. versionadded:: 1.4.0 See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, notmasked_contiguous, clump_unmasked Examples -------- >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] """ mask = ma.getmask(a) if mask is nomask: return [] return _ezclump(mask) ############################################################################### # Polynomial fit # ############################################################################### def vander(x, n=None): """ Masked values in the input array result in rows of zeros. """ _vander = np.vander(x, n) m = getmask(x) if m is not nomask: _vander[m] = 0 return _vander vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): """ Any masked values in x are propagated to y, and vice-versa. """ x = asarray(x) y = asarray(y) m = getmask(x) if y.ndim == 1: m = mask_or(m, getmask(y)) elif y.ndim == 2: my = getmask(mask_rows(y)) if my is not nomask: m = mask_or(m, my[:, 0]) else: raise TypeError("Expected a 1D or 2D array for y!") if w is not None: w = asarray(w) if w.ndim != 1: raise TypeError("expected a 1-d array for weights") if w.shape[0] != y.shape[0]: raise TypeError("expected w and y to have the same length") m = mask_or(m, getmask(w)) if m is not nomask: not_m = ~m if w is not None: w = w[not_m] return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) else: return np.polyfit(x, y, deg, rcond, full, w, cov) polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
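# A minimal usage sketch (not part of the original module): it exercises a few
# of the helpers defined above through the public numpy.ma namespace and can
# be run standalone. The array and mask below are illustrative only.
if __name__ == '__main__':
    import numpy as np
    import numpy.ma as ma

    a = ma.masked_array(np.arange(10, dtype=float),
                        mask=[1, 1, 0, 0, 0, 1, 0, 0, 1, 1])
    # median skips masked entries: the unmasked data are [2, 3, 4, 6, 7]
    print(ma.median(a))                # 4.0
    # clump_* return slices delimiting contiguous (un)masked runs
    print(ma.clump_masked(a))          # [slice(0, 2, None), slice(5, 6, None), slice(8, 10, None)]
    print(ma.clump_unmasked(a))        # [slice(2, 5, None), slice(6, 8, None)]
    # flatnotmasked_edges gives the first and last unmasked indices
    print(ma.flatnotmasked_edges(a))   # [2 7]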
55,963
28.720659
83
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/testutils.py
"""Miscellaneous functions for testing masked arrays and subclasses :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu :version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ """ from __future__ import division, absolute_import, print_function import operator import numpy as np from numpy import ndarray, float_ import numpy.core.umath as umath import numpy.testing from numpy.testing import ( TestCase, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_raises, build_err_msg, run_module_suite ) from .core import mask_or, getmask, masked_array, nomask, masked, filled __all__masked = [ 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', 'assert_array_approx_equal', 'assert_array_compare', 'assert_array_equal', 'assert_array_less', 'assert_close', 'assert_equal', 'assert_equal_records', 'assert_mask_equal', 'assert_not_equal', 'fail_if_array_equal', ] # Include some normal test functions to avoid breaking other projects who # have mistakenly included them from this file. SciPy is one. That is # unfortunate, as some of these functions are not intended to work with # masked arrays. But there was no way to tell before. __some__from_testing = [ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', 'assert_raises', 'run_module_suite', ] __all__ = __all__masked + __some__from_testing def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): """ Returns true if all components of a and b are equal to given tolerances. If fill_value is True, masked values considered equal. Otherwise, masked values are considered unequal. The relative error rtol should be positive and << 1.0 The absolute error atol comes into play for those elements of b that are very small or zero; it says how small a must be also. """ m = mask_or(getmask(a), getmask(b)) d1 = filled(a) d2 = filled(b) if d1.dtype.char == "O" or d2.dtype.char == "O": return np.equal(d1, d2).ravel() x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) return d.ravel() def almost(a, b, decimal=6, fill_value=True): """ Returns True if a and b are equal up to decimal places. If fill_value is True, masked values considered equal. Otherwise, masked values are considered unequal. """ m = mask_or(getmask(a), getmask(b)) d1 = filled(a) d2 = filled(b) if d1.dtype.char == "O" or d2.dtype.char == "O": return np.equal(d1, d2).ravel() x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) return d.ravel() def _assert_equal_on_sequences(actual, desired, err_msg=''): """ Asserts the equality of two non-array sequences. """ assert_equal(len(actual), len(desired), err_msg) for k in range(len(desired)): assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) return def assert_equal_records(a, b): """ Asserts that two records are equal. Pretty crude for now. """ assert_equal(a.dtype, b.dtype) for f in a.dtype.names: (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) if not (af is masked) and not (bf is masked): assert_equal(operator.getitem(a, f), operator.getitem(b, f)) return def assert_equal(actual, desired, err_msg=''): """ Asserts that two items are equal. """ # Case #1: dictionary ..... 
if isinstance(desired, dict): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) for k, i in desired.items(): if k not in actual: raise AssertionError("%s not in %s" % (k, actual)) assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) return # Case #2: lists ..... if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): return _assert_equal_on_sequences(actual, desired, err_msg='') if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): msg = build_err_msg([actual, desired], err_msg,) if not desired == actual: raise AssertionError(msg) return # Case #4. arrays or equivalent if ((actual is masked) and not (desired is masked)) or \ ((desired is masked) and not (actual is masked)): msg = build_err_msg([actual, desired], err_msg, header='', names=('x', 'y')) raise ValueError(msg) actual = np.array(actual, copy=False, subok=True) desired = np.array(desired, copy=False, subok=True) (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) if actual_dtype.char == "S" and desired_dtype.char == "S": return _assert_equal_on_sequences(actual.tolist(), desired.tolist(), err_msg='') return assert_array_equal(actual, desired, err_msg) def fail_if_equal(actual, desired, err_msg='',): """ Raises an assertion error if two items are equal. """ if isinstance(desired, dict): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) fail_if_equal(len(actual), len(desired), err_msg) for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) for k in range(len(desired)): fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) msg = build_err_msg([actual, desired], err_msg) if not desired != actual: raise AssertionError(msg) assert_not_equal = fail_if_equal def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ Asserts that two items are almost equal. The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal). """ if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return assert_array_almost_equal(actual, desired, decimal=decimal, err_msg=err_msg, verbose=verbose) msg = build_err_msg([actual, desired], err_msg=err_msg, verbose=verbose) if not round(abs(desired - actual), decimal) == 0: raise AssertionError(msg) assert_close = assert_almost_equal def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', fill_value=True): """ Asserts that comparison between two masked arrays is satisfied. The comparison is elementwise. 
""" # Allocate a common mask and refill m = mask_or(getmask(x), getmask(y)) x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) if ((x is masked) and not (y is masked)) or \ ((y is masked) and not (x is masked)): msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, header=header, names=('x', 'y')) raise ValueError(msg) # OK, now run the basic tests on filled versions return np.testing.assert_array_compare(comparison, x.filled(fill_value), y.filled(fill_value), err_msg=err_msg, verbose=verbose, header=header) def assert_array_equal(x, y, err_msg='', verbose=True): """ Checks the elementwise equality of two masked arrays. """ assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal') def fail_if_array_equal(x, y, err_msg='', verbose=True): """ Raises an assertion error if two masked arrays are not equal elementwise. """ def compare(x, y): return (not np.alltrue(approx(x, y))) assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal') def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): """ Checks the equality of two masked arrays, up to given number odecimals. The equality is checked elementwise. """ def compare(x, y): "Returns the result of the loose comparison between x and y)." return approx(x, y, rtol=10. ** -decimal) assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not almost equal') def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): """ Checks the equality of two masked arrays, up to given number odecimals. The equality is checked elementwise. """ def compare(x, y): "Returns the result of the loose comparison between x and y)." return almost(x, y, decimal) assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not almost equal') def assert_array_less(x, y, err_msg='', verbose=True): """ Checks that x is smaller than y elementwise. """ assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') def assert_mask_equal(m1, m2, err_msg=''): """ Asserts the equality of two masks. """ if m1 is nomask: assert_(m2 is nomask) if m2 is nomask: assert_(m1 is nomask) assert_array_equal(m1, m2, err_msg=err_msg)
10,384
34.810345
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/version.py
"""Version number """ from __future__ import division, absolute_import, print_function version = '1.00' release = False if not release: from . import core from . import extras revision = [core.__revision__.split(':')[-1][:-1].strip(), extras.__revision__.split(':')[-1][:-1].strip(),] version += '.dev%04i' % max([int(rev) for rev in revision])
380
24.4
65
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/bench.py
#! /usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division, print_function import timeit import numpy # The timeit setup string below imports the name `ma` from __main__, so it # must be bound at module level or every timer call raises ImportError. import numpy.ma as ma ############################################################################### # Global variables # ############################################################################### # Small arrays xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3) ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3) zs = xs + 1j * ys m1 = [[True, False, False], [False, False, True]] m2 = [[True, False, True], [False, False, True]] nmxs = numpy.ma.array(xs, mask=m1) nmys = numpy.ma.array(ys, mask=m2) nmzs = numpy.ma.array(zs, mask=m1) # Big arrays xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) zl = xl + 1j * yl maskx = xl > 0.8 masky = yl < -0.8 nmxl = numpy.ma.array(xl, mask=maskx) nmyl = numpy.ma.array(yl, mask=masky) nmzl = numpy.ma.array(zl, mask=maskx) ############################################################################### # Functions # ############################################################################### def timer(s, v='', nloop=500, nrep=3): units = ["s", "ms", "µs", "ns"] scaling = [1, 1e3, 1e6, 1e9] print("%s : %-50s : " % (v, s), end=' ') varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) Timer = timeit.Timer(stmt=s, setup=setup) best = min(Timer.repeat(nrep, nloop)) / nloop if best > 0.0: order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) else: order = 3 print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, 3, best * scaling[order], units[order])) def compare_functions_1v(func, nloop=500, xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): funcname = func.__name__ print("-"*50) print("%s on small arrays" % funcname) module, data = "numpy.ma", "nmxs" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) print("%s on large arrays" % funcname) module, data = "numpy.ma", "nmxl" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return def compare_methods(methodname, args, vars='x', nloop=500, test=True, xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): print("-"*50) print("%s on small arrays" % methodname) data, ver = "nm%ss" % vars, 'numpy.ma' timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) print("%s on large arrays" % methodname) data, ver = "nm%sl" % vars, 'numpy.ma' timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) return def compare_functions_2v(func, nloop=500, test=True, xs=xs, nmxs=nmxs, ys=ys, nmys=nmys, xl=xl, nmxl=nmxl, yl=yl, nmyl=nmyl): funcname = func.__name__ print("-"*50) print("%s on small arrays" % funcname) module, data = "numpy.ma", "nmxs,nmys" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) print("%s on large arrays" % funcname) module, data = "numpy.ma", "nmxl,nmyl" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return if __name__ == '__main__': compare_functions_1v(numpy.sin) compare_functions_1v(numpy.log) compare_functions_1v(numpy.sqrt) compare_functions_2v(numpy.multiply) compare_functions_2v(numpy.divide) compare_functions_2v(numpy.power) compare_methods('ravel', '', nloop=1000) compare_methods('conjugate', '', 'z', nloop=1000) compare_methods('transpose', '', nloop=1000) compare_methods('compressed', '', nloop=1000) compare_methods('__getitem__', '0', nloop=1000) compare_methods('__getitem__', '(0,0)',
nloop=1000) compare_methods('__getitem__', '[0,-1]', nloop=1000) compare_methods('__setitem__', '0, 17', nloop=1000, test=False) compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) print("-"*50) print("__setitem__ on small arrays") timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) print("-"*50) print("__setitem__ on large arrays") timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) print("-"*50) print("where on small arrays") timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) print("-"*50) print("where on large arrays") timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100)
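# A hypothetical extension hook (not part of the original benchmark): any
# one-argument ufunc with a numpy.ma counterpart can be timed with the same
# machinery, e.g. by calling this helper from the __main__ block above.
def compare_extra_ufuncs():
    for uf in (numpy.exp, numpy.tanh, numpy.absolute):
        compare_functions_1v(uf)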
4,941
35.880597
89
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/__init__.py
""" ============= Masked Arrays ============= Arrays sometimes contain invalid or missing data. When doing operations on such arrays, we wish to suppress invalid values, which is the purpose masked arrays fulfill (an example of typical use is given below). For example, examine the following array: >>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) When we try to calculate the mean of the data, the result is undetermined: >>> np.mean(x) nan The mean is calculated using roughly ``np.sum(x)/len(x)``, but since any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter masked arrays: >>> m = np.ma.masked_array(x, np.isnan(x)) >>> m masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], mask = [False False False True False False False True], fill_value=1e+20) Here, we construct a masked array that suppress all ``NaN`` values. We may now proceed to calculate the mean of the other values: >>> np.mean(m) 2.6666666666666665 .. [1] Not-a-Number, a floating point value that is the result of an invalid operation. .. moduleauthor:: Pierre Gerard-Marchant .. moduleauthor:: Jarrod Millman """ from __future__ import division, absolute_import, print_function from . import core from .core import * from . import extras from .extras import * __all__ = ['core', 'extras'] __all__ += core.__all__ __all__ += extras.__all__ from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench
1,476
24.912281
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/timer_comparison.py
from __future__ import division, absolute_import, print_function import timeit from functools import reduce import numpy as np from numpy import float_ import numpy.core.fromnumeric as fromnumeric from numpy.testing import build_err_msg # Fixme: this does not look right. np.seterr(all='ignore') pi = np.pi class ModuleTester(object): def __init__(self, module): self.module = module self.allequal = module.allequal self.arange = module.arange self.array = module.array self.concatenate = module.concatenate self.count = module.count self.equal = module.equal self.filled = module.filled self.getmask = module.getmask self.getmaskarray = module.getmaskarray self.id = id self.inner = module.inner self.make_mask = module.make_mask self.masked = module.masked self.masked_array = module.masked_array self.masked_values = module.masked_values self.mask_or = module.mask_or self.nomask = module.nomask self.ones = module.ones self.outer = module.outer self.repeat = module.repeat self.resize = module.resize self.sort = module.sort self.take = module.take self.transpose = module.transpose self.zeros = module.zeros self.MaskType = module.MaskType try: self.umath = module.umath except AttributeError: self.umath = module.core.umath self.testnames = [] def assert_array_compare(self, comparison, x, y, err_msg='', header='', fill_value=True): """ Assert that a comparison of two masked arrays is satisfied elementwise. """ xf = self.filled(x) yf = self.filled(y) m = self.mask_or(self.getmask(x), self.getmask(y)) x = self.filled(self.masked_array(xf, mask=m), fill_value) y = self.filled(self.masked_array(yf, mask=m), fill_value) if (x.dtype.char != "O"): x = x.astype(float_) if isinstance(x, np.ndarray) and x.size > 1: x[np.isnan(x)] = 0 elif np.isnan(x): x = 0 if (y.dtype.char != "O"): y = y.astype(float_) if isinstance(y, np.ndarray) and y.size > 1: y[np.isnan(y)] = 0 elif np.isnan(y): y = 0 try: cond = (x.shape == () or y.shape == ()) or x.shape == y.shape if not cond: msg = build_err_msg([x, y], err_msg + '\n(shapes %s, %s mismatch)' % (x.shape, y.shape), header=header, names=('x', 'y')) assert cond, msg val = comparison(x, y) if m is not self.nomask and fill_value: val = self.masked_array(val, mask=m) if isinstance(val, bool): cond = val reduced = [0] else: reduced = val.ravel() cond = reduced.all() reduced = reduced.tolist() if not cond: match = 100-100.0*reduced.count(1)/len(reduced) msg = build_err_msg([x, y], err_msg + '\n(mismatch %s%%)' % (match,), header=header, names=('x', 'y')) assert cond, msg except ValueError: msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) raise ValueError(msg) def assert_array_equal(self, x, y, err_msg=''): """ Checks the elementwise equality of two masked arrays. 
""" self.assert_array_compare(self.equal, x, y, err_msg=err_msg, header='Arrays are not equal') def test_0(self): """ Tests creation """ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] xm = self.masked_array(x, mask=m) xm[0] def test_1(self): """ Tests creation """ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = self.masked_array(x, mask=m1) ym = self.masked_array(y, mask=m2) xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) assert((xm-ym).filled(0).any()) s = x.shape assert(xm.size == reduce(lambda x, y:x*y, s)) assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) for s in [(4, 3), (6, 2)]: x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) def test_2(self): """ Tests conversions and indexing. """ x1 = np.array([1, 2, 4, 3]) x2 = self.array(x1, mask=[1, 0, 0, 0]) x3 = self.array(x1, mask=[0, 1, 0, 1]) x4 = self.array(x1) # test conversion to strings, no errors str(x2) repr(x2) # tests of indexing assert type(x2[1]) is type(x1[1]) assert x1[1] == x2[1] x1[2] = 9 x2[2] = 9 self.assert_array_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 x2[1] = self.masked x2[1:3] = self.masked x2[:] = x1 x2[1] = self.masked x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) x1 = np.arange(5)*1.0 x2 = self.masked_values(x1, 3.0) x1 = self.array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) # check that no error occurs. x1[1] x2[1] assert x1[1:1].shape == (0,) # Tests copy-size n = [0, 0, 1, 0, 0] m = self.make_mask(n) m2 = self.make_mask(m) assert(m is m2) m3 = self.make_mask(m, copy=1) assert(m is not m3) def test_3(self): """ Tests resize/repeat """ x4 = self.arange(4) x4[2] = self.masked y4 = self.resize(x4, (8,)) assert self.allequal(self.concatenate([x4, x4]), y4) assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = self.repeat(x4, 2, axis=0) assert self.allequal(y5, y6) y7 = x4.repeat((2, 2, 2, 2), axis=0) assert self.allequal(y5, y7) y8 = x4.repeat(2, 0) assert self.allequal(y5, y8) def test_4(self): """ Test of take, transpose, inner, outer products. 
""" x = self.arange(24) y = np.arange(24) x[5:6] = self.masked x = x.reshape(2, 3, 4) y = y.reshape(2, 3, 4) assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), self.inner(x, y)) assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), self.outer(x, y)) y = self.array(['abc', 1, 'def', 2, 3], object) y[2] = self.masked t = self.take(y, [0, 3, 4]) assert t[0] == 'abc' assert t[1] == 2 assert t[2] == 3 def test_5(self): """ Tests inplace w/ scalar """ x = self.arange(10) y = self.arange(10) xm = self.arange(10) xm[2] = self.masked x += 1 assert self.allequal(x, y+1) xm += 1 assert self.allequal(xm, y+1) x = self.arange(10) xm = self.arange(10) xm[2] = self.masked x -= 1 assert self.allequal(x, y-1) xm -= 1 assert self.allequal(xm, y-1) x = self.arange(10)*1.0 xm = self.arange(10)*1.0 xm[2] = self.masked x *= 2.0 assert self.allequal(x, y*2) xm *= 2.0 assert self.allequal(xm, y*2) x = self.arange(10)*2 xm = self.arange(10)*2 xm[2] = self.masked x /= 2 assert self.allequal(x, y) xm /= 2 assert self.allequal(xm, y) x = self.arange(10)*1.0 xm = self.arange(10)*1.0 xm[2] = self.masked x /= 2.0 assert self.allequal(x, y/2.0) xm /= self.arange(10) self.assert_array_equal(xm, self.ones((10,))) x = self.arange(10).astype(float_) xm = self.arange(10) xm[2] = self.masked x += 1. assert self.allequal(x, y + 1.) def test_6(self): """ Tests inplace w/ array """ x = self.arange(10, dtype=float_) y = self.arange(10) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x += a xm += a assert self.allequal(x, y+a) assert self.allequal(xm, y+a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x -= a xm -= a assert self.allequal(x, y-a) assert self.allequal(xm, y-a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x *= a xm *= a assert self.allequal(x, y*a) assert self.allequal(xm, y*a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x /= a xm /= a def test_7(self): "Tests ufunc" d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', # 'sin', 'cos', 'tan', # 'arcsin', 'arccos', 'arctan', # 'sinh', 'cosh', 'tanh', # 'arcsinh', # 'arccosh', # 'arctanh', # 'absolute', 'fabs', 'negative', # # 'nonzero', 'around', # 'floor', 'ceil', # # 'sometrue', 'alltrue', # 'logical_not', # 'add', 'subtract', 'multiply', # 'divide', 'true_divide', 'floor_divide', # 'remainder', 'fmod', 'hypot', 'arctan2', # 'equal', 'not_equal', 'less_equal', 'greater_equal', # 'less', 'greater', # 'logical_and', 'logical_or', 'logical_xor', ]: try: uf = getattr(self.umath, f) except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(self.module, f) args = d[:uf.nin] ur = uf(*args) mr = mf(*args) self.assert_array_equal(ur.filled(0), mr.filled(0), f) self.assert_array_equal(ur._mask, mr._mask) def 
test_99(self): # test average ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) self.assert_array_equal(2.0, self.average(ott, axis=0)) self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) self.assert_array_equal(2.0, result) assert(wts == 4.0) ott[:] = self.masked assert(self.average(ott, axis=0) is self.masked) ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) ott = ott.reshape(2, 2) ott[:, 1] = self.masked self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) assert(self.average(ott, axis=1)[0] is self.masked) self.assert_array_equal([2., 0.], self.average(ott, axis=0)) result, wts = self.average(ott, axis=0, returned=1) self.assert_array_equal(wts, [1., 0.]) w1 = [0, 1, 1, 1, 1, 0] w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = self.arange(6) self.assert_array_equal(self.average(x, axis=0), 2.5) self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) y = self.array([self.arange(6), 2.0*self.arange(6)]) self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) m1 = self.zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = self.ones(6) m5 = [0, 1, 1, 1, 1, 1] self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) z = self.masked_array(y, m3) self.assert_array_equal(self.average(z, None), 20./6.) self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) def test_A(self): x = self.arange(24) x[5:6] = self.masked x = x.reshape(2, 3, 4) if __name__ == '__main__': setup_base = ("from __main__ import ModuleTester \n" "import numpy\n" "tester = ModuleTester(module)\n") setup_cur = "import numpy.ma.core as module\n" + setup_base (nrepeat, nloop) = (10, 10) if 1: for i in range(1, 8): func = 'tester.test_%i()' % i cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) cur = np.sort(cur) print("#%i" % i + 50*'.') print(eval("ModuleTester.test_%i.__doc__" % i)) print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
15586
34.344671
114
py
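The file above drives numpy.ma benchmarking through timeit.Timer: a setup string builds the arrays once, the statement is run in a loop, and only the fastest repeats are reported. A minimal standalone sketch of that pattern; the timed statement ("xm + 1"), the array size, and the repeat counts here are illustrative assumptions, not values taken from the suite.

import timeit

import numpy as np

# The setup string builds a masked array once; the timed statement then
# runs `number` times per repeat, mirroring Timer(...).repeat(...) above.
setup = (
    "import numpy as np\n"
    "xm = np.ma.arange(10000, dtype=float)\n"
    "xm[::7] = np.ma.masked\n"
)
timer = timeit.Timer("xm + 1", setup)
best = np.sort(timer.repeat(repeat=5, number=100))
# Report the two fastest timings, like the "%.3f - %.3f" line above.
print("masked add : %.3f - %.3f" % (best[0], best[1]))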
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_old_ma.py
from __future__ import division, absolute_import, print_function from functools import reduce import numpy as np import numpy.core.umath as umath import numpy.core.fromnumeric as fromnumeric from numpy.testing import ( run_module_suite, assert_, assert_raises, assert_equal, ) from numpy.ma.testutils import assert_array_equal from numpy.ma import ( MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue, arange, arccos, arcsin, arctan, arctan2, array, average, choose, concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled, getmask, greater, greater_equal, inner, isMaskedArray, less, less_equal, log, log10, make_mask, masked, masked_array, masked_equal, masked_greater, masked_greater_equal, masked_inside, masked_less, masked_less_equal, masked_not_equal, masked_outside, masked_print_option, masked_values, masked_where, maximum, minimum, multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel, repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, ) pi = np.pi def eq(v, w, msg=''): result = allclose(v, w) if not result: print("Not eq:%s\n%s\n----%s" % (msg, str(v), str(w))) return result class TestMa(object): def setup(self): x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = array(x, mask=m1) ym = array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) s = x.shape xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_(eq(xm, xf)) assert_(eq(filled(xm, 1.e20), xf)) assert_(eq(x, xm)) def test_testBasic2d(self): # Test of basic array creation and properties in 2 dimensions. for s in [(4, 3), (6, 2)]: (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_(eq(xm, xf)) assert_(eq(filled(xm, 1.e20), xf)) assert_(eq(x, xm)) self.setup() def test_testArithmetic(self): # Test of basic arithmetic. 
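        # (For reference: a binary operation masks any position that is
        #  masked in either operand, so xm + ym agrees with x + y at every
        #  unmasked position; each eq() check below relies on this.)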
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_(eq(a2d * a2d, a2d * a2dm)) assert_(eq(a2d + a2d, a2d + a2dm)) assert_(eq(a2d - a2d, a2d - a2dm)) for s in [(12,), (4, 3), (2, 6)]: x = x.reshape(s) y = y.reshape(s) xm = xm.reshape(s) ym = ym.reshape(s) xf = xf.reshape(s) assert_(eq(-x, -xm)) assert_(eq(x + y, xm + ym)) assert_(eq(x - y, xm - ym)) assert_(eq(x * y, xm * ym)) with np.errstate(divide='ignore', invalid='ignore'): assert_(eq(x / y, xm / ym)) assert_(eq(a10 + y, a10 + ym)) assert_(eq(a10 - y, a10 - ym)) assert_(eq(a10 * y, a10 * ym)) with np.errstate(divide='ignore', invalid='ignore'): assert_(eq(a10 / y, a10 / ym)) assert_(eq(x + a10, xm + a10)) assert_(eq(x - a10, xm - a10)) assert_(eq(x * a10, xm * a10)) assert_(eq(x / a10, xm / a10)) assert_(eq(x ** 2, xm ** 2)) assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5)) assert_(eq(x ** y, xm ** ym)) assert_(eq(np.add(x, y), add(xm, ym))) assert_(eq(np.subtract(x, y), subtract(xm, ym))) assert_(eq(np.multiply(x, y), multiply(xm, ym))) with np.errstate(divide='ignore', invalid='ignore'): assert_(eq(np.divide(x, y), divide(xm, ym))) def test_testMixedArithmetic(self): na = np.array([1]) ma = array([1]) assert_(isinstance(na + ma, MaskedArray)) assert_(isinstance(ma + na, MaskedArray)) def test_testUfuncs1(self): # Test various functions such as sin, cos. (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d assert_(eq(np.cos(x), cos(xm))) assert_(eq(np.cosh(x), cosh(xm))) assert_(eq(np.sin(x), sin(xm))) assert_(eq(np.sinh(x), sinh(xm))) assert_(eq(np.tan(x), tan(xm))) assert_(eq(np.tanh(x), tanh(xm))) with np.errstate(divide='ignore', invalid='ignore'): assert_(eq(np.sqrt(abs(x)), sqrt(xm))) assert_(eq(np.log(abs(x)), log(xm))) assert_(eq(np.log10(abs(x)), log10(xm))) assert_(eq(np.exp(x), exp(xm))) assert_(eq(np.arcsin(z), arcsin(zm))) assert_(eq(np.arccos(z), arccos(zm))) assert_(eq(np.arctan(z), arctan(zm))) assert_(eq(np.arctan2(x, y), arctan2(xm, ym))) assert_(eq(np.absolute(x), absolute(xm))) assert_(eq(np.equal(x, y), equal(xm, ym))) assert_(eq(np.not_equal(x, y), not_equal(xm, ym))) assert_(eq(np.less(x, y), less(xm, ym))) assert_(eq(np.greater(x, y), greater(xm, ym))) assert_(eq(np.less_equal(x, y), less_equal(xm, ym))) assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym))) assert_(eq(np.conjugate(x), conjugate(xm))) assert_(eq(np.concatenate((x, y)), concatenate((xm, ym)))) assert_(eq(np.concatenate((x, y)), concatenate((x, y)))) assert_(eq(np.concatenate((x, y)), concatenate((xm, y)))) assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x)))) def test_xtestCount(self): # Test count ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) assert_(count(ott).dtype.type is np.intp) assert_equal(3, count(ott)) assert_equal(1, count(1)) assert_(eq(0, array(1, mask=[1]))) ott = ott.reshape((2, 2)) assert_(count(ott).dtype.type is np.intp) assert_(isinstance(count(ott, 0), np.ndarray)) assert_(count(ott).dtype.type is np.intp) assert_(eq(3, count(ott))) assert_(getmask(count(ott, 0)) is nomask) assert_(eq([1, 2], count(ott, 0))) def test_testMinMax(self): # Test minimum and maximum. (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d xr = np.ravel(x) # max doesn't work if shaped xmr = ravel(xm) # true because of careful selection of data assert_(eq(max(xr), maximum.reduce(xmr))) assert_(eq(min(xr), minimum.reduce(xmr))) def test_testAddSumProd(self): # Test add, sum, product. 
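        # (For reference: reductions go through fill values, so sum(xm)
        #  matches np.sum(filled(xm, 0)) and product(xm) matches
        #  np.product(filled(xm, 1)); that is exactly what is asserted below.)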
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d assert_(eq(np.add.reduce(x), add.reduce(x))) assert_(eq(np.add.accumulate(x), add.accumulate(x))) assert_(eq(4, sum(array(4), axis=0))) assert_(eq(4, sum(array(4), axis=0))) assert_(eq(np.sum(x, axis=0), sum(x, axis=0))) assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))) assert_(eq(np.sum(x, 0), sum(x, 0))) assert_(eq(np.product(x, axis=0), product(x, axis=0))) assert_(eq(np.product(x, 0), product(x, 0))) assert_(eq(np.product(filled(xm, 1), axis=0), product(xm, axis=0))) if len(s) > 1: assert_(eq(np.concatenate((x, y), 1), concatenate((xm, ym), 1))) assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) assert_(eq(np.sum(x, 1), sum(x, 1))) assert_(eq(np.product(x, 1), product(x, 1))) def test_testCI(self): # Test of conversions and indexing x1 = np.array([1, 2, 4, 3]) x2 = array(x1, mask=[1, 0, 0, 0]) x3 = array(x1, mask=[0, 1, 0, 1]) x4 = array(x1) # test conversion to strings str(x2) # raises? repr(x2) # raises? assert_(eq(np.sort(x1), sort(x2, fill_value=0))) # tests of indexing assert_(type(x2[1]) is type(x1[1])) assert_(x1[1] == x2[1]) assert_(x2[0] is masked) assert_(eq(x1[2], x2[2])) assert_(eq(x1[2:5], x2[2:5])) assert_(eq(x1[:], x2[:])) assert_(eq(x1[1:], x3[1:])) x1[2] = 9 x2[2] = 9 assert_(eq(x1, x2)) x1[1:3] = 99 x2[1:3] = 99 assert_(eq(x1, x2)) x2[1] = masked assert_(eq(x1, x2)) x2[1:3] = masked assert_(eq(x1, x2)) x2[:] = x1 x2[1] = masked assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) assert_(allequal(x4, array([1, 2, 3, 4]))) x1 = np.arange(5) * 1.0 x2 = masked_values(x1, 3.0) assert_(eq(x1, x2)) assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) assert_(eq(3.0, x2.fill_value)) x1 = array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] assert_equal(type(s2), str) assert_equal(type(s1), str) assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) def test_testCopySize(self): # Tests of some subtle points of copying and sizing. 
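        # (For reference: make_mask(m) returns m itself unless copy=1 is
        #  passed, and array(..., mask=m, copy=0) keeps a reference to the
        #  given mask object; the identity checks below depend on this.)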
n = [0, 0, 1, 0, 0] m = make_mask(n) m2 = make_mask(m) assert_(m is m2) m3 = make_mask(m, copy=1) assert_(m is not m3) x1 = np.arange(5) y1 = array(x1, mask=m) assert_(y1._data is not x1) assert_(allequal(x1, y1._data)) assert_(y1.mask is m) y1a = array(y1, copy=0) assert_(y1a.mask is y1.mask) y2 = array(x1, mask=m3, copy=0) assert_(y2.mask is m3) assert_(y2[2] is masked) y2[2] = 9 assert_(y2[2] is not masked) assert_(y2.mask is m3) assert_(allequal(y2.mask, 0)) y2a = array(x1, mask=m, copy=1) assert_(y2a.mask is not m) assert_(y2a[2] is masked) y2a[2] = 9 assert_(y2a[2] is not masked) assert_(y2a.mask is not m) assert_(allequal(y2a.mask, 0)) y3 = array(x1 * 1.0, mask=m) assert_(filled(y3).dtype is (x1 * 1.0).dtype) x4 = arange(4) x4[2] = masked y4 = resize(x4, (8,)) assert_(eq(concatenate([x4, x4]), y4)) assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) y5 = repeat(x4, (2, 2, 2, 2), axis=0) assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) y6 = repeat(x4, 2, axis=0) assert_(eq(y5, y6)) def test_testPut(self): # Test of put d = arange(5) n = [0, 0, 0, 1, 1] m = make_mask(n) m2 = m.copy() x = array(d, mask=m) assert_(x[3] is masked) assert_(x[4] is masked) x[[1, 4]] = [10, 40] assert_(x.mask is m) assert_(x[3] is masked) assert_(x[4] is not masked) assert_(eq(x, [0, 10, 2, -1, 40])) x = array(d, mask=m2, copy=True) x.put([0, 1, 2], [-1, 100, 200]) assert_(x.mask is not m2) assert_(x[3] is masked) assert_(x[4] is masked) assert_(eq(x, [-1, 100, 200, 0, 0])) def test_testPut2(self): # Test of put d = arange(5) x = array(d, mask=[0, 0, 0, 0, 0]) z = array([10, 40], mask=[1, 0]) assert_(x[2] is not masked) assert_(x[3] is not masked) x[2:4] = z assert_(x[2] is masked) assert_(x[3] is not masked) assert_(eq(x, [0, 1, 10, 40, 4])) d = arange(5) x = array(d, mask=[0, 0, 0, 0, 0]) y = x[2:4] z = array([10, 40], mask=[1, 0]) assert_(x[2] is not masked) assert_(x[3] is not masked) y[:] = z assert_(y[0] is masked) assert_(y[1] is not masked) assert_(eq(y, [10, 40])) assert_(x[2] is masked) assert_(x[3] is not masked) assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] i = np.nonzero(m)[0] put(ym, i, zm) assert_(all(take(ym, i, axis=0) == zm)) def test_testOddFeatures(self): # Test of other odd features x = arange(20) x = x.reshape(4, 5) x.flat[5] = 12 assert_(x[1, 0] == 12) z = x + 10j * x assert_(eq(z.real, x)) assert_(eq(z.imag, 10 * x)) assert_(eq((z * conjugate(z)).real, 101 * x * x)) z.imag[...] 
= 0.0 x = arange(10) x[3] = masked assert_(str(x[3]) == str(masked)) c = x >= 8 assert_(count(where(c, masked, masked)) == 0) assert_(shape(where(c, masked, masked)) == c.shape) z = where(c, x, masked) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is masked) assert_(z[7] is masked) assert_(z[8] is not masked) assert_(z[9] is not masked) assert_(eq(x, z)) z = where(c, masked, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) z = masked_where(c, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) assert_(eq(x, z)) x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_(eq(z, [1., 2., 0., -4., -5])) c[0] = masked z = where(c, x, -x) assert_(eq(z, [1., 2., 0., -4., -5])) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) assert_(eq(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2))) assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) assert_(eq(masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0])) assert_(eq(masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1])) assert_(eq(masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0])) assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1])) assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5])) atest = ones((10, 10, 10), dtype=np.float32) btest = zeros(atest.shape, MaskType) ctest = masked_where(btest, atest) assert_(eq(atest, ctest)) z = choose(c, (-x, x)) assert_(eq(z, [1., 2., 0., -4., -5])) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) x = arange(6) x[5] = masked y = arange(6) * 10 y[2] = masked c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) cm = c.filled(1) z = where(c, x, y) zm = where(cm, x, y) assert_(eq(z, zm)) assert_(getmask(zm) is nomask) assert_(eq(zm, [0, 1, 2, 30, 40, 50])) z = where(c, masked, 1) assert_(eq(z, [99, 99, 99, 1, 1, 1])) z = where(c, 1, masked) assert_(eq(z, [99, 1, 1, 99, 99, 99])) def test_testMinMax2(self): # Test of minimum, maximum. 
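        # (For reference: elementwise minimum/maximum mask a position when
        #  either input is masked there and otherwise agree with
        #  where(less(x, y), x, y); the reduce methods skip masked entries.)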
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) x = arange(5) y = arange(5) - 2 x[3] = masked y[0] = masked assert_(eq(minimum(x, y), where(less(x, y), x, y))) assert_(eq(maximum(x, y), where(greater(x, y), x, y))) assert_(minimum.reduce(x) == 0) assert_(maximum.reduce(x) == 4) def test_testTakeTransposeInnerOuter(self): # Test of take, transpose, inner, outer products x = arange(24) y = np.arange(24) x[5:6] = masked x = x.reshape(2, 3, 4) y = y.reshape(2, 3, 4) assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) assert_(eq(np.inner(filled(x, 0), filled(y, 0)), inner(x, y))) assert_(eq(np.outer(filled(x, 0), filled(y, 0)), outer(x, y))) y = array(['abc', 1, 'def', 2, 3], object) y[2] = masked t = take(y, [0, 3, 4]) assert_(t[0] == 'abc') assert_(t[1] == 2) assert_(t[2] == 3) def test_testInplace(self): # Test of inplace operations and rich comparisons y = arange(10) x = arange(10) xm = arange(10) xm[2] = masked x += 1 assert_(eq(x, y + 1)) xm += 1 assert_(eq(x, y + 1)) x = arange(10) xm = arange(10) xm[2] = masked x -= 1 assert_(eq(x, y - 1)) xm -= 1 assert_(eq(xm, y - 1)) x = arange(10) * 1.0 xm = arange(10) * 1.0 xm[2] = masked x *= 2.0 assert_(eq(x, y * 2)) xm *= 2.0 assert_(eq(xm, y * 2)) x = arange(10) * 2 xm = arange(10) xm[2] = masked x //= 2 assert_(eq(x, y)) xm //= 2 assert_(eq(x, y)) x = arange(10) * 1.0 xm = arange(10) * 1.0 xm[2] = masked x /= 2.0 assert_(eq(x, y / 2.0)) xm /= arange(10) assert_(eq(xm, ones((10,)))) x = arange(10).astype(np.float32) xm = arange(10) xm[2] = masked x += 1. assert_(eq(x, y + 1.)) def test_testPickle(self): # Test of pickling import pickle x = arange(12) x[4:10:2] = masked x = x.reshape(4, 3) s = pickle.dumps(x) y = pickle.loads(s) assert_(eq(x, y)) def test_testMasked(self): # Test of masked element xx = arange(6) xx[1] = masked assert_(str(masked) == '--') assert_(xx[1] is masked) assert_equal(filled(xx[1], 0), 0) def test_testAverage1(self): # Test of average. ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) assert_(eq(2.0, average(ott, axis=0))) assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) assert_(eq(2.0, result)) assert_(wts == 4.0) ott[:] = masked assert_(average(ott, axis=0) is masked) ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) ott = ott.reshape(2, 2) ott[:, 1] = masked assert_(eq(average(ott, axis=0), [2.0, 0.0])) assert_(average(ott, axis=1)[0] is masked) assert_(eq([2., 0.], average(ott, axis=0))) result, wts = average(ott, axis=0, returned=1) assert_(eq(wts, [1., 0.])) def test_testAverage2(self): # More tests of average. w1 = [0, 1, 1, 1, 1, 0] w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = arange(6) assert_(allclose(average(x, axis=0), 2.5)) assert_(allclose(average(x, axis=0, weights=w1), 2.5)) y = array([arange(6), 2.0 * arange(6)]) assert_(allclose(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)) assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) assert_(allclose(average(y, axis=1), [average(x, axis=0), average(x, axis=0)*2.0])) assert_(allclose(average(y, None, weights=w2), 20. 
/ 6.)) assert_(allclose(average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])) assert_(allclose(average(y, axis=1), [average(x, axis=0), average(x, axis=0)*2.0])) m1 = zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = ones(6) m5 = [0, 1, 1, 1, 1, 1] assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) assert_(average(masked_array(x, m4), axis=0) is masked) assert_equal(average(masked_array(x, m5), axis=0), 0.0) assert_equal(count(average(masked_array(x, m4), axis=0)), 0) z = masked_array(y, m3) assert_(allclose(average(z, None), 20. / 6.)) assert_(allclose(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])) assert_(allclose(average(z, axis=1), [2.5, 5.0])) assert_(allclose(average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])) a = arange(6) b = arange(6) * 3 r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) assert_equal(shape(r1), shape(w1)) assert_equal(r1.shape, w1.shape) r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), returned=1) assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) assert_(shape(w2) == shape(r2)) a2d = array([[1, 2], [0, 4]], float) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) a2da = average(a2d, axis=0) assert_(eq(a2da, [0.5, 3.0])) a2dma = average(a2dm, axis=0) assert_(eq(a2dma, [1.0, 3.0])) a2dma = average(a2dm, axis=None) assert_(eq(a2dma, 7. / 3.)) a2dma = average(a2dm, axis=1) assert_(eq(a2dma, [1.5, 4.0])) def test_testToPython(self): assert_equal(1, int(array(1))) assert_equal(1.0, float(array(1))) assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) assert_raises(ValueError, bool, array([0, 1])) assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) def test_testScalarArithmetic(self): xm = array(0, mask=1) #TODO FIXME: Find out what the following raises a warning in r8247 with np.errstate(divide='ignore'): assert_((1 / array(0)).mask) assert_((1 + xm).mask) assert_((-xm).mask) assert_((-xm).mask) assert_(maximum(xm, xm).mask) assert_(minimum(xm, xm).mask) assert_(xm.filled().dtype is xm._data.dtype) x = array(0, mask=0) assert_(x.filled() == x._data) assert_equal(str(xm), str(masked_print_option)) def test_testArrayMethods(self): a = array([1, 3, 2]) assert_(eq(a.any(), a._data.any())) assert_(eq(a.all(), a._data.all())) assert_(eq(a.argmax(), a._data.argmax())) assert_(eq(a.argmin(), a._data.argmin())) assert_(eq(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))) assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) assert_(eq(a.conj(), a._data.conj())) assert_(eq(a.conjugate(), a._data.conjugate())) m = array([[1, 2], [3, 4]]) assert_(eq(m.diagonal(), m._data.diagonal())) assert_(eq(a.sum(), a._data.sum())) assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) assert_(eq(m.transpose(), m._data.transpose())) def test_testArrayAttributes(self): a = array([1, 3, 2]) assert_equal(a.ndim, 1) def test_testAPI(self): assert_(not [m for m in dir(np.ndarray) if m not in dir(MaskedArray) and not m.startswith('_')]) def test_testSingleElementSubscript(self): a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a[0].shape, ()) assert_equal(b[0].shape, ()) assert_equal(b[1].shape, ()) class TestUfuncs(object): def setup(self): self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, 
pi / 2] * 2, mask=[1, 0] + [0] * 6),) def test_testUfuncRegression(self): f_invalid_ignore = [ 'sqrt', 'arctanh', 'arcsin', 'arccos', 'arccosh', 'arctanh', 'log', 'log10', 'divide', 'true_divide', 'floor_divide', 'remainder', 'fmod'] for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh', 'absolute', 'fabs', 'negative', 'floor', 'ceil', 'logical_not', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'floor_divide', 'remainder', 'fmod', 'hypot', 'arctan2', 'equal', 'not_equal', 'less_equal', 'greater_equal', 'less', 'greater', 'logical_and', 'logical_or', 'logical_xor']: try: uf = getattr(umath, f) except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(np.ma, f) args = self.d[:uf.nin] with np.errstate(): if f in f_invalid_ignore: np.seterr(invalid='ignore') if f in ['arctanh', 'log', 'log10']: np.seterr(divide='ignore') ur = uf(*args) mr = mf(*args) assert_(eq(ur.filled(0), mr.filled(0), f)) assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): a = self.d[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) assert_equal(product(a, axis=0), 0) def test_minmax(self): a = arange(1, 13).reshape(3, 4) amask = masked_where(a < 5, a) assert_equal(amask.max(), a.max()) assert_equal(amask.min(), 5) assert_((amask.max(0) == a.max(0)).all()) assert_((amask.min(0) == [5, 6, 7, 8]).all()) assert_(amask.max(1)[0].mask) assert_(amask.min(1)[0].mask) def test_nonzero(self): for t in "?bhilqpBHILQPfdgFDGO": x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) assert_(eq(nonzero(x), [0])) class TestArrayMethods(object): def setup(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) XX = x.reshape(3, 2, 2, 3) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mx = array(data=x, mask=m) mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) self.d = (x, X, XX, m, mx, mX, mXX) def test_trace(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_(eq(mX.trace(), X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))) def test_clip(self): (x, X, XX, m, mx, mX, mXX,) = self.d clipped = mx.clip(2, 8) assert_(eq(clipped.mask, mx.mask)) assert_(eq(clipped._data, x.clip(2, 8))) assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): (x, X, XX, m, mx, mX, mXX,) = self.d (n, m) = X.shape assert_equal(mx.ptp(), mx.compressed().ptp()) rows = np.zeros(n, np.float_) cols = np.zeros(m, np.float_) for k in range(m): cols[k] = mX[:, k].compressed().ptp() for k in range(n): rows[k] = mX[k].compressed().ptp() assert_(eq(mX.ptp(0), cols)) assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXswapped = mX.swapaxes(0, 1) assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXcp = mX.cumprod(0) assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXcp = mX.cumsum(0) 
assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): (x, X, XX, m, mx, mX, mXX,) = self.d assert_(eq(mX.var(axis=None), mX.compressed().var())) assert_(eq(mX.std(axis=None), mX.compressed().std())) assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) assert_(eq(mX.var().shape, X.var().shape)) (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) for k in range(6): assert_(eq(mXvar1[k], mX[k].compressed().var())) assert_(eq(mXvar0[k], mX[:, k].compressed().var())) assert_(eq(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())) def eqmask(m1, m2): if m1 is nomask: return m2 is nomask if m2 is nomask: return m1 is nomask return (m1 == m2).all() if __name__ == "__main__": run_module_suite()
32135
36.367442
79
py
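Both suites above assert the same semantics for np.ma.average with masks, weights, and returned=1. A minimal sketch of that behavior, reusing the tests' own data (ott with its first element masked):

import numpy as np

# A masked entry contributes to neither the weighted sum nor the total
# weight, so only the unmasked elements determine the result.
ott = np.ma.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
avg, wts = np.ma.average(ott, weights=[1., 1., 2., 1.], returned=True)
print(avg)  # 2.0  (= (1*1 + 2*2 + 3*1) / 4)
print(wts)  # 4.0  (total weight of the unmasked elements)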
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_core.py
# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu """ from __future__ import division, absolute_import, print_function __author__ = "Pierre GF Gerard-Marchant" import sys import warnings import pickle import operator import itertools import sys import textwrap from functools import reduce import numpy as np import numpy.ma.core import numpy.core.fromnumeric as fromnumeric import numpy.core.umath as umath from numpy.testing import ( run_module_suite, assert_raises, assert_warns, suppress_warnings, dec ) from numpy import ndarray from numpy.compat import asbytes, asbytes_nested from numpy.ma.testutils import ( assert_, assert_array_equal, assert_equal, assert_almost_equal, assert_equal_records, fail_if_equal, assert_not_equal, assert_mask_equal ) from numpy.ma.core import ( MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, arcsin, arctan, argsort, array, asarray, choose, concatenate, conjugate, cos, cosh, count, default_fill_value, diag, divide, empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, flatten_structured_array, fromflex, getmask, getmaskarray, greater, greater_equal, identity, inner, isMaskedArray, less, less_equal, log, log10, make_mask, make_mask_descr, mask_or, masked, masked_array, masked_equal, masked_greater, masked_greater_equal, masked_inside, masked_less, masked_less_equal, masked_not_equal, masked_outside, masked_print_option, masked_values, masked_where, max, maximum, maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, ) from numpy.testing import dec pi = np.pi suppress_copy_mask_on_assignment = suppress_warnings() suppress_copy_mask_on_assignment.filter( numpy.ma.core.MaskedArrayFutureWarning, "setting an item on a masked array which has a shared mask will not copy") class TestMaskedArray(object): # Base test class for MaskedArrays. def setup(self): # Base data definition. x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) def test_basicattributes(self): # Tests some basic array attributes. a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a.ndim, 1) assert_equal(b.ndim, 1) assert_equal(a.size, 3) assert_equal(b.size, 3) assert_equal(a.shape, (3,)) assert_equal(b.shape, (3,)) def test_basic0d(self): # Checks masking a scalar x = masked_array(0) assert_equal(str(x), '0') x = masked_array(0, mask=True) assert_equal(str(x), str(masked_print_option)) x = masked_array(0, mask=False) assert_equal(str(x), '0') x = array(0, mask=1) assert_(x.filled().dtype is x._data.dtype) def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. 
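        # (For reference: count() returns the number of unmasked elements,
        #  i.e. the array size minus the sum of the mask; the size and
        #  count assertions below verify exactly this invariant.)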
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_((xm - ym).filled(0).any()) fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) s = x.shape assert_equal(np.shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_array_equal(xm, xf) assert_array_equal(filled(xm, 1.e20), xf) assert_array_equal(x, xm) def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d for s in [(4, 3), (6, 2)]: x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_equal(xm, xf) assert_equal(filled(xm, 1.e20), xf) assert_equal(x, xm) def test_concatenate_basic(self): # Tests concatenations. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) assert_equal(np.concatenate((x, y)), concatenate((xm, y))) assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) def test_concatenate_alongaxis(self): # Tests concatenations. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # Concatenation along an axis s = (3, 4) x.shape = y.shape = xm.shape = ym.shape = s assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) assert_equal(np.concatenate((x, y), 1), xmym) assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) x = zeros(2) y = array(ones(2), mask=[False, True]) z = concatenate((x, y)) assert_array_equal(z, [0, 0, 1, 1]) assert_array_equal(z.mask, [False, False, False, True]) z = concatenate((y, x)) assert_array_equal(z, [1, 1, 0, 0]) assert_array_equal(z.mask, [False, True, False, False]) def test_concatenate_flexible(self): # Tests the concatenation on flexible arrays. data = masked_array(list(zip(np.random.rand(10), np.arange(10))), dtype=[('a', float), ('b', int)]) test = concatenate([data[:5], data[5:]]) assert_equal_records(test, data) def test_creation_ndmin(self): # Check the use of ndmin x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) assert_equal(x.shape, (1, 3)) assert_equal(x._data, [[1, 2, 3]]) assert_equal(x._mask, [[1, 0, 0]]) def test_creation_ndmin_from_maskedarray(self): # Make sure we're not losing the original mask w/ ndmin x = array([1, 2, 3]) x[-1] = masked xx = array(x, ndmin=2, dtype=float) assert_equal(x.shape, x._mask.shape) assert_equal(xx.shape, xx._mask.shape) def test_creation_maskcreation(self): # Tests how masks are initialized at the creation of Maskedarrays. 
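        # (For reference: a scalar mask=True or mask=False is broadcast to
        #  a full boolean mask of the data's shape, and copy=False lets the
        #  new array share mask memory with its source; both behaviors are
        #  checked below.)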
data = arange(24, dtype=float) data[[3, 6, 15]] = masked dma_1 = MaskedArray(data) assert_equal(dma_1.mask, data.mask) dma_2 = MaskedArray(dma_1) assert_equal(dma_2.mask, dma_1.mask) dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) fail_if_equal(dma_3.mask, dma_1.mask) x = array([1, 2, 3], mask=True) assert_equal(x._mask, [True, True, True]) x = array([1, 2, 3], mask=False) assert_equal(x._mask, [False, False, False]) y = array([1, 2, 3], mask=x._mask, copy=False) assert_(np.may_share_memory(x.mask, y.mask)) y = array([1, 2, 3], mask=x._mask, copy=True) assert_(not np.may_share_memory(x.mask, y.mask)) def test_creation_with_list_of_maskedarrays(self): # Tests creating a masked array from a list of masked arrays. x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) x.mask = nomask data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) assert_(data.mask is nomask) def test_creation_from_ndarray_with_padding(self): x = np.array([('A', 0)], dtype={'names':['f0','f1'], 'formats':['S4','i8'], 'offsets':[0,8]}) data = array(x) # used to fail due to 'V' padding field in x.dtype.descr def test_asarray(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) assert_equal(xmm._data, xm._data) assert_equal(xmm._mask, xm._mask) assert_equal(xmm.fill_value, xm.fill_value) assert_equal(xmm._hardmask, xm._hardmask) def test_asarray_default_order(self): # See Issue #6646 m = np.eye(3).T assert_(not m.flags.c_contiguous) new_m = asarray(m) assert_(new_m.flags.c_contiguous) def test_asarray_enforce_order(self): # See Issue #6646 m = np.eye(3).T assert_(not m.flags.c_contiguous) new_m = asarray(m, order='C') assert_(new_m.flags.c_contiguous) def test_fix_invalid(self): # Checks fix_invalid. with np.errstate(invalid='ignore'): data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) data_fixed = fix_invalid(data) assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) assert_equal(data_fixed._mask, [1., 0., 1.]) def test_maskedelement(self): # Test of masked element x = arange(6) x[1] = masked assert_(str(masked) == '--') assert_(x[1] is masked) assert_equal(filled(x[1], 0), 0) def test_set_element_as_object(self): # Tests setting elements with object a = empty(1, dtype=object) x = (1, 2, 3, 4, 5) a[0] = x assert_equal(a[0], x) assert_(a[0] is x) import datetime dt = datetime.datetime.now() a[0] = dt assert_(a[0] is dt) def test_indexing(self): # Tests conversions and indexing x1 = np.array([1, 2, 4, 3]) x2 = array(x1, mask=[1, 0, 0, 0]) x3 = array(x1, mask=[0, 1, 0, 1]) x4 = array(x1) # test conversion to strings str(x2) # raises? repr(x2) # raises? 
assert_equal(np.sort(x1), sort(x2, endwith=False)) # tests of indexing assert_(type(x2[1]) is type(x1[1])) assert_(x1[1] == x2[1]) assert_(x2[0] is masked) assert_equal(x1[2], x2[2]) assert_equal(x1[2:5], x2[2:5]) assert_equal(x1[:], x2[:]) assert_equal(x1[1:], x3[1:]) x1[2] = 9 x2[2] = 9 assert_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 assert_equal(x1, x2) x2[1] = masked assert_equal(x1, x2) x2[1:3] = masked assert_equal(x1, x2) x2[:] = x1 x2[1] = masked assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) assert_(allequal(x4, array([1, 2, 3, 4]))) x1 = np.arange(5) * 1.0 x2 = masked_values(x1, 3.0) assert_equal(x1, x2) assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) assert_equal(3.0, x2.fill_value) x1 = array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] assert_equal(type(s2), str) assert_equal(type(s1), str) assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) def test_matrix_indexing(self): # Tests conversions and indexing x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]]) x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]]) x4 = array(x1) # test conversion to strings str(x2) # raises? repr(x2) # raises? # tests of indexing assert_(type(x2[1, 0]) is type(x1[1, 0])) assert_(x1[1, 0] == x2[1, 0]) assert_(x2[1, 1] is masked) assert_equal(x1[0, 2], x2[0, 2]) assert_equal(x1[0, 1:], x2[0, 1:]) assert_equal(x1[:, 2], x2[:, 2]) assert_equal(x1[:], x2[:]) assert_equal(x1[1:], x3[1:]) x1[0, 2] = 9 x2[0, 2] = 9 assert_equal(x1, x2) x1[0, 1:] = 99 x2[0, 1:] = 99 assert_equal(x1, x2) x2[0, 1] = masked assert_equal(x1, x2) x2[0, 1:] = masked assert_equal(x1, x2) x2[0, :] = x1[0, :] x2[0, 1] = masked assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) assert_(allequal(getmask(x3)[1], array([1, 1, 0]))) assert_(allequal(getmask(x3[1]), array([1, 1, 0]))) x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) assert_(allequal(getmask(x4[1]), array([1, 1, 0]))) assert_(allequal(x4[1], array([1, 2, 3]))) x1 = np.matrix(np.arange(5) * 1.0) x2 = masked_values(x1, 3.0) assert_equal(x1, x2) assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) assert_equal(3.0, x2.fill_value) @suppress_copy_mask_on_assignment def test_copy(self): # Tests of some subtle points of copying and sizing. 
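        # (For reference: array(x1, mask=m) wraps views of both the data
        #  and the mask, so comparing __array_interface__ dictionaries
        #  below reveals whether memory was shared or copied.)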
n = [0, 0, 1, 0, 0] m = make_mask(n) m2 = make_mask(m) assert_(m is m2) m3 = make_mask(m, copy=1) assert_(m is not m3) x1 = np.arange(5) y1 = array(x1, mask=m) assert_equal(y1._data.__array_interface__, x1.__array_interface__) assert_(allequal(x1, y1.data)) assert_equal(y1._mask.__array_interface__, m.__array_interface__) y1a = array(y1) assert_(y1a._data.__array_interface__ == y1._data.__array_interface__) assert_(y1a.mask is y1.mask) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) assert_(y2._mask.__array_interface__ == m3.__array_interface__) assert_(y2[2] is masked) y2[2] = 9 assert_(y2[2] is not masked) assert_(y2._mask.__array_interface__ == m3.__array_interface__) assert_(allequal(y2.mask, 0)) y2a = array(x1, mask=m, copy=1) assert_(y2a._data.__array_interface__ != x1.__array_interface__) #assert_( y2a.mask is not m) assert_(y2a._mask.__array_interface__ != m.__array_interface__) assert_(y2a[2] is masked) y2a[2] = 9 assert_(y2a[2] is not masked) #assert_( y2a.mask is not m) assert_(y2a._mask.__array_interface__ != m.__array_interface__) assert_(allequal(y2a.mask, 0)) y3 = array(x1 * 1.0, mask=m) assert_(filled(y3).dtype is (x1 * 1.0).dtype) x4 = arange(4) x4[2] = masked y4 = resize(x4, (8,)) assert_equal(concatenate([x4, x4]), y4) assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) y5 = repeat(x4, (2, 2, 2, 2), axis=0) assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = repeat(x4, 2, axis=0) assert_equal(y5, y6) y7 = x4.repeat((2, 2, 2, 2), axis=0) assert_equal(y5, y7) y8 = x4.repeat(2, 0) assert_equal(y5, y8) y9 = x4.copy() assert_equal(y9._data, x4._data) assert_equal(y9._mask, x4._mask) x = masked_array([1, 2, 3], mask=[0, 1, 0]) # Copy is False by default y = masked_array(x) assert_equal(y._data.ctypes.data, x._data.ctypes.data) assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) y = masked_array(x, copy=True) assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) def test_copy_0d(self): # gh-9430 x = np.ma.array(43, mask=True) xc = x.copy() assert_equal(xc.mask, True) def test_copy_on_python_builtins(self): # Tests copy works on python builtins (issue#8019) assert_(isMaskedArray(np.ma.copy([1,2,3]))) assert_(isMaskedArray(np.ma.copy((1,2,3)))) def test_copy_immutable(self): # Tests that the copy method is immutable, GitHub issue #5247 a = np.ma.array([1, 2, 3]) b = np.ma.array([4, 5, 6]) a_copy_method = a.copy b.copy assert_equal(a_copy_method(), [1, 2, 3]) def test_deepcopy(self): from copy import deepcopy a = array([0, 1, 2], mask=[False, True, False]) copied = deepcopy(a) assert_equal(copied.mask, a.mask) assert_not_equal(id(a._mask), id(copied._mask)) copied[1] = 1 assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) copied = deepcopy(a) assert_equal(copied.mask, a.mask) copied.mask[1] = False assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) def test_str_repr(self): a = array([0, 1, 2], mask=[False, True, False]) assert_equal(str(a), '[0 -- 2]') assert_equal( repr(a), textwrap.dedent('''\ masked_array(data=[0, --, 2], mask=[False, True, False], fill_value=999999)''') ) # arrays with a continuation a = np.ma.arange(2000) a[1:50] = np.ma.masked assert_equal( repr(a), textwrap.dedent('''\ masked_array(data=[0, --, --, ..., 1997, 1998, 1999], mask=[False, True, True, ..., False, False, False], fill_value=999999)''') ) # line-wrapped 1d arrays are correctly aligned a = np.ma.arange(20) assert_equal( repr(a), textwrap.dedent('''\ 
masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], mask=False, fill_value=999999)''') ) # 2d arrays cause wrapping a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) a[1,1] = np.ma.masked assert_equal( repr(a), textwrap.dedent('''\ masked_array( data=[[1, 2, 3], [4, --, 6]], mask=[[False, False, False], [False, True, False]], fill_value=999999, dtype=int8)''') ) # but not it they're a row vector assert_equal( repr(a[:1]), textwrap.dedent('''\ masked_array(data=[[1, 2, 3]], mask=[[False, False, False]], fill_value=999999, dtype=int8)''') ) # dtype=int is implied, so not shown assert_equal( repr(a.astype(int)), textwrap.dedent('''\ masked_array( data=[[1, 2, 3], [4, --, 6]], mask=[[False, False, False], [False, True, False]], fill_value=999999)''') ) def test_str_repr_legacy(self): oldopts = np.get_printoptions() np.set_printoptions(legacy='1.13') try: a = array([0, 1, 2], mask=[False, True, False]) assert_equal(str(a), '[0 -- 2]') assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' ' mask = [False True False],\n' ' fill_value = 999999)\n') a = np.ma.arange(2000) a[1:50] = np.ma.masked assert_equal( repr(a), 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' ' mask = [False True True ..., False False False],\n' ' fill_value = 999999)\n' ) finally: np.set_printoptions(**oldopts) def test_0d_unicode(self): u = u'caf\xe9' utype = type(u) arr_nomask = np.ma.array(u) arr_masked = np.ma.array(u, mask=True) assert_equal(utype(arr_nomask), u) assert_equal(utype(arr_masked), u'--') def test_pickling(self): # Tests pickling for dtype in (int, float, str, object): a = arange(10).astype(dtype) a.fill_value = 999 masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked True, # Fully masked False) # Fully unmasked for mask in masks: a.mask = mask a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled._data, a._data) if dtype in (object, int): assert_equal(a_pickled.fill_value, 999) else: assert_equal(a_pickled.fill_value, dtype(999)) assert_array_equal(a_pickled.mask, mask) def test_pickling_subbaseclass(self): # Test pickling w/ a subclass of ndarray a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) assert_(isinstance(a_pickled._data, np.matrix)) def test_pickling_maskedconstant(self): # Test pickling MaskedConstant mc = np.ma.masked mc_pickled = pickle.loads(mc.dumps()) assert_equal(mc_pickled._baseclass, mc._baseclass) assert_equal(mc_pickled._mask, mc._mask) assert_equal(mc_pickled._data, mc._data) def test_pickling_wstructured(self): # Tests pickling w/ structured array a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], dtype=[('a', int), ('b', float)]) a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays a = arange(10) a.shape = (-1, 2) b = a.T test = pickle.loads(pickle.dumps(b)) assert_equal(test, b) def test_single_element_subscript(self): # Tests single element subscripts of Maskedarrays. a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a[0].shape, ()) assert_equal(b[0].shape, ()) assert_equal(b[1].shape, ()) def test_topython(self): # Tests some communication issues with Python. 
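        # (For reference: float() and int() accept only size-1 masked
        #  arrays; a fully masked element converts to nan with a warning,
        #  and int() of a masked element raises MAError, as checked below.)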
assert_equal(1, int(array(1))) assert_equal(1.0, float(array(1))) assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) with suppress_warnings() as sup: sup.filter(UserWarning, 'Warning: converting a masked element') assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) assert_raises(TypeError, lambda: float(a)) assert_equal(float(a[-1]), 3.) assert_(np.isnan(float(a[0]))) assert_raises(TypeError, int, a) assert_equal(int(a[-1]), 3) assert_raises(MAError, lambda:int(a[0])) def test_oddfeatures_1(self): # Test of other odd features x = arange(20) x = x.reshape(4, 5) x.flat[5] = 12 assert_(x[1, 0] == 12) z = x + 10j * x assert_equal(z.real, x) assert_equal(z.imag, 10 * x) assert_equal((z * conjugate(z)).real, 101 * x * x) z.imag[...] = 0.0 x = arange(10) x[3] = masked assert_(str(x[3]) == str(masked)) c = x >= 8 assert_(count(where(c, masked, masked)) == 0) assert_(shape(where(c, masked, masked)) == c.shape) z = masked_where(c, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) assert_equal(x, z) def test_oddfeatures_2(self): # Tests some more features. x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): # Tests some generic features atest = array([10], mask=True) btest = array([20]) idx = atest.mask atest[idx] = btest[idx] assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') assert_equal(a.filled('x')[0], 'x') def test_filled_with_flexible_dtype(self): # Test filled w/ flexible dtype flexi = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) flexi[0] = masked assert_equal(flexi.filled(), np.array([(default_fill_value(0), default_fill_value('0'), default_fill_value(0.),)], dtype=flexi.dtype)) flexi[0] = masked assert_equal(flexi.filled(1), np.array([(1, '1', 1.)], dtype=flexi.dtype)) def test_filled_with_mvoid(self): # Test filled w/ mvoid ndtype = [('a', int), ('b', float)] a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) # Filled using default test = a.filled() assert_equal(tuple(test), (1, default_fill_value(1.))) # Explicit fill_value test = a.filled((-1, -1)) assert_equal(tuple(test), (1, -1)) # Using predefined filling values a.fill_value = (-999, -999) assert_equal(tuple(a.filled()), (1, -999)) def test_filled_with_nested_dtype(self): # Test filled w/ nested dtype ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] a = array([(1, (1, 1)), (2, (2, 2))], mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) test = a.filled(0) control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) assert_equal(test, control) test = a['B'].filled(0) control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) assert_equal(test, control) # test if mask gets set correctly (see #6760) Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), ('f1', 'i1', (2, 2))], (2, 2))])) assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), ('f1', '?', (2, 2))], (2, 2))])) def test_filled_with_f_order(self): # Test filled w/ F-contiguous array a = array(np.array([(0, 1, 2), 
(4, 5, 6)], order='F'), mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), order='F') # this is currently ignored assert_(a.flags['F_CONTIGUOUS']) assert_(a.filled(0).flags['F_CONTIGUOUS']) def test_optinfo_propagation(self): # Checks that _optinfo dictionary isn't back-propagated x = array([1, 2, 3, ], dtype=float) x._optinfo['info'] = '???' y = x.copy() assert_equal(y._optinfo['info'], '???') y._optinfo['info'] = '!!!' assert_equal(x._optinfo['info'], '???') def test_optinfo_forward_propagation(self): a = array([1,2,2,4]) a._optinfo["key"] = "value" assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) def test_fancy_printoptions(self): # Test printing a masked array w/ fancy dtype. fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) test = array([(1, (2, 3.0)), (4, (5, 6.0))], mask=[(1, (0, 1)), (0, (1, 0))], dtype=fancydtype) control = "[(--, (2, --)) (4, (--, 6.0))]" assert_equal(str(test), control) # Test 0-d array with multi-dimensional dtype t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], 0.0), mask = (False, [[True, False, True], [False, False, True]], False), dtype = "int, (2,3)float, float") control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" assert_equal(str(t_2d0), control) def test_flatten_structured_array(self): # Test flatten_structured_array on arrays # On ndarray ndtype = [('a', int), ('b', float)] a = np.array([(1, 1), (2, 2)], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[1., 1.], [2., 2.]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) # On masked_array a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1.], [2., 2.]], mask=[[0, 1], [1, 0]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # On masked array with nested structure ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] a = array([(1, (1, 1.1)), (2, (2, 2.2))], mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1., 1.1], [2., 2., 2.2]], mask=[[0, 1, 0], [1, 0, 1]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # Keeping the initial shape ndtype = [('a', int), ('b', float)] a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) def test_void0d(self): # Test creating a mvoid object ndtype = [('a', int), ('b', int)] a = np.array([(1, 2,)], dtype=ndtype)[0] f = mvoid(a) 
assert_(isinstance(f, mvoid)) a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] assert_(isinstance(a, mvoid)) a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) f = mvoid(a._data[0], a._mask[0]) assert_(isinstance(f, mvoid)) def test_mvoid_getitem(self): # Test mvoid.__getitem__ ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask f = a[0] assert_(isinstance(f, mvoid)) assert_equal((f[0], f['a']), (1, 1)) assert_equal(f['b'], 2) # w/ mask f = a[1] assert_(isinstance(f, mvoid)) assert_(f[0] is masked) assert_(f['a'] is masked) assert_equal(f[1], 4) # exotic dtype A = masked_array(data=[([0,1],)], mask=[([True, False],)], dtype=[("A", ">i2", (2,))]) assert_equal(A[0]["A"], A["A"][0]) assert_equal(A[0]["A"], masked_array(data=[0, 1], mask=[True, False], dtype=">i2")) def test_mvoid_iter(self): # Test iteration on __getitem__ ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask assert_equal(list(a[0]), [1, 2]) # w/ mask assert_equal(list(a[1]), [masked, 4]) def test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) assert_equal(str(mx[0]), "(1, 1)") mx['b'][0] = masked ini_display = masked_print_option._display masked_print_option.set_display("-X-") try: assert_equal(str(mx[0]), "(1, -X-)") assert_equal(repr(mx[0]), "(1, -X-)") finally: masked_print_option.set_display(ini_display) # also check if there are object datatypes (see gh-7493) mx = array([(1,), (2,)], dtype=[('a', 'O')]) assert_equal(str(mx[0]), "(1,)") def test_mvoid_multidim_print(self): # regression test for gh-6019 t_ma = masked_array(data = [([1, 2, 3],)], mask = [([False, True, False],)], fill_value = ([999999, 999999, 999999],), dtype = [('a', '<i4', (3,))]) assert_(str(t_ma[0]) == "([1, --, 3],)") assert_(repr(t_ma[0]) == "([1, --, 3],)") # additional tests with structured arrays t_2d = masked_array(data = [([[1, 2], [3,4]],)], mask = [([[False, True], [True, False]],)], dtype = [('a', '<i4', (2,2))]) assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)") assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)") t_0d = masked_array(data = [(1,2)], mask = [(True,False)], dtype = [('a', '<i4'), ('b', '<i4')]) assert_(str(t_0d[0]) == "(--, 2)") assert_(repr(t_0d[0]) == "(--, 2)") t_2d = masked_array(data = [([[1, 2], [3,4]], 1)], mask = [([[False, True], [True, False]], False)], dtype = [('a', '<i4', (2,2)), ('b', float)]) assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") t_ne = masked_array(data=[(1, (1, 1))], mask=[(True, (True, False))], dtype = [('a', '<i4'), ('b', 'i4,i4')]) assert_(str(t_ne[0]) == "(--, (--, 1))") assert_(repr(t_ne[0]) == "(--, (--, 1))") def test_object_with_array(self): mx1 = masked_array([1.], mask=[True]) mx2 = masked_array([1., 2.]) mx = masked_array([mx1, mx2], mask=[False, True]) assert_(mx[0] is mx1) assert_(mx[1] is not mx2) assert_(np.all(mx[1].data == mx2.data)) assert_(np.all(mx[1].mask)) # check that we return a view. mx[1].data[0] = 0. assert_(mx2[0] == 0.) class TestMaskedArrayArithmetic(object): # Base test class for MaskedArrays. def setup(self): # Base data definition. x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. 
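        # Note: in the masks below a 1 marks a masked entry, so xm hides
        # x[0] and x[6] while ym hides y[2], y[5], y[6] and y[11].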
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) self.err_status = np.geterr() np.seterr(divide='ignore', invalid='ignore') def teardown(self): np.seterr(**self.err_status) def test_basic_arithmetic(self): # Test of basic arithmetic. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) assert_equal(a2d + a2d, a2d + a2dm) assert_equal(a2d - a2d, a2d - a2dm) for s in [(12,), (4, 3), (2, 6)]: x = x.reshape(s) y = y.reshape(s) xm = xm.reshape(s) ym = ym.reshape(s) xf = xf.reshape(s) assert_equal(-x, -xm) assert_equal(x + y, xm + ym) assert_equal(x - y, xm - ym) assert_equal(x * y, xm * ym) assert_equal(x / y, xm / ym) assert_equal(a10 + y, a10 + ym) assert_equal(a10 - y, a10 - ym) assert_equal(a10 * y, a10 * ym) assert_equal(a10 / y, a10 / ym) assert_equal(x + a10, xm + a10) assert_equal(x - a10, xm - a10) assert_equal(x * a10, xm * a10) assert_equal(x / a10, xm / a10) assert_equal(x ** 2, xm ** 2) assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5) assert_equal(x ** y, xm ** ym) assert_equal(np.add(x, y), add(xm, ym)) assert_equal(np.subtract(x, y), subtract(xm, ym)) assert_equal(np.multiply(x, y), multiply(xm, ym)) assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): x = arange(6, dtype=float) x.shape = (2, 3) y = arange(3, dtype=float) z = x / y assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) z = x / y[None,:] assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) y = arange(2, dtype=float) z = x / y[:, None] assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]]) assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) def test_mixed_arithmetic(self): # Tests mixed arithmetics. na = np.array([1]) ma = array([1]) assert_(isinstance(na + ma, MaskedArray)) assert_(isinstance(ma + na, MaskedArray)) def test_limits_arithmetic(self): tiny = np.finfo(float).tiny a = array([tiny, 1. / tiny, 0.]) assert_equal(getmaskarray(a / 2), [0, 0, 0]) assert_equal(getmaskarray(2 / a), [1, 0, 1]) def test_masked_singleton_arithmetic(self): # Tests some scalar arithmetics on MaskedArrays. 
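        # Note: the masked singleton absorbs arithmetic much like NaN does
        # for floats: any elementwise operation involving it is masked.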
        # Masked singleton should remain masked no matter what
        xm = array(0, mask=1)
        assert_((1 / array(0)).mask)
        assert_((1 + xm).mask)
        assert_((-xm).mask)
        assert_(maximum(xm, xm).mask)
        assert_(minimum(xm, xm).mask)

    def test_masked_singleton_equality(self):
        # Tests (in)equality on masked singleton
        a = array([1, 2, 3], mask=[1, 1, 0])
        assert_((a[0] == 0) is masked)
        assert_((a[0] != 0) is masked)
        assert_equal((a[-1] == 0), False)
        assert_equal((a[-1] != 0), True)

    def test_arithmetic_with_masked_singleton(self):
        # Checks that there's no collapsing to masked
        x = masked_array([1, 2])
        y = x * masked
        assert_equal(y.shape, x.shape)
        assert_equal(y._mask, [True, True])
        y = x[0] * masked
        assert_(y is masked)
        y = x + masked
        assert_equal(y.shape, x.shape)
        assert_equal(y._mask, [True, True])

    def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
        # Check that we're not losing the shape of a singleton
        x = masked_array([1, ])
        y = x + masked
        assert_equal(y.shape, x.shape)
        assert_equal(y.mask, [True, ])

    def test_scalar_arithmetic(self):
        x = array(0, mask=0)
        assert_equal(x.filled().ctypes.data, x.ctypes.data)
        # Make sure we don't lose the shape in some circumstances
        xm = array((0, 0)) / 0.
        assert_equal(xm.shape, (2,))
        assert_equal(xm.mask, [1, 1])

    def test_basic_ufuncs(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.cos(x), cos(xm))
        assert_equal(np.cosh(x), cosh(xm))
        assert_equal(np.sin(x), sin(xm))
        assert_equal(np.sinh(x), sinh(xm))
        assert_equal(np.tan(x), tan(xm))
        assert_equal(np.tanh(x), tanh(xm))
        assert_equal(np.sqrt(abs(x)), sqrt(xm))
        assert_equal(np.log(abs(x)), log(xm))
        assert_equal(np.log10(abs(x)), log10(xm))
        assert_equal(np.exp(x), exp(xm))
        assert_equal(np.arcsin(z), arcsin(zm))
        assert_equal(np.arccos(z), arccos(zm))
        assert_equal(np.arctan(z), arctan(zm))
        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
        assert_equal(np.absolute(x), absolute(xm))
        assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
        assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
        assert_equal(np.equal(x, y), equal(xm, ym))
        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
        assert_equal(np.less(x, y), less(xm, ym))
        assert_equal(np.greater(x, y), greater(xm, ym))
        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
        assert_equal(np.conjugate(x), conjugate(xm))

    def test_count_func(self):
        # Tests count
        assert_equal(1, count(1))
        # A fully masked scalar array has no unmasked entries, so its
        # count is 0.
        assert_equal(0, count(array(1, mask=[1])))
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        res = count(ott)
        assert_(res.dtype.type is np.intp)
        assert_equal(3, res)
        ott = ott.reshape((2, 2))
        res = count(ott)
        assert_(res.dtype.type is np.intp)
        assert_equal(3, res)
        res = count(ott, 0)
        assert_(isinstance(res, ndarray))
        assert_equal([1, 2], res)
        assert_(getmask(res) is nomask)
        ott = array([0., 1., 2., 3.])
        res = count(ott, 0)
        assert_(isinstance(res, ndarray))
        assert_(res.dtype.type is np.intp)
        assert_raises(np.AxisError, ott.count, axis=1)

    def test_count_on_python_builtins(self):
        # Tests count works on python builtins (issue#8019)
        assert_equal(3, count([1, 2, 3]))
        assert_equal(2, count((1, 2)))

    def test_minmax_func(self):
        # Tests minimum and maximum.
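        # Note: minimum/maximum are the masked-aware counterparts of
        # np.minimum/np.maximum; their .reduce skips masked entries.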
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # max doesn't work if shaped xr = np.ravel(x) xmr = ravel(xm) # following are true because of careful selection of data assert_equal(max(xr), maximum.reduce(xmr)) assert_equal(min(xr), minimum.reduce(xmr)) assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) x = arange(5) y = arange(5) - 2 x[3] = masked y[0] = masked assert_equal(minimum(x, y), where(less(x, y), x, y)) assert_equal(maximum(x, y), where(greater(x, y), x, y)) assert_(minimum.reduce(x) == 0) assert_(maximum.reduce(x) == 4) x = arange(4).reshape(2, 2) x[-1, -1] = masked assert_equal(maximum.reduce(x, axis=None), 2) def test_minimummaximum_func(self): a = np.ones((2, 2)) aminimum = minimum(a, a) assert_(isinstance(aminimum, MaskedArray)) assert_equal(aminimum, np.minimum(a, a)) aminimum = minimum.outer(a, a) assert_(isinstance(aminimum, MaskedArray)) assert_equal(aminimum, np.minimum.outer(a, a)) amaximum = maximum(a, a) assert_(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum(a, a)) amaximum = maximum.outer(a, a) assert_(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum.outer(a, a)) def test_minmax_reduce(self): # Test np.min/maximum.reduce on array w/ full False mask a = array([1, 2, 3], mask=[False, False, False]) b = np.maximum.reduce(a) assert_equal(b, 3) def test_minmax_funcs_with_output(self): # Tests the min/max functions with explicit outputs mask = np.random.rand(12).round() xm = array(np.random.uniform(0, 10, 12), mask=mask) xm.shape = (3, 4) for funcname in ('min', 'max'): # Initialize npfunc = getattr(np, funcname) mafunc = getattr(numpy.ma.core, funcname) # Use the np version nout = np.empty((4,), dtype=int) try: result = npfunc(xm, axis=0, out=nout) except MaskError: pass nout = np.empty((4,), dtype=float) result = npfunc(xm, axis=0, out=nout) assert_(result is nout) # Use the ma version nout.fill(-999) result = mafunc(xm, axis=0, out=nout) assert_(result is nout) def test_minmax_methods(self): # Additional tests on max/min (_, _, _, _, _, xm, _, _, _, _) = self.d xm.shape = (xm.size,) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) assert_(xm[0].max(0) is masked) assert_(xm[0].max(-1) is masked) assert_equal(xm.min(), -10.) assert_(xm[0].min() is masked) assert_(xm[0].min(0) is masked) assert_(xm[0].min(-1) is masked) assert_equal(xm.ptp(), 20.) assert_(xm[0].ptp() is masked) assert_(xm[0].ptp(0) is masked) assert_(xm[0].ptp(-1) is masked) x = array([1, 2, 3], mask=True) assert_(x.min() is masked) assert_(x.max() is masked) assert_(x.ptp() is masked) def test_addsumprod(self): # Tests add, sum, product. 
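        # Note: in these reductions masked entries act as the identity
        # element, 0 for sum and 1 for product, hence the filled() comparisons.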
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d assert_equal(np.add.reduce(x), add.reduce(x)) assert_equal(np.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4), axis=0)) assert_equal(4, sum(array(4), axis=0)) assert_equal(np.sum(x, axis=0), sum(x, axis=0)) assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)) assert_equal(np.sum(x, 0), sum(x, 0)) assert_equal(np.product(x, axis=0), product(x, axis=0)) assert_equal(np.product(x, 0), product(x, 0)) assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0)) s = (3, 4) x.shape = y.shape = xm.shape = ym.shape = s if len(s) > 1: assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) assert_equal(np.sum(x, 1), sum(x, 1)) assert_equal(np.product(x, 1), product(x, 1)) def test_binops_d2D(self): # Test binary operations on 2D data a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) b = array([[2., 3.], [4., 5.], [6., 7.]]) test = a * b control = array([[2., 3.], [2., 2.], [3., 3.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b * a control = array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) a = array([[1.], [2.], [3.]]) b = array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [0, 0], [0, 1]]) test = a * b control = array([[2, 3], [8, 10], [18, 3]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b * a control = array([[2, 3], [8, 10], [18, 7]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_domained_binops_d2D(self): # Test domained binary operations on 2D data a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) b = array([[2., 3.], [4., 5.], [6., 7.]]) test = a / b control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b / a control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) a = array([[1.], [2.], [3.]]) b = array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [0, 0], [0, 1]]) test = a / b control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b / a control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_noshrinking(self): # Check that we don't shrink a mask when not wanted # Binary operations a = masked_array([1., 2., 3.], mask=[False, False, False], shrink=False) b = a + 1 assert_equal(b.mask, [0, 0, 0]) # In place binary operation a += 1 assert_equal(a.mask, [0, 0, 0]) # Domained binary operation b = a / 1. assert_equal(b.mask, [0, 0, 0]) # In place binary operation a /= 1. 
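        # Even after the in-place domained division the mask must remain a
        # full-length array of False instead of collapsing to nomask.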
        assert_equal(a.mask, [0, 0, 0])

    def test_ufunc_nomask(self):
        # Check that a ufunc on fully unmasked data yields a 0-d False mask,
        # i.e. we don't get array([False], dtype=bool)
        m = np.ma.array([1])
        assert_equal(np.true_divide(m, 5).mask.shape, ())

    def test_noshrink_on_creation(self):
        # Check that the mask is not shrunk on array creation when not wanted
        a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
        assert_equal(a.mask, [0, 0, 0])

    def test_mod(self):
        # Tests mod
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(mod(x, y), mod(xm, ym))
        test = mod(ym, xm)
        assert_equal(test, np.mod(ym, xm))
        assert_equal(test.mask, mask_or(xm.mask, ym.mask))
        test = mod(xm, ym)
        assert_equal(test, np.mod(xm, ym))
        assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))

    def test_TakeTransposeInnerOuter(self):
        # Test of take, transpose, inner, outer products
        x = arange(24)
        y = np.arange(24)
        x[5:6] = masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
        assert_equal(np.inner(filled(x, 0), filled(y, 0)), inner(x, y))
        assert_equal(np.outer(filled(x, 0), filled(y, 0)), outer(x, y))
        y = array(['abc', 1, 'def', 2, 3], object)
        y[2] = masked
        t = take(y, [0, 3, 4])
        assert_(t[0] == 'abc')
        assert_(t[1] == 2)
        assert_(t[2] == 3)

    def test_imag_real(self):
        # Check complex
        xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
        assert_equal(xx.imag, [10, 2])
        assert_equal(xx.imag.filled(), [1e+20, 2])
        assert_equal(xx.imag.dtype, xx._data.imag.dtype)
        assert_equal(xx.real, [1, 20])
        assert_equal(xx.real.filled(), [1e+20, 20])
        assert_equal(xx.real.dtype, xx._data.real.dtype)

    def test_methods_with_output(self):
        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
        xm[:, 0] = xm[0] = xm[-1, -1] = masked

        funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)

        for funcname in funclist:
            npfunc = getattr(np, funcname)
            xmmeth = getattr(xm, funcname)
            # A ndarray as explicit input
            output = np.empty(4, dtype=float)
            output.fill(-9999)
            result = npfunc(xm, axis=0, out=output)
            # ... the result should be the given output
            assert_(result is output)
            assert_equal(result, xmmeth(axis=0, out=output))

            output = empty(4, dtype=int)
            result = xmmeth(axis=0, out=output)
            assert_(result is output)
            assert_(output[0] is masked)

    def test_count_mean_with_matrix(self):
        m = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))

        assert_equal(m.count(axis=0).shape, (1, 2))
        assert_equal(m.count(axis=1).shape, (2, 1))

        # Make sure broadcasting inside mean and var works
        assert_equal(m.mean(axis=0), [[2., 3.]])
        assert_equal(m.mean(axis=1), [[1.5], [3.5]])

    def test_eq_on_structured(self):
        # Test the equality of structured arrays
        ndtype = [('A', int), ('B', int)]
        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
        test = (a == a)
        assert_equal(test.data, [True, True])
        assert_equal(test.mask, [False, False])
        test = (a == a[0])
        assert_equal(test.data, [True, False])
        assert_equal(test.mask, [False, False])
        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
        test = (a == b)
        assert_equal(test.data, [False, True])
        assert_equal(test.mask, [True, False])
        test = (a[0] == b)
        assert_equal(test.data, [False, False])
        assert_equal(test.mask, [True, False])
        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
        test = (a == b)
        assert_equal(test.data, [True, True])
        assert_equal(test.mask, [False, False])
        # complicated dtype, 2-dimensional array.
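        # Note: a single record is broadcast against the 2x2 array below;
        # only the fully masked record at [1, 1] gives a masked comparison.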
        ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
        a = array([[(1, (1, 1)), (2, (2, 2))],
                   [(3, (3, 3)), (4, (4, 4))]],
                  mask=[[(0, (1, 0)), (0, (0, 1))],
                        [(1, (0, 0)), (1, (1, 1))]],
                  dtype=ndtype)
        test = (a[0, 0] == a)
        assert_equal(test.data, [[True, False], [False, False]])
        assert_equal(test.mask, [[False, False], [False, True]])

    def test_ne_on_structured(self):
        # Test the inequality of structured arrays
        ndtype = [('A', int), ('B', int)]
        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
        test = (a != a)
        assert_equal(test.data, [False, False])
        assert_equal(test.mask, [False, False])
        test = (a != a[0])
        assert_equal(test.data, [False, True])
        assert_equal(test.mask, [False, False])
        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
        test = (a != b)
        assert_equal(test.data, [True, False])
        assert_equal(test.mask, [True, False])
        test = (a[0] != b)
        assert_equal(test.data, [True, True])
        assert_equal(test.mask, [True, False])
        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
        test = (a != b)
        assert_equal(test.data, [False, False])
        assert_equal(test.mask, [False, False])
        # complicated dtype, 2-dimensional array.
        ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
        a = array([[(1, (1, 1)), (2, (2, 2))],
                   [(3, (3, 3)), (4, (4, 4))]],
                  mask=[[(0, (1, 0)), (0, (0, 1))],
                        [(1, (0, 0)), (1, (1, 1))]],
                  dtype=ndtype)
        test = (a[0, 0] != a)
        assert_equal(test.data, [[False, True], [True, True]])
        assert_equal(test.mask, [[False, False], [False, True]])

    def test_eq_ne_structured_extra(self):
        # ensure simple examples are symmetric and make sense.
        # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
        dt = np.dtype('i4,i4')
        for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
                   mvoid((1, 2), mask=(0, 1), dtype=dt),
                   mvoid((1, 2), mask=(1, 0), dtype=dt),
                   mvoid((1, 2), mask=(1, 1), dtype=dt)):
            ma1 = m1.view(MaskedArray)
            r1 = ma1.view('2i4')
            for m2 in (np.array((1, 1), dtype=dt),
                       mvoid((1, 1), dtype=dt),
                       mvoid((1, 0), mask=(0, 1), dtype=dt),
                       mvoid((3, 2), mask=(0, 1), dtype=dt)):
                ma2 = m2.view(MaskedArray)
                r2 = ma2.view('2i4')
                eq_expected = (r1 == r2).all()
                assert_equal(m1 == m2, eq_expected)
                assert_equal(m2 == m1, eq_expected)
                assert_equal(ma1 == m2, eq_expected)
                assert_equal(m1 == ma2, eq_expected)
                assert_equal(ma1 == ma2, eq_expected)
                # Also check it is the same if we do it element by element.
                el_by_el = [m1[name] == m2[name] for name in dt.names]
                assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
                ne_expected = (r1 != r2).any()
                assert_equal(m1 != m2, ne_expected)
                assert_equal(m2 != m1, ne_expected)
                assert_equal(ma1 != m2, ne_expected)
                assert_equal(m1 != ma2, ne_expected)
                assert_equal(ma1 != ma2, ne_expected)
                el_by_el = [m1[name] != m2[name] for name in dt.names]
                assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)

    def test_eq_with_None(self):
        # Really, comparisons with None should not be done, but check them
        # anyway. Note that pep8 will flag these tests.
        # Deprecation is in place for arrays, and when it happens this
        # test will fail (and have to be changed accordingly).
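        # Note: a == None compares every element with the object None, and
        # the result keeps a's mask, as checked case by case below.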
# With partial mask with suppress_warnings() as sup: sup.filter(FutureWarning, "Comparison to `None`") a = array([None, 1], mask=[0, 1]) assert_equal(a == None, array([True, False], mask=[0, 1])) assert_equal(a.data == None, [True, False]) assert_equal(a != None, array([False, True], mask=[0, 1])) # With nomask a = array([None, 1], mask=False) assert_equal(a == None, [True, False]) assert_equal(a != None, [False, True]) # With complete mask a = array([None, 2], mask=True) assert_equal(a == None, array([False, True], mask=True)) assert_equal(a != None, array([True, False], mask=True)) # Fully masked, even comparison to None should return "masked" a = masked assert_equal(a == None, masked) def test_eq_with_scalar(self): a = array(1) assert_equal(a == 1, True) assert_equal(a == 0, False) assert_equal(a != 1, False) assert_equal(a != 0, True) b = array(1, mask=True) assert_equal(b == 0, masked) assert_equal(b == 1, masked) assert_equal(b != 0, masked) assert_equal(b != 1, masked) def test_eq_different_dimensions(self): m1 = array([1, 1], mask=[0, 1]) # test comparison with both masked and regular arrays. for m2 in (array([[0, 1], [1, 2]]), np.array([[0, 1], [1, 2]])): test = (m1 == m2) assert_equal(test.data, [[False, False], [True, False]]) assert_equal(test.mask, [[False, True], [False, True]]) def test_numpyarithmetics(self): # Check that the mask is not back-propagated when using numpy functions a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) control = masked_array([np.nan, np.nan, 0, np.log(2), -1], mask=[1, 1, 0, 0, 1]) test = log(a) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) test = np.log(a) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) class TestMaskedArrayAttributes(object): def test_keepmask(self): # Tests the keep mask flag x = masked_array([1, 2, 3], mask=[1, 0, 0]) mx = masked_array(x) assert_equal(mx.mask, x.mask) mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) assert_equal(mx.mask, [0, 1, 0]) mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) assert_equal(mx.mask, [1, 1, 0]) # We default to true mx = masked_array(x, mask=[0, 1, 0]) assert_equal(mx.mask, [1, 1, 0]) def test_hardmask(self): # Test hard_mask d = arange(5) n = [0, 0, 0, 1, 1] m = make_mask(n) xh = array(d, mask=m, hard_mask=True) # We need to copy, to avoid updating d in xh ! 
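        # (xh above shares d's buffer, since np.ma.array defaults to
        # copy=False.)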
xs = array(d, mask=m, hard_mask=False, copy=True) xh[[1, 4]] = [10, 40] xs[[1, 4]] = [10, 40] assert_equal(xh._data, [0, 10, 2, 3, 4]) assert_equal(xs._data, [0, 10, 2, 3, 40]) assert_equal(xs.mask, [0, 0, 0, 1, 0]) assert_(xh._hardmask) assert_(not xs._hardmask) xh[1:4] = [10, 20, 30] xs[1:4] = [10, 20, 30] assert_equal(xh._data, [0, 10, 20, 3, 4]) assert_equal(xs._data, [0, 10, 20, 30, 40]) assert_equal(xs.mask, nomask) xh[0] = masked xs[0] = masked assert_equal(xh.mask, [1, 0, 0, 1, 1]) assert_equal(xs.mask, [1, 0, 0, 0, 0]) xh[:] = 1 xs[:] = 1 assert_equal(xh._data, [0, 1, 1, 3, 4]) assert_equal(xs._data, [1, 1, 1, 1, 1]) assert_equal(xh.mask, [1, 0, 0, 1, 1]) assert_equal(xs.mask, nomask) # Switch to soft mask xh.soften_mask() xh[:] = arange(5) assert_equal(xh._data, [0, 1, 2, 3, 4]) assert_equal(xh.mask, nomask) # Switch back to hard mask xh.harden_mask() xh[xh < 3] = masked assert_equal(xh._data, [0, 1, 2, 3, 4]) assert_equal(xh._mask, [1, 1, 1, 0, 0]) xh[filled(xh > 1, False)] = 5 assert_equal(xh._data, [0, 1, 2, 5, 5]) assert_equal(xh._mask, [1, 1, 1, 0, 0]) xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) xh[0] = 0 assert_equal(xh._data, [[1, 0], [3, 4]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) xh[-1, -1] = 5 assert_equal(xh._data, [[1, 0], [3, 5]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) xh[filled(xh < 5, False)] = 2 assert_equal(xh._data, [[1, 2], [2, 5]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) def test_hardmask_again(self): # Another test of hardmask d = arange(5) n = [0, 0, 0, 1, 1] m = make_mask(n) xh = array(d, mask=m, hard_mask=True) xh[4:5] = 999 xh[0:1] = 999 assert_equal(xh._data, [999, 1, 2, 3, 4]) def test_hardmask_oncemore_yay(self): # OK, yet another test of hardmask # Make sure that harden_mask/soften_mask//unshare_mask returns self a = array([1, 2, 3], mask=[1, 0, 0]) b = a.harden_mask() assert_equal(a, b) b[0] = 0 assert_equal(a, b) assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) a = b.soften_mask() a[0] = 0 assert_equal(a, b) assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) def test_smallmask(self): # Checks the behaviour of _smallmask a = arange(10) a[1] = masked a[1] = 1 assert_equal(a._mask, nomask) a = arange(10) a._smallmask = False a[1] = masked a[1] = 1 assert_equal(a._mask, zeros(10)) def test_shrink_mask(self): # Tests .shrink_mask() a = array([1, 2, 3], mask=[0, 0, 0]) b = a.shrink_mask() assert_equal(a, b) assert_equal(a.mask, nomask) # Mask cannot be shrunk on structured types, so is a no-op a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) b = a.copy() a.shrink_mask() assert_equal(a.mask, b.mask) def test_flat(self): # Test that flat can return all types of items [#4585, #4615] # test simple access test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) assert_equal(test.flat[1], 2) assert_equal(test.flat[2], masked) assert_(np.all(test.flat[0:2] == test[0, 0:2])) # Test flat on masked_matrices test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) assert_equal(test, control) # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat testflat[:] = testflat[[2, 1, 0]] assert_equal(test, control) testflat[0] = 9 assert_equal(test[0, 0], 9) # test 2-D record array # ... 
on structured array w/ masked records x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], dtype=[('a', int), ('b', float), ('c', '|S8')]) x['a'][0, 1] = masked x['b'][1, 0] = masked x['c'][0, 2] = masked x[-1, -1] = masked xflat = x.flat assert_equal(xflat[0], x[0, 0]) assert_equal(xflat[1], x[0, 1]) assert_equal(xflat[2], x[0, 2]) assert_equal(xflat[:3], x[0]) assert_equal(xflat[3], x[1, 0]) assert_equal(xflat[4], x[1, 1]) assert_equal(xflat[5], x[1, 2]) assert_equal(xflat[3:], x[1]) assert_equal(xflat[-1], x[-1, -1]) i = 0 j = 0 for xf in xflat: assert_equal(xf, x[j, i]) i += 1 if i >= x.shape[-1]: i = 0 j += 1 # test that matrices keep the correct shape (#4615) a = masked_array(np.matrix(np.eye(2)), mask=0) b = a.flat b01 = b[:2] assert_equal(b01.data, array([[1., 0.]])) assert_equal(b01.mask, array([[False, False]])) def test_assign_dtype(self): # check that the mask's dtype is updated when dtype is changed a = np.zeros(4, dtype='f4,i4') m = np.ma.array(a) m.dtype = np.dtype('f4') repr(m) # raises? assert_equal(m.dtype, np.dtype('f4')) # check that dtype changes that change shape of mask too much # are not allowed def assign(): m = np.ma.array(a) m.dtype = np.dtype('f8') assert_raises(ValueError, assign) b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? assert_equal(b.dtype, np.dtype('f4')) # check that nomask is preserved a = np.zeros(4, dtype='f4') m = np.ma.array(a) m.dtype = np.dtype('f4,i4') assert_equal(m.dtype, np.dtype('f4,i4')) assert_equal(m._mask, np.ma.nomask) class TestFillingValues(object): def test_check_on_scalar(self): # Test _check_fill_value set to valid and invalid values _check_fill_value = np.ma.core._check_fill_value fval = _check_fill_value(0, int) assert_equal(fval, 0) fval = _check_fill_value(None, int) assert_equal(fval, default_fill_value(0)) fval = _check_fill_value(0, "|S3") assert_equal(fval, b"0") fval = _check_fill_value(None, "|S3") assert_equal(fval, default_fill_value(b"camelot!")) assert_raises(TypeError, _check_fill_value, 1e+20, int) assert_raises(TypeError, _check_fill_value, 'stuff', int) def test_check_on_fields(self): # Tests _check_fill_value with records _check_fill_value = np.ma.core._check_fill_value ndtype = [('a', int), ('b', float), ('c', "|S3")] # A check on a list should return a single record fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) # A check on None should output the defaults fval = _check_fill_value(None, ndtype) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [default_fill_value(0), default_fill_value(0.), asbytes(default_fill_value("0"))]) #.....Using a structured type as fill_value should work fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) fval = _check_fill_value(fill_val, ndtype) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....Using a flexible type w/ a different type shouldn't matter # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured # types by position fill_val = np.array((-999, -12345678.9, "???"), dtype=[("A", int), ("B", float), ("C", "|S3")]) fval = _check_fill_value(fill_val, ndtype) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....Using an object-array shouldn't matter either fill_val = np.ndarray(shape=(1,), dtype=object) fill_val[0] = (-999, -12345678.9, b"???") fval = _check_fill_value(fill_val, 
object) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) # NOTE: This test was never run properly as "fill_value" rather than # "fill_val" was assigned. Written properly, it fails. #fill_val = np.array((-999, -12345678.9, "???")) #fval = _check_fill_value(fill_val, ndtype) #assert_(isinstance(fval, ndarray)) #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....One-field-only flexible type should work as well ndtype = [("a", int)] fval = _check_fill_value(-999999999, ndtype) assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), (-999999999,)) def test_fillvalue_conversion(self): # Tests the behavior of fill_value during conversion # We had a tailored comment to make sure special attributes are # properly dealt with a = array([b'3', b'4', b'5']) a._optinfo.update({'comment':"updated!"}) b = array(a, dtype=int) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0)) b = array(a, dtype=float) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0.)) b = a.astype(int) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0)) assert_equal(b._optinfo['comment'], "updated!") b = a.astype([('a', '|S3')]) assert_equal(b['a']._data, a._data) assert_equal(b['a'].fill_value, a.fill_value) def test_default_fill_value(self): # check all calling conventions f1 = default_fill_value(1.) f2 = default_fill_value(np.array(1.)) f3 = default_fill_value(np.array(1.).dtype) assert_equal(f1, f2) assert_equal(f1, f3) def test_default_fill_value_structured(self): fields = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) f1 = default_fill_value(fields) f2 = default_fill_value(fields.dtype) expected = np.array((default_fill_value(0), default_fill_value('0'), default_fill_value(0.)), dtype=fields.dtype) assert_equal(f1, expected) assert_equal(f2, expected) def test_default_fill_value_void(self): dt = np.dtype([('v', 'V7')]) f = default_fill_value(dt) assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) def test_fillvalue(self): # Yet more fun with the fill_value data = masked_array([1, 2, 3], fill_value=-999) series = data[[0, 2, 1]] assert_equal(series._fill_value, data._fill_value) mtype = [('f', float), ('s', '|S3')] x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) x.fill_value = 999 assert_equal(x.fill_value.item(), [999., b'999']) assert_equal(x['f'].fill_value, 999) assert_equal(x['s'].fill_value, b'999') x.fill_value = (9, '???') assert_equal(x.fill_value.item(), (9, b'???')) assert_equal(x['f'].fill_value, 9) assert_equal(x['s'].fill_value, b'???') x = array([1, 2, 3.1]) x.fill_value = 999 assert_equal(np.asarray(x.fill_value).dtype, float) assert_equal(x.fill_value, 999.) 
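        # Note: internally the fill value is stored as a 0-d ndarray.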
assert_equal(x._fill_value, np.array(999.)) def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes _check_fill_value = np.ma.core._check_fill_value ndtype = [('i', int), ('s', '|S8'), ('f', float)] control = np.array((default_fill_value(0), default_fill_value('0'), default_fill_value(0.),), dtype=ndtype) assert_equal(_check_fill_value(None, ndtype), control) # The shape shouldn't matter ndtype = [('f0', float, (2, 2))] control = np.array((default_fill_value(0.),), dtype=[('f0', float)]).astype(ndtype) assert_equal(_check_fill_value(None, ndtype), control) control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) ndtype = np.dtype("int, (2,3)float, float") control = np.array((default_fill_value(0), default_fill_value(0.), default_fill_value(0.),), dtype="int, float, float").astype(ndtype) test = _check_fill_value(None, ndtype) assert_equal(test, control) control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) # but when indexing, fill value should become scalar not tuple # See issue #6723 M = masked_array(control) assert_equal(M["f1"].fill_value.ndim, 0) def test_fillvalue_datetime_timedelta(self): # Test default fillvalue for datetime64 and timedelta64 types. # See issue #4476, this would return '?' which would cause errors # elsewhere for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"): control = numpy.datetime64("NaT", timecode) test = default_fill_value(numpy.dtype("<M8[" + timecode + "]")) np.testing.assert_equal(test, control) control = numpy.timedelta64("NaT", timecode) test = default_fill_value(numpy.dtype("<m8[" + timecode + "]")) np.testing.assert_equal(test, control) def test_extremum_fill_value(self): # Tests extremum fill values for flexible type. 
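        # Note: minimum_fill_value/maximum_fill_value return, per field, the
        # extreme representable value suited to the reduction (e.g.
        # minimum_fill_value uses the dtype's maximum so that masked entries
        # can never win a minimum).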
a = array([(1, (2, 3)), (4, (5, 6))], dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) test = a.fill_value assert_equal(test.dtype, a.dtype) assert_equal(test['A'], default_fill_value(a['A'])) assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) test = minimum_fill_value(a) assert_equal(test.dtype, a.dtype) assert_equal(test[0], minimum_fill_value(a['A'])) assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) assert_equal(test[1], minimum_fill_value(a['B'])) test = maximum_fill_value(a) assert_equal(test.dtype, a.dtype) assert_equal(test[0], maximum_fill_value(a['A'])) assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) assert_equal(test[1], maximum_fill_value(a['B'])) def test_extremum_fill_value_subdtype(self): a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)]) test = minimum_fill_value(a) assert_equal(test.dtype, a.dtype) assert_equal(test[0], np.full(3, minimum_fill_value(a['value']))) test = maximum_fill_value(a) assert_equal(test.dtype, a.dtype) assert_equal(test[0], np.full(3, maximum_fill_value(a['value']))) def test_fillvalue_individual_fields(self): # Test setting fill_value on individual fields ndtype = [('a', int), ('b', int)] # Explicit fill_value a = array(list(zip([1, 2, 3], [4, 5, 6])), fill_value=(-999, -999), dtype=ndtype) aa = a['a'] aa.set_fill_value(10) assert_equal(aa._fill_value, np.array(10)) assert_equal(tuple(a.fill_value), (10, -999)) a.fill_value['b'] = -10 assert_equal(tuple(a.fill_value), (10, -10)) # Implicit fill_value t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype) tt = t['a'] tt.set_fill_value(10) assert_equal(tt._fill_value, np.array(10)) assert_equal(tuple(t.fill_value), (10, default_fill_value(0))) def test_fillvalue_implicit_structured_array(self): # Check that fill_value is always defined for structured arrays ndtype = ('b', float) adtype = ('a', float) a = array([(1.,), (2.,)], mask=[(False,), (False,)], fill_value=(np.nan,), dtype=np.dtype([adtype])) b = empty(a.shape, dtype=[adtype, ndtype]) b['a'] = a['a'] b['a'].set_fill_value(a['a'].fill_value) f = b._fill_value[()] assert_(np.isnan(f[0])) assert_equal(f[-1], default_fill_value(1.)) def test_fillvalue_as_arguments(self): # Test adding a fill_value parameter to empty/ones/zeros a = empty(3, fill_value=999.) assert_equal(a.fill_value, 999.) a = ones(3, fill_value=999., dtype=float) assert_equal(a.fill_value, 999.) a = zeros(3, fill_value=0., dtype=complex) assert_equal(a.fill_value, 0.) a = identity(3, fill_value=0., dtype=complex) assert_equal(a.fill_value, 0.) 
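    # Note: like their np counterparts, the masked empty/ones/zeros
    # constructors accept shape and dtype keywords; fill_value is the extra
    # masked-array-specific parameter exercised above.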
    def test_shape_argument(self):
        # Test that shape can be provided as an argument
        # GH issue 6106
        a = empty(shape=(3, ))
        assert_equal(a.shape, (3, ))

        a = ones(shape=(3, ), dtype=float)
        assert_equal(a.shape, (3, ))

        a = zeros(shape=(3, ), dtype=complex)
        assert_equal(a.shape, (3, ))

    def test_fillvalue_in_view(self):
        # Test the behavior of fill_value in view

        # Create initial masked array
        x = array([1, 2, 3], fill_value=1, dtype=np.int64)

        # Check that fill_value is preserved by default
        y = x.view()
        assert_(y.fill_value == 1)

        # Check that fill_value is preserved if dtype is specified and the
        # dtype is an ndarray sub-class and has a _fill_value attribute
        y = x.view(MaskedArray)
        assert_(y.fill_value == 1)

        # Check that fill_value is preserved if type is specified and the
        # dtype is an ndarray sub-class and has a _fill_value attribute (by
        # default, the first argument is dtype, not type)
        y = x.view(type=MaskedArray)
        assert_(y.fill_value == 1)

        # Check that code does not crash if passed an ndarray sub-class that
        # does not have a _fill_value attribute
        y = x.view(np.ndarray)
        y = x.view(type=np.ndarray)

        # Check that fill_value can be overridden with view
        y = x.view(MaskedArray, fill_value=2)
        assert_(y.fill_value == 2)

        # Check that fill_value can be overridden with view (using type=)
        y = x.view(type=MaskedArray, fill_value=2)
        assert_(y.fill_value == 2)

        # Check that fill_value gets reset if passed a dtype but not a
        # fill_value. This is because even though in some cases one can safely
        # cast the fill_value, e.g. if taking an int64 view of an int32 array,
        # in other cases, this cannot be done (e.g. int32 view of an int64
        # array with a large fill_value).
        y = x.view(dtype=np.int32)
        assert_(y.fill_value == 999999)

    def test_fillvalue_bytes_or_str(self):
        # Test whether fill values work as expected for structured dtypes
        # containing bytes or str. See issue #7259.
        a = empty(shape=(3, ), dtype="(2)3S,(2)3U")
        assert_equal(a["f0"].fill_value, default_fill_value(b"spam"))
        assert_equal(a["f1"].fill_value, default_fill_value("eggs"))


class TestUfuncs(object):
    # Test class for the application of ufuncs on MaskedArrays.

    def setup(self):
        # Base data definition.
        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
        self.err_status = np.geterr()
        np.seterr(divide='ignore', invalid='ignore')

    def teardown(self):
        np.seterr(**self.err_status)

    def test_testUfuncRegression(self):
        # Tests new ufuncs on MaskedArrays.
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh', 'arccosh', 'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(numpy.ma.core, f)
            args = self.d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            assert_equal(ur.filled(0), mr.filled(0), f)
            assert_mask_equal(ur.mask, mr.mask, err_msg=f)

    def test_reduce(self):
        # Tests reduce on MaskedArrays.
        a = self.d[0]
        assert_(not alltrue(a, axis=0))
        assert_(sometrue(a, axis=0))
        assert_equal(sum(a[:3], axis=0), 0)
        assert_equal(product(a, axis=0), 0)
        assert_equal(add.reduce(a), pi)

    def test_minmax(self):
        # Tests extrema on MaskedArrays.
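        # Note: masked_where(a < 5, a) below hides the values 1..4, so the
        # masked minimum becomes 5 while the maximum is unaffected.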
a = arange(1, 13).reshape(3, 4) amask = masked_where(a < 5, a) assert_equal(amask.max(), a.max()) assert_equal(amask.min(), 5) assert_equal(amask.max(0), a.max(0)) assert_equal(amask.min(0), [5, 6, 7, 8]) assert_(amask.max(1)[0].mask) assert_(amask.min(1)[0].mask) def test_ndarray_mask(self): # Check that the mask of the result is a ndarray (not a MaskedArray...) a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) test = np.sqrt(a) control = masked_array([-1, 0, 1, np.sqrt(2), -1], mask=[1, 0, 0, 0, 1]) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_(not isinstance(test.mask, MaskedArray)) def test_treatment_of_NotImplemented(self): # Check that NotImplemented is returned at appropriate places a = masked_array([1., 2.], mask=[1, 0]) assert_raises(TypeError, operator.mul, a, "abc") assert_raises(TypeError, operator.truediv, a, "abc") class MyClass(object): __array_priority__ = a.__array_priority__ + 1 def __mul__(self, other): return "My mul" def __rmul__(self, other): return "My rmul" me = MyClass() assert_(me * a == "My mul") assert_(a * me == "My rmul") # and that __array_priority__ is respected class MyClass2(object): __array_priority__ = 100 def __mul__(self, other): return "Me2mul" def __rmul__(self, other): return "Me2rmul" def __rdiv__(self, other): return "Me2rdiv" __rtruediv__ = __rdiv__ me_too = MyClass2() assert_(a.__mul__(me_too) is NotImplemented) assert_(all(multiply.outer(a, me_too) == "Me2rmul")) assert_(a.__truediv__(me_too) is NotImplemented) assert_(me_too * a == "Me2mul") assert_(a * me_too == "Me2rmul") assert_(a / me_too == "Me2rdiv") def test_no_masked_nan_warnings(self): # check that a nan in masked position does not # cause ufunc warnings m = np.ma.array([0.5, np.nan], mask=[0,1]) with warnings.catch_warnings(): warnings.filterwarnings("error") # test unary and binary ufuncs exp(m) add(m, 1) m > 0 # test different unary domains sqrt(m) log(m) tan(m) arcsin(m) arccos(m) arccosh(m) # test binary domains divide(m, 2) # also check that allclose uses ma ufuncs, to avoid warning allclose(m, 0.5) class TestMaskedArrayInPlaceArithmetics(object): # Test MaskedArray Arithmetics def setup(self): x = arange(10) y = arange(10) xm = arange(10) xm[2] = masked self.intdata = (x, y, xm) self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] self.othertypes = [np.dtype(_).type for _ in self.othertypes] self.uint8data = ( x.astype(np.uint8), y.astype(np.uint8), xm.astype(np.uint8) ) def test_inplace_addition_scalar(self): # Test of inplace additions (x, y, xm) = self.intdata xm[2] = masked x += 1 assert_equal(x, y + 1) xm += 1 assert_equal(xm, y + 1) (x, _, xm) = self.floatdata id1 = x.data.ctypes._data x += 1. assert_(id1 == x.data.ctypes._data) assert_equal(x, y + 1.) 
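        # The ctypes._data comparison above verifies that += was performed
        # in place, reusing the same underlying data buffer.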
def test_inplace_addition_array(self): # Test of inplace additions (x, y, xm) = self.intdata m = xm.mask a = arange(10, dtype=np.int16) a[-1] = masked x += a xm += a assert_equal(x, y + a) assert_equal(xm, y + a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_subtraction_scalar(self): # Test of inplace subtractions (x, y, xm) = self.intdata x -= 1 assert_equal(x, y - 1) xm -= 1 assert_equal(xm, y - 1) def test_inplace_subtraction_array(self): # Test of inplace subtractions (x, y, xm) = self.floatdata m = xm.mask a = arange(10, dtype=float) a[-1] = masked x -= a xm -= a assert_equal(x, y - a) assert_equal(xm, y - a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_multiplication_scalar(self): # Test of inplace multiplication (x, y, xm) = self.floatdata x *= 2.0 assert_equal(x, y * 2) xm *= 2.0 assert_equal(xm, y * 2) def test_inplace_multiplication_array(self): # Test of inplace multiplication (x, y, xm) = self.floatdata m = xm.mask a = arange(10, dtype=float) a[-1] = masked x *= a xm *= a assert_equal(x, y * a) assert_equal(xm, y * a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_division_scalar_int(self): # Test of inplace division (x, y, xm) = self.intdata x = arange(10) * 2 xm = arange(10) * 2 xm[2] = masked x //= 2 assert_equal(x, y) xm //= 2 assert_equal(xm, y) def test_inplace_division_scalar_float(self): # Test of inplace division (x, y, xm) = self.floatdata x /= 2.0 assert_equal(x, y / 2.0) xm /= arange(10) assert_equal(xm, ones((10,))) def test_inplace_division_array_float(self): # Test of inplace division (x, y, xm) = self.floatdata m = xm.mask a = arange(10, dtype=float) a[-1] = masked x /= a xm /= a assert_equal(x, y / a) assert_equal(xm, y / a) assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) def test_inplace_division_misc(self): x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
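        # Note: the quotient mask checked below is m1 | m2, plus the
        # positions where ym == 0 (outside the domain of true division).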
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = masked_array(x, mask=m1)
        ym = masked_array(y, mask=m2)
        z = xm / ym
        assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(z._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
        xm = xm.copy()
        xm /= ym
        assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        # The in-place quotient must carry the same data as the
        # out-of-place z.
        assert_equal(xm._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])

    def test_datafriendly_add(self):
        # Test keeping data w/ (inplace) addition
        x = array([1, 2, 3], mask=[0, 0, 1])
        # Test add w/ scalar
        xx = x + 1
        assert_equal(xx.data, [2, 3, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test iadd w/ scalar
        x += 1
        assert_equal(x.data, [2, 3, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test add w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x + array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 4, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test iadd w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x += array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 4, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_sub(self):
        # Test keeping data w/ (inplace) subtraction
        # Test sub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - 1
        assert_equal(xx.data, [0, 1, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test isub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= 1
        assert_equal(x.data, [0, 1, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test sub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 0, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test isub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 0, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_mul(self):
        # Test keeping data w/ (inplace) multiplication
        # Test mul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * 2
        assert_equal(xx.data, [2, 4, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test imul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= 2
        assert_equal(x.data, [2, 4, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test mul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 40, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test imul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(x.data, [1, 40, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_div(self):
        # Test keeping data w/ (inplace) division
        # Test div on scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x / 2.
        assert_equal(xx.data, [1 / 2., 2 / 2., 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test idiv on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= 2.
        assert_equal(x.data, [1 / 2., 2 / 2., 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test div on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x / array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(xx.data, [1., 2. / 20., 3.])
        assert_equal(xx.mask, [1, 0, 1])
        # Test idiv on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(x.data, [1., 2 / 20., 3.])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_pow(self):
        # Test keeping data w/ (inplace) power
        # Test pow on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x ** 2.5
        assert_equal(xx.data, [1., 2. ** 2.5, 3.])
        assert_equal(xx.mask, [0, 0, 1])
        # Test ipow on scalar
        x **= 2.5
        assert_equal(x.data, [1., 2.
** 2.5, 3]) assert_equal(x.mask, [0, 0, 1]) def test_datafriendly_add_arrays(self): a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 0]) a += b assert_equal(a, [[2, 2], [4, 4]]) if a.mask is not nomask: assert_equal(a.mask, [[0, 0], [0, 0]]) a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 1]) a += b assert_equal(a, [[2, 2], [4, 4]]) assert_equal(a.mask, [[0, 1], [0, 1]]) def test_datafriendly_sub_arrays(self): a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 0]) a -= b assert_equal(a, [[0, 0], [2, 2]]) if a.mask is not nomask: assert_equal(a.mask, [[0, 0], [0, 0]]) a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 1]) a -= b assert_equal(a, [[0, 0], [2, 2]]) assert_equal(a.mask, [[0, 1], [0, 1]]) def test_datafriendly_mul_arrays(self): a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 0]) a *= b assert_equal(a, [[1, 1], [3, 3]]) if a.mask is not nomask: assert_equal(a.mask, [[0, 0], [0, 0]]) a = array([[1, 1], [3, 3]]) b = array([1, 1], mask=[0, 1]) a *= b assert_equal(a, [[1, 1], [3, 3]]) assert_equal(a.mask, [[0, 1], [0, 1]]) def test_inplace_addition_scalar_type(self): # Test of inplace additions for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) xm[2] = masked x += t(1) assert_equal(x, y + t(1)) xm += t(1) assert_equal(xm, y + t(1)) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_addition_array_type(self): # Test of inplace additions for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked x += a xm += a assert_equal(x, y + a) assert_equal(xm, y + a) assert_equal(xm.mask, mask_or(m, a.mask)) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_subtraction_scalar_type(self): # Test of inplace subtractions for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) x -= t(1) assert_equal(x, y - t(1)) xm -= t(1) assert_equal(xm, y - t(1)) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_subtraction_array_type(self): # Test of inplace subtractions for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked x -= a xm -= a assert_equal(x, y - a) assert_equal(xm, y - a) assert_equal(xm.mask, mask_or(m, a.mask)) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_multiplication_scalar_type(self): # Test of inplace multiplication for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) x *= t(2) assert_equal(x, y * t(2)) xm *= t(2) assert_equal(xm, y * t(2)) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_multiplication_array_type(self): # Test of inplace multiplication for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked x *= a xm *= a assert_equal(x, y * a) assert_equal(xm, y * a) assert_equal(xm.mask, mask_or(m, a.mask)) assert_equal(len(w), 0, "Failed on type=%s." 
% t) def test_inplace_floor_division_scalar_type(self): # Test of inplace division for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked x //= t(2) xm //= t(2) assert_equal(x, y) assert_equal(xm, y) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_floor_division_array_type(self): # Test of inplace division for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked x //= a xm //= a assert_equal(x, y // a) assert_equal(xm, y // a) assert_equal( xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) assert_equal(len(w), 0, "Failed on type=%s." % t) def test_inplace_division_scalar_type(self): # Test of inplace division for t in self.othertypes: with suppress_warnings() as sup: sup.record(UserWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked # May get a DeprecationWarning or a TypeError. # # This is a consequence of the fact that this is true divide # and will require casting to float for calculation and # casting back to the original type. This will only be raised # with integers. Whether it is an error or warning is only # dependent on how stringent the casting rules are. # # Will handle the same way. try: x /= t(2) assert_equal(x, y) except (DeprecationWarning, TypeError) as e: warnings.warn(str(e), stacklevel=1) try: xm /= t(2) assert_equal(xm, y) except (DeprecationWarning, TypeError) as e: warnings.warn(str(e), stacklevel=1) if issubclass(t, np.integer): assert_equal(len(sup.log), 2, "Failed on type=%s." % t) else: assert_equal(len(sup.log), 0, "Failed on type=%s." % t) def test_inplace_division_array_type(self): # Test of inplace division for t in self.othertypes: with suppress_warnings() as sup: sup.record(UserWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked # May get a DeprecationWarning or a TypeError. # # This is a consequence of the fact that this is true divide # and will require casting to float for calculation and # casting back to the original type. This will only be raised # with integers. Whether it is an error or warning is only # dependent on how stringent the casting rules are. # # Will handle the same way. try: x /= a assert_equal(x, y / a) except (DeprecationWarning, TypeError) as e: warnings.warn(str(e), stacklevel=1) try: xm /= a assert_equal(xm, y / a) assert_equal( xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) except (DeprecationWarning, TypeError) as e: warnings.warn(str(e), stacklevel=1) if issubclass(t, np.integer): assert_equal(len(sup.log), 2, "Failed on type=%s." % t) else: assert_equal(len(sup.log), 0, "Failed on type=%s." % t) def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") # Test pow on scalar x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) xx = x ** t(2) xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) assert_equal(xx.data, xx_r.data) assert_equal(xx.mask, xx_r.mask) # Test ipow on scalar x **= t(2) assert_equal(x.data, xx_r.data) assert_equal(x.mask, xx_r.mask) assert_equal(len(w), 0, "Failed on type=%s." 
% t) class TestMaskedArrayMethods(object): # Test class for miscellaneous MaskedArrays methods. def setup(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) XX = x.reshape(3, 2, 2, 3) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mx = array(data=x, mask=m) mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) m2 = np.array([1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1]) m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) def test_generic_methods(self): # Tests some MaskedArray methods. a = array([1, 3, 2]) assert_equal(a.any(), a._data.any()) assert_equal(a.all(), a._data.all()) assert_equal(a.argmax(), a._data.argmax()) assert_equal(a.argmin(), a._data.argmin()) assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) assert_equal(a.conj(), a._data.conj()) assert_equal(a.conjugate(), a._data.conjugate()) m = array([[1, 2], [3, 4]]) assert_equal(m.diagonal(), m._data.diagonal()) assert_equal(a.sum(), a._data.sum()) assert_equal(a.take([1, 2]), a._data.take([1, 2])) assert_equal(m.transpose(), m._data.transpose()) def test_allclose(self): # Tests allclose on arrays a = np.random.rand(10) b = a + np.random.rand(10) * 1e-8 assert_(allclose(a, b)) # Test allclose w/ infs a[0] = np.inf assert_(not allclose(a, b)) b[0] = np.inf assert_(allclose(a, b)) # Test allclose w/ masked a = masked_array(a) a[-1] = masked assert_(allclose(a, b, masked_equal=True)) assert_(not allclose(a, b, masked_equal=False)) # Test comparison w/ scalar a *= 1e-8 a[0] = 0 assert_(allclose(a, 0, masked_equal=True)) # Test that the function works for MIN_INT integer typed arrays a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) assert_(allclose(a, a)) def test_allany(self): # Checks the any/all methods/functions. 
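        # Note (illustrative): in reductions, masked entries act as the
        # identity element -- all() fills them with True, any() with False --
        # and a fully masked array returns the `masked` constant, e.g.
        #     array([1, 2, 3], mask=True).all() is masked
        # The assertions below exercise both the global and per-axis forms.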
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mx = masked_array(x, mask=m)
        mxbig = (mx > 0.5)
        mxsmall = (mx < 0.5)

        assert_(not mxbig.all())
        assert_(mxbig.any())
        assert_equal(mxbig.all(0), [False, False, True])
        assert_equal(mxbig.all(1), [False, False, True])
        assert_equal(mxbig.any(0), [False, False, True])
        assert_equal(mxbig.any(1), [True, True, True])

        assert_(not mxsmall.all())
        assert_(mxsmall.any())
        assert_equal(mxsmall.all(0), [True, True, False])
        assert_equal(mxsmall.all(1), [False, False, False])
        assert_equal(mxsmall.any(0), [True, True, False])
        assert_equal(mxsmall.any(1), [True, True, False])

    def test_allany_onmatrices(self):
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        X = np.matrix(x)
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mX = masked_array(X, mask=m)
        mXbig = (mX > 0.5)
        mXsmall = (mX < 0.5)

        assert_(not mXbig.all())
        assert_(mXbig.any())
        assert_equal(mXbig.all(0), np.matrix([False, False, True]))
        assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
        assert_equal(mXbig.any(0), np.matrix([False, False, True]))
        assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)

        assert_(not mXsmall.all())
        assert_(mXsmall.any())
        assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)

    def test_allany_oddities(self):
        # Some fun with all and any
        store = empty((), dtype=bool)
        full = array([1, 2, 3], mask=True)

        assert_(full.all() is masked)
        full.all(out=store)
        assert_(store)
        assert_equal(store._mask, True)
        assert_(store is not masked)

        store = empty((), dtype=bool)
        assert_(full.any() is masked)
        full.any(out=store)
        assert_(not store)
        assert_equal(store._mask, True)
        assert_(store is not masked)

    def test_argmax_argmin(self):
        # Tests argmin & argmax on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d

        assert_equal(mx.argmin(), 35)
        assert_equal(mX.argmin(), 35)
        assert_equal(m2x.argmin(), 4)
        assert_equal(m2X.argmin(), 4)
        assert_equal(mx.argmax(), 28)
        assert_equal(mX.argmax(), 28)
        assert_equal(m2x.argmax(), 31)
        assert_equal(m2X.argmax(), 31)

        assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
        assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
        assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
        assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])

        assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5])
        assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
        assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
        assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])

    def test_clip(self):
        # Tests clip on MaskedArrays.
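        # Note (illustrative): clip() operates on the underlying data and
        # carries the mask through unchanged, so a masked element stays
        # masked even when its stored value lies outside the bounds, e.g.
        #     masked_array([1, 9], mask=[0, 1]).clip(2, 8)  # -> [2, --]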
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
        mx = array(x, mask=m)
        clipped = mx.clip(2, 8)
        assert_equal(clipped.mask, mx.mask)
        assert_equal(clipped._data, x.clip(2, 8))
        assert_equal(clipped._data, mx._data.clip(2, 8))

    def test_compress(self):
        # test compress
        a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
        condition = (a > 1.5) & (a < 3.5)
        assert_equal(a.compress(condition), [2., 3.])

        a[[2, 3]] = masked
        b = a.compress(condition)
        assert_equal(b._data, [2., 3.])
        assert_equal(b._mask, [0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])

        condition = (a < 4.)
        b = a.compress(condition)
        assert_equal(b._data, [1., 2., 3.])
        assert_equal(b._mask, [0, 0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])

        a = masked_array([[10, 20, 30], [40, 50, 60]],
                         mask=[[0, 0, 1], [1, 0, 0]])
        b = a.compress(a.ravel() >= 22)
        assert_equal(b._data, [30, 40, 50, 60])
        assert_equal(b._mask, [1, 1, 0, 0])

        x = np.array([3, 1, 2])
        b = a.compress(x >= 2, axis=1)
        assert_equal(b._data, [[10, 30], [40, 60]])
        assert_equal(b._mask, [[0, 1], [1, 0]])

    def test_compressed(self):
        # Tests compressed
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        a[0] = masked
        b = a.compressed()
        assert_equal(b, [2, 3, 4])

        a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        assert_(isinstance(b, np.matrix))
        a[0, 0] = masked
        b = a.compressed()
        assert_equal(b, [[2, 3, 4]])

    def test_empty(self):
        # Tests empty/like
        datatype = [('a', int), ('b', float), ('c', '|S8')]
        a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
                         dtype=datatype)
        assert_equal(len(a.fill_value.item()), len(datatype))

        b = empty_like(a)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)

        b = empty(len(a), dtype=datatype)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)

        # check empty_like mask handling
        a = masked_array([1, 2, 3], mask=[False, True, False])
        b = empty_like(a)
        assert_(not np.may_share_memory(a.mask, b.mask))
        b = a.view(masked_array)
        assert_(np.may_share_memory(a.mask, b.mask))

    @suppress_copy_mask_on_assignment
    def test_put(self):
        # Tests put.
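        # Note (illustrative): with a soft mask, put() behaves like fancy
        # assignment -- writing an unmasked value through put() clears the
        # mask at that position, while putting `masked` (or a masked source
        # element) sets it, e.g.
        #     x = masked_array([1, 2], mask=[1, 0])
        #     x.put([0], [9])  # x is now [9, 2], nothing masked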
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        x = array(d, mask=m)
        assert_(x[3] is masked)
        assert_(x[4] is masked)
        x[[1, 4]] = [10, 40]
        assert_(x[3] is masked)
        assert_(x[4] is not masked)
        assert_equal(x, [0, 10, 2, -1, 40])

        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        i = [0, 2, 4, 6]
        x.put(i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        put(x, i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

    def test_put_nomask(self):
        # GitHub issue 6425
        x = zeros(10)
        z = array([3., -1.], mask=[False, True])

        x.put([1, 2], z)
        assert_(x[0] is not masked)
        assert_equal(x[0], 0)
        assert_(x[1] is not masked)
        assert_equal(x[1], 3)
        assert_(x[2] is masked)
        assert_(x[3] is not masked)
        assert_equal(x[3], 0)

    def test_put_hardmask(self):
        # Tests put on hardmask
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        xh = array(d + 1, mask=m, hard_mask=True, copy=True)
        xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
        assert_equal(xh._data, [3, 4, 2, 4, 5])

    def test_putmask(self):
        x = arange(6) + 1
        mx = array(x, mask=[0, 0, 0, 1, 1, 1])
        mask = [0, 0, 1, 0, 0, 1]
        # w/o mask, w/o masked values
        xx = x.copy()
        putmask(xx, mask, 99)
        assert_equal(xx, [1, 2, 99, 4, 5, 99])
        # w/ mask, w/o masked values
        mxx = mx.copy()
        putmask(mxx, mask, 99)
        assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
        assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
        # w/o mask, w/ masked values
        values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
        xx = x.copy()
        putmask(xx, mask, values)
        assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
        # w/ mask, w/ masked values
        mxx = mx.copy()
        putmask(mxx, mask, values)
        assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
        # w/ mask, w/ masked values + hardmask
        mxx = mx.copy()
        mxx.harden_mask()
        putmask(mxx, mask, values)
        assert_equal(mxx, [1, 2, 30, 4, 5, 60])

    def test_ravel(self):
        # Tests ravel
        a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(aravel._mask.shape, aravel.shape)
        a = array([0, 0], mask=[1, 1])
        aravel = a.ravel()
        assert_equal(aravel._mask.shape, a.shape)
        a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(aravel.shape, (1, 5))
        assert_equal(aravel._mask.shape, a.shape)
        # Checks that small_mask is preserved
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
        assert_equal(a.ravel()._mask, [0, 0, 0, 0])
        # Test that the fill_value is preserved
        a.fill_value = -99
        a.shape = (2, 2)
        ar = a.ravel()
        assert_equal(ar._mask, [0, 0, 0, 0])
        assert_equal(ar._data, [1, 2, 3, 4])
        assert_equal(ar.fill_value, -99)
        # Test index ordering
        assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
        assert_equal(a.ravel(order='F'), [1, 3, 2, 4])

    def test_reshape(self):
        # Tests reshape
        x = arange(4)
        x[0] = masked
        y = x.reshape(2, 2)
        assert_equal(y.shape, (2, 2,))
        assert_equal(y._mask.shape, (2, 2,))
        assert_equal(x.shape, (4,))
        assert_equal(x._mask.shape, (4,))

    def test_sort(self):
        # Test sort
        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)

        sortedx = sort(x)
        assert_equal(sortedx._data, [1, 2, 3, 4])
        assert_equal(sortedx._mask, [0, 0, 0, 1])

        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [4, 1, 2, 3])
        assert_equal(sortedx._mask, [1, 0, 0, 0])

        x.sort()
        assert_equal(x._data, [1, 2, 3, 4])
        assert_equal(x._mask, [0, 0, 0, 1])

        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
        x.sort(endwith=False)
        assert_equal(x._data, [4, 1, 2, 3])
        assert_equal(x._mask, [1, 0, 0, 0])

        x = [1, 4, 2, 3]
        sortedx = sort(x)
        assert_(not isinstance(sortedx, MaskedArray))

        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])

    def test_argsort_matches_sort(self):
        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)

        for kwargs in [dict(),
                       dict(endwith=True),
                       dict(endwith=False),
                       dict(fill_value=2),
                       dict(fill_value=2, endwith=True),
                       dict(fill_value=2, endwith=False)]:
            sortedx = sort(x, **kwargs)
            argsortedx = x[argsort(x, **kwargs)]
            assert_equal(sortedx._data, argsortedx._data)
            assert_equal(sortedx._mask, argsortedx._mask)

    def test_sort_2d(self):
        # Check sort of 2D array.
        # 2D array w/o mask
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        # 2D array w/mask
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
        # 3D
        a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
                          [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
                          [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
                          [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
        a[a % 4 == 0] = masked
        am = a.copy()
        an = a.filled(99)
        am.sort(0)
        an.sort(0)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(1)
        an.sort(1)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(2)
        an.sort(2)
        assert_equal(am, an)

    def test_sort_flexible(self):
        # Test sort on structured dtype.
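        # Note (illustrative): sort() orders masked entries as a block -- at
        # the end by default (endwith=True), at the front with
        # endwith=False -- and fill_value, when given, supplies the
        # comparison key instead, e.g.
        #     sort(masked_array([3, 1, 2], mask=[0, 1, 0]))  # -> [2, 3, --]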
        a = array(
            data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
            mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
            dtype=[('A', int), ('B', int)])
        mask_last = array(
            data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
            mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
            dtype=[('A', int), ('B', int)])
        mask_first = array(
            data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
            mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
            dtype=[('A', int), ('B', int)])

        test = sort(a)
        assert_equal(test, mask_last)
        assert_equal(test.mask, mask_last.mask)

        test = sort(a, endwith=False)
        assert_equal(test, mask_first)
        assert_equal(test.mask, mask_first.mask)

        # Test sort on dtype with subarray (gh-8069)
        dt = np.dtype([('v', int, 2)])
        a = a.view(dt)
        mask_last = mask_last.view(dt)
        mask_first = mask_first.view(dt)

        test = sort(a)
        assert_equal(test, mask_last)
        assert_equal(test.mask, mask_last.mask)

        test = sort(a, endwith=False)
        assert_equal(test, mask_first)
        assert_equal(test.mask, mask_first.mask)

    def test_argsort(self):
        # Test argsort
        a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
        assert_equal(np.argsort(a), argsort(a))

    def test_squeeze(self):
        # Check squeeze
        data = masked_array([[1, 2, 3]])
        assert_equal(data.squeeze(), [1, 2, 3])
        data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
        assert_equal(data.squeeze(), [1, 2, 3])
        assert_equal(data.squeeze()._mask, [1, 1, 1])

        # normal ndarrays return a view
        arr = np.array([[1]])
        arr_sq = arr.squeeze()
        assert_equal(arr_sq, 1)
        arr_sq[...] = 2
        assert_equal(arr[0, 0], 2)

        # so maskedarrays should too
        m_arr = masked_array([[1]], mask=True)
        m_arr_sq = m_arr.squeeze()
        assert_(m_arr_sq is not np.ma.masked)
        assert_equal(m_arr_sq.mask, True)
        m_arr_sq[...] = 2
        assert_equal(m_arr[0, 0], 2)

    def test_swapaxes(self):
        # Tests swapaxes on MaskedArrays.
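        # Note: axis-rearranging operations (swapaxes, transpose, reshape)
        # apply the same transformation to the mask as to the data, so the
        # element/mask correspondence is preserved in the result.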
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mX = array(x, mask=m).reshape(6, 6) mXX = mX.reshape(3, 2, 2, 3) mXswapped = mX.swapaxes(0, 1) assert_equal(mXswapped[-1], mX[:, -1]) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_take(self): # Tests take x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) assert_equal(x.take([[0, 1], [0, 1]]), masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) # assert_equal crashes when passed np.ma.mask assert_(x[1] is np.ma.masked) assert_(x.take(1) is np.ma.masked) x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) assert_equal(x.take([0, 2], axis=1), array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) assert_equal(take(x, [0, 2], axis=1), array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) def test_take_masked_indices(self): # Test take w/ masked indices a = np.array((40, 18, 37, 9, 22)) indices = np.arange(3)[None,:] + np.arange(5)[:, None] mindices = array(indices, mask=(indices >= len(a))) # No mask test = take(a, mindices, mode='clip') ctrl = array([[40, 18, 37], [18, 37, 9], [37, 9, 22], [9, 22, 22], [22, 22, 22]]) assert_equal(test, ctrl) # Masked indices test = take(a, mindices) ctrl = array([[40, 18, 37], [18, 37, 9], [37, 9, 22], [9, 22, 40], [22, 40, 40]]) ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) # Masked input + masked indices a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) test = take(a, mindices) ctrl[0, 1] = ctrl[1, 0] = masked assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) def test_tolist(self): # Tests to list # ... on 1D x = array(np.arange(12)) x[[1, -2]] = masked xlist = x.tolist() assert_(xlist[1] is None) assert_(xlist[-2] is None) # ... on 2D x.shape = (3, 4) xlist = x.tolist() ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] assert_equal(xlist[0], [0, None, 2, 3]) assert_equal(xlist[1], [4, 5, 6, 7]) assert_equal(xlist[2], [8, 9, None, 11]) assert_equal(xlist, ctrl) # ... on structured array w/ masked records x = array(list(zip([1, 2, 3], [1.1, 2.2, 3.3], ['one', 'two', 'thr'])), dtype=[('a', int), ('b', float), ('c', '|S8')]) x[-1] = masked assert_equal(x.tolist(), [(1, 1.1, b'one'), (2, 2.2, b'two'), (None, None, None)]) # ... on structured array w/ masked fields a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], dtype=[('a', int), ('b', int)]) test = a.tolist() assert_equal(test, [[1, None], [3, 4]]) # ... 
on mvoid a = a[0] test = a.tolist() assert_equal(test, [1, None]) def test_tolist_specialcase(self): # Test mvoid.tolist: make sure we return a standard Python object a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) # w/o mask: each entry is a np.void whose elements are standard Python for entry in a: for item in entry.tolist(): assert_(not isinstance(item, np.generic)) # w/ mask: each entry is a ma.void whose elements should be # standard Python a.mask[0] = (0, 1) for entry in a: for item in entry.tolist(): assert_(not isinstance(item, np.generic)) def test_toflex(self): # Test the conversion to records data = arange(10) record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) data[[0, 1, 2, -1]] = masked record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) ndtype = [('i', int), ('s', '|S3'), ('f', float)] data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), 'ABCDEFGHIJKLM', np.random.rand(10))], dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) ndtype = np.dtype("int, (2,3)float, float") data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), np.random.rand(10), np.random.rand(10))], dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() assert_equal_records(record['_data'], data._data) assert_equal_records(record['_mask'], data._mask) def test_fromflex(self): # Test the reconstruction of a masked_array from a record a = array([1, 2, 3]) test = fromflex(a.toflex()) assert_equal(test, a) assert_equal(test.mask, a.mask) a = array([1, 2, 3], mask=[0, 0, 1]) test = fromflex(a.toflex()) assert_equal(test, a) assert_equal(test.mask, a.mask) a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], dtype=[('A', int), ('B', float)]) test = fromflex(a.toflex()) assert_equal(test, a) assert_equal(test.data, a.data) def test_arraymethod(self): # Test a _arraymethod w/ n argument marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) control = masked_array([[1], [2], [3], [4], [5]], mask=[0, 0, 1, 0, 0]) assert_equal(marray.T, control) assert_equal(marray.transpose(), control) assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) def test_arraymethod_0d(self): # gh-9430 x = np.ma.array(42, mask=True) assert_equal(x.T.mask, x.mask) assert_equal(x.T.data, x.data) def test_transpose_view(self): x = np.ma.array([[1, 2, 3], [4, 5, 6]]) x[0,1] = np.ma.masked xt = x.T xt[1,0] = 10 xt[0,1] = np.ma.masked assert_equal(x.data, xt.T.data) assert_equal(x.mask, xt.T.mask) def test_diagonal_view(self): x = np.ma.zeros((3,3)) x[0,0] = 10 x[1,1] = np.ma.masked x[2,2] = 20 xd = x.diagonal() x[1,1] = 15 assert_equal(xd.mask, x.diagonal().mask) assert_equal(xd.data, x.diagonal().data) class TestMaskedArrayMathMethods(object): def setup(self): # Base data definition. 
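        # Note: the math-method tests below all rely on the rule that
        # reductions ignore masked entries, i.e. mx.var() should agree with
        # mx.compressed().var() computed on the unmasked data alone.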
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) XX = x.reshape(3, 2, 2, 3) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mx = array(data=x, mask=m) mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) m2 = np.array([1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1]) m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) assert_equal(mXcp._data, mX.filled(0).cumsum(1)) mXcp = mX.cumprod(0) assert_equal(mXcp._data, mX.filled(1).cumprod(0)) mXcp = mX.cumprod(1) assert_equal(mXcp._data, mX.filled(1).cumprod(1)) def test_cumsumprod_with_output(self): # Tests cumsum/cumprod w/ output xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) xm[:, 0] = xm[0] = xm[-1, -1] = masked for funcname in ('cumsum', 'cumprod'): npfunc = getattr(np, funcname) xmmeth = getattr(xm, funcname) # A ndarray as explicit input output = np.empty((3, 4), dtype=float) output.fill(-9999) result = npfunc(xm, axis=0, out=output) # ... the result should be the given output assert_(result is output) assert_equal(result, xmmeth(axis=0, out=output)) output = empty((3, 4), dtype=int) result = xmmeth(axis=0, out=output) assert_(result is output) def test_ptp(self): # Tests ptp on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d (n, m) = X.shape assert_equal(mx.ptp(), mx.compressed().ptp()) rows = np.zeros(n, float) cols = np.zeros(m, float) for k in range(m): cols[k] = mX[:, k].compressed().ptp() for k in range(n): rows[k] = mX[k].compressed().ptp() assert_equal(mX.ptp(0), cols) assert_equal(mX.ptp(1), rows) def test_add_object(self): x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) y = x + 'x' assert_equal(y[1], 'bx') assert_(y.mask[0]) def test_sum_object(self): # Test sum on object dtype a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) assert_equal(a.sum(), 5) a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) assert_equal(a.sum(axis=0), [5, 7, 9]) def test_prod_object(self): # Test prod on object dtype a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) assert_equal(a.prod(), 2 * 3) a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) assert_equal(a.prod(axis=0), [4, 10, 18]) def test_meananom_object(self): # Test mean/anom on object dtype a = masked_array([1, 2, 3], dtype=object) assert_equal(a.mean(), 2) assert_equal(a.anom(), [-1, 0, 1]) def test_trace(self): # Tests trace on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0)) assert_equal(np.trace(mX), mX.trace()) # gh-5560 arr = np.arange(2*4*4).reshape(2,4,4) m_arr = np.ma.masked_array(arr, False) assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) def test_dot(self): # Tests dot on MaskedArrays. 
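        # Note: as the assertions against fx.dot(fx) below suggest, masked
        # dot products are computed on data filled with 0; see np.ma.dot
        # (and its `strict` keyword) for the mask-propagation rules.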
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) assert_(r.mask is nomask) fX = mX.filled(0) r = mX.dot(mX) assert_almost_equal(r.filled(0), fX.dot(fX)) assert_(r.mask[1,3]) r1 = empty_like(r) mX.dot(mX, out=r1) assert_almost_equal(r, r1) mYY = mXX.swapaxes(-1, -2) fXX, fYY = mXX.filled(0), mYY.filled(0) r = mXX.dot(mYY) assert_almost_equal(r.filled(0), fXX.dot(fYY)) r1 = empty_like(r) mXX.dot(mYY, out=r1) assert_almost_equal(r, r1) def test_dot_shape_mismatch(self): # regression test x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) z = masked_array([[0,1],[3,3]]) x.dot(y, out=z) assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) assert_almost_equal(z.mask, [[0, 1], [0, 0]]) def test_varmean_nomask(self): # gh-5769 foo = array([1,2,3,4], dtype='f8') bar = array([1,2,3,4], dtype='f8') assert_equal(type(foo.mean()), np.float64) assert_equal(type(foo.var()), np.float64) assert((foo.mean() == bar.mean()) is np.bool_(True)) # check array type is preserved and out works foo = array(np.arange(16).reshape((4,4)), dtype='f8') bar = empty(4, dtype='f4') assert_equal(type(foo.mean(axis=1)), MaskedArray) assert_equal(type(foo.var(axis=1)), MaskedArray) assert_(foo.mean(axis=1, out=bar) is bar) assert_(foo.var(axis=1, out=bar) is bar) def test_varstd(self): # Tests var & std on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), mX.compressed().std(ddof=1)) assert_almost_equal(mX.var(axis=None, ddof=1), mX.compressed().var(ddof=1)) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) assert_equal(mX.var().shape, X.var().shape) (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2)) assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2)) for k in range(6): assert_almost_equal(mXvar1[k], mX[k].compressed().var()) assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) @dec.knownfailureif(sys.platform=='win32' and sys.version_info < (3, 6), msg='Fails on Python < 3.6 (Issue #9671)') @suppress_copy_mask_on_assignment def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) mout = array(-1, dtype=float) x = array(arange(10), mask=True) for methodname in ('var', 'std'): method = getattr(x, methodname) assert_(method() is masked) assert_(method(0) is masked) assert_(method(-1) is masked) # Using a masked array as explicit output method(out=mout) assert_(mout is not masked) assert_equal(mout.mask, True) # Using a ndarray as explicit output method(out=nout) assert_(np.isnan(nout)) x = array(arange(10), mask=True) x[-1] = 9 for methodname in ('var', 'std'): method = getattr(x, methodname) assert_(method(ddof=1) is masked) assert_(method(0, ddof=1) is masked) assert_(method(-1, ddof=1) is masked) # Using a masked array as explicit output method(out=mout, ddof=1) assert_(mout is not masked) assert_equal(mout.mask, True) # Using a ndarray as explicit output method(out=nout, ddof=1) assert_(np.isnan(nout)) def test_varstd_ddof(self): a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) test = a.std(axis=0, ddof=0) assert_equal(test.filled(0), [0, 0, 0]) assert_equal(test.mask, [0, 
0, 1]) test = a.std(axis=0, ddof=1) assert_equal(test.filled(0), [0, 0, 0]) assert_equal(test.mask, [0, 0, 1]) test = a.std(axis=0, ddof=2) assert_equal(test.filled(0), [0, 0, 0]) assert_equal(test.mask, [1, 1, 1]) def test_diag(self): # Test diag x = arange(9).reshape((3, 3)) x[1, 1] = masked out = np.diag(x) assert_equal(out, [0, 4, 8]) out = diag(x) assert_equal(out, [0, 4, 8]) assert_equal(out.mask, [0, 1, 0]) out = diag(out) control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(out, control) def test_axis_methods_nomask(self): # Test the combination nomask & methods w/ axis a = array([[1, 2, 3], [4, 5, 6]]) assert_equal(a.sum(0), [5, 7, 9]) assert_equal(a.sum(-1), [6, 15]) assert_equal(a.sum(1), [6, 15]) assert_equal(a.prod(0), [4, 10, 18]) assert_equal(a.prod(-1), [6, 120]) assert_equal(a.prod(1), [6, 120]) assert_equal(a.min(0), [1, 2, 3]) assert_equal(a.min(-1), [1, 4]) assert_equal(a.min(1), [1, 4]) assert_equal(a.max(0), [4, 5, 6]) assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) class TestMaskedArrayMathMethodsComplex(object): # Test class for miscellaneous MaskedArrays methods. def setup(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) X = x.reshape(6, 6) XX = x.reshape(3, 2, 2, 3) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mx = array(data=x, mask=m) mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) m2 = np.array([1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1]) m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) def test_varstd(self): # Tests var & std on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) assert_equal(mX.var().shape, X.var().shape) (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2)) assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2)) for k in range(6): assert_almost_equal(mXvar1[k], mX[k].compressed().var()) assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) class TestMaskedArrayFunctions(object): # Test class for miscellaneous functions. 
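    # Note (illustrative): most constructors exercised here (masked_where,
    # masked_greater, masked_equal, ...) are thin wrappers that mask `a`
    # wherever the given condition holds, e.g.
    #     masked_where([True, False], [1, 2])  # -> [--, 2]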
def setup(self): x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) xm.set_fill_value(1e+20) self.info = (xm, ym) def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) assert_equal(y, [1, 2]) assert_equal(y[1], 2) def test_masked_equal_wlist(self): x = [1, 2, 3] mx = masked_equal(x, 3) assert_equal(mx, x) assert_equal(mx._mask, [0, 0, 1]) mx = masked_not_equal(x, 3) assert_equal(mx, x) assert_equal(mx._mask, [1, 1, 0]) def test_masked_equal_fill_value(self): x = [1, 2, 3] mx = masked_equal(x, 3) assert_equal(mx._mask, [0, 0, 1]) assert_equal(mx.fill_value, 3) def test_masked_where_condition(self): # Tests masking functions. x = array([1., 2., 3., 4., 5.]) x[2] = masked assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) assert_equal(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2)) assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) assert_equal(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)) assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5]) def test_masked_where_oddities(self): # Tests some generic features. atest = ones((10, 10, 10), dtype=float) btest = zeros(atest.shape, MaskType) ctest = masked_where(btest, atest) assert_equal(atest, ctest) def test_masked_where_shape_constraint(self): a = arange(10) try: test = masked_equal(1, a) except IndexError: pass else: raise AssertionError("Should have failed...") test = masked_equal(a, 1) assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) def test_masked_where_structured(self): # test that masked_where on a structured array sets a structured # mask (see issue #2972) a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")]) am = np.ma.masked_where(a["A"] < 5, a) assert_equal(am.mask.dtype.names, am.dtype.names) assert_equal(am["A"], np.ma.masked_array(np.zeros(10), np.ones(10))) def test_masked_where_mismatch(self): # gh-4520 x = np.arange(10) y = np.arange(5) assert_raises(IndexError, np.ma.masked_where, y > 6, x) def test_masked_otherfunctions(self): assert_equal(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]) assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]) assert_equal(masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0]) assert_equal(masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1]) assert_equal(masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0]) assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1]) def test_round(self): a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], mask=[0, 1, 0, 0, 0]) assert_equal(a.round(), [1., 2., 3., 5., 6.]) assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) b = empty_like(a) a.round(out=b) assert_equal(b, [1., 2., 3., 5., 6.]) x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) 
assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) def test_round_with_output(self): # Testing round with an explicit output xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) xm[:, 0] = xm[0] = xm[-1, -1] = masked # A ndarray as explicit input output = np.empty((3, 4), dtype=float) output.fill(-9999) result = np.round(xm, decimals=2, out=output) # ... the result should be the given output assert_(result is output) assert_equal(result, xm.round(decimals=2, out=output)) output = empty((3, 4), dtype=float) result = xm.round(decimals=2, out=output) assert_(result is output) def test_round_with_scalar(self): # Testing round with scalar/zero dimension input # GH issue 2244 a = array(1.1, mask=[False]) assert_equal(a.round(), 1) a = array(1.1, mask=[True]) assert_(a.round() is masked) a = array(1.1, mask=[False]) output = np.empty(1, dtype=float) output.fill(-9999) a.round(out=output) assert_equal(output, 1) a = array(1.1, mask=[False]) output = array(-9999., mask=[True]) a.round(out=output) assert_equal(output[()], 1) a = array(1.1, mask=[True]) output = array(-9999., mask=[False]) a.round(out=output) assert_(output[()] is masked) def test_identity(self): a = identity(5) assert_(isinstance(a, MaskedArray)) assert_equal(a, np.identity(5)) def test_power(self): x = -1.1 assert_almost_equal(power(x, 2.), 1.21) assert_(power(x, masked) is masked) x = array([-1.1, -1.1, 1.1, 1.1, 0.]) b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) y = power(x, b) assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) assert_equal(y._mask, [1, 0, 0, 0, 1]) b.mask = nomask y = power(x, b) assert_equal(y._mask, [1, 0, 0, 0, 1]) z = x ** b assert_equal(z._mask, y._mask) assert_almost_equal(z, y) assert_almost_equal(z._data, y._data) x **= b assert_equal(x._mask, y._mask) assert_almost_equal(x, y) assert_almost_equal(x._data, y._data) def test_power_with_broadcasting(self): # Test power w/ broadcasting a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) b1 = np.array([2, 4, 3]) b2 = np.array([b1, b1]) b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], mask=[[1, 1, 0], [0, 1, 1]]) # No broadcasting, base & exp w/ mask test = a2m ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) # No broadcasting, base w/ mask, exp w/o mask test = a2m ** b2 assert_equal(test, ctrl) assert_equal(test.mask, a2m.mask) # No broadcasting, base w/o mask, exp w/ mask test = a2 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, b2m.mask) ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], mask=[[0, 1, 0], [0, 1, 0]]) test = b1 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) test = b2m ** b1 assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) def test_where(self): # Test the where function x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) xm.set_fill_value(1e+20) d = where(xm > 2, xm, -9) assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) assert_equal(d._mask, xm._mask) d = where(xm > 2, -9, ym) assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.]) assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) d = where(xm > 2, xm, masked) assert_equal(d, [-9., -9., -9., -9., 
-9., 4., -9., -9., 10., -9., -9., 3.]) tmp = xm._mask.copy() tmp[(xm <= 2).filled(True)] = True assert_equal(d._mask, tmp) ixm = xm.astype(int) d = where(ixm > 2, ixm, masked) assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) assert_equal(d.dtype, ixm.dtype) def test_where_object(self): a = np.array(None) b = masked_array(None) r = b.copy() assert_equal(np.ma.where(True, a, a), r) assert_equal(np.ma.where(True, b, b), r) def test_where_with_masked_choice(self): x = arange(10) x[3] = masked c = x >= 8 # Set False to masked z = where(c, x, masked) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is masked) assert_(z[7] is masked) assert_(z[8] is not masked) assert_(z[9] is not masked) assert_equal(x, z) # Set True to masked z = where(c, masked, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) def test_where_with_masked_condition(self): x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) x = arange(1, 6) x[-1] = masked y = arange(1, 6) * 10 y[2] = masked c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) cm = c.filled(1) z = where(c, x, y) zm = where(cm, x, y) assert_equal(z, zm) assert_(getmask(zm) is nomask) assert_equal(zm, [1, 2, 3, 40, 50]) z = where(c, masked, 1) assert_equal(z, [99, 99, 99, 1, 1]) z = where(c, 1, masked) assert_equal(z, [99, 1, 1, 99, 99]) def test_where_type(self): # Test the type conservation with where x = np.arange(4, dtype=np.int32) y = np.arange(4, dtype=np.float32) * 2.2 test = where(x > 1.5, y, x).dtype control = np.find_common_type([np.int32, np.float32], []) assert_equal(test, control) def test_where_broadcast(self): # Issue 8599 x = np.arange(9).reshape(3, 3) y = np.zeros(3) core = np.where([1, 0, 1], x, y) ma = where([1, 0, 1], x, y) assert_equal(core, ma) assert_equal(core.dtype, ma.dtype) def test_where_structured(self): # Issue 8600 dt = np.dtype([('a', int), ('b', int)]) x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) y = np.array((10, 20), dtype=dt) core = np.where([0, 1, 1], x, y) ma = np.where([0, 1, 1], x, y) assert_equal(core, ma) assert_equal(core.dtype, ma.dtype) def test_where_structured_masked(self): dt = np.dtype([('a', int), ('b', int)]) x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) ma = where([0, 1, 1], x, masked) expected = masked_where([1, 0, 0], x) assert_equal(ma.dtype, expected.dtype) assert_equal(ma, expected) assert_equal(ma.mask, expected.mask) def test_choose(self): # Test choose choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] chosen = choose([2, 3, 1, 0], choices) assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='clip') assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='wrap') assert_equal(chosen, array([20, 1, 12, 3])) # Check with some masked indices indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([99, 1, 12, 99])) assert_equal(chosen.mask, [1, 0, 0, 1]) # Check with some masked choices choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([20, 31, 12, 
3])) assert_equal(chosen.mask, [1, 0, 0, 1]) def test_choose_with_out(self): # Test choose with an explicit out keyword choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] store = empty(4, dtype=int) chosen = choose([2, 3, 1, 0], choices, out=store) assert_equal(store, array([20, 31, 12, 3])) assert_(store is chosen) # Check with some masked indices + out store = empty(4, dtype=int) indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([99, 31, 12, 99])) assert_equal(store.mask, [1, 0, 0, 1]) # Check with some masked choices + out ina ndarray ! choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] store = empty(4, dtype=int).view(ndarray) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([999999, 31, 12, 999999])) def test_reshape(self): a = arange(10) a[0] = masked # Try the default b = a.reshape((5, 2)) assert_equal(b.shape, (5, 2)) assert_(b.flags['C']) # Try w/ arguments as list instead of tuple b = a.reshape(5, 2) assert_equal(b.shape, (5, 2)) assert_(b.flags['C']) # Try w/ order b = a.reshape((5, 2), order='F') assert_equal(b.shape, (5, 2)) assert_(b.flags['F']) # Try w/ order b = a.reshape(5, 2, order='F') assert_equal(b.shape, (5, 2)) assert_(b.flags['F']) c = np.reshape(a, (2, 5)) assert_(isinstance(c, MaskedArray)) assert_equal(c.shape, (2, 5)) assert_(c[0, 0] is masked) assert_(c.flags['C']) def test_make_mask_descr(self): # Flexible ntype = [('a', float), ('b', float)] test = make_mask_descr(ntype) assert_equal(test, [('a', bool), ('b', bool)]) assert_(test is make_mask_descr(test)) # Standard w/ shape ntype = (float, 2) test = make_mask_descr(ntype) assert_equal(test, (bool, 2)) assert_(test is make_mask_descr(test)) # Standard standard ntype = float test = make_mask_descr(ntype) assert_equal(test, np.dtype(bool)) assert_(test is make_mask_descr(test)) # Nested ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] test = make_mask_descr(ntype) control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) assert_equal(test, control) assert_(test is make_mask_descr(test)) # Named+ shape ntype = [('a', (float, 2))] test = make_mask_descr(ntype) assert_equal(test, np.dtype([('a', (bool, 2))])) assert_(test is make_mask_descr(test)) # 2 names ntype = [(('A', 'a'), float)] test = make_mask_descr(ntype) assert_equal(test, np.dtype([(('A', 'a'), bool)])) assert_(test is make_mask_descr(test)) # nested boolean types should preserve identity base_type = np.dtype([('a', int, 3)]) base_mtype = make_mask_descr(base_type) sub_type = np.dtype([('a', int), ('b', base_mtype)]) test = make_mask_descr(sub_type) assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) assert_(test.fields['b'][0] is base_mtype) def test_make_mask(self): # Test make_mask # w/ a list as an input mask = [0, 1] test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a ndarray as an input mask = np.array([0, 1], dtype=bool) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a flexible-type ndarray as an input - use default mdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [1, 1]) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], 
dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, mdtype) assert_equal(test, mask) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', float), ('b', float)] bdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, bdtype) assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) # Ensure this also works for void mask = np.array((False, True), dtype='?,?')[()] assert_(isinstance(mask, np.void)) test = make_mask(mask, dtype=mask.dtype) assert_equal(test, mask) assert_(test is not mask) mask = np.array((0, 1), dtype='i4,i4')[()] test2 = make_mask(mask, dtype=mask.dtype) assert_equal(test2, test) # test that nomask is returned when m is nomask. bools = [True, False] dtypes = [MaskType, float] msgformat = 'copy=%s, shrink=%s, dtype=%s' for cpy, shr, dt in itertools.product(bools, bools, dtypes): res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) assert_(res is nomask, msgformat % (cpy, shr, dt)) def test_mask_or(self): # Initialize mtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) # Test using nomask as input test = mask_or(mask, nomask) assert_equal(test, mask) test = mask_or(nomask, mask) assert_equal(test, mask) # Using False as input test = mask_or(mask, False) assert_equal(test, mask) # Using another array w / the same dtype other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) test = mask_or(mask, other) control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) assert_equal(test, control) # Using another array w / a different dtype othertype = [('A', bool), ('B', bool)] other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) try: test = mask_or(mask, other) except ValueError: pass # Using nested arrays dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) assert_equal(mask_or(amask, bmask), cntrl) def test_flatten_mask(self): # Tests flatten mask # Standard dtype mask = np.array([0, 0, 1], dtype=bool) assert_equal(flatten_mask(mask), mask) # Flexible dtype mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) test = flatten_mask(mask) control = np.array([0, 0, 0, 1], dtype=bool) assert_equal(test, control) mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] data = [(0, (0, 0)), (0, (0, 1))] mask = np.array(data, dtype=mdtype) test = flatten_mask(mask) control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) assert_equal(test, control) def test_on_ndarray(self): # Test functions on ndarrays a = np.array([1, 2, 3, 4]) m = array(a, mask=False) test = anom(a) assert_equal(test, m.anom()) test = reshape(a, (2, 2)) assert_equal(test, m.reshape(2, 2)) def test_compress(self): # Test compress function on ndarray and masked array # Address Github #2495. arr = np.arange(8) arr.shape = 4, 2 cond = np.array([True, False, True, True]) control = arr[[0, 2, 3]] test = np.ma.compress(cond, arr, axis=0) assert_equal(test, control) marr = np.ma.array(arr) test = np.ma.compress(cond, marr, axis=0) assert_equal(test, control) def test_compressed(self): # Test ma.compressed function. 
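        # Note: np.ma.compressed returns only the unmasked entries, always
        # flattened to 1-D; the cases below check how the return type is
        # handled for ndarray and MaskedArray subclasses.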
# Address gh-4026 a = np.ma.array([1, 2]) test = np.ma.compressed(a) assert_(type(test) is np.ndarray) # Test case when input data is ndarray subclass class A(np.ndarray): pass a = np.ma.array(A(shape=0)) test = np.ma.compressed(a) assert_(type(test) is A) # Test that compress flattens test = np.ma.compressed([[1],[2]]) assert_equal(test.ndim, 1) test = np.ma.compressed([[[[[1]]]]]) assert_equal(test.ndim, 1) # Test case when input is MaskedArray subclass class M(MaskedArray): pass test = np.ma.compressed(M(shape=(0,1,2))) assert_equal(test.ndim, 1) # with .compressed() overridden class M(MaskedArray): def compressed(self): return 42 test = np.ma.compressed(M(shape=(0,1,2))) assert_equal(test, 42) def test_convolve(self): a = masked_equal(np.arange(5), 2) b = np.array([1, 1]) test = np.ma.convolve(a, b) assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1)) test = np.ma.convolve(a, b, propagate_mask=False) assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1)) test = np.ma.convolve([1, 1], [1, 1, 1]) assert_equal(test, masked_equal([1, 2, 2, 1], -1)) a = [1, 1] b = masked_equal([1, -1, -1, 1], -1) test = np.ma.convolve(a, b, propagate_mask=False) assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) test = np.ma.convolve(a, b, propagate_mask=True) assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) class TestMaskedFields(object): def setup(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] ddtype = [('a', int), ('b', float), ('c', '|S8')] mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) def test_set_records_masks(self): base = self.data['base'] mdtype = self.data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = masked assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ simple boolean base.mask = False assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = True assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ list base.mask = [0, 0, 0, 1, 1] assert_equal_records(base._mask, np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], dtype=mdtype)) def test_set_record_element(self): # Check setting an element of a record) base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 2, 3, 4, 5]) assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 3, 3, 4, 5]) assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, [b'pi', b'pi', b'pi', b'four', b'five']) def test_mask_element(self): "Check record access" base = self.data['base'] base[0] = masked for n in ('a', 'b', 'c'): assert_equal(base[n].mask, [1, 1, 0, 0, 1]) assert_equal(base[n]._data, base._data[n]) def test_getmaskarray(self): # Test getmaskarray on flexible dtype ndtype = [('a', int), ('b', float)] test = 
empty(3, dtype=ndtype) assert_equal(getmaskarray(test), np.array([(0, 0), (0, 0), (0, 0)], dtype=[('a', '|b1'), ('b', '|b1')])) test[:] = masked assert_equal(getmaskarray(test), np.array([(1, 1), (1, 1), (1, 1)], dtype=[('a', '|b1'), ('b', '|b1')])) def test_view(self): # Test view w/ flexible dtype iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) # Transform globally to simple dtype test = a.view(float) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) # Transform globally to dty test = a.view((float, 2)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) test = a.view((float, 2), np.matrix) assert_equal(test, data) assert_(isinstance(test, np.matrix)) def test_getitem(self): ndtype = [('a', float), ('b', float)] a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), dtype=[('a', bool), ('b', bool)]) def _test_index(i): assert_equal(type(a[i]), mvoid) assert_equal_records(a[i]._data, a._data[i]) assert_equal_records(a[i]._mask, a._mask[i]) assert_equal(type(a[i, ...]), MaskedArray) assert_equal_records(a[i,...]._data, a._data[i,...]) assert_equal_records(a[i,...]._mask, a._mask[i,...]) _test_index(1) # No mask _test_index(0) # One element masked _test_index(-2) # All element masked def test_setitem(self): # Issue 4866: check that one can set individual items in [record][col] # and [col][record] order ndtype = np.dtype([('a', float), ('b', int)]) ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) ma['a'][1] = 3.0 assert_equal(ma['a'], np.array([1.0, 3.0])) ma[1]['a'] = 4.0 assert_equal(ma['a'], np.array([1.0, 4.0])) # Issue 2403 mdtype = np.dtype([('a', bool), ('b', bool)]) # soft mask control = np.array([(False, True), (True, True)], dtype=mdtype) a = np.ma.masked_all((2,), dtype=ndtype) a['a'][0] = 2 assert_equal(a.mask, control) a = np.ma.masked_all((2,), dtype=ndtype) a[0]['a'] = 2 assert_equal(a.mask, control) # hard mask control = np.array([(True, True), (True, True)], dtype=mdtype) a = np.ma.masked_all((2,), dtype=ndtype) a.harden_mask() a['a'][0] = 2 assert_equal(a.mask, control) a = np.ma.masked_all((2,), dtype=ndtype) a.harden_mask() a[0]['a'] = 2 assert_equal(a.mask, control) def test_setitem_scalar(self): # 8510 mask_0d = np.ma.masked_array(1, mask=True) arr = np.ma.arange(3) arr[0] = mask_0d assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): # check that len() works for mvoid (Github issue #576) for rec in self.data['base']: assert_equal(len(rec), len(self.data['ddtype'])) class TestMaskedObjectArray(object): def test_getitem(self): arr = np.ma.array([None, None]) for dt in [float, object]: a0 = np.eye(2).astype(dt) a1 = np.eye(3).astype(dt) arr[0] = a0 arr[1] = a1 assert_(arr[0] is a0) assert_(arr[1] is a1) assert_(isinstance(arr[0,...], MaskedArray)) assert_(isinstance(arr[1,...], MaskedArray)) assert_(arr[0,...][()] is a0) assert_(arr[1,...][()] is a1) arr[0] = np.ma.masked assert_(arr[1] is a1) assert_(isinstance(arr[0,...], MaskedArray)) assert_(isinstance(arr[1,...], MaskedArray)) assert_equal(arr[0,...].mask, True) assert_(arr[1,...][()] is a1) # gh-5962 - object arrays of arrays do something special assert_equal(arr[0].data, a0) assert_equal(arr[0].mask, True) assert_equal(arr[0,...][()].data, a0) 
assert_equal(arr[0,...][()].mask, True) def test_nested_ma(self): arr = np.ma.array([None, None]) # set the first object to be an unmasked masked constant. A little fiddly arr[0,...] = np.array([np.ma.masked], object)[0,...] # check the above line did what we were aiming for assert_(arr.data[0] is np.ma.masked) # test that getitem returned the value by identity assert_(arr[0] is np.ma.masked) # now mask the masked value! arr[0] = np.ma.masked assert_(arr[0] is np.ma.masked) class TestMaskedView(object): def setup(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) self.data = (data, a, controlmask) def test_view_to_nothing(self): (data, a, controlmask) = self.data test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): (data, a, controlmask) = self.data test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): (data, a, controlmask) = self.data # View globally test = a.view(float) assert_(isinstance(test, MaskedArray)) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): (data, a, controlmask) = self.data test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a']) assert_equal(test['B'], a['b']) test = a[0].view([('A', float), ('B', float)]) assert_(isinstance(test, MaskedArray)) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][0]) assert_equal(test['B'], a['b'][0]) test = a[-1].view([('A', float), ('B', float)]) assert_(isinstance(test, MaskedArray)) assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): (data, a, controlmask) = self.data # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) # View on 1 masked element test = a[0].view((float, 2)) assert_(isinstance(test, MaskedArray)) assert_equal(test, data[0]) assert_equal(test.mask, (1, 0)) # View on 1 unmasked element test = a[-1].view((float, 2)) assert_(isinstance(test, MaskedArray)) assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): (data, a, controlmask) = self.data test = a.view((float, 2), np.matrix) assert_equal(test, data) assert_(isinstance(test, np.matrix)) assert_(not isinstance(test, MaskedArray)) class TestOptionalArgs(object): def test_ndarrayfuncs(self): # test axis arg behaves the same as ndarray (including multiple axes) d = np.arange(24.0).reshape((2,3,4)) m = np.zeros(24, dtype=bool).reshape((2,3,4)) # mask out last element of last dimension m[:,:,-1] = True a = np.ma.array(d, mask=m) def testaxis(f, a, d): numpy_f = numpy.__getattribute__(f) ma_f = np.ma.__getattribute__(f) # test axis arg assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) assert_equal(ma_f(a, axis=(0,1))[...,:-1], numpy_f(d[...,:-1], axis=(0,1))) def testkeepdims(f, a, d): numpy_f = numpy.__getattribute__(f) ma_f = np.ma.__getattribute__(f) # test keepdims arg assert_equal(ma_f(a, keepdims=True).shape, numpy_f(d, keepdims=True).shape) assert_equal(ma_f(a, keepdims=False).shape, numpy_f(d, 
keepdims=False).shape) # test both at once assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], numpy_f(d[...,:-1], axis=1, keepdims=True)) assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) for f in ['sum', 'prod', 'mean', 'var', 'std']: testaxis(f, a, d) testkeepdims(f, a, d) for f in ['min', 'max']: testaxis(f, a, d) d = (np.arange(24).reshape((2,3,4))%2 == 0) a = np.ma.array(d, mask=m) for f in ['all', 'any']: testaxis(f, a, d) testkeepdims(f, a, d) def test_count(self): # test np.ma.count specially d = np.arange(24.0).reshape((2,3,4)) m = np.zeros(24, dtype=bool).reshape((2,3,4)) m[:,0,:] = True a = np.ma.array(d, mask=m) assert_equal(count(a), 16) assert_equal(count(a, axis=1), 2*ones((2,4))) assert_equal(count(a, axis=(0,1)), 4*ones((4,))) assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) assert_equal(count(a, axis=-2), 2*ones((2,4))) assert_raises(ValueError, count, a, axis=(1,1)) assert_raises(np.AxisError, count, a, axis=3) # check the 'nomask' path a = np.ma.array(d, mask=nomask) assert_equal(count(a), 24) assert_equal(count(a, axis=1), 3*ones((2,4))) assert_equal(count(a, axis=(0,1)), 6*ones((4,))) assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) assert_equal(np.ndim(count(a, keepdims=True)), 3) assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) assert_equal(count(a, axis=-2), 3*ones((2,4))) assert_raises(ValueError, count, a, axis=(1,1)) assert_raises(np.AxisError, count, a, axis=3) # check the 'masked' singleton assert_equal(count(np.ma.masked), 0) # check 0-d arrays do not allow axis > 0 assert_raises(np.AxisError, count, np.ma.array(1), axis=1) class TestMaskedConstant(object): def _do_add_test(self, add): # sanity check assert_(add(np.ma.masked, 1) is np.ma.masked) # now try with a vector vector = np.array([1, 2, 3]) result = add(np.ma.masked, vector) # lots of things could go wrong here assert_(result is not np.ma.masked) assert_(not isinstance(result, np.ma.core.MaskedConstant)) assert_equal(result.shape, vector.shape) assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) def test_ufunc(self): self._do_add_test(np.add) def test_operator(self): self._do_add_test(lambda a, b: a + b) def test_ctor(self): m = np.ma.array(np.ma.masked) # most importantly, we do not want to create a new MaskedConstant # instance assert_(not isinstance(m, np.ma.core.MaskedConstant)) assert_(m is not np.ma.masked) def test_repr(self): # copies should not exist, but if they do, it should be obvious that # something is wrong assert_equal(repr(np.ma.masked), 'masked') # create a new instance in a weird way masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) assert_not_equal(repr(masked2), 'masked') def test_pickle(self): from io import BytesIO import pickle with BytesIO() as f: pickle.dump(np.ma.masked, f) f.seek(0) res = pickle.load(f) assert_(res is np.ma.masked) def test_copy(self): # gh-9328 # copy is a no-op, like it is with np.True_ assert_equal( np.ma.masked.copy() is np.ma.masked, np.True_.copy() is np.True_) def test_immutable(self): orig = np.ma.masked assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) assert_raises(ValueError,operator.setitem, orig.data, (), 1) assert_raises(ValueError, operator.setitem, orig.mask, (), False) view = np.ma.masked.view(np.ma.MaskedArray) 
assert_raises(ValueError, operator.setitem, view, (), 1) assert_raises(ValueError, operator.setitem, view.data, (), 1) assert_raises(ValueError, operator.setitem, view.mask, (), False) def test_coercion_int(self): a_i = np.zeros((), int) assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) assert_raises(MaskError, int, np.ma.masked) @dec.skipif(sys.version_info.major == 3, "long doesn't exist in Python 3") def test_coercion_long(self): assert_raises(MaskError, long, np.ma.masked) def test_coercion_float(self): a_f = np.zeros((), float) assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @dec.knownfailureif(True, "See gh-9750") def test_coercion_unicode(self): a_u = np.zeros((), 'U10') a_u[()] = np.ma.masked assert_equal(a_u[()], u'--') @dec.knownfailureif(True, "See gh-9750") def test_coercion_bytes(self): a_b = np.zeros((), 'S10') a_b[()] = np.ma.masked assert_equal(a_b[()], b'--') def test_subclass(self): # https://github.com/astropy/astropy/issues/6645 class Sub(type(np.ma.masked)): pass a = Sub() assert_(a is Sub()) assert_(a is not np.ma.masked) assert_not_equal(repr(a), 'masked') class TestMaskedWhereAliases(object): # TODO: Test masked_object, masked_equal, ... def test_masked_values(self): res = masked_values(np.array([-32768.0]), np.int16(-32768)) assert_equal(res.mask, [True]) res = masked_values(np.inf, np.inf) assert_equal(res.mask, True) res = np.ma.masked_values(np.inf, -np.inf) assert_equal(res.mask, False) def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) def test_append_masked_array(): a = np.ma.masked_equal([1,2,3], value=2) b = np.ma.masked_equal([4,3,2], value=2) result = np.ma.append(a, b) expected_data = [1, 2, 3, 4, 3, 2] expected_mask = [False, True, False, False, False, True] assert_array_equal(result.data, expected_data) assert_array_equal(result.mask, expected_mask) a = np.ma.masked_all((2,2)) b = np.ma.ones((3,1)) result = np.ma.append(a, b) expected_data = [1] * 3 expected_mask = [True] * 4 + [False] * 3 assert_array_equal(result.data[-3], expected_data) assert_array_equal(result.mask, expected_mask) result = np.ma.append(a, b, axis=None) assert_array_equal(result.data[-3], expected_data) assert_array_equal(result.mask, expected_mask) def test_append_masked_array_along_axis(): a = np.ma.masked_equal([1,2,3], value=2) b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) # When `axis` is specified, `values` must have the correct shape. assert_raises(ValueError, np.ma.append, a, b, axis=0) result = np.ma.append(a[np.newaxis,:], b, axis=0) expected = np.ma.arange(1, 10) expected[[1, 6]] = np.ma.masked expected = expected.reshape((3,3)) assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. 
    x = array([1., 2., 3.], mask=[0, 0, 1])
    y = np.add(x, 1., out=x)
    assert_(y is x)


def test_ufunc_with_out_varied():
    """ Test that masked arrays are immune to gh-10459 """
    # the mask of the output should not affect the result, even though an
    # output array (with its own stale mask) is passed in
    a        = array([ 1,  2,  3], mask=[1, 0, 0])
    b        = array([10, 20, 30], mask=[1, 0, 0])
    out      = array([ 0,  0,  0], mask=[0, 0, 1])
    expected = array([11, 22, 33], mask=[1, 0, 0])

    out_pos = out.copy()
    res_pos = np.add(a, b, out_pos)

    out_kw = out.copy()
    res_kw = np.add(a, b, out=out_kw)

    out_tup = out.copy()
    res_tup = np.add(a, b, out=(out_tup,))

    assert_equal(res_kw.mask, expected.mask)
    assert_equal(res_kw.data, expected.data)
    assert_equal(res_tup.mask, expected.mask)
    assert_equal(res_tup.data, expected.data)
    assert_equal(res_pos.mask, expected.mask)
    assert_equal(res_pos.data, expected.data)


def test_astype():
    descr = [('v', int, 3), ('x', [('y', float)])]
    x = array(([1, 2, 3], (1.0,)), dtype=descr)
    assert_equal(x, x.astype(descr))


###############################################################################
if __name__ == "__main__":
    run_module_suite()
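# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original numpy
# test file): the two tests above pin down that a ufunc's `out=` argument is
# honoured for masked arrays and that a stale mask on `out` cannot leak into
# the result (gh-8416, gh-10459). A minimal standalone demonstration using
# only the public numpy.ma API; the helper name below is local to this sketch
# and can be called by hand to verify the behaviour.

def _sketch_ufunc_out_mask():
    import numpy as np

    a = np.ma.array([1, 2, 3], mask=[1, 0, 0])
    b = np.ma.array([10, 20, 30], mask=[1, 0, 0])
    # `out` starts with a mask that should be irrelevant to the result.
    out = np.ma.array([0, 0, 0], mask=[0, 0, 1])

    res = np.add(a, b, out=out)
    assert res is out                                 # `out` itself is returned
    assert res.mask.tolist() == [True, False, False]  # mask comes from a and b
    assert res.data.tolist()[1:] == [22, 33]          # unmasked sums are filled
# ---------------------------------------------------------------------------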
192,244
36.769155
86
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_regression.py
from __future__ import division, absolute_import, print_function

import warnings

import numpy as np
from numpy.testing import (
    assert_, assert_array_equal, assert_allclose, run_module_suite,
    suppress_warnings
    )


class TestRegression(object):
    def test_masked_array_create(self):
        # Ticket #17
        x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
                               mask=[0, 0, 0, 1, 1, 1, 0, 0])
        assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])

    def test_masked_array(self):
        # Ticket #61
        np.ma.array(1, mask=[1])

    def test_mem_masked_where(self):
        # Ticket #62
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c

    def test_masked_array_multiply(self):
        # Ticket #254
        a = np.ma.zeros((4, 1))
        a[2, 0] = np.ma.masked
        b = np.zeros((4, 2))
        a*b
        b*a

    def test_masked_array_repeat(self):
        # Ticket #271
        np.ma.array([1], mask=False).repeat(10)

    def test_masked_array_repr_unicode(self):
        # Ticket #1256
        repr(np.ma.array(u"Unicode"))

    def test_atleast_2d(self):
        # Ticket #1559
        a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
        b = np.atleast_2d(a)
        assert_(a.mask.ndim == 1)
        assert_(b.mask.ndim == 2)

    def test_set_fill_value_unicode_py3(self):
        # Ticket #2733
        a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
        a.fill_value = 'X'
        assert_(a.fill_value == 'X')

    def test_var_sets_maskedarray_scalar(self):
        # Issue gh-2757
        a = np.ma.array(np.arange(5), mask=True)
        mout = np.ma.array(-1, dtype=float)
        a.var(out=mout)
        assert_(mout._data == 0)

    def test_ddof_corrcoef(self):
        # See gh-3336
        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
        y = np.array([2, 2.5, 3.1, 3, 5])
        # this test can be removed after deprecation.
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "bias and ddof have no effect")
            r0 = np.ma.corrcoef(x, y, ddof=0)
            r1 = np.ma.corrcoef(x, y, ddof=1)
            # ddof should not have an effect (it gets cancelled out)
            assert_allclose(r0.data, r1.data)


if __name__ == "__main__":
    run_module_suite()
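# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original numpy
# test file): several regressions above revolve around constructing masked
# arrays (np.ma.masked_array, masked_where) without tripping on edge cases.
# A minimal usage example of masked_where, using only the public numpy.ma
# API; the helper name is local to this sketch.

def _sketch_masked_where():
    import numpy as np

    x = np.array([0.0, 1.5, -3.0, 2.0])
    # Mask entries where the condition holds (here: negative values).
    m = np.ma.masked_where(x < 0, x)

    assert m.mask.tolist() == [False, False, True, False]
    assert m.compressed().tolist() == [0.0, 1.5, 2.0]  # unmasked data only
# ---------------------------------------------------------------------------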
2,437
29.098765
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_deprecations.py
"""Test deprecation and future warnings. """ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import run_module_suite, assert_warns from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning class TestArgsort(object): """ gh-8701 """ def _test_base(self, argsort, cls): arr_0d = np.array(1).view(cls) argsort(arr_0d) arr_1d = np.array([1, 2, 3]).view(cls) argsort(arr_1d) # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) result = assert_warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) # should be no warnings for explicitly specifying it argsort(arr_2d, axis=None) argsort(arr_2d, axis=-1) def test_function_ndarray(self): return self._test_base(np.ma.argsort, np.ndarray) def test_function_maskedarray(self): return self._test_base(np.ma.argsort, np.ma.MaskedArray) def test_method(self): return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) class TestMinimumMaximum(object): def test_minimum(self): assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2])) def test_maximum(self): assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2])) def test_axis_default(self): # NumPy 1.13, 2017-05-06 data1d = np.ma.arange(6) data2d = data1d.reshape(2, 3) ma_min = np.ma.minimum.reduce ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent result = ma_min(data1d) assert_equal(result, ma_min(data1d, axis=None)) assert_equal(result, ma_min(data1d, axis=0)) result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) if __name__ == "__main__": run_module_suite()
2,410
31.146667
76
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_extras.py
# pylint: disable-msg=W0611, W0612, W0511 """Tests suite for MaskedArray. Adapted from the original test_ma by Pierre Gerard-Marchant :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu :version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ from __future__ import division, absolute_import, print_function import warnings import itertools import numpy as np from numpy.testing import ( run_module_suite, assert_warns, suppress_warnings, assert_raises, ) from numpy.ma.testutils import ( assert_, assert_array_equal, assert_equal, assert_almost_equal ) from numpy.ma.core import ( array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, nomask, ones, zeros, count ) from numpy.ma.extras import ( atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, diagflat ) import numpy.ma.extras as mae class TestGeneric(object): # def test_masked_all(self): # Tests masked_all # Standard dtype test = masked_all((2,), dtype=float) control = array([1, 1], mask=[1, 1], dtype=float) assert_equal(test, control) # Flexible dtype dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) test = masked_all((2,), dtype=dt) control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) assert_equal(test, control) test = masked_all((2, 2), dtype=dt) control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], dtype=dt) assert_equal(test, control) # Nested dtype dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) test = masked_all((2,), dtype=dt) control = array([(1, (1, 1)), (1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) assert_equal(test, control) test = masked_all((2,), dtype=dt) control = array([(1, (1, 1)), (1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) assert_equal(test, control) test = masked_all((1, 1), dtype=dt) control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) assert_equal(test, control) def test_masked_all_like(self): # Tests masked_all # Standard dtype base = array([1, 2], dtype=float) test = masked_all_like(base) control = array([1, 1], mask=[1, 1], dtype=float) assert_equal(test, control) # Flexible dtype dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) test = masked_all_like(base) control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) assert_equal(test, control) # Nested dtype dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) control = array([(1, (1, 1)), (1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) test = masked_all_like(control) assert_equal(test, control) def check_clump(self, f): for i in range(1, 7): for j in range(2**i): k = np.arange(i, dtype=int) ja = np.full(i, j, dtype=int) a = masked_array(2**k) a.mask = (ja & (2**k)) != 0 s = 0 for sl in f(a): s += a.data[sl].sum() if f == clump_unmasked: assert_equal(a.compressed().sum(), s) else: a.mask = ~a.mask assert_equal(a.compressed().sum(), s) def test_clump_masked(self): # Test clump_masked a = masked_array(np.arange(10)) a[[0, 1, 2, 6, 8, 9]] = masked # test = clump_masked(a) control = [slice(0, 3), slice(6, 7), slice(8, 10)] assert_equal(test, control) self.check_clump(clump_masked) def 
test_clump_unmasked(self): # Test clump_unmasked a = masked_array(np.arange(10)) a[[0, 1, 2, 6, 8, 9]] = masked test = clump_unmasked(a) control = [slice(3, 6), slice(7, 8), ] assert_equal(test, control) self.check_clump(clump_unmasked) def test_flatnotmasked_contiguous(self): # Test flatnotmasked_contiguous a = arange(10) # No mask test = flatnotmasked_contiguous(a) assert_equal(test, slice(0, a.size)) # Some mask a[(a < 3) | (a > 8) | (a == 5)] = masked test = flatnotmasked_contiguous(a) assert_equal(test, [slice(3, 5), slice(6, 9)]) # a[:] = masked test = flatnotmasked_contiguous(a) assert_equal(test, None) class TestAverage(object): # Several tests of average. Why so many ? Good point... def test_testAverage1(self): # Test of average. ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) assert_equal(2.0, average(ott, axis=0)) assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) assert_equal(2.0, result) assert_(wts == 4.0) ott[:] = masked assert_equal(average(ott, axis=0).mask, [True]) ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) ott = ott.reshape(2, 2) ott[:, 1] = masked assert_equal(average(ott, axis=0), [2.0, 0.0]) assert_equal(average(ott, axis=1).mask[0], [True]) assert_equal([2., 0.], average(ott, axis=0)) result, wts = average(ott, axis=0, returned=1) assert_equal(wts, [1., 0.]) def test_testAverage2(self): # More tests of average. w1 = [0, 1, 1, 1, 1, 0] w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = arange(6, dtype=np.float_) assert_equal(average(x, axis=0), 2.5) assert_equal(average(x, axis=0, weights=w1), 2.5) y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) assert_equal(average(y, axis=1), [average(x, axis=0), average(x, axis=0) * 2.0]) assert_equal(average(y, None, weights=w2), 20. / 6.) assert_equal(average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) assert_equal(average(y, axis=1), [average(x, axis=0), average(x, axis=0) * 2.0]) m1 = zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = ones(6) m5 = [0, 1, 1, 1, 1, 1] assert_equal(average(masked_array(x, m1), axis=0), 2.5) assert_equal(average(masked_array(x, m2), axis=0), 2.5) assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) assert_equal(average(masked_array(x, m5), axis=0), 0.0) assert_equal(count(average(masked_array(x, m4), axis=0)), 0) z = masked_array(y, m3) assert_equal(average(z, None), 20. / 6.) assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) assert_equal(average(z, axis=1), [2.5, 5.0]) assert_equal(average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) def test_testAverage3(self): # Yet more tests of average! 
a = arange(6) b = arange(6) * 3 r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) assert_equal(shape(r1), shape(w1)) assert_equal(r1.shape, w1.shape) r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), returned=1) assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) assert_equal(shape(w2), shape(r2)) a2d = array([[1, 2], [0, 4]], float) a2dm = masked_array(a2d, [[False, False], [True, False]]) a2da = average(a2d, axis=0) assert_equal(a2da, [0.5, 3.0]) a2dma = average(a2dm, axis=0) assert_equal(a2dma, [1.0, 3.0]) a2dma = average(a2dm, axis=None) assert_equal(a2dma, 7. / 3.) a2dma = average(a2dm, axis=1) assert_equal(a2dma, [1.5, 4.0]) def test_onintegers_with_mask(self): # Test average on integers with mask a = average(array([1, 2])) assert_equal(a, 1.5) a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) assert_equal(a, 1.5) def test_complex(self): # Test with complex data. # (Regression test for https://github.com/numpy/numpy/issues/2684) mask = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=bool) a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], [9j, 0+1j, 2+3j, 4+5j, 7+7j]], mask=mask) av = average(a) expected = np.average(a.compressed()) assert_almost_equal(av.real, expected.real) assert_almost_equal(av.imag, expected.imag) av0 = average(a, axis=0) expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j assert_almost_equal(av0.real, expected0.real) assert_almost_equal(av0.imag, expected0.imag) av1 = average(a, axis=1) expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j assert_almost_equal(av1.real, expected1.real) assert_almost_equal(av1.imag, expected1.imag) # Test with the 'weights' argument. wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], [1.0, 1.0, 1.0, 1.0, 1.0]]) wav = average(a, weights=wts) expected = np.average(a.compressed(), weights=wts[~mask]) assert_almost_equal(wav.real, expected.real) assert_almost_equal(wav.imag, expected.imag) wav0 = average(a, weights=wts, axis=0) expected0 = (average(a.real, weights=wts, axis=0) + average(a.imag, weights=wts, axis=0)*1j) assert_almost_equal(wav0.real, expected0.real) assert_almost_equal(wav0.imag, expected0.imag) wav1 = average(a, weights=wts, axis=1) expected1 = (average(a.real, weights=wts, axis=1) + average(a.imag, weights=wts, axis=1)*1j) assert_almost_equal(wav1.real, expected1.real) assert_almost_equal(wav1.imag, expected1.imag) class TestConcatenator(object): # Tests for mr_, the equivalent of r_ for masked arrays. def test_1d(self): # Tests mr_ on 1D arrays. assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) b = ones(5) m = [1, 0, 0, 0, 0] d = masked_array(b, mask=m) c = mr_[d, 0, 0, d] assert_(isinstance(c, MaskedArray)) assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) assert_array_equal(c.mask, mr_[m, 0, 0, m]) def test_2d(self): # Tests mr_ on 2D arrays. 
a_1 = np.random.rand(5, 5) a_2 = np.random.rand(5, 5) m_1 = np.round_(np.random.rand(5, 5), 0) m_2 = np.round_(np.random.rand(5, 5), 0) b_1 = masked_array(a_1, mask=m_1) b_2 = masked_array(a_2, mask=m_2) # append columns d = mr_['1', b_1, b_2] assert_(d.shape == (5, 10)) assert_array_equal(d[:, :5], b_1) assert_array_equal(d[:, 5:], b_2) assert_array_equal(d.mask, np.r_['1', m_1, m_2]) d = mr_[b_1, b_2] assert_(d.shape == (10, 5)) assert_array_equal(d[:5,:], b_1) assert_array_equal(d[5:,:], b_2) assert_array_equal(d.mask, np.r_[m_1, m_2]) def test_matrix_builder(self): assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) def test_matrix(self): actual = mr_['r', 1, 2, 3] expected = np.ma.array(np.r_['r', 1, 2, 3]) assert_array_equal(actual, expected) # outer type is masked array, inner type is matrix assert_equal(type(actual), type(expected)) assert_equal(type(actual.data), type(expected.data)) class TestNotMasked(object): # Tests notmasked_edges and notmasked_contiguous. def test_edges(self): # Tests unmasked_edges data = masked_array(np.arange(25).reshape(5, 5), mask=[[0, 0, 1, 0, 0], [0, 0, 0, 1, 1], [1, 1, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 0, 0]],) test = notmasked_edges(data, None) assert_equal(test, [0, 24]) test = notmasked_edges(data, 0) assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) test = notmasked_edges(data, 1) assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) # test = notmasked_edges(data.data, None) assert_equal(test, [0, 24]) test = notmasked_edges(data.data, 0) assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) test = notmasked_edges(data.data, -1) assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) # data[-2] = masked test = notmasked_edges(data, 0) assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) test = notmasked_edges(data, -1) assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) def test_contiguous(self): # Tests notmasked_contiguous a = masked_array(np.arange(24).reshape(3, 8), mask=[[0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0], ]) tmp = notmasked_contiguous(a, None) assert_equal(tmp[-1], slice(23, 24, None)) assert_equal(tmp[-2], slice(16, 22, None)) assert_equal(tmp[-3], slice(0, 4, None)) # tmp = notmasked_contiguous(a, 0) assert_(len(tmp[-1]) == 1) assert_(tmp[-2] is None) assert_equal(tmp[-3], tmp[-1]) assert_(len(tmp[0]) == 2) # tmp = notmasked_contiguous(a, 1) assert_equal(tmp[0][-1], slice(0, 4, None)) assert_(tmp[1] is None) assert_equal(tmp[2][-1], slice(7, 8, None)) assert_equal(tmp[2][-2], slice(0, 6, None)) class TestCompressFunctions(object): def test_compress_nd(self): # Tests compress_nd x = np.array(list(range(3*4*5))).reshape(3, 4, 5) m = np.zeros((3,4,5)).astype(bool) m[1,1,1] = True x = array(x, mask=m) # axis=None a = compress_nd(x) assert_equal(a, [[[ 0, 2, 3, 4], [10, 12, 13, 14], [15, 17, 18, 19]], [[40, 42, 43, 44], [50, 52, 53, 54], [55, 57, 58, 59]]]) # axis=0 a = compress_nd(x, 0) assert_equal(a, [[[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[40, 41, 42, 43, 44], [45, 46, 47, 48, 49], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) # axis=1 a = compress_nd(x, 1) assert_equal(a, [[[ 0, 1, 2, 3, 
4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[20, 21, 22, 23, 24], [30, 31, 32, 33, 34], [35, 36, 37, 38, 39]], [[40, 41, 42, 43, 44], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) a2 = compress_nd(x, (1,)) a3 = compress_nd(x, -2) a4 = compress_nd(x, (-2,)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=2 a = compress_nd(x, 2) assert_equal(a, [[[ 0, 2, 3, 4], [ 5, 7, 8, 9], [10, 12, 13, 14], [15, 17, 18, 19]], [[20, 22, 23, 24], [25, 27, 28, 29], [30, 32, 33, 34], [35, 37, 38, 39]], [[40, 42, 43, 44], [45, 47, 48, 49], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (2,)) a3 = compress_nd(x, -1) a4 = compress_nd(x, (-1,)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=(0, 1) a = compress_nd(x, (0, 1)) assert_equal(a, [[[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[40, 41, 42, 43, 44], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) a2 = compress_nd(x, (0, -2)) assert_equal(a, a2) # axis=(1, 2) a = compress_nd(x, (1, 2)) assert_equal(a, [[[ 0, 2, 3, 4], [10, 12, 13, 14], [15, 17, 18, 19]], [[20, 22, 23, 24], [30, 32, 33, 34], [35, 37, 38, 39]], [[40, 42, 43, 44], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (-2, 2)) a3 = compress_nd(x, (1, -1)) a4 = compress_nd(x, (-2, -1)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=(0, 2) a = compress_nd(x, (0, 2)) assert_equal(a, [[[ 0, 2, 3, 4], [ 5, 7, 8, 9], [10, 12, 13, 14], [15, 17, 18, 19]], [[40, 42, 43, 44], [45, 47, 48, 49], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (0, -1)) assert_equal(a, a2) def test_compress_rowcols(self): # Tests compress_rowcols x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[8]]) assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) assert_equal(compress_rowcols(x).size, 0) assert_equal(compress_rowcols(x, 0).size, 0) assert_equal(compress_rowcols(x, 1).size, 0) def test_mask_rowcols(self): # Tests mask_rowcols. 
x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1).mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1).mask, [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) assert_(mask_rowcols(x).all() is masked) assert_(mask_rowcols(x, 0).all() is masked) assert_(mask_rowcols(x, 1).all() is masked) assert_(mask_rowcols(x).mask.all()) assert_(mask_rowcols(x, 0).mask.all()) assert_(mask_rowcols(x, 1).mask.all()) def test_dot(self): # Tests dot product n = np.arange(1, 7) # m = [1, 0, 0, 0, 0, 0] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [1, 0]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 1] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 1], [1, 1]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) assert_equal(c, dot(a, b)) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 0] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b) assert_equal(c.mask, nomask) c = dot(b, a) assert_equal(c.mask, nomask) # a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [0, 0]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 0], [1, 1]]) c = dot(a, b) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 0], [1, 1]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) def 
test_dot_returns_maskedarray(self): # See gh-6611 a = np.eye(3) b = array(a) assert_(type(dot(a, a)) is MaskedArray) assert_(type(dot(a, b)) is MaskedArray) assert_(type(dot(b, a)) is MaskedArray) assert_(type(dot(b, b)) is MaskedArray) def test_dot_out(self): a = array(np.eye(3)) out = array(np.zeros((3, 3))) res = dot(a, a, out=out) assert_(res is out) assert_equal(a, res) class TestApplyAlongAxis(object): # Tests 2D functions def test_3d(self): a = arange(12.).reshape(2, 2, 3) def myfunc(b): return b[1] xa = apply_along_axis(myfunc, 2, a) assert_equal(xa, [[1, 4], [7, 10]]) # Tests kwargs functions def test_3d_kwargs(self): a = arange(12).reshape(2, 2, 3) def myfunc(b, offset=0): return b[1+offset] xa = apply_along_axis(myfunc, 2, a, offset=1) assert_equal(xa, [[2, 5], [8, 11]]) class TestApplyOverAxes(object): # Tests apply_over_axes def test_basic(self): a = arange(24).reshape(2, 3, 4) test = apply_over_axes(np.sum, a, [0, 2]) ctrl = np.array([[[60], [92], [124]]]) assert_equal(test, ctrl) a[(a % 2).astype(bool)] = masked test = apply_over_axes(np.sum, a, [0, 2]) ctrl = np.array([[[28], [44], [60]]]) assert_equal(test, ctrl) class TestMedian(object): def test_pytype(self): r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) assert_equal(r, np.inf) def test_inf(self): # test that even which computes handles inf / x = masked r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], [np.inf, np.inf]]), axis=-1) assert_equal(r, np.inf) r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], [np.inf, np.inf]]), axis=None) assert_equal(r, np.inf) # all masked r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], [np.inf, np.inf]], mask=True), axis=-1) assert_equal(r.mask, True) r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], [np.inf, np.inf]], mask=True), axis=None) assert_equal(r.mask, True) def test_non_masked(self): x = np.arange(9) assert_equal(np.ma.median(x), 4.) assert_(type(np.ma.median(x)) is not MaskedArray) x = range(8) assert_equal(np.ma.median(x), 3.5) assert_(type(np.ma.median(x)) is not MaskedArray) x = 5 assert_equal(np.ma.median(x), 5.) 
assert_(type(np.ma.median(x)) is not MaskedArray) # integer x = np.arange(9 * 8).reshape(9, 8) assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) assert_(np.ma.median(x, axis=1) is not MaskedArray) # float x = np.arange(9 * 8.).reshape(9, 8) assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) assert_(np.ma.median(x, axis=1) is not MaskedArray) def test_docstring_examples(self): "test the examples given in the docstring of ma.median" x = array(np.arange(8), mask=[0]*4 + [1]*4) assert_equal(np.ma.median(x), 1.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) ma_x = np.ma.median(x, axis=-1, overwrite_input=True) assert_equal(ma_x, [2., 5.]) assert_equal(ma_x.shape, (2,), "shape mismatch") assert_(type(ma_x) is MaskedArray) def test_axis_argument_errors(self): msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s" for ndmin in range(5): for mask in [False, True]: x = array(1, ndmin=ndmin, mask=mask) # Valid axis values should not raise exception args = itertools.product(range(-ndmin, ndmin), [False, True]) for axis, over in args: try: np.ma.median(x, axis=axis, overwrite_input=over) except Exception: raise AssertionError(msg % (mask, ndmin, axis, over)) # Invalid axis values should raise exception args = itertools.product([-(ndmin + 1), ndmin], [False, True]) for axis, over in args: try: np.ma.median(x, axis=axis, overwrite_input=over) except np.AxisError: pass else: raise AssertionError(msg % (mask, ndmin, axis, over)) def test_masked_0d(self): # Check values x = array(1, mask=False) assert_equal(np.ma.median(x), 1) x = array(1, mask=True) assert_equal(np.ma.median(x), np.ma.masked) def test_masked_1d(self): x = array(np.arange(5), mask=True) assert_equal(np.ma.median(x), np.ma.masked) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant) x = array(np.arange(5), mask=False) assert_equal(np.ma.median(x), 2.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) x = array(np.arange(5), mask=[0,1,0,0,0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) x = array(np.arange(5), mask=[0,1,1,1,1]) assert_equal(np.ma.median(x), 0.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer x = array(np.arange(5), mask=[0,1,1,0,0]) assert_equal(np.ma.median(x), 3.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float x = array(np.arange(5.), mask=[0,1,1,0,0]) assert_equal(np.ma.median(x), 3.) 
assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer x = array(np.arange(6), mask=[0,1,1,1,1,0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float x = array(np.arange(6.), mask=[0,1,1,1,1,0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) def test_1d_shape_consistency(self): assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) def test_2d(self): # Tests median w/ 2D (n, p) = (101, 30) x = masked_array(np.linspace(-1., 1., n),) x[:10] = x[-10:] = masked z = masked_array(np.empty((n, p), dtype=float)) z[:, 0] = x[:] idx = np.arange(len(x)) for i in range(1, p): np.random.shuffle(idx) z[:, i] = x[idx] assert_equal(median(z[:, 0]), 0) assert_equal(median(z), 0) assert_equal(median(z, axis=0), np.zeros(p)) assert_equal(median(z.T, axis=1), np.zeros(p)) def test_2d_waxis(self): # Tests median w/ 2D arrays and different axis. x = masked_array(np.arange(30).reshape(10, 3)) x[:3] = x[-3:] = masked assert_equal(median(x), 14.5) assert_(type(np.ma.median(x)) is not MaskedArray) assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) assert_(type(np.ma.median(x, axis=0)) is MaskedArray) assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) assert_(type(np.ma.median(x, axis=1)) is MaskedArray) assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) def test_3d(self): # Tests median w/ 3D x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) x.shape = (4, 3, 2) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) def test_neg_axis(self): x = masked_array(np.arange(30).reshape(10, 3)) x[:3] = x[-3:] = masked assert_equal(median(x, axis=-1), median(x, axis=1)) def test_out_1d(self): # integer float even odd for v in (30, 30., 31, 31.): x = masked_array(np.arange(v)) x[:3] = x[-3:] = masked out = masked_array(np.ones(())) r = median(x, out=out) if v == 30: assert_equal(out, 14.5) else: assert_equal(out, 15.) assert_(r is out) assert_(type(r) is MaskedArray) def test_out(self): # integer float even odd for v in (40, 40., 30, 30.): x = masked_array(np.arange(v).reshape(10, -1)) x[:3] = x[-3:] = masked out = masked_array(np.ones(10)) r = median(x, axis=1, out=out) if v == 30: e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, mask=[True] * 3 + [False] * 4 + [True] * 3) else: e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, mask=[True]*3 + [False]*4 + [True]*3) assert_equal(r, e) assert_(r is out) assert_(type(r) is MaskedArray) def test_single_non_masked_value_on_axis(self): data = [[1., 0.], [0., 3.], [0., 0.]] masked_arr = np.ma.masked_equal(data, 0) expected = [1., 3.] 
assert_array_equal(np.ma.median(masked_arr, axis=0), expected) def test_nan(self): with suppress_warnings() as w: w.record(RuntimeWarning) for mask in (False, np.zeros(6, dtype=bool)): dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) dm.mask = mask # scalar result r = np.ma.median(dm, axis=None) assert_(np.isscalar(r)) assert_array_equal(r, np.nan) r = np.ma.median(dm.ravel(), axis=0) assert_(np.isscalar(r)) assert_array_equal(r, np.nan) r = np.ma.median(dm, axis=0) assert_equal(type(r), MaskedArray) assert_array_equal(r, [1, np.nan, 3]) r = np.ma.median(dm, axis=1) assert_equal(type(r), MaskedArray) assert_array_equal(r, [np.nan, 2]) r = np.ma.median(dm, axis=-1) assert_equal(type(r), MaskedArray) assert_array_equal(r, [np.nan, 2]) dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) dm[:, 2] = np.ma.masked assert_array_equal(np.ma.median(dm, axis=None), np.nan) assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) assert_equal([x.category is RuntimeWarning for x in w.log], [True]*13) def test_out_nan(self): with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', RuntimeWarning) o = np.ma.masked_array(np.zeros((4,))) d = np.ma.masked_array(np.ones((3, 4))) d[2, 1] = np.nan d[2, 2] = np.ma.masked assert_equal(np.ma.median(d, 0, out=o), o) o = np.ma.masked_array(np.zeros((3,))) assert_equal(np.ma.median(d, 1, out=o), o) o = np.ma.masked_array(np.zeros(())) assert_equal(np.ma.median(d, out=o), o) def test_nan_behavior(self): a = np.ma.masked_array(np.arange(24, dtype=float)) a[::3] = np.ma.masked a[2] = np.nan with suppress_warnings() as w: w.record(RuntimeWarning) assert_array_equal(np.ma.median(a), np.nan) assert_array_equal(np.ma.median(a, axis=0), np.nan) assert_(w.log[0].category is RuntimeWarning) assert_(w.log[1].category is RuntimeWarning) a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) a.mask = np.arange(a.size) % 2 == 1 aorig = a.copy() a[1, 2, 3] = np.nan a[1, 1, 2] = np.nan # no axis with suppress_warnings() as w: w.record(RuntimeWarning) warnings.filterwarnings('always', '', RuntimeWarning) assert_array_equal(np.ma.median(a), np.nan) assert_(np.isscalar(np.ma.median(a))) assert_(w.log[0].category is RuntimeWarning) # axis0 b = np.ma.median(aorig, axis=0) b[2, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.ma.median(a, 0), b) assert_equal(len(w), 1) # axis1 b = np.ma.median(aorig, axis=1) b[1, 3] = np.nan b[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.ma.median(a, 1), b) assert_equal(len(w), 1) # axis02 b = np.ma.median(aorig, axis=(0, 2)) b[1] = np.nan b[2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.ma.median(a, (0, 2)), b) assert_equal(len(w), 1) def test_ambigous_fill(self): # 255 is max value, used as filler for sort a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) a = np.ma.masked_array(a, mask=a == 3) assert_array_equal(np.ma.median(a, axis=1), 255) assert_array_equal(np.ma.median(a, axis=1).mask, False) assert_array_equal(np.ma.median(a, axis=0), a[0]) assert_array_equal(np.ma.median(a), 255) def test_special(self): for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) a = np.ma.masked_array(a, mask=np.isnan(a)) assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) 
assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) assert_equal(np.ma.median(a), inf) a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) a = np.ma.masked_array(a, mask=np.isnan(a)) assert_array_equal(np.ma.median(a, axis=1), inf) assert_array_equal(np.ma.median(a, axis=1).mask, False) assert_array_equal(np.ma.median(a, axis=0), a[0]) assert_array_equal(np.ma.median(a), inf) # no mask a = np.array([[inf, inf], [inf, inf]]) assert_equal(np.ma.median(a), inf) assert_equal(np.ma.median(a, axis=0), inf) assert_equal(np.ma.median(a, axis=1), inf) a = np.array([[inf, 7, -inf, -9], [-10, np.nan, np.nan, 5], [4, np.nan, np.nan, inf]], dtype=np.float32) a = np.ma.masked_array(a, mask=np.isnan(a)) if inf > 0: assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) assert_equal(np.ma.median(a), 4.5) else: assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) assert_equal(np.ma.median(a), -2.5) assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) for i in range(0, 10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) a = np.ma.masked_array(a, mask=np.isnan(a)) assert_equal(np.ma.median(a), inf) assert_equal(np.ma.median(a, axis=1), inf) assert_equal(np.ma.median(a, axis=0), ([np.nan] * i) + [inf] * j) def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) with suppress_warnings() as w: w.record(RuntimeWarning) assert_array_equal(np.ma.median(a), np.nan) assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis with suppress_warnings() as w: w.record(RuntimeWarning) warnings.filterwarnings('always', '', RuntimeWarning) assert_array_equal(np.ma.median(a), np.nan) assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) assert_equal(np.ma.median(a, axis=0), b) assert_equal(np.ma.median(a, axis=1), b) # axis 2 b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.ma.median(a, axis=2), b) assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) assert_(type(np.ma.median(o.astype(object))), float) o[2] = np.nan assert_(type(np.ma.median(o.astype(object))), float) class TestCov(object): def setup(self): self.data = array(np.random.rand(12)) def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self.data assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test cov on 1 2D variable w/o missing values x = self.data.reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test cov 1 1D variable w/missing values x = self.data x[-1] = masked x -= x.mean() nx = x.compressed() assert_almost_equal(np.cov(nx), cov(x)) assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(nx, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) # try: cov(x, allow_masked=False) except ValueError: pass # # 2 1D variables w/ missing values nx = x[1:-1] assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) 
assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), cov(x, x[::-1], rowvar=False)) assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), cov(x, x[::-1], rowvar=False, bias=True)) def test_2d_with_missing(self): # Test cov on 2D variable w/ missing value x = self.data x[-1] = masked x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) frac = np.dot(valid, valid.T) xf = (x - x.mean(1)[:, None]).filled(0) assert_almost_equal(cov(x), np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) assert_almost_equal(cov(x, bias=True), np.cov(xf, bias=True) * x.shape[1] / frac) frac = np.dot(valid.T, valid) xf = (x - x.mean(0)).filled(0) assert_almost_equal(cov(x, rowvar=False), (np.cov(xf, rowvar=False) * (x.shape[0] - 1) / (frac - 1.))) assert_almost_equal(cov(x, rowvar=False, bias=True), (np.cov(xf, rowvar=False, bias=True) * x.shape[0] / frac)) class TestCorrcoef(object): def setup(self): self.data = array(np.random.rand(12)) self.data2 = array(np.random.rand(12)) def test_ddof(self): # ddof raises DeprecationWarning x, y = self.data, self.data2 expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) with suppress_warnings() as sup: warnings.simplefilter("always") assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) sup.filter(DeprecationWarning, "bias and ddof have no effect") # ddof has no or negligible effect on the function assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) assert_almost_equal(corrcoef(x, ddof=-1), expected) assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) assert_almost_equal(corrcoef(x, ddof=3), expected) assert_almost_equal(corrcoef(x, y, ddof=3), expected2) def test_bias(self): x, y = self.data, self.data2 expected = np.corrcoef(x) # bias raises DeprecationWarning with suppress_warnings() as sup: warnings.simplefilter("always") assert_warns(DeprecationWarning, corrcoef, x, y, True, False) assert_warns(DeprecationWarning, corrcoef, x, y, True, True) assert_warns(DeprecationWarning, corrcoef, x, bias=False) sup.filter(DeprecationWarning, "bias and ddof have no effect") # bias has no or negligible effect on the function assert_almost_equal(corrcoef(x, bias=1), expected) def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self.data assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "bias and ddof have no effect") assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test corrcoef on 1 2D variable w/o missing values x = self.data.reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "bias and ddof have no effect") assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values x = self.data x[-1] = masked x -= x.mean() nx = x.compressed() assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "bias and ddof have no effect") assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) try: corrcoef(x, allow_masked=False) except ValueError: pass # 2 1D 
variables w/ missing values nx = x[1:-1] assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "bias and ddof have no effect") # ddof and bias have no or negligible effect on the function assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1], bias=1)) assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1], ddof=2)) def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value x = self.data x[-1] = masked x = x.reshape(3, 4) test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "bias and ddof have no effect") # ddof and bias have no or negligible effect on the function assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], control[:-1, :-1]) assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], control[:-1, :-1]) assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], control[:-1, :-1]) class TestPolynomial(object): # def test_polyfit(self): # Tests polyfit # On ndarrays x = np.random.rand(10) y = np.random.rand(20).reshape(-1, 2) assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) # ON 1D maskedarrays x = x.view(MaskedArray) x[0] = masked y = y.view(MaskedArray) y[0, 0] = y[-1, -1] = masked # (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # (C, R, K, S, D) = polyfit(x, y, 3, full=True) (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # w = np.random.rand(10) + 1 wo = w.copy() xs = x[1:-1] ys = y[1:-1] ws = w[1:-1] (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) assert_equal(w, wo) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) def test_polyfit_with_masked_NaNs(self): x = np.random.rand(10) y = np.random.rand(20).reshape(-1, 2) x[0] = np.nan y[-1,-1] = np.nan x = x.view(MaskedArray) y = y.view(MaskedArray) x[0] = masked y[-1,-1] = masked (C, R, K, S, D) = polyfit(x, y, 3, full=True) (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) class TestArraySetOps(object): def test_unique_onlist(self): # Test unique on list data = [1, 1, 1, 2, 2, 3] test = unique(data, return_index=True, return_inverse=True) assert_(isinstance(test[0], MaskedArray)) assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) assert_equal(test[1], [0, 3, 5]) assert_equal(test[2], [0, 0, 0, 1, 1, 2]) def test_unique_onmaskedarray(self): # Test unique on masked data w/use_mask=True data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) test = unique(data, return_index=True, return_inverse=True) assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) assert_equal(test[1], [0, 3, 5, 2]) assert_equal(test[2], [0, 0, 3, 1, 3, 2]) # data.fill_value = 3 data = masked_array(data=[1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0], fill_value=3) test = 
unique(data, return_index=True, return_inverse=True) assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) assert_equal(test[1], [0, 3, 5, 2]) assert_equal(test[2], [0, 0, 3, 1, 3, 2]) def test_unique_allmasked(self): # Test all masked data = masked_array([1, 1, 1], mask=True) test = unique(data, return_index=True, return_inverse=True) assert_equal(test[0], masked_array([1, ], mask=[True])) assert_equal(test[1], [0]) assert_equal(test[2], [0, 0, 0]) # # Test masked data = masked test = unique(data, return_index=True, return_inverse=True) assert_equal(test[0], masked_array(masked)) assert_equal(test[1], [0]) assert_equal(test[2], [0]) def test_ediff1d(self): # Tests mediff1d x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) test = ediff1d(x) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) def test_ediff1d_tobegin(self): # Test ediff1d w/ to_begin x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_begin=masked) control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) # test = ediff1d(x, to_begin=[1, 2, 3]) control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) def test_ediff1d_toend(self): # Test ediff1d w/ to_end x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_end=masked) control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) # test = ediff1d(x, to_end=[1, 2, 3]) control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) def test_ediff1d_tobegin_toend(self): # Test ediff1d w/ to_begin and to_end x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_end=masked, to_begin=masked) control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) # test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0]) assert_equal(test, control) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) def test_ediff1d_ndarray(self): # Test ediff1d w/ a ndarray x = np.arange(5) test = ediff1d(x) control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) assert_equal(test, control) assert_(isinstance(test, MaskedArray)) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) # test = ediff1d(x, to_end=masked, to_begin=masked) control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) assert_(isinstance(test, MaskedArray)) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) def test_intersect1d(self): # Test intersect1d x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) test = intersect1d(x, y) control = array([1, 3, -1], mask=[0, 0, 1]) assert_equal(test, control) def test_setxor1d(self): # Test setxor1d a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) test = setxor1d(a, b) assert_equal(test, 
array([3, 4, 7])) # a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) b = [1, 2, 3, 4, 5] test = setxor1d(a, b) assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) # a = array([1, 2, 3]) b = array([6, 5, 4]) test = setxor1d(a, b) assert_(isinstance(test, MaskedArray)) assert_equal(test, [1, 2, 3, 4, 5, 6]) # a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) test = setxor1d(a, b) assert_(isinstance(test, MaskedArray)) assert_equal(test, [1, 2, 3, 4, 5, 6]) # assert_array_equal([], setxor1d([], [])) def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test # isin instead. a = np.arange(24).reshape([2, 3, 4]) mask = np.zeros([2, 3, 4]) mask[1, 2, 0] = 1 a = array(a, mask=mask) b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) ec = zeros((2, 3, 4), dtype=bool) ec[0, 0, 0] = True ec[0, 0, 1] = True ec[0, 2, 3] = True c = isin(a, b) assert_(isinstance(c, MaskedArray)) assert_array_equal(c, ec) #compare results of np.isin to ma.isin d = np.isin(a, b[~b.mask]) & ~a.mask assert_array_equal(c, d) def test_in1d(self): # Test in1d a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) test = in1d(a, b) assert_equal(test, [True, True, True, False, True]) # a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 5, -1], mask=[0, 0, 1]) test = in1d(a, b) assert_equal(test, [True, True, False, True, True]) # assert_array_equal([], in1d([], [])) def test_in1d_invert(self): # Test in1d's invert parameter a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 5, -1], mask=[0, 0, 1]) assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) assert_array_equal([], in1d([], [], invert=True)) def test_union1d(self): # Test union1d a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) test = union1d(a, b) control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) assert_equal(test, control) # Tests gh-10340, arguments to union1d should be # flattened if they are not already 1D x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) z = union1d(x, y) assert_equal(z, ez) # assert_array_equal([], union1d([], [])) def test_setdiff1d(self): # Test setdiff1d a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) b = array([2, 4, 3, 3, 2, 1, 5]) test = setdiff1d(a, b) assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) # a = arange(10) b = arange(8) assert_equal(setdiff1d(a, b), array([8, 9])) a = array([], np.uint32, mask=[]) assert_equal(setdiff1d(a, []).dtype, np.uint32) def test_setdiff1d_char_array(self): # Test setdiff1d_charray a = np.array(['a', 'b', 'c']) b = np.array(['a', 'b', 's']) assert_array_equal(setdiff1d(a, b), np.array(['c'])) class TestShapeBase(object): def test_atleast_2d(self): # Test atleast_2d a = masked_array([0, 1, 2], mask=[0, 1, 0]) b = atleast_2d(a) assert_equal(b.shape, (1, 3)) assert_equal(b.mask.shape, b.data.shape) assert_equal(a.shape, (3,)) assert_equal(a.mask.shape, a.data.shape) assert_equal(b.mask.shape, b.data.shape) def test_shape_scalar(self): # the atleast and diagflat function should work with scalars 
# GitHub issue #3367 # Additionally, the atleast functions should accept multiple scalars # correctly b = atleast_1d(1.0) assert_equal(b.shape, (1,)) assert_equal(b.mask.shape, b.shape) assert_equal(b.data.shape, b.shape) b = atleast_1d(1.0, 2.0) for a in b: assert_equal(a.shape, (1,)) assert_equal(a.mask.shape, a.shape) assert_equal(a.data.shape, a.shape) b = atleast_2d(1.0) assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.shape) assert_equal(b.data.shape, b.shape) b = atleast_2d(1.0, 2.0) for a in b: assert_equal(a.shape, (1, 1)) assert_equal(a.mask.shape, a.shape) assert_equal(a.data.shape, a.shape) b = atleast_3d(1.0) assert_equal(b.shape, (1, 1, 1)) assert_equal(b.mask.shape, b.shape) assert_equal(b.data.shape, b.shape) b = atleast_3d(1.0, 2.0) for a in b: assert_equal(a.shape, (1, 1, 1)) assert_equal(a.mask.shape, a.shape) assert_equal(a.data.shape, a.shape) b = diagflat(1.0) assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.data.shape) if __name__ == "__main__": run_module_suite()
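# A minimal standalone sketch (separate from the test suite above) of the
# behaviour the polyfit tests exercise: numpy.ma.polyfit drops any point whose
# x (or y) value is masked, so it should agree with numpy.polyfit run on the
# unmasked subset. The data values below are arbitrary illustrative numbers,
# not taken from the tests.
import numpy as np
import numpy.ma as ma

x = ma.array(np.arange(5, dtype=float), mask=[1, 0, 0, 0, 0])  # first point masked
y = np.array([99., 2., 5., 10., 17.])  # the 99. is ignored because x[0] is masked

coeffs_masked = ma.polyfit(x, y, 2)
coeffs_plain = np.polyfit(np.arange(1., 5.), y[1:], 2)
print(np.allclose(coeffs_masked, coeffs_plain))  # expected: True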
64,108
39.244193
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_mrecords.py
# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for mrecords. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu """ from __future__ import division, absolute_import, print_function import warnings import pickle import numpy as np import numpy.ma as ma from numpy import recarray from numpy.ma import masked, nomask from numpy.testing import run_module_suite, temppath from numpy.core.records import ( fromrecords as recfromrecords, fromarrays as recfromarrays ) from numpy.ma.mrecords import ( MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, addfield ) from numpy.ma.testutils import ( assert_, assert_equal, assert_equal_records, ) class TestMRecords(object): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = [b'one', b'two', b'three', b'four', b'five'] ddtype = [('a', int), ('b', float), ('c', '|S8')] mask = [0, 1, 0, 0, 1] base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) def test_byview(self): # Test creation by view base = self.base mbase = base.view(mrecarray) assert_equal(mbase.recordmask, base.recordmask) assert_equal_records(mbase._mask, base._mask) assert_(isinstance(mbase._data, recarray)) assert_equal_records(mbase._data, base._data.view(recarray)) for field in ('a', 'b', 'c'): assert_equal(base[field], mbase[field]) assert_equal_records(mbase.view(mrecarray), mbase) def test_get(self): # Tests fields retrieval base = self.base.copy() mbase = base.view(mrecarray) # As fields.......... for field in ('a', 'b', 'c'): assert_equal(getattr(mbase, field), mbase[field]) assert_equal(base[field], mbase[field]) # as elements ....... mbase_first = mbase[0] assert_(isinstance(mbase_first, mrecarray)) assert_equal(mbase_first.dtype, mbase.dtype) assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) # Used to be mask, now it's recordmask assert_equal(mbase_first.recordmask, nomask) assert_equal(mbase_first._mask.item(), (False, False, False)) assert_equal(mbase_first['a'], mbase['a'][0]) mbase_last = mbase[-1] assert_(isinstance(mbase_last, mrecarray)) assert_equal(mbase_last.dtype, mbase.dtype) assert_equal(mbase_last.tolist(), (None, None, None)) # Used to be mask, now it's recordmask assert_equal(mbase_last.recordmask, True) assert_equal(mbase_last._mask.item(), (True, True, True)) assert_equal(mbase_last['a'], mbase['a'][-1]) assert_((mbase_last['a'] is masked)) # as slice .......... mbase_sl = mbase[:2] assert_(isinstance(mbase_sl, mrecarray)) assert_equal(mbase_sl.dtype, mbase.dtype) # Used to be mask, now it's recordmask assert_equal(mbase_sl.recordmask, [0, 1]) assert_equal_records(mbase_sl.mask, np.array([(False, False, False), (True, True, True)], dtype=mbase._mask.dtype)) assert_equal_records(mbase_sl, base[:2].view(mrecarray)) for field in ('a', 'b', 'c'): assert_equal(getattr(mbase_sl, field), base[:2][field]) def test_set_fields(self): # Tests setting fields. 
base = self.base.copy() mbase = base.view(mrecarray) mbase = mbase.copy() mbase.fill_value = (999999, 1e20, 'N/A') # Change the data, the mask should be conserved mbase.a._data[:] = 5 assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) # Change the elements, and the mask will follow mbase.a = 1 assert_equal(mbase['a']._data, [1]*5) assert_equal(ma.getmaskarray(mbase['a']), [0]*5) # Use to be _mask, now it's recordmask assert_equal(mbase.recordmask, [False]*5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 0), (0, 1, 1), (0, 0, 0), (0, 0, 0), (0, 1, 1)], dtype=bool)) # Set a field to mask ........................ mbase.c = masked # Use to be mask, and now it's still mask ! assert_equal(mbase.c.mask, [1]*5) assert_equal(mbase.c.recordmask, [1]*5) assert_equal(ma.getmaskarray(mbase['c']), [1]*5) assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 1), (0, 1, 1), (0, 0, 1), (0, 0, 1), (0, 1, 1)], dtype=bool)) # Set fields by slices ....................... mbase = base.view(mrecarray).copy() mbase.a[3:] = 5 assert_equal(mbase.a, [1, 2, 3, 5, 5]) assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) mbase.b[3:] = masked assert_equal(mbase.b, base['b']) assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) # Set fields globally.......................... ndtype = [('alpha', '|S1'), ('num', int)] data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) rdata = data.view(MaskedRecords) val = ma.array([10, 20, 30], mask=[1, 0, 0]) rdata['num'] = val assert_equal(rdata.num, val) assert_equal(rdata.num.mask, [1, 0, 0]) def test_set_fields_mask(self): # Tests setting the mask of a field. base = self.base.copy() # This one has already a mask.... mbase = base.view(mrecarray) mbase['a'][-2] = masked assert_equal(mbase.a, [1, 2, 3, 4, 5]) assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) # This one has not yet mbase = fromarrays([np.arange(5), np.random.rand(5)], dtype=[('a', int), ('b', float)]) mbase['a'][-2] = masked assert_equal(mbase.a, [0, 1, 2, 3, 4]) assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) def test_set_mask(self): base = self.base.copy() mbase = base.view(mrecarray) # Set the mask to True ....................... mbase.mask = masked assert_equal(ma.getmaskarray(mbase['b']), [1]*5) assert_equal(mbase['a']._mask, mbase['b']._mask) assert_equal(mbase['a']._mask, mbase['c']._mask) assert_equal(mbase._mask.tolist(), np.array([(1, 1, 1)]*5, dtype=bool)) # Delete the mask ............................ mbase.mask = nomask assert_equal(ma.getmaskarray(mbase['c']), [0]*5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 0)]*5, dtype=bool)) def test_set_mask_fromarray(self): base = self.base.copy() mbase = base.view(mrecarray) # Sets the mask w/ an array mbase.mask = [1, 0, 0, 0, 1] assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) # Yay, once more ! 
mbase.mask = [0, 0, 0, 0, 1] assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) def test_set_mask_fromfields(self): mbase = self.base.copy().view(mrecarray) nmask = np.array( [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], dtype=[('a', bool), ('b', bool), ('c', bool)]) mbase.mask = nmask assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) # Reinitialize and redo mbase.mask = False mbase.fieldmask = nmask assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) def test_set_elements(self): base = self.base.copy() # Set an element to mask ..................... mbase = base.view(mrecarray).copy() mbase[-2] = masked assert_equal( mbase._mask.tolist(), np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], dtype=bool)) # Used to be mask, now it's recordmask! assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) # Set slices ................................. mbase = base.view(mrecarray).copy() mbase[:2] = (5, 5, 5) assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) assert_equal(mbase.c._data, [b'5', b'5', b'three', b'four', b'five']) assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) mbase = base.view(mrecarray).copy() mbase[:2] = masked assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) assert_equal(mbase.c._data, [b'one', b'two', b'three', b'four', b'five']) assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) def test_setslices_hardmask(self): # Tests setting slices w/ hardmask. base = self.base.copy() mbase = base.view(mrecarray) mbase.harden_mask() try: mbase[-2:] = (5, 5, 5) assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) assert_equal(mbase.c._data, [b'one', b'two', b'three', b'5', b'five']) assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) assert_equal(mbase.b._mask, mbase.a._mask) assert_equal(mbase.b._mask, mbase.c._mask) except NotImplementedError: # OK, not implemented yet... pass except AssertionError: raise else: raise Exception("Flexible hard masks should be supported !") # Not using a tuple should crash try: mbase[-2:] = 3 except (NotImplementedError, TypeError): pass else: raise TypeError("Should have expected a readable buffer object!") def test_hardmask(self): # Test hardmask base = self.base.copy() mbase = base.view(mrecarray) mbase.harden_mask() assert_(mbase._hardmask) mbase.mask = nomask assert_equal_records(mbase._mask, base._mask) mbase.soften_mask() assert_(not mbase._hardmask) mbase.mask = nomask # So, the mask of a field is no longer set to nomask... 
assert_equal_records(mbase._mask, ma.make_mask_none(base.shape, base.dtype)) assert_(ma.make_mask(mbase['b']._mask) is nomask) assert_equal(mbase['a']._mask, mbase['b']._mask) def test_pickling(self): # Test pickling base = self.base.copy() mrec = base.view(mrecarray) _ = pickle.dumps(mrec) mrec_ = pickle.loads(_) assert_equal(mrec_.dtype, mrec.dtype) assert_equal_records(mrec_._data, mrec._data) assert_equal(mrec_._mask, mrec._mask) assert_equal_records(mrec_._mask, mrec._mask) def test_filled(self): # Test filling the array _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') ddtype = [('a', int), ('b', float), ('c', '|S8')] mrec = fromarrays([_a, _b, _c], dtype=ddtype, fill_value=(99999, 99999., 'N/A')) mrecfilled = mrec.filled() assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), dtype=float)) assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), dtype='|S8')) def test_tolist(self): # Test tolist. _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') ddtype = [('a', int), ('b', float), ('c', '|S8')] mrec = fromarrays([_a, _b, _c], dtype=ddtype, fill_value=(99999, 99999., 'N/A')) assert_equal(mrec.tolist(), [(1, 1.1, None), (2, 2.2, b'two'), (None, None, b'three')]) def test_withnames(self): # Test the creation w/ format and names x = mrecarray(1, formats=float, names='base') x[0]['base'] = 10 assert_equal(x['base'][0], 10) def test_exotic_formats(self): # Test that 'exotic' formats are processed properly easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) easy[0] = masked assert_equal(easy.filled(1).item(), (1, b'1', 1.)) solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))]) solo[0] = masked assert_equal(solo.filled(1).item(), np.array((1,), dtype=solo.dtype).item()) mult = mrecarray(2, dtype="i4, (2,3)float, float") mult[0] = masked mult[1] = (1, 1, 1) mult.filled(0) assert_equal_records(mult.filled(0), np.array([(0, 0, 0), (1, 1, 1)], dtype=mult.dtype)) class TestView(object): def setup(self): (a, b) = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) self.data = (mrec, a, b, arr) def test_view_by_itself(self): (mrec, a, b, arr) = self.data test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): (mrec, a, b, arr) = self.data ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) assert_equal(test, np.array(list(zip(a, b)), dtype=float)) assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): (mrec, a, b, arr) = self.data alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, arr.view(alttype)) assert_(test['B'][3] is masked) assert_equal(test.dtype, np.dtype(alttype)) assert_(test._fill_value is None) ############################################################################## class TestMRecordsImport(object): _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) _c = ma.array([b'one', b'two', 
b'three'], mask=[0, 0, 1], dtype='|S8') ddtype = [('a', int), ('b', float), ('c', '|S8')] mrec = fromarrays([_a, _b, _c], dtype=ddtype, fill_value=(b'99999', b'99999.', b'N/A')) nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype) data = (mrec, nrec, ddtype) def test_fromarrays(self): _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') (mrec, nrec, _) = self.data for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)): assert_equal(getattr(mrec, f)._mask, l._mask) # One record only _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],) assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0]) def test_fromrecords(self): # Test construction from records. (mrec, nrec, ddtype) = self.data #...... palist = [(1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)] pa = recfromrecords(palist, names='c1, c2, c3, c4') mpa = fromrecords(palist, names='c1, c2, c3, c4') assert_equal_records(pa, mpa) #..... _mrec = fromrecords(nrec) assert_equal(_mrec.dtype, mrec.dtype) for field in _mrec.dtype.names: assert_equal(getattr(_mrec, field), getattr(mrec._data, field)) _mrec = fromrecords(nrec.tolist(), names='c1,c2,c3') assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')]) for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')): assert_equal(getattr(_mrec, f), getattr(mrec._data, n)) _mrec = fromrecords(mrec) assert_equal(_mrec.dtype, mrec.dtype) assert_equal_records(_mrec._data, mrec.filled()) assert_equal_records(_mrec._mask, mrec._mask) def test_fromrecords_wmask(self): # Tests construction from records w/ mask. (mrec, nrec, ddtype) = self.data _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,]) assert_equal_records(_mrec._data, mrec._data) assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)]) _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True) assert_equal_records(_mrec._data, mrec._data) assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)]) _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask) assert_equal_records(_mrec._data, mrec._data) assert_equal(_mrec._mask.tolist(), mrec._mask.tolist()) _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask.tolist()) assert_equal_records(_mrec._data, mrec._data) assert_equal(_mrec._mask.tolist(), mrec._mask.tolist()) def test_fromtextfile(self): # Tests reading from a text file. fcontent = ( """# 'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)' 'strings',1,1.0,'mixed column',,1 'with embedded "double quotes"',2,2.0,1.0,,1 'strings',3,3.0E5,3,,1 'strings',4,-1e-10,,,1 """) with temppath() as path: with open(path, 'w') as f: f.write(fcontent) mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG') assert_(isinstance(mrectxt, MaskedRecords)) assert_equal(mrectxt.F, [1, 1, 1, 1]) assert_equal(mrectxt.E._mask, [1, 1, 1, 1]) assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10]) def test_addfield(self): # Tests addfield (mrec, nrec, ddtype) = self.data (d, m) = ([100, 200, 300], [1, 0, 0]) mrec = addfield(mrec, ma.array(d, mask=m)) assert_equal(mrec.f3, d) assert_equal(mrec.f3._mask, m) def test_record_array_with_object_field(): # Trac #1839 y = ma.masked_array( [(1, '2'), (3, '4')], mask=[(0, 0), (0, 1)], dtype=[('a', int), ('b', object)]) # getting an item used to fail y[1] if __name__ == "__main__": run_module_suite()
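# A minimal standalone sketch (separate from the tests above) of the mrecords
# behaviour they exercise: fromarrays builds a MaskedRecords array whose
# per-field masks come from the input masked arrays, fields are reachable as
# attributes, and masking a whole record masks every one of its fields. The
# field names and data are arbitrary illustrative choices.
import numpy.ma as ma
from numpy.ma.mrecords import fromarrays

age = ma.array([25, 31, 40], mask=[0, 1, 0])
height = ma.array([1.7, 1.8, 1.6], mask=[0, 0, 1])
people = fromarrays([age, height], dtype=[('age', int), ('height', float)])

print(people.age)             # field access by attribute; second entry is masked
people[0] = ma.masked         # masks both fields of the first record
print(people._mask.tolist())  # [(True, True), (True, False), (False, True)]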
19,929
38.780439
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/test_subclassing.py
# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu :version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import run_module_suite, assert_, assert_raises, dec from numpy.ma.testutils import assert_equal from numpy.ma.core import ( array, arange, masked, MaskedArray, masked_array, log, add, hypot, divide, asarray, asanyarray, nomask ) # from numpy.ma.core import ( def assert_startswith(a, b): # produces a better error message than assert_(a.startswith(b)) assert_equal(a[:len(b)], b) class SubArray(np.ndarray): # Defines a generic np.ndarray subclass, that stores some metadata # in the dictionary `info`. def __new__(cls,arr,info={}): x = np.asanyarray(arr).view(cls) x.info = info.copy() return x def __array_finalize__(self, obj): if callable(getattr(super(SubArray, self), '__array_finalize__', None)): super(SubArray, self).__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() return def __add__(self, other): result = super(SubArray, self).__add__(other) result.info['added'] = result.info.get('added', 0) + 1 return result def __iadd__(self, other): result = super(SubArray, self).__iadd__(other) result.info['iadded'] = result.info.get('iadded', 0) + 1 return result subarray = SubArray class SubMaskedArray(MaskedArray): """Pure subclass of MaskedArray, keeping some info on subclass.""" def __new__(cls, info=None, **kwargs): obj = super(SubMaskedArray, cls).__new__(cls, **kwargs) obj._optinfo['info'] = info return obj class MSubArray(SubArray, MaskedArray): def __new__(cls, data, info={}, mask=nomask): subarr = SubArray(data, info) _data = MaskedArray.__new__(cls, data=subarr, mask=mask) _data.info = subarr.info return _data def _get_series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view _series = property(fget=_get_series) msubarray = MSubArray class MMatrix(MaskedArray, np.matrix,): def __new__(cls, data, mask=nomask): mat = np.matrix(data) _data = MaskedArray.__new__(cls, data=mat, mask=mask) return _data def __array_finalize__(self, obj): np.matrix.__array_finalize__(self, obj) MaskedArray.__array_finalize__(self, obj) return def _get_series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view _series = property(fget=_get_series) mmatrix = MMatrix # Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing # setting to non-class values (and thus np.ma.core.masked_print_option) # and overrides __array_wrap__, updating the info dict, to check that this # doesn't get destroyed by MaskedArray._update_from. But this one also needs # its own iterator... 
class CSAIterator(object): """ Flat iterator object that uses its own setter/getter (works around ndarray.flat not propagating subclass setters/getters see https://github.com/numpy/numpy/issues/4564) roughly following MaskedIterator """ def __init__(self, a): self._original = a self._dataiter = a.view(np.ndarray).flat def __iter__(self): return self def __getitem__(self, indx): out = self._dataiter.__getitem__(indx) if not isinstance(out, np.ndarray): out = out.__array__() out = out.view(type(self._original)) return out def __setitem__(self, index, value): self._dataiter[index] = self._original._validate_input(value) def __next__(self): return next(self._dataiter).__array__().view(type(self._original)) next = __next__ class ComplicatedSubArray(SubArray): def __str__(self): return 'myprefix {0} mypostfix'.format(self.view(SubArray)) def __repr__(self): # Return a repr that does not start with 'name(' return '<{0} {1}>'.format(self.__class__.__name__, self) def _validate_input(self, value): if not isinstance(value, ComplicatedSubArray): raise ValueError("Can only set to MySubArray values") return value def __setitem__(self, item, value): # validation ensures direct assignment with ndarray or # masked_print_option will fail super(ComplicatedSubArray, self).__setitem__( item, self._validate_input(value)) def __getitem__(self, item): # ensure getter returns our own class also for scalars value = super(ComplicatedSubArray, self).__getitem__(item) if not isinstance(value, np.ndarray): # scalar value = value.__array__().view(ComplicatedSubArray) return value @property def flat(self): return CSAIterator(self) @flat.setter def flat(self, value): y = self.ravel() y[:] = value def __array_wrap__(self, obj, context=None): obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context) if context is not None and context[0] is np.multiply: obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 return obj class TestSubclassing(object): # Test suite for masked subclasses of ndarray. def setup(self): x = np.arange(5, dtype='float') mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) self.data = (x, mx) def test_data_subclassing(self): # Tests whether the subclass is kept. 
x = np.arange(5) m = [0, 0, 1, 0, 0] xsub = SubArray(x) xmsub = masked_array(xsub, mask=m) assert_(isinstance(xmsub, MaskedArray)) assert_equal(xmsub._data, xsub) assert_(isinstance(xmsub._data, SubArray)) def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray (x, mx) = self.data assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation (x, mx) = self.data with np.errstate(divide='ignore'): assert_(isinstance(log(mx), mmatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation (x, mx) = self.data # Result should be a mmatrix assert_(isinstance(add(mx, mx), mmatrix)) assert_(isinstance(add(mx, x), mmatrix)) # Result should work assert_equal(add(mx, x), mx+x) assert_(isinstance(add(mx, mx)._data, np.matrix)) assert_(isinstance(add.outer(mx, mx), mmatrix)) assert_(isinstance(hypot(mx, mx), mmatrix)) assert_(isinstance(hypot(mx, x), mmatrix)) def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation (x, mx) = self.data xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), mmatrix)) assert_(isinstance(divide(mx, x), mmatrix)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): x = array(arange(5), mask=[0]+[1]*4) my = masked_array(subarray(x)) ym = msubarray(x) # z = (my+1) assert_(isinstance(z, MaskedArray)) assert_(not isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) # z = (ym+1) assert_(isinstance(z, MaskedArray)) assert_(isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) assert_(z._data.info['added'] > 0) # Test that inplace methods from data get used (gh-4617) ym += 1 assert_(isinstance(ym, MaskedArray)) assert_(isinstance(ym, MSubArray)) assert_(isinstance(ym._data, SubArray)) assert_(ym._data.info['iadded'] > 0) # ym._set_mask([1, 0, 0, 0, 1]) assert_equal(ym._mask, [1, 0, 0, 0, 1]) ym._series._set_mask([0, 0, 0, 0, 1]) assert_equal(ym._mask, [0, 0, 0, 0, 1]) # xsub = subarray(x, info={'name':'x'}) mxsub = masked_array(xsub) assert_(hasattr(mxsub, 'info')) assert_equal(mxsub.info, xsub.info) def test_subclasspreservation(self): # Checks that masked_array(...,subok=True) preserves the class. 
x = np.arange(5) m = [0, 0, 1, 0, 0] xinfo = [(i, j) for (i, j) in zip(x, m)] xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) # mxsub = masked_array(xsub, subok=False) assert_(not isinstance(mxsub, MSubArray)) assert_(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = asarray(xsub) assert_(not isinstance(mxsub, MSubArray)) assert_(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = masked_array(xsub, subok=True) assert_(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, xsub._mask) # mxsub = asanyarray(xsub) assert_(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, m) def test_subclass_items(self): """test that getter and setter go via baseclass""" x = np.arange(5) xcsub = ComplicatedSubArray(x) mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) # getter should return a ComplicatedSubArray, even for single item # first check we wrote ComplicatedSubArray correctly assert_(isinstance(xcsub[1], ComplicatedSubArray)) assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) # now that it propagates inside the MaskedArray assert_(isinstance(mxcsub[1], ComplicatedSubArray)) assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) assert_(mxcsub[0] is masked) assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) # also for flattened version (which goes via MaskedIterator) assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) assert_(mxcsub.flat[0] is masked) assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) # setter should only work with ComplicatedSubArray input # first check we wrote ComplicatedSubArray correctly assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) # now that it propagates inside the MaskedArray assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) mxcsub[1] = xcsub[4] mxcsub[1:4] = xcsub[1:4] # also for flattened version (which goes via MaskedIterator) assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) mxcsub.flat[1] = xcsub[4] mxcsub.flat[1:4] = xcsub[1:4] def test_subclass_nomask_items(self): x = np.arange(5) xcsub = ComplicatedSubArray(x) mxcsub_nomask = masked_array(xcsub) assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) def test_subclass_repr(self): """test that repr uses the name of the subclass and 'array' for np.ndarray""" x = np.arange(5) mx = masked_array(x, mask=[True, False, True, False, False]) assert_startswith(repr(mx), 'masked_array') xsub = SubArray(x) mxsub = masked_array(xsub, mask=[True, False, True, False, False]) assert_startswith(repr(mxsub), 'masked_{0}(data=[--, 1, --, 3, 4]'.format(SubArray.__name__)) def test_subclass_str(self): """test str with subclass that has overridden str, setitem""" # first without override x = np.arange(5) xsub = SubArray(x) mxsub = masked_array(xsub, mask=[True, False, True, False, False]) assert_equal(str(mxsub), '[-- 1 -- 3 4]') xcsub = ComplicatedSubArray(x) assert_raises(ValueError, xcsub.__setitem__, 0, np.ma.core.masked_print_option) mxcsub = masked_array(xcsub, mask=[True, False, True, False, 
False]) assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') def test_pure_subclass_info_preservation(self): # Test that ufuncs and methods conserve extra information consistently; # see gh-7122. arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) diff1 = np.subtract(arr1, arr2) assert_('info' in diff1._optinfo) assert_(diff1._optinfo['info'] == 'test') diff2 = arr1 - arr2 assert_('info' in diff2._optinfo) assert_(diff2._optinfo['info'] == 'test') ############################################################################### if __name__ == '__main__': run_module_suite()
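# A minimal standalone sketch (separate from the tests above) of the
# subclass-preservation behaviour they check: wrapping a plain ndarray
# subclass in a masked array yields a MaskedArray whose ._data view keeps the
# subclass, and whose extra instance attributes propagate. `Tagged` and its
# `tag` attribute are hypothetical stand-ins for the SubArray/info pattern.
import numpy as np
import numpy.ma as ma

class Tagged(np.ndarray):
    def __new__(cls, data, tag=''):
        obj = np.asanyarray(data).view(cls)
        obj.tag = tag
        return obj
    def __array_finalize__(self, obj):
        self.tag = getattr(obj, 'tag', '')

t = Tagged([1, 2, 3], tag='demo')
mt = ma.masked_array(t, mask=[0, 1, 0])
print(type(mt).__name__)         # 'MaskedArray' -- Tagged is not a MaskedArray subclass
print(type(mt._data).__name__)   # 'Tagged' -- the data view keeps the subclass
print(getattr(mt, 'tag', None))  # 'demo' -- attribute propagated, as the info tests assert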
13,666
35.156085
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/ma/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/internals.py
""" =============== Array Internals =============== Internal organization of numpy arrays ===================================== It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy". NumPy arrays consist of two major components, the raw array data (from now on, referred to as the data buffer), and the information about the raw array data. The data buffer is typically what people think of as arrays in C or Fortran, a contiguous (and fixed) block of memory containing fixed sized data items. NumPy also contains a significant set of data that describes how to interpret the data in the data buffer. This extra information contains (among other things): 1) The basic data element's size in bytes 2) The start of the data within the data buffer (an offset relative to the beginning of the data buffer). 3) The number of dimensions and the size of each dimension 4) The separation between elements for each dimension (the 'stride'). This does not have to be a multiple of the element size 5) The byte order of the data (which may not be the native byte order) 6) Whether the buffer is read-only 7) Information (via the dtype object) about the interpretation of the basic data element. The basic data element may be as simple as a int or a float, or it may be a compound object (e.g., struct-like), a fixed character field, or Python object pointers. 8) Whether the array is to interpreted as C-order or Fortran-order. This arrangement allow for very flexible use of arrays. One thing that it allows is simple changes of the metadata to change the interpretation of the array buffer. Changing the byteorder of the array is a simple change involving no rearrangement of the data. The shape of the array can be changed very easily without changing anything in the data buffer or any data copying at all Among other things that are made possible is one can create a new array metadata object that uses the same data buffer to create a new view of that data buffer that has a different interpretation of the buffer (e.g., different shape, offset, byte order, strides, etc) but shares the same data bytes. Many operations in numpy do just this such as slices. Other operations, such as transpose, don't move data elements around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the doesn't move. Typically these new versions of the array metadata but the same data buffer are new 'views' into the data buffer. There is a different ndarray object, but it uses the same data buffer. This is why it is necessary to force copies through use of the .copy() method if one really wants to make a new and independent copy of the data buffer. New views into arrays mean the object reference counts for the data buffer increase. Simply doing away with the original array object will not remove the data buffer if other views of it still exist. Multidimensional Array Indexing Order Issues ============================================ What is the right way to index multi-dimensional arrays? Before you jump to conclusions about the one and true way to index multi-dimensional arrays, it pays to understand why this is a confusing issue. 
This section will try to explain in detail how numpy indexing works and why we adopt the convention we do for images, and when it may be appropriate to adopt other conventions. The first thing to understand is that there are two conflicting conventions for indexing 2-dimensional arrays. Matrix notation uses the first index to indicate which row is being selected and the second index to indicate which column is selected. This is opposite the geometrically oriented-convention for images where people generally think the first index represents x position (i.e., column) and the second represents y position (i.e., row). This alone is the source of much confusion; matrix-oriented users and image-oriented users expect two different things with regard to indexing. The second issue to understand is how indices correspond to the order the array is stored in memory. In Fortran the first index is the most rapidly varying index when moving through the elements of a two dimensional array as it is stored in memory. If you adopt the matrix convention for indexing, then this means the matrix is stored one column at a time (since the first index moves to the next row as it changes). Thus Fortran is considered a Column-major language. C has just the opposite convention. In C, the last index changes most rapidly as one moves through the array as stored in memory. Thus C is a Row-major language. The matrix is stored by rows. Note that in both cases it presumes that the matrix convention for indexing is being used, i.e., for both Fortran and C, the first index is the row. Note this convention implies that the indexing convention is invariant and that the data order changes to keep that so. But that's not the only way to look at it. Suppose one has large two-dimensional arrays (images or matrices) stored in data files. Suppose the data are stored by rows rather than by columns. If we are to preserve our index convention (whether matrix or image) that means that depending on the language we use, we may be forced to reorder the data if it is read into memory to preserve our indexing convention. For example if we read row-ordered data into memory without reordering, it will match the matrix indexing convention for C, but not for Fortran. Conversely, it will match the image indexing convention for Fortran, but not for C. For C, if one is using data stored in row order, and one wants to preserve the image index convention, the data must be reordered when reading into memory. In the end, which you do for Fortran or C depends on which is more important, not reordering data or preserving the indexing convention. For large images, reordering data is potentially expensive, and often the indexing convention is inverted to avoid that. The situation with numpy makes this issue yet more complicated. The internal machinery of numpy arrays is flexible enough to accept any ordering of indices. One can simply reorder indices by manipulating the internal stride information for arrays without reordering the data at all. NumPy will know how to map the new index order to the data without moving the data. So if this is true, why not choose the index order that matches what you most expect? In particular, why not define row-ordered images to use the image convention? (This is sometimes referred to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN' order options for array ordering in numpy.) The drawback of doing this is potential performance penalties. 
It's common to access the data sequentially, either implicitly in array operations or explicitly by looping over rows of an image. When that is done, then the data will be accessed in non-optimal order. As the first index is incremented, what is actually happening is that elements spaced far apart in memory are being sequentially accessed, with usually poor memory access speeds. For example, consider a two-dimensional image 'im' defined so that im[0, 10] represents the value at x=0, y=10. To be consistent with usual Python behavior then im[0] would represent a column at x=0. Yet that data would be spread over the whole array since the data are stored in row order. Despite the flexibility of numpy's indexing, it can't really paper over the fact that basic operations are rendered inefficient because of data order or that getting contiguous subarrays is still awkward (e.g., im[:,0] for the first row, vs im[0]). Thus one can't use an idiom such as 'for row in im'; 'for col in im' does work, but doesn't yield contiguous column data. As it turns out, numpy is smart enough when dealing with ufuncs to determine which index is the most rapidly varying one in memory and uses that for the innermost loop. Thus for ufuncs there is no large intrinsic advantage to either approach in most cases. On the other hand, use of .flat with a FORTRAN-ordered array will lead to non-optimal memory access as adjacent elements in the flattened array (iterator, actually) are not contiguous in memory. Indeed, the fact is that Python indexing on lists and other sequences naturally leads to an outside-to-inside ordering (the first index gets the largest grouping, the next the next largest, and the last gets the smallest element). Since image data are normally stored by rows, this corresponds to position within rows being the last item indexed. If you do want to use Fortran ordering, realize that there are two approaches to consider: 1) accept that the first index is just not the most rapidly changing in memory and have all your I/O routines reorder your data when going from memory to disk or vice versa, or 2) use numpy's mechanism for mapping the first index to the most rapidly varying data. We recommend the former if possible. The disadvantage of the latter is that many of numpy's functions will yield arrays without Fortran ordering unless you are careful to use the 'order' keyword. Doing this would be highly inconvenient. Otherwise we recommend simply learning to reverse the usual order of indices when accessing elements of an array. Granted, it goes against the grain, but it is more in line with Python semantics and the natural order of the data. """ from __future__ import division, absolute_import, print_function
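# A minimal sketch (separate from the document above) of the metadata-only
# operations it describes: transposing and slicing change only the offset and
# stride information, so the results are views sharing the original buffer.
import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)  # C-order: strides (24, 8)
print(a.strides, a.T.strides)   # (24, 8) (8, 24): transpose only swaps strides
print(a.T.base is a)            # True: different ndarray object, same data buffer

b = a[:, ::2]                   # slicing adjusts offset/strides, no copy
b[0, 0] = 99
print(a[0, 0])                  # 99: writing through the view hits the shared buffer
print(a.copy().base is None)    # True: only .copy() makes an independent buffer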
9,669
57.963415
259
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/glossary.py
""" ======== Glossary ======== .. glossary:: along an axis Axes are defined for arrays with more than one dimension. A 2-dimensional array has two corresponding axes: the first running vertically downwards across rows (axis 0), and the second running horizontally across columns (axis 1). Many operations can take place along one of these axes. For example, we can sum each row of an array, in which case we operate along columns, or axis 1:: >>> x = np.arange(12).reshape((3,4)) >>> x array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.sum(axis=1) array([ 6, 22, 38]) array A homogeneous container of numerical elements. Each element in the array occupies a fixed amount of memory (hence homogeneous), and can be a numerical element of a single type (such as float, int or complex) or a combination (such as ``(float, int, float)``). Each array has an associated data-type (or ``dtype``), which describes the numerical type of its elements:: >>> x = np.array([1, 2, 3], float) >>> x array([ 1., 2., 3.]) >>> x.dtype # floating point number, 64 bits of memory per element dtype('float64') # More complicated data type: each array element is a combination of # and integer and a floating point number >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i4'), ('y', '<f8')]) Fast element-wise operations, called :term:`ufuncs`, operate on arrays. array_like Any sequence that can be interpreted as an ndarray. This includes nested lists, tuples, scalars and existing arrays. attribute A property of an object that can be accessed using ``obj.attribute``, e.g., ``shape`` is an attribute of an array:: >>> x = np.array([1, 2, 3]) >>> x.shape (3,) BLAS `Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_ broadcast NumPy can do operations on arrays whose shapes are mismatched:: >>> x = np.array([1, 2]) >>> y = np.array([[3], [4]]) >>> x array([1, 2]) >>> y array([[3], [4]]) >>> x + y array([[4, 5], [5, 6]]) See `numpy.doc.broadcasting` for more information. C order See `row-major` column-major A way to represent items in a N-dimensional array in the 1-dimensional computer memory. In column-major order, the leftmost index "varies the fastest": for example the array:: [[1, 2, 3], [4, 5, 6]] is represented in the column-major order as:: [1, 4, 2, 5, 3, 6] Column-major order is also known as the Fortran order, as the Fortran programming language uses it. decorator An operator that transforms a function. For example, a ``log`` decorator may be defined to print debugging information upon function execution:: >>> def log(f): ... def new_logging_func(*args, **kwargs): ... print("Logging call with parameters:", args, kwargs) ... return f(*args, **kwargs) ... ... return new_logging_func Now, when we define a function, we can "decorate" it using ``log``:: >>> @log ... def add(a, b): ... return a + b Calling ``add`` then yields: >>> add(1, 2) Logging call with parameters: (1, 2) {} 3 dictionary Resembling a language dictionary, which provides a mapping between words and descriptions thereof, a Python dictionary is a mapping between two objects:: >>> x = {1: 'one', 'two': [1, 2]} Here, `x` is a dictionary mapping keys to values, in this case the integer 1 to the string "one", and the string "two" to the list ``[1, 2]``. The values may be accessed using their corresponding keys:: >>> x[1] 'one' >>> x['two'] [1, 2] Note that dictionaries are not stored in any specific order. 
Also, most mutable (see *immutable* below) objects, such as lists, may not be used as keys. For more information on dictionaries, read the `Python tutorial <http://docs.python.org/tut>`_. Fortran order See `column-major` flattened Collapsed to a one-dimensional array. See `numpy.ndarray.flatten` for details. immutable An object that cannot be modified after execution is called immutable. Two common examples are strings and tuples. instance A class definition gives the blueprint for constructing an object:: >>> class House(object): ... wall_colour = 'white' Yet, we have to *build* a house before it exists:: >>> h = House() # build a house Now, ``h`` is called a ``House`` instance. An instance is therefore a specific realisation of a class. iterable A sequence that allows "walking" (iterating) over items, typically using a loop such as:: >>> x = [1, 2, 3] >>> [item**2 for item in x] [1, 4, 9] It is often used in combination with ``enumerate``:: >>> keys = ['a','b','c'] >>> for n, k in enumerate(keys): ... print("Key %d: %s" % (n, k)) ... Key 0: a Key 1: b Key 2: c list A Python container that can hold any number of objects or items. The items do not have to be of the same type, and can even be lists themselves:: >>> x = [2, 2.0, "two", [2, 2.0]] The list `x` contains 4 items, each of which can be accessed individually:: >>> x[2] # the string 'two' 'two' >>> x[3] # a list, containing an integer 2 and a float 2.0 [2, 2.0] It is also possible to select more than one item at a time, using *slicing*:: >>> x[0:2] # or, equivalently, x[:2] [2, 2.0] In code, arrays are often conveniently expressed as nested lists:: >>> np.array([[1, 2], [3, 4]]) array([[1, 2], [3, 4]]) For more information, read the section on lists in the `Python tutorial <http://docs.python.org/tut>`_. For a mapping type (key-value), see *dictionary*. mask A boolean array, used to select only certain elements for an operation:: >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> mask = (x > 2) >>> mask array([False, False, False, True, True]) >>> x[mask] = -1 >>> x array([ 0, 1, 2, -1, -1]) masked array An array that suppresses values indicated by a mask:: >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) >>> x masked_array(data = [-- 2.0 --], mask = [ True False True], fill_value = 1e+20) <BLANKLINE> >>> x + [1, 2, 3] masked_array(data = [-- 4.0 --], mask = [ True False True], fill_value = 1e+20) <BLANKLINE> Masked arrays are often used when operating on arrays containing missing or invalid entries. matrix A 2-dimensional ndarray that preserves its two-dimensional nature throughout operations. It has certain special operations, such as ``*`` (matrix multiplication) and ``**`` (matrix power), defined:: >>> x = np.mat([[1, 2], [3, 4]]) >>> x matrix([[1, 2], [3, 4]]) >>> x**2 matrix([[ 7, 10], [15, 22]]) method A function associated with an object. For example, each ndarray has a method called ``repeat``:: >>> x = np.array([1, 2, 3]) >>> x.repeat(2) array([1, 1, 2, 2, 3, 3]) ndarray See *array*. record array An :term:`ndarray` with :term:`structured data type` which has been subclassed as ``np.recarray`` and whose dtype is of type ``np.record``, making the fields of its data type accessible by attribute. reference If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, ``a`` and ``b`` are different names for the same Python object. row-major A way to represent items in an N-dimensional array in the 1-dimensional computer memory.
In row-major order, the rightmost index "varies the fastest": for example the array:: [[1, 2, 3], [4, 5, 6]] is represented in the row-major order as:: [1, 2, 3, 4, 5, 6] Row-major order is also known as the C order, as the C programming language uses it. New NumPy arrays are by default in row-major order. self Often seen in method signatures, ``self`` refers to the instance of the associated class. For example: >>> class Paintbrush(object): ... color = 'blue' ... ... def paint(self): ... print("Painting the city %s!" % self.color) ... >>> p = Paintbrush() >>> p.color = 'red' >>> p.paint() # self refers to 'p' Painting the city red! slice Used to select only certain elements from a sequence:: >>> x = range(5) >>> x [0, 1, 2, 3, 4] >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) [1, 2] >>> x[1:5:2] # slice from 1 to 5, but skipping every second element [1, 3] >>> x[::-1] # slice a sequence in reverse [4, 3, 2, 1, 0] Arrays may have more than one dimension, each which can be sliced individually:: >>> x = np.array([[1, 2], [3, 4]]) >>> x array([[1, 2], [3, 4]]) >>> x[:, 1] array([2, 4]) structured data type A data type composed of other datatypes tuple A sequence that may contain a variable number of types of any kind. A tuple is immutable, i.e., once constructed it cannot be changed. Similar to a list, it can be indexed and sliced:: >>> x = (1, 'one', [1, 2]) >>> x (1, 'one', [1, 2]) >>> x[0] 1 >>> x[:2] (1, 'one') A useful concept is "tuple unpacking", which allows variables to be assigned to the contents of a tuple:: >>> x, y = (1, 2) >>> x, y = 1, 2 This is often used when a function returns multiple values: >>> def return_many(): ... return 1, 'alpha', None >>> a, b, c = return_many() >>> a, b, c (1, 'alpha', None) >>> a 1 >>> b 'alpha' ufunc Universal function. A fast element-wise array operation. Examples include ``add``, ``sin`` and ``logical_or``. view An array that does not own its data, but refers to another array's data instead. For example, we may create a view that only shows every second element of another array:: >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> y = x[::2] >>> y array([0, 2, 4]) >>> x[0] = 3 # changing x changes y as well, since y is a view on x >>> y array([3, 2, 4]) wrapper Python is a high-level (highly abstracted, or English-like) language. This abstraction comes at a price in execution speed, and sometimes it becomes necessary to use lower level languages to do fast computations. A wrapper is code that provides a bridge between high and the low level languages, allowing, e.g., Python to execute code written in C or Fortran. Examples include ctypes, SWIG and Cython (which wraps C and C++) and f2py (which wraps Fortran). """ from __future__ import division, absolute_import, print_function
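# A minimal sketch (separate from the glossary above) making the row-major /
# column-major entries concrete: ravelling the same 2-D array in 'C' versus
# 'F' order reproduces the two memory layouts the glossary lists.
import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])
print(a.ravel(order='C'))  # [1 2 3 4 5 6]  -- row-major (C) order
print(a.ravel(order='F'))  # [1 4 2 5 3 6]  -- column-major (Fortran) order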
12,371
28.110588
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/indexing.py
"""============== Array indexing ============== Array indexing refers to any use of the square brackets ([]) to index array values. There are many options to indexing, which give numpy indexing great power, but with power comes some complexity and the potential for confusion. This section is just an overview of the various options and issues related to indexing. Aside from single element indexing, the details on most of these options are to be found in related sections. Assignment vs referencing ========================= Most of the following examples show the use of indexing when referencing data in an array. The examples work just as well when assigning to an array. See the section at the end for specific examples and explanations on how assignments work. Single element indexing ======================= Single element indexing for a 1-D array is what one expects. It work exactly like that for other standard Python sequences. It is 0-based, and accepts negative indices for indexing from the end of the array. :: >>> x = np.arange(10) >>> x[2] 2 >>> x[-2] 8 Unlike lists and tuples, numpy arrays support multidimensional indexing for multidimensional arrays. That means that it is not necessary to separate each dimension's index into its own set of square brackets. :: >>> x.shape = (2,5) # now x is 2-dimensional >>> x[1,3] 8 >>> x[1,-1] 9 Note that if one indexes a multidimensional array with fewer indices than dimensions, one gets a subdimensional array. For example: :: >>> x[0] array([0, 1, 2, 3, 4]) That is, each index specified selects the array corresponding to the rest of the dimensions selected. In the above example, choosing 0 means that the remaining dimension of length 5 is being left unspecified, and that what is returned is an array of that dimensionality and size. It must be noted that the returned array is not a copy of the original, but points to the same values in memory as does the original array. In this case, the 1-D array at the first position (0) is returned. So using a single index on the returned array, results in a single element being returned. That is: :: >>> x[0][2] 2 So note that ``x[0,2] = x[0][2]`` though the second case is more inefficient as a new temporary array is created after the first index that is subsequently indexed by 2. Note to those used to IDL or Fortran memory order as it relates to indexing. NumPy uses C-order indexing. That means that the last index usually represents the most rapidly changing memory location, unlike Fortran or IDL, where the first index represents the most rapidly changing location in memory. This difference represents a great potential for confusion. Other indexing options ====================== It is possible to slice and stride arrays to extract arrays of the same number of dimensions, but of different sizes than the original. The slicing and striding works exactly the same way it does for lists and tuples except that they can be applied to multiple dimensions as well. A few examples illustrates best: :: >>> x = np.arange(10) >>> x[2:5] array([2, 3, 4]) >>> x[:-7] array([0, 1, 2]) >>> x[1:7:2] array([1, 3, 5]) >>> y = np.arange(35).reshape(5,7) >>> y[1:5:2,::3] array([[ 7, 10, 13], [21, 24, 27]]) Note that slices of arrays do not copy the internal array data but also produce new views of the original data. It is possible to index arrays with other arrays for the purposes of selecting lists of values out of arrays into new arrays. There are two different ways of accomplishing this. One uses one or more arrays of index values. 
The other involves giving a boolean array of the proper shape to indicate the values to be selected. Index arrays are a very powerful tool that allows one to avoid looping over individual elements in arrays and thus greatly improve performance. It is possible to use special features to effectively increase the number of dimensions in an array through indexing so the resulting array acquires the shape needed for use in an expression or with a specific function. Index arrays ============ NumPy arrays may be indexed with other arrays (or any other sequence-like object that can be converted to an array, such as lists, with the exception of tuples; see the end of this document for why this is). The use of index arrays ranges from simple, straightforward cases to complex, hard-to-understand cases. For all cases of index arrays, what is returned is a copy of the original data, not a view as one gets for slices. Index arrays must be of integer type. Each value in the array indicates which value in the array to use in place of the index. To illustrate: :: >>> x = np.arange(10,1,-1) >>> x array([10, 9, 8, 7, 6, 5, 4, 3, 2]) >>> x[np.array([3, 3, 1, 8])] array([7, 7, 9, 2]) The index array consisting of the values 3, 3, 1 and 8 correspondingly creates an array of length 4 (same as the index array) where each index is replaced by the value the index array has in the array being indexed. Negative values are permitted and work as they do with single indices or slices: :: >>> x[np.array([3,3,-3,8])] array([7, 7, 4, 2]) It is an error to have index values out of bounds: :: >>> x[np.array([3, 3, 20, 8])] <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9 Generally speaking, what is returned when index arrays are used is an array with the same shape as the index array, but with the type and values of the array being indexed. As an example, we can use a multidimensional index array instead: :: >>> x[np.array([[1,1],[2,3]])] array([[9, 9], [8, 7]]) Indexing Multi-dimensional arrays ================================= Things become more complex when multidimensional arrays are indexed, particularly with multidimensional index arrays. These tend to be more unusual uses, but they are permitted, and they are useful for some problems. We'll start with the simplest multidimensional case (using the array y from the previous examples): :: >>> y[np.array([0,2,4]), np.array([0,1,2])] array([ 0, 15, 30]) In this case, if the index arrays have a matching shape, and there is an index array for each dimension of the array being indexed, the resultant array has the same shape as the index arrays, and the values correspond to the index set for each position in the index arrays. In this example, the first index value is 0 for both index arrays, and thus the first value of the resultant array is y[0,0]. The next value is y[2,1], and the last is y[4,2]. If the index arrays do not have the same shape, there is an attempt to broadcast them to the same shape. If they cannot be broadcast to the same shape, an exception is raised: :: >>> y[np.array([0,2,4]), np.array([0,1])] <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape The broadcasting mechanism permits index arrays to be combined with scalars for other indices. The effect is that the scalar value is used for all the corresponding values of the index arrays: :: >>> y[np.array([0,2,4]), 1] array([ 1, 15, 29]) Jumping to the next level of complexity, it is possible to only partially index an array with index arrays.
Jumping to the next level of complexity, it is possible to only partially index an array with index arrays. It takes a bit of thought to understand what happens in such cases. For example if we just use one index array with y: :: >>> y[np.array([0,2,4])] array([[ 0, 1, 2, 3, 4, 5, 6], [14, 15, 16, 17, 18, 19, 20], [28, 29, 30, 31, 32, 33, 34]]) What results is the construction of a new array where each value of the index array selects one row from the array being indexed and the resultant array has the resulting shape (number of index elements, size of row). An example of where this may be useful is for a color lookup table where we want to map the values of an image into RGB triples for display. The lookup table could have a shape (nlookup, 3). Indexing such an array with an image with shape (ny, nx) with dtype=np.uint8 (or any integer type so long as values are within the bounds of the lookup table) will result in an array of shape (ny, nx, 3) where a triple of RGB values is associated with each pixel location; a short sketch appears at the end of this section. In general, the shape of the resultant array will be the concatenation of the shape of the index array (or the shape that all the index arrays were broadcast to) with the shape of any unused dimensions (those not indexed) in the array being indexed. Boolean or "mask" index arrays ============================== Boolean arrays used as indices are treated in a different manner entirely than index arrays. Boolean arrays must be of the same shape as the initial dimensions of the array being indexed. In the most straightforward case, the boolean array has the same shape: :: >>> b = y>20 >>> y[b] array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]) Unlike in the case of integer index arrays, in the boolean case, the result is a 1-D array containing all the elements in the indexed array corresponding to all the true elements in the boolean array. The elements in the indexed array are always iterated and returned in :term:`row-major` (C-style) order. The result is also identical to ``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy of the data, not a view as one gets with slices. The result will be multidimensional if y has more dimensions than b. For example: :: >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y array([False, False, False, True, True]) >>> y[b[:,5]] array([[21, 22, 23, 24, 25, 26, 27], [28, 29, 30, 31, 32, 33, 34]]) Here the 4th and 5th rows are selected from the indexed array and combined to make a 2-D array. In general, when the boolean array has fewer dimensions than the array being indexed, this is equivalent to y[b, ...], which means y is indexed by b followed by as many : as are needed to fill out the rank of y. Thus the shape of the result is one dimension containing the number of True elements of the boolean array, followed by the remaining dimensions of the array being indexed. For example, using a 2-D boolean array of shape (2,3) with four True elements to select rows from a 3-D array of shape (2,3,5) results in a 2-D result of shape (4,5): :: >>> x = np.arange(30).reshape(2,3,5) >>> x array([[[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]) >>> b = np.array([[True, True, False], [False, True, True]]) >>> x[b] array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]) For further details, consult the numpy reference documentation on array indexing.
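The lookup-table pattern described above can be sketched in a few lines; the palette and image values here are made up for illustration: ::

    >>> palette = np.array([[  0,   0,   0],    # black
    ...                     [255,   0,   0],    # red
    ...                     [  0, 255,   0],    # green
    ...                     [  0,   0, 255]])   # blue
    >>> image = np.array([[0, 1], [2, 3]], dtype=np.uint8)  # indices into palette
    >>> palette[image].shape    # (ny, nx) -> (ny, nx, 3)
    (2, 2, 3)
    >>> palette[image][0, 1]    # the RGB triple for pixel (0, 1)
    array([255,   0,   0])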
Combining index arrays with slices ================================== Index arrays may be combined with slices. For example: :: >>> y[np.array([0,2,4]),1:3] array([[ 1, 2], [15, 16], [29, 30]]) In effect, the slice is converted to an index array np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array to produce a resultant array of shape (3,2). Likewise, slicing can be combined with broadcasted boolean indices: :: >>> y[b[:,5],1:3] array([[22, 23], [29, 30]]) Structural indexing tools ========================= To facilitate easy matching of array shapes with expressions and in assignments, the np.newaxis object can be used within array indices to add new dimensions with a size of 1. For example: :: >>> y.shape (5, 7) >>> y[:,np.newaxis,:].shape (5, 1, 7) Note that there are no new elements in the array, just that the dimensionality is increased. This can be handy to combine two arrays in a way that otherwise would require explicit reshaping operations. For example: :: >>> x = np.arange(5) >>> x[:,np.newaxis] + x[np.newaxis,:] array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7], [4, 5, 6, 7, 8]]) The ellipsis syntax may be used to indicate selecting in full any remaining unspecified dimensions. For example: :: >>> z = np.arange(81).reshape(3,3,3,3) >>> z[1,...,2] array([[29, 32, 35], [38, 41, 44], [47, 50, 53]]) This is equivalent to: :: >>> z[1,:,:,2] array([[29, 32, 35], [38, 41, 44], [47, 50, 53]]) Assigning values to indexed arrays ================================== As mentioned, one can select a subset of an array to assign to using a single index, slices, and index and mask arrays. The value being assigned to the indexed array must be shape consistent (the same shape or broadcastable to the shape the index produces). For example, it is permitted to assign a constant to a slice: :: >>> x = np.arange(10) >>> x[2:7] = 1 or an array of the right size: :: >>> x[2:7] = np.arange(5) Note that assignments may result in truncation if higher types are assigned to lower types (like floats to ints), or even raise exceptions (assigning complex values to floats or ints): :: >>> x[1] = 1.2 >>> x[1] 1 >>> x[1] = 1.2j <type 'exceptions.TypeError'>: can't convert complex to long; use long(abs(z)) Unlike some of the references (such as array and mask indices), assignments are always made to the original data in the array (indeed, nothing else would make sense!). Note though, that some actions may not work as one may naively expect. This particular example is often surprising to people: :: >>> x = np.arange(0, 50, 10) >>> x array([ 0, 10, 20, 30, 40]) >>> x[np.array([1, 1, 3, 1])] += 1 >>> x array([ 0, 11, 20, 31, 40]) People often expect that the 1st location will be incremented by 3; in fact, it will only be incremented by 1. The reason is that a new array is extracted from the original (as a temporary) containing the values at 1, 1, 3, 1, then the value 1 is added to the temporary, and then the temporary is assigned back to the original array. Thus the value of the array at x[1]+1 is assigned to x[1] three times, rather than being incremented 3 times.
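When the increment-per-occurrence behavior is what is actually wanted, the unbuffered ufunc method ``np.add.at`` (available since numpy 1.8) applies the operation once for each index, repeats included; a minimal sketch: ::

    >>> x = np.arange(0, 50, 10)
    >>> np.add.at(x, np.array([1, 1, 3, 1]), 1)   # x[1] really is hit three times
    >>> x
    array([ 0, 13, 20, 31, 40])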
Dealing with variable numbers of indices within programs ======================================================== The index syntax is very powerful but limiting when dealing with a variable number of indices. For example, if you want to write a function that can handle arguments with various numbers of dimensions without having to write special case code for each number of possible dimensions, how can that be done? If one supplies to the index a tuple, the tuple will be interpreted as a list of indices. For example (using the previous definition for the array z): :: >>> indices = (1,1,1,1) >>> z[indices] 40 So one can use code to construct tuples of any number of indices and then use these within an index. Slices can be specified within programs by using the slice() function in Python. For example: :: >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] >>> z[indices] array([39, 40]) Likewise, ellipsis can be specified by code by using the Ellipsis object: :: >>> indices = (1, Ellipsis, 1) # same as [1,...,1] >>> z[indices] array([[28, 31, 34], [37, 40, 43], [46, 49, 52]]) For this reason it is possible to use the output from the np.nonzero() function directly as an index, since it always returns a tuple of index arrays. Because of the special treatment of tuples, they are not automatically converted to an array as a list would be. As an example: :: >>> z[[1,1,1,1]] # produces a large array array([[[[27, 28, 29], [30, 31, 32], ... >>> z[(1,1,1,1)] # returns a single value 40 """ from __future__ import division, absolute_import, print_function
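As a closing sketch for the tuple-index discussion above, the tuple returned by ``np.nonzero`` can be used directly as an index (array values made up for illustration): ::

    >>> a = np.array([[1, 0, 2], [0, 3, 0]])
    >>> idx = np.nonzero(a)        # a tuple of index arrays
    >>> idx
    (array([0, 0, 1]), array([0, 2, 1]))
    >>> a[idx]                     # selects exactly the nonzero elements
    array([1, 2, 3])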
15,669
34.613636
81
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/constants.py
""" ========= Constants ========= NumPy includes several constants: %(constant_list)s """ # # Note: the docstring is autogenerated. # from __future__ import division, absolute_import, print_function import textwrap, re # Maintain same format as in numpy.add_newdocs constants = [] def add_newdoc(module, name, doc): constants.append((name, doc)) add_newdoc('numpy', 'Inf', """ IEEE 754 floating point representation of (positive) infinity. Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. For more details, see `inf`. See Also -------- inf """) add_newdoc('numpy', 'Infinity', """ IEEE 754 floating point representation of (positive) infinity. Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. For more details, see `inf`. See Also -------- inf """) add_newdoc('numpy', 'NAN', """ IEEE 754 floating point representation of Not a Number (NaN). `NaN` and `NAN` are equivalent definitions of `nan`. Please use `nan` instead of `NAN`. See Also -------- nan """) add_newdoc('numpy', 'NINF', """ IEEE 754 floating point representation of negative infinity. Returns ------- y : float A floating point representation of negative infinity. See Also -------- isinf : Shows which elements are positive or negative infinity isposinf : Shows which elements are positive infinity isneginf : Shows which elements are negative infinity isnan : Shows which elements are Not a Number isfinite : Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity) Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Also that positive infinity is not equivalent to negative infinity. But infinity is equivalent to positive infinity. Examples -------- >>> np.NINF -inf >>> np.log(0) -inf """) add_newdoc('numpy', 'NZERO', """ IEEE 754 floating point representation of negative zero. Returns ------- y : float A floating point representation of negative zero. See Also -------- PZERO : Defines positive zero. isinf : Shows which elements are positive or negative infinity. isposinf : Shows which elements are positive infinity. isneginf : Shows which elements are negative infinity. isnan : Shows which elements are Not a Number. isfinite : Shows which elements are finite - not one of Not a Number, positive infinity and negative infinity. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Negative zero is considered to be a finite number. Examples -------- >>> np.NZERO -0.0 >>> np.PZERO 0.0 >>> np.isfinite([np.NZERO]) array([ True]) >>> np.isnan([np.NZERO]) array([False]) >>> np.isinf([np.NZERO]) array([False]) """) add_newdoc('numpy', 'NaN', """ IEEE 754 floating point representation of Not a Number (NaN). `NaN` and `NAN` are equivalent definitions of `nan`. Please use `nan` instead of `NaN`. See Also -------- nan """) add_newdoc('numpy', 'PINF', """ IEEE 754 floating point representation of (positive) infinity. Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. For more details, see `inf`. See Also -------- inf """) add_newdoc('numpy', 'PZERO', """ IEEE 754 floating point representation of positive zero. Returns ------- y : float A floating point representation of positive zero. See Also -------- NZERO : Defines negative zero. isinf : Shows which elements are positive or negative infinity. isposinf : Shows which elements are positive infinity. 
isneginf : Shows which elements are negative infinity. isnan : Shows which elements are Not a Number. isfinite : Shows which elements are finite - not one of Not a Number, positive infinity and negative infinity. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Positive zero is considered to be a finite number. Examples -------- >>> np.PZERO 0.0 >>> np.NZERO -0.0 >>> np.isfinite([np.PZERO]) array([ True]) >>> np.isnan([np.PZERO]) array([False]) >>> np.isinf([np.PZERO]) array([False]) """) add_newdoc('numpy', 'e', """ Euler's constant, base of natural logarithms, Napier's constant. ``e = 2.71828182845904523536028747135266249775724709369995...`` See Also -------- exp : Exponential function log : Natural logarithm References ---------- .. [1] http://en.wikipedia.org/wiki/Napier_constant """) add_newdoc('numpy', 'inf', """ IEEE 754 floating point representation of (positive) infinity. Returns ------- y : float A floating point representation of positive infinity. See Also -------- isinf : Shows which elements are positive or negative infinity isposinf : Shows which elements are positive infinity isneginf : Shows which elements are negative infinity isnan : Shows which elements are Not a Number isfinite : Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity) Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Also that positive infinity is not equivalent to negative infinity. But infinity is equivalent to positive infinity. `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. Examples -------- >>> np.inf inf >>> np.array([1]) / 0. array([ Inf]) """) add_newdoc('numpy', 'infty', """ IEEE 754 floating point representation of (positive) infinity. Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. For more details, see `inf`. See Also -------- inf """) add_newdoc('numpy', 'nan', """ IEEE 754 floating point representation of Not a Number (NaN). Returns ------- y : A floating point representation of Not a Number. See Also -------- isnan : Shows which elements are Not a Number. isfinite : Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity) Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. `NaN` and `NAN` are aliases of `nan`. Examples -------- >>> np.nan nan >>> np.log(-1) nan >>> np.log([-1, 1, 2]) array([ NaN, 0. , 0.69314718]) """) add_newdoc('numpy', 'newaxis', """ A convenient alias for None, useful for indexing arrays. 
See Also -------- `numpy.doc.indexing` Examples -------- >>> newaxis is None True >>> x = np.arange(3) >>> x array([0, 1, 2]) >>> x[:, newaxis] array([[0], [1], [2]]) >>> x[:, newaxis, newaxis] array([[[0]], [[1]], [[2]]]) >>> x[:, newaxis] * x array([[0, 0, 0], [0, 1, 2], [0, 2, 4]]) Outer product, same as ``outer(x, y)``: >>> y = np.arange(3, 6) >>> x[:, newaxis] * y array([[ 0, 0, 0], [ 3, 4, 5], [ 6, 8, 10]]) ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: >>> x[newaxis, :].shape (1, 3) >>> x[newaxis].shape (1, 3) >>> x[None].shape (1, 3) >>> x[:, newaxis].shape (3, 1) """) if __doc__: constants_str = [] constants.sort() for name, doc in constants: s = textwrap.dedent(doc).replace("\n", "\n ") # Replace sections by rubrics lines = s.split("\n") new_lines = [] for line in lines: m = re.match(r'^(\s+)[-=]+\s*$', line) if m and new_lines: prev = textwrap.dedent(new_lines.pop()) new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) new_lines.append('') else: new_lines.append(line) s = "\n".join(new_lines) # Done. constants_str.append(""".. const:: %s\n %s""" % (name, s)) constants_str = "\n".join(constants_str) __doc__ = __doc__ % dict(constant_list=constants_str) del constants_str, name, doc del line, lines, new_lines, m, s, prev del constants, add_newdoc
8,882
21.545685
75
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/creation.py
""" ============== Array Creation ============== Introduction ============ There are 5 general mechanisms for creating arrays: 1) Conversion from other Python structures (e.g., lists, tuples) 2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, etc.) 3) Reading arrays from disk, either from standard or custom formats 4) Creating arrays from raw bytes through the use of strings or buffers 5) Use of special library functions (e.g., random) This section will not cover means of replicating, joining, or otherwise expanding or mutating existing arrays. Nor will it cover creating object arrays or structured arrays. Both of those are covered in their own sections. Converting Python array_like Objects to NumPy Arrays ==================================================== In general, numerical data arranged in an array-like structure in Python can be converted to arrays through the use of the array() function. The most obvious examples are lists and tuples. See the documentation for array() for details for its use. Some objects may support the array-protocol and allow conversion to arrays this way. A simple way to find out if the object can be converted to a numpy array using array() is simply to try it interactively and see if it works! (The Python Way). Examples: :: >>> x = np.array([2,3,1,0]) >>> x = np.array([2, 3, 1, 0]) >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) Intrinsic NumPy Array Creation ============================== NumPy has built-in functions for creating arrays from scratch: zeros(shape) will create an array filled with 0 values with the specified shape. The default dtype is float64. ``>>> np.zeros((2, 3)) array([[ 0., 0., 0.], [ 0., 0., 0.]])`` ones(shape) will create an array filled with 1 values. It is identical to zeros in all other respects. arange() will create arrays with regularly incrementing values. Check the docstring for complete information on the various ways it can be used. A few examples will be given here: :: >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.arange(2, 10, dtype=float) array([ 2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) Note that there are some subtleties regarding the last usage that the user should be aware of that are described in the arange docstring. linspace() will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For example: :: >>> np.linspace(1., 4., 6) array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) The advantage of this creation function is that one can guarantee the number of elements and the starting and end point, which arange() generally will not do for arbitrary start, stop, and step values. indices() will create a set of arrays (stacked as a one-higher dimensioned array), one per dimension with each representing variation in that dimension. An example illustrates much better than a verbal description: :: >>> np.indices((3,3)) array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on a regular grid. Reading Arrays From Disk ======================== This is presumably the most common case of large array creation. 
Reading Arrays From Disk ======================== This is presumably the most common case of large array creation. The details, of course, depend greatly on the format of data on disk and so this section can only give general pointers on how to handle various formats. Standard Binary Formats ----------------------- Various fields have standard formats for array data. The following lists the ones with known python libraries to read them and return numpy arrays (there may be others for which it is possible to read and convert to numpy arrays so check the last section as well) :: HDF5: h5py FITS: Astropy Examples of formats that cannot be read directly but for which it is not hard to convert are those formats supported by libraries like PIL (able to read and write many image formats such as jpg, png, etc). Common ASCII Formats -------------------- Comma Separated Value files (CSV) are widely used (and an export and import option for programs like Excel). There are a number of ways of reading these files in Python. There are CSV functions in Python and functions in pylab (part of matplotlib). More generic ascii files can be read using the io package in scipy. Custom Binary Formats --------------------- There are a variety of approaches one can use. If the file has a relatively simple format then one can write a simple I/O library and use the numpy fromfile() function and .tofile() method to read and write numpy arrays directly (mind your byteorder though!); a short round-trip sketch appears below. If a good C or C++ library exists that reads the data, one can wrap that library with a variety of techniques, though that certainly is much more work and requires significantly more advanced knowledge to interface with C or C++. Use of Special Libraries ------------------------ There are libraries that can be used to generate arrays for special purposes and it isn't possible to enumerate all of them. The most common uses are use of the many array generation functions in random that can generate arrays of random values, and some utility functions to generate special matrices (e.g. diagonal). """ from __future__ import division, absolute_import, print_function
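A minimal round-trip sketch of the fromfile()/tofile() approach described above (the filename is made up, and the raw file carries no dtype or shape metadata, so both must be known to the reader): ::

    >>> a = np.arange(6, dtype=np.float64)
    >>> a.tofile('data.bin')                      # raw bytes, no header
    >>> b = np.fromfile('data.bin', dtype=np.float64)
    >>> (a == b).all()
    True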
5,501
36.944828
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/basics.py
""" ============ Array basics ============ Array types and conversions between types ========================================= NumPy supports a much greater variety of numerical types than Python does. This section shows which are available, and how to modify an array's data-type. ============ ========================================================== Data type Description ============ ========================================================== ``bool_`` Boolean (True or False) stored as a byte ``int_`` Default integer type (same as C ``long``; normally either ``int64`` or ``int32``) intc Identical to C ``int`` (normally ``int32`` or ``int64``) intp Integer used for indexing (same as C ``ssize_t``; normally either ``int32`` or ``int64``) int8 Byte (-128 to 127) int16 Integer (-32768 to 32767) int32 Integer (-2147483648 to 2147483647) int64 Integer (-9223372036854775808 to 9223372036854775807) uint8 Unsigned integer (0 to 255) uint16 Unsigned integer (0 to 65535) uint32 Unsigned integer (0 to 4294967295) uint64 Unsigned integer (0 to 18446744073709551615) ``float_`` Shorthand for ``float64``. float16 Half precision float: sign bit, 5 bits exponent, 10 bits mantissa float32 Single precision float: sign bit, 8 bits exponent, 23 bits mantissa float64 Double precision float: sign bit, 11 bits exponent, 52 bits mantissa ``complex_`` Shorthand for ``complex128``. complex64 Complex number, represented by two 32-bit floats (real and imaginary components) complex128 Complex number, represented by two 64-bit floats (real and imaginary components) ============ ========================================================== Additionally to ``intc`` the platform dependent C integer types ``short``, ``long``, ``longlong`` and their unsigned versions are defined. NumPy numerical types are instances of ``dtype`` (data-type) objects, each having unique characteristics. Once you have imported NumPy using :: >>> import numpy as np the dtypes are available as ``np.bool_``, ``np.float32``, etc. Advanced types, not listed in the table above, are explored in section :ref:`structured_arrays`. There are 5 basic numerical types representing booleans (bool), integers (int), unsigned integers (uint) floating point (float) and complex. Those with numbers in their name indicate the bitsize of the type (i.e. how many bits are needed to represent a single value in memory). Some types, such as ``int`` and ``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit vs. 64-bit machines). This should be taken into account when interfacing with low-level code (such as C or Fortran) where the raw memory is addressed. Data-types can be used as functions to convert python numbers to array scalars (see the array scalar section for an explanation), python sequences of numbers to arrays of that type, or as arguments to the dtype keyword that many numpy functions or methods accept. Some examples:: >>> import numpy as np >>> x = np.float32(1.0) >>> x 1.0 >>> y = np.int_([1,2,4]) >>> y array([1, 2, 4]) >>> z = np.arange(3, dtype=np.uint8) >>> z array([0, 1, 2], dtype=uint8) Array types can also be referred to by character codes, mostly to retain backward compatibility with older packages such as Numeric. Some documentation may still refer to these, for example:: >>> np.array([1, 2, 3], dtype='f') array([ 1., 2., 3.], dtype=float32) We recommend using dtype objects instead. To convert the type of an array, use the .astype() method (preferred) or the type itself as a function. 
For example: :: >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE array([ 0., 1., 2.]) >>> np.int8(z) array([0, 1, 2], dtype=int8) Note that, above, we use the *Python* float object as a dtype. NumPy knows that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. The other data-types do not have Python equivalents. To determine the type of an array, look at the dtype attribute:: >>> z.dtype dtype('uint8') dtype objects also contain information about the type, such as its bit-width and its byte-order. The data type can also be used indirectly to query properties of the type, such as whether it is an integer:: >>> d = np.dtype(int) >>> d dtype('int32') >>> np.issubdtype(d, np.integer) True >>> np.issubdtype(d, np.floating) False Array Scalars ============= NumPy generally returns elements of arrays as array scalars (a scalar with an associated dtype). Array scalars differ from Python scalars, but for the most part they can be used interchangeably (the primary exception is for versions of Python older than v2.x, where integer array scalars cannot act as indices for lists and tuples). There are some exceptions, such as when code requires very specific attributes of a scalar or when it checks specifically whether a value is a Python scalar. Generally, problems are easily fixed by explicitly converting array scalars to Python scalars, using the corresponding Python type function (e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). The primary advantage of using array scalars is that they preserve the array type (Python may not have a matching scalar type available, e.g. ``int16``). Therefore, the use of array scalars ensures identical behaviour between arrays and scalars, irrespective of whether the value is inside an array or not. NumPy scalars also have many of the same methods arrays do. Extended Precision ================== Python's floating-point numbers are usually 64-bit floating-point numbers, nearly equivalent to ``np.float64``. In some unusual situations it may be useful to use floating-point numbers with more precision. Whether this is possible in numpy depends on the hardware and on the development environment: specifically, x86 machines provide hardware floating-point with 80-bit precision, and while most C compilers provide this as their ``long double`` type, MSVC (standard for Windows builds) makes ``long double`` identical to ``double`` (64 bits). NumPy makes the compiler's ``long double`` available as ``np.longdouble`` (and ``np.clongdouble`` for the complex numbers). You can find out what your numpy provides with ``np.finfo(np.longdouble)``. NumPy does not provide a dtype with more precision than C ``long double``\\s; in particular, the 128-bit IEEE quad precision data type (FORTRAN's ``REAL*16``\\) is not available. For efficient memory alignment, ``np.longdouble`` is usually stored padded with zero bits, either to 96 or 128 bits. Which is more efficient depends on hardware and development environment; typically on 32-bit systems they are padded to 96 bits, while on 64-bit systems they are typically padded to 128 bits. ``np.longdouble`` is padded to the system default; ``np.float96`` and ``np.float128`` are provided for users who want specific padding. In spite of the names, ``np.float96`` and ``np.float128`` provide only as much precision as ``np.longdouble``, that is, 80 bits on most x86 machines and 64 bits in standard Windows builds. 
Be warned that even if ``np.longdouble`` offers more precision than python ``float``, it is easy to lose that extra precision, since python often forces values to pass through ``float``. For example, the ``%`` formatting operator requires its arguments to be converted to standard python types, and it is therefore impossible to preserve extended precision even if many decimal places are requested. It can be useful to test your code with the value ``1 + np.finfo(np.longdouble).eps``. """ from __future__ import division, absolute_import, print_function
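As a supplement to the numeric-types and extended-precision discussion above, the exact ranges and precisions can be queried with ``np.iinfo`` and ``np.finfo``; the outputs below are typical of an x86-64 Linux build with an 80-bit ``long double`` and will differ on other platforms: ::

    >>> np.iinfo(np.int16)                   # integer range, per the table above
    iinfo(min=-32768, max=32767, dtype=int16)
    >>> np.finfo(np.float32).eps             # machine epsilon for single precision
    1.1920929e-07
    >>> np.finfo(np.longdouble).precision    # decimal digits; 18 for 80-bit x87
    18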
7,918
41.575269
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/structured_arrays.py
""" ================= Structured Arrays ================= Introduction ============ Structured arrays are ndarrays whose datatype is a composition of simpler datatypes organized as a sequence of named :term:`fields <field>`. For example, :: >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')]) >>> x array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')]) Here ``x`` is a one-dimensional array of length two whose datatype is a structure with three fields: 1. A string of length 10 or less named 'name', 2. a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'. If you index ``x`` at position 1 you get a structure:: >>> x[1] ('Fido', 3, 27.0) You can access and modify individual fields of a structured array by indexing with the field name:: >>> x['age'] array([9, 3], dtype=int32) >>> x['age'] = 5 >>> x array([('Rex', 5, 81.0), ('Fido', 5, 27.0)], dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')]) Structured arrays are designed for low-level manipulation of structured data, for example, for interpreting binary blobs. Structured datatypes are designed to mimic 'structs' in the C language, making them also useful for interfacing with C code. For these purposes, numpy supports specialized features such as subarrays and nested datatypes, and allows manual control over the memory layout of the structure. For simple manipulation of tabular data other pydata projects, such as pandas, xarray, or DataArray, provide higher-level interfaces that may be more suitable. These projects may also give better performance for tabular data analysis because the C-struct-like memory layout of structured arrays can lead to poor cache behavior. .. _defining-structured-types: Structured Datatypes ==================== To use structured arrays one first needs to define a structured datatype. A structured datatype can be thought of as a sequence of bytes of a certain length (the structure's :term:`itemsize`) which is interpreted as a collection of fields. Each field has a name, a datatype, and a byte offset within the structure. The datatype of a field may be any numpy datatype including other structured datatypes, and it may also be a :term:`sub-array` which behaves like an ndarray of a specified shape. The offsets of the fields are arbitrary, and fields may even overlap. These offsets are usually determined automatically by numpy, but can also be specified. Structured Datatype Creation ---------------------------- Structured datatypes may be created using the function :func:`numpy.dtype`. There are 4 alternative forms of specification which vary in flexibility and conciseness. These are further documented in the :ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in summary they are: 1. A list of tuples, one tuple per field Each tuple has the form ``(fieldname, datatype, shape)`` where shape is optional. ``fieldname`` is a string (or tuple if titles are used, see :ref:`Field Titles <titles>` below), ``datatype`` may be any object convertible to a datatype, and ``shape`` is a tuple of integers specifying subarray shape. 
>>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2,2))]) dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))]) If ``fieldname`` is the empty string ``''``, the field will be given a default name of the form ``f#``, where ``#`` is the integer index of the field, counting from 0 from the left:: >>> np.dtype([('x', 'f4'),('', 'i4'),('z', 'i8')]) dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')]) The byte offsets of the fields within the structure and the total structure itemsize are determined automatically. 2. A string of comma-separated dtype specifications In this shorthand notation any of the :ref:`string dtype specifications <arrays.dtypes.constructing>` may be used in a string and separated by commas. The itemsize and byte offsets of the fields are determined automatically, and the field names are given the default names ``f0``, ``f1``, etc. :: >>> np.dtype('i8,f4,S3') dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')]) >>> np.dtype('3int8, float32, (2,3)float64') dtype([('f0', 'i1', 3), ('f1', '<f4'), ('f2', '<f8', (2, 3))]) 3. A dictionary of field parameter arrays This is the most flexible form of specification since it allows control over the byte-offsets of the fields and the itemsize of the structure. The dictionary has two required keys, 'names' and 'formats', and four optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values for 'names' and 'formats' should respectively be a list of field names and a list of dtype specifications, of the same length. The optional 'offsets' value should be a list of integer byte-offsets, one for each field within the structure. If 'offsets' is not given the offsets are determined automatically. The optional 'itemsize' value should be an integer describing the total size in bytes of the dtype, which must be large enough to contain all the fields. :: >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4','f4']}) dtype([('col1', '<i4'), ('col2', '<f4')]) >>> np.dtype({'names': ['col1', 'col2'], ... 'formats': ['i4','f4'], ... 'offsets': [0, 4], ... 'itemsize': 12}) dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12}) Offsets may be chosen such that the fields overlap, though this will mean that assigning to one field may clobber any overlapping field's data. As an exception, fields of :class:`numpy.object` type cannot overlap with other fields, because of the risk of clobbering the internal object pointer and then dereferencing it. The optional 'aligned' value can be set to ``True`` to make the automatic offset computation use aligned offsets (see :ref:`offsets-and-alignment`), as if the 'align' keyword argument of :func:`numpy.dtype` had been set to True. The optional 'titles' value should be a list of titles of the same length as 'names', see :ref:`Field Titles <titles>` below. 4. A dictionary of field names The use of this form of specification is discouraged, but documented here because older numpy code may use it. The keys of the dictionary are the field names and the values are tuples specifying type and offset:: >>> np.dtype({'col1': ('i1',0), 'col2': ('f4',1)}) dtype([('col1', 'i1'), ('col2', '<f4')]) This form is discouraged because Python dictionaries do not preserve order in Python versions before Python 3.6, and the order of the fields in a structured dtype has meaning. :ref:`Field Titles <titles>` may be specified by using a 3-tuple, see below.
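Since the four forms above are alternative spellings of the same underlying structures, equivalent specifications compare equal; a quick sketch: ::

    >>> a = np.dtype([('x', 'f4'), ('y', 'i4')])
    >>> b = np.dtype({'names': ['x', 'y'], 'formats': ['f4', 'i4']})
    >>> a == b                # same names, formats, and offsets
    True
    >>> a.itemsize            # 4 bytes for 'x' plus 4 bytes for 'y'
    8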
Manipulating and Displaying Structured Datatypes ------------------------------------------------ The list of field names of a structured datatype can be found in the ``names`` attribute of the dtype object:: >>> d = np.dtype([('x', 'i8'), ('y', 'f4')]) >>> d.names ('x', 'y') The field names may be modified by assigning to the ``names`` attribute using a sequence of strings of the same length. The dtype object also has a dictionary-like attribute, ``fields``, whose keys are the field names (and :ref:`Field Titles <titles>`, see below) and whose values are tuples containing the dtype and byte offset of each field. :: >>> d.fields mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)}) Both the ``names`` and ``fields`` attributes will equal ``None`` for unstructured arrays. The string representation of a structured datatype is shown in the "list of tuples" form if possible, otherwise numpy falls back to using the more general dictionary form. .. _offsets-and-alignment: Automatic Byte Offsets and Alignment ------------------------------------ Numpy uses one of two methods to automatically determine the field byte offsets and the overall itemsize of a structured datatype, depending on whether ``align=True`` was specified as a keyword argument to :func:`numpy.dtype`. By default (``align=False``), numpy will pack the fields together such that each field starts at the byte offset where the previous field ended, and the fields are contiguous in memory. :: >>> def print_offsets(d): ... print("offsets:", [d.fields[name][1] for name in d.names]) ... print("itemsize:", d.itemsize) >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2')) offsets: [0, 1, 2, 6, 7, 15] itemsize: 17 If ``align=True`` is set, numpy will pad the structure in the same way many C compilers would pad a C-struct. Aligned structures can give a performance improvement in some cases, at the cost of increased datatype size. Padding bytes are inserted between fields such that each field's byte offset will be a multiple of that field's alignment, which is usually equal to the field's size in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The structure will also have trailing padding added so that its itemsize is a multiple of the largest field's alignment. :: >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2', align=True)) offsets: [0, 1, 4, 8, 16, 24] itemsize: 32 Note that although almost all modern C compilers pad in this way by default, padding in C structs is C-implementation-dependent so this memory layout is not guaranteed to exactly match that of a corresponding struct in a C program. Some work may be needed, either on the numpy side or the C side, to obtain exact correspondence. If offsets were specified using the optional ``offsets`` key in the dictionary-based dtype specification, setting ``align=True`` will check that each field's offset is a multiple of its size and that the itemsize is a multiple of the largest field size, and raise an exception if not. If the offsets of the fields and itemsize of a structured array satisfy the alignment conditions, the array will have the ``ALIGNED`` :ref:`flag <numpy.ndarray.flags>` set. A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an aligned dtype or array to a packed one and vice versa. It takes either a dtype or structured ndarray as an argument, and returns a copy with fields re-packed, with or without padding bytes.
.. _titles: Field Titles ------------ In addition to field names, fields may also have an associated :term:`title`, an alternate name, which is sometimes used as an additional description or alias for the field. The title may be used to index an array, just like a field name. To add titles when using the list-of-tuples form of dtype specification, the field name may be specified as a tuple of two strings instead of a single string, which will be the field's title and field name respectively. For example:: >>> np.dtype([(('my title', 'name'), 'f4')]) When using the first form of dictionary-based specification, the titles may be supplied as an extra ``'titles'`` key as described above. When using the second (discouraged) dictionary-based specification, the title can be supplied by providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual 2-element tuple:: >>> np.dtype({'name': ('i4', 0, 'my title')}) The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any titles are used. This means effectively that a field with a title will be represented twice in the fields dictionary. The tuple values for these fields will also have a third element, the field title. Because of this, and because the ``names`` attribute preserves the field order while the ``fields`` attribute may not, it is recommended to iterate through the fields of a dtype using the ``names`` attribute of the dtype, which will not list titles, as in:: >>> for name in d.names: ... print(d.fields[name][:2]) Union types ----------- Structured datatypes are implemented in numpy to have base type :class:`numpy.void` by default, but it is possible to interpret other numpy types as structured types using the ``(base_dtype, dtype)`` form of dtype specification described in :ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is the desired underlying dtype, and fields and flags will be copied from ``dtype``. This dtype is similar to a 'union' in C. Indexing and Assignment to Structured arrays ============================================= Assigning data to a Structured Array ------------------------------------ There are a number of ways to assign values to a structured array: Using python tuples, using scalar values, or using other structured arrays. Assignment from Python Native Types (Tuples) ``````````````````````````````````````````` The simplest way to assign values to a structured array is using python tuples. Each assigned value should be a tuple of length equal to the number of fields in the array, and not a list or array as these will trigger numpy's broadcasting rules. The tuple's elements are assigned to the successive fields of the array, from left to right:: >>> x = np.array([(1,2,3),(4,5,6)], dtype='i8,f4,f8') >>> x[1] = (7,8,9) >>> x array([(1, 2., 3.), (7, 8., 9.)], dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
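A single tuple can also be broadcast to every element of a structured array, which is a handy initialization idiom; a small sketch (exact repr formatting varies across numpy versions): ::

    >>> x = np.zeros(3, dtype='i8,f4,f8')
    >>> x[:] = (1, 2., 3.)    # one tuple assigned to all three elements
    >>> x
    array([(1, 2., 3.), (1, 2., 3.), (1, 2., 3.)],
          dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])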
Assignment from Scalars ``````````````````````` A scalar assigned to a structured element will be assigned to all fields. This happens when a scalar is assigned to a structured array, or when an unstructured array is assigned to a structured array:: >>> x = np.zeros(2, dtype='i8,f4,?,S1') >>> x[:] = 3 >>> x array([(3, 3.0, True, b'3'), (3, 3.0, True, b'3')], dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')]) >>> x[:] = np.arange(2) >>> x array([(0, 0.0, False, b'0'), (1, 1.0, True, b'1')], dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')]) Structured arrays can also be assigned to unstructured arrays, but only if the structured datatype has just a single field:: >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')]) >>> onefield = np.zeros(2, dtype=[('A', 'i4')]) >>> nostruct = np.zeros(2, dtype='i4') >>> nostruct[:] = twofield ValueError: Can't cast from structure to non-structure, except if the structure only has a single field. >>> nostruct[:] = onefield >>> nostruct array([0, 0], dtype=int32) Assignment from other Structured Arrays ``````````````````````````````````````` Assignment between two structured arrays occurs as if the source elements had been converted to tuples and then assigned to the destination elements. That is, the first field of the source array is assigned to the first field of the destination array, and the second field likewise, and so on, regardless of field names. Structured arrays with a different number of fields cannot be assigned to each other. Bytes of the destination structure which are not included in any of the fields are unaffected. :: >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')]) >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')]) >>> b[:] = a >>> b array([(0.0, b'0.0', b''), (0.0, b'0.0', b''), (0.0, b'0.0', b'')], dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')]) Assignment involving subarrays `````````````````````````````` When assigning to fields which are subarrays, the assigned value will first be broadcast to the shape of the subarray. Indexing Structured Arrays -------------------------- Accessing Individual Fields ``````````````````````````` Individual fields of a structured array may be accessed and modified by indexing the array with the field name. :: >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) >>> x['foo'] array([1, 3]) >>> x['foo'] = 10 >>> x array([(10, 2.), (10, 4.)], dtype=[('foo', '<i8'), ('bar', '<f4')]) The resulting array is a view into the original array. It shares the same memory locations and writing to the view will modify the original array. :: >>> y = x['bar'] >>> y[:] = 10 >>> x array([(10, 10.), (10, 10.)], dtype=[('foo', '<i8'), ('bar', '<f4')]) This view has the same dtype and itemsize as the indexed field, so it is typically a non-structured array, except in the case of nested structures. :: >>> y.dtype, y.shape, y.strides (dtype('float32'), (2,), (12,)) Accessing Multiple Fields ``````````````````````````` One can index and assign to a structured array with a multi-field index, where the index is a list of field names. .. warning:: The behavior of multi-field indexes will change from Numpy 1.14 to Numpy 1.15. In Numpy 1.15, the result of indexing with a multi-field index will be a view into the original array, as follows:: >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')]) >>> a[['a', 'c']] array([(0, 0.), (0, 0.), (0, 0.)], dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12}) Assignment to the view modifies the original array. The view's fields will be in the order they were indexed.
Note that unlike for single-field indexing, the view's dtype has the same itemsize as the original array, and has fields at the same offsets as in the original array, and unindexed fields are merely missing. In Numpy 1.14, indexing an array with a multi-field index returns a copy of the result above for 1.15, but with fields packed together in memory as if passed through :func:`numpy.lib.recfunctions.repack_fields`. This is the behavior of Numpy 1.7 to 1.13. .. warning:: The new behavior in Numpy 1.15 leads to extra "padding" bytes at the location of unindexed fields. You will need to update any code which depends on the data having a "packed" layout. For instance code such as:: >>> a[['a','c']].view('i8') # will fail in Numpy 1.15 ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype will need to be changed. This code has raised a ``FutureWarning`` since Numpy 1.12. The following is a recommended fix, which will behave identically in Numpy 1.14 and Numpy 1.15:: >>> from numpy.lib.recfunctions import repack_fields >>> repack_fields(a[['a','c']]).view('i8') # supported 1.14 and 1.15 array([0, 0, 0]) Assigning to an array with a multi-field index will behave the same in Numpy 1.14 and Numpy 1.15. In both versions the assignment will modify the original array:: >>> a[['a', 'c']] = (2, 3) >>> a array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)], dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')]) This obeys the structured array assignment rules described above. For example, this means that one can swap the values of two fields using appropriate multi-field indexes:: >>> a[['a', 'c']] = a[['c', 'a']] Indexing with an Integer to get a Structured Scalar ``````````````````````````````````````````````````` Indexing a single element of a structured array (with an integer index) returns a structured scalar:: >>> x = np.array([(1, 2., 3.)], dtype='i,f,f') >>> scalar = x[0] >>> scalar (1, 2., 3.) >>> type(scalar) numpy.void Unlike other numpy scalars, structured scalars are mutable and act like views into the original array, such that modifying the scalar will modify the original array. Structured scalars also support access and assignment by field name:: >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) >>> s = x[0] >>> s['bar'] = 100 >>> x array([(1, 100.), (3, 4.)], dtype=[('foo', '<i8'), ('bar', '<f4')]) Similarly to tuples, structured scalars can also be indexed with an integer:: >>> scalar = np.array([(1, 2., 3.)], dtype='i,f,f')[0] >>> scalar[0] 1 >>> scalar[1] = 4 Thus, tuples might be thought of as the native Python equivalent to numpy's structured types, much like native python integers are the equivalent to numpy's integer types. Structured scalars may be converted to a tuple by calling :func:`ndarray.item`:: >>> scalar.item(), type(scalar.item()) ((1, 2.0, 3.0), tuple) Viewing Structured Arrays Containing Objects -------------------------------------------- In order to prevent clobbering object pointers in fields of :class:`numpy.object` type, numpy currently does not allow views of structured arrays containing objects.
Structure Comparison -------------------- If the dtypes of two void structured arrays are equal, testing the equality of the arrays will result in a boolean array with the dimensions of the original arrays, with elements set to ``True`` where all fields of the corresponding structures are equal. Structured dtypes are equal if the field names, dtypes and titles are the same, ignoring endianness, and the fields are in the same order:: >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')]) >>> a == b array([False, False]) Currently, if the dtypes of two void structured arrays are not equivalent the comparison fails, returning the scalar value ``False``. This behavior is deprecated as of numpy 1.10 and will raise an error or perform elementwise comparison in the future. The ``<`` and ``>`` operators always return ``False`` when comparing void structured arrays, and arithmetic and bitwise operations are not supported. Record Arrays ============= As an optional convenience numpy provides an ndarray subclass, :class:`numpy.recarray`, and associated helper functions in the :mod:`numpy.rec` submodule, that allows access to fields of structured arrays by attribute instead of only by index. Record arrays also use a special datatype, :class:`numpy.record`, that allows field access by attribute on the structured scalars obtained from the array. The simplest way to create a record array is with :func:`numpy.rec.array`:: >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")], ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) >>> recordarr.bar array([ 2., 3.], dtype=float32) >>> recordarr[1:2] rec.array([(2, 3.0, 'World')], dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]) >>> recordarr[1:2].foo array([2], dtype=int32) >>> recordarr.foo[1:2] array([2], dtype=int32) >>> recordarr[1].baz 'World' :func:`numpy.rec.array` can convert a wide variety of arguments into record arrays, including structured arrays:: >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")], ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')]) >>> recordarr = np.rec.array(arr) The :mod:`numpy.rec` module provides a number of other convenience functions for creating record arrays, see :ref:`record array creation routines <routines.array-creation.rec>`. A record array representation of a structured array can be obtained using the appropriate :ref:`view`:: >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")], ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)), ... type=np.recarray) For convenience, viewing an ndarray as type :class:`np.recarray` will automatically convert to :class:`np.record` datatype, so the dtype can be left out of the view:: >>> recordarr = arr.view(np.recarray) >>> recordarr.dtype dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])) To get back to a plain ndarray both the dtype and type must be reset. The following view does so, taking into account the unusual case that the recordarr was not a structured type:: >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray) Record array fields accessed by index or by attribute are returned as a record array if the field has a structured type but as a plain ndarray otherwise. :: >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))], ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])]) >>> type(recordarr.foo) <type 'numpy.ndarray'> >>> type(recordarr.bar) <class 'numpy.core.records.recarray'> Note that if a field has the same name as an ndarray attribute, the ndarray attribute takes precedence. Such fields will be inaccessible by attribute but will still be accessible by index. """ from __future__ import division, absolute_import, print_function
24,443
39.336634
106
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/misc.py
""" ============= Miscellaneous ============= IEEE 754 Floating Point Special Values -------------------------------------- Special values defined in numpy: nan, inf, NaNs can be used as a poor-man's mask (if you don't care what the original value was) Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) >>> np.nan == np.nan # is always False! Use special numpy functions instead. False >>> myarr[myarr == np.nan] = 0. # doesn't work >>> myarr array([ 1., 0., NaN, 3.]) >>> myarr[np.isnan(myarr)] = 0. # use this instead find >>> myarr array([ 1., 0., 0., 3.]) Other related special value functions: :: isinf(): True if value is inf isfinite(): True if not nan or inf nan_to_num(): Map nan to 0, inf to max float, -inf to min float The following corresponds to the usual functions except that nans are excluded from the results: :: nansum() nanmax() nanmin() nanargmax() nanargmin() >>> x = np.arange(10.) >>> x[3] = np.nan >>> x.sum() nan >>> np.nansum(x) 42.0 How numpy handles numerical exceptions -------------------------------------- The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` and ``'ignore'`` for ``underflow``. But this can be changed, and it can be set individually for different kinds of exceptions. The different behaviors are: - 'ignore' : Take no action when the exception occurs. - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - 'raise' : Raise a `FloatingPointError`. - 'call' : Call a function specified using the `seterrcall` function. - 'print' : Print a warning directly to ``stdout``. - 'log' : Record error in a Log object specified by `seterrcall`. These behaviors can be set for all kinds of errors or specific ones: - all : apply to all numeric exceptions - invalid : when NaNs are generated - divide : divide by zero (for integers as well!) - overflow : floating point overflows - underflow : floating point underflows Note that integer divide-by-zero is handled by the same machinery. These behaviors are set on a per-thread basis. Examples -------- :: >>> oldsettings = np.seterr(all='warn') >>> np.zeros(5,dtype=np.float32)/0. invalid value encountered in divide >>> j = np.seterr(under='ignore') >>> np.array([1.e-100])**10 >>> j = np.seterr(invalid='raise') >>> np.sqrt(np.array([-1.])) FloatingPointError: invalid value encountered in sqrt >>> def errorhandler(errstr, errflag): ... print("saw stupid error!") >>> np.seterrcall(errorhandler) <function err_handler at 0x...> >>> j = np.seterr(all='call') >>> np.zeros(5, dtype=np.int32)/0 FloatingPointError: invalid value encountered in divide saw stupid error! >>> j = np.seterr(**oldsettings) # restore previous ... # error-handling settings Interfacing to C ---------------- Only a survey of the choices. Little detail on how each works. 1) Bare metal, wrap your own C-code manually. - Plusses: - Efficient - No dependencies on other tools - Minuses: - Lots of learning overhead: - need to learn basics of Python C API - need to learn basics of numpy C API - need to learn how to handle reference counting and love it. - Reference counting often difficult to get right. - getting it wrong leads to memory leaks, and worse, segfaults - API will change for Python 3.0! 
2) Cython - Plusses: - avoid learning C API's - no dealing with reference counting - can code in pseudo python and generate C code - can also interface to existing C code - should shield you from changes to Python C api - has become the de-facto standard within the scientific Python community - fast indexing support for arrays - Minuses: - Can write code in non-standard form which may become obsolete - Not as flexible as manual wrapping 3) ctypes - Plusses: - part of Python standard library - good for interfacing to existing sharable libraries, particularly Windows DLLs - avoids API/reference counting issues - good numpy support: arrays have all these in their ctypes attribute: :: a.ctypes.data a.ctypes.get_strides a.ctypes.data_as a.ctypes.shape a.ctypes.get_as_parameter a.ctypes.shape_as a.ctypes.get_data a.ctypes.strides a.ctypes.get_shape a.ctypes.strides_as - Minuses: - can't use for writing code to be turned into C extensions, only a wrapper tool. 4) SWIG (automatic wrapper generator) - Plusses: - around a long time - multiple scripting language support - C++ support - Good for wrapping large (many functions) existing C libraries - Minuses: - generates lots of code between Python and the C code - can cause performance problems that are nearly impossible to optimize out - interface files can be hard to write - doesn't necessarily avoid reference counting issues or needing to know API's 5) scipy.weave - Plusses: - can turn many numpy expressions into C code - dynamic compiling and loading of generated C code - can embed pure C code in Python module and have weave extract, generate interfaces and compile, etc. - Minuses: - Future very uncertain: it's the only part of Scipy not ported to Python 3 and is effectively deprecated in favor of Cython. 6) Psyco - Plusses: - Turns pure python into efficient machine code through jit-like optimizations - very fast when it optimizes well - Minuses: - Only on intel (windows?) - Doesn't do much for numpy? Interfacing to Fortran: ----------------------- The clear choice to wrap Fortran code is `f2py <http://docs.scipy.org/doc/numpy-dev/f2py/>`_. Pyfort is an older alternative, but not supported any longer. Fwrap is a newer project that looked promising but isn't being developed any longer. Interfacing to C++: ------------------- 1) Cython 2) CXX 3) Boost.python 4) SWIG 5) SIP (used mainly in PyQT) """ from __future__ import division, absolute_import, print_function
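Supplementing the error-handling discussion earlier in this module, the same settings can be scoped with the ``np.errstate`` context manager instead of paired ``seterr`` calls; a minimal sketch: ::

    >>> with np.errstate(divide='ignore'):
    ...     np.float64(1.) / 0.       # no RuntimeWarning inside the block
    ...
    inf
    >>> np.geterr()['divide']         # previous behavior restored on exit
    'warn'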
6,194
26.171053
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/ufuncs.py
""" =================== Universal Functions =================== Ufuncs are, generally speaking, mathematical functions or operations that are applied element-by-element to the contents of an array. That is, the result in each output array element only depends on the value in the corresponding input array (or arrays) and on no other array elements. NumPy comes with a large suite of ufuncs, and scipy extends that suite substantially. The simplest example is the addition operator: :: >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) array([1, 3, 2, 6]) The unfunc module lists all the available ufuncs in numpy. Documentation on the specific ufuncs may be found in those modules. This documentation is intended to address the more general aspects of unfuncs common to most of them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) have equivalent functions defined (e.g. add() for +) Type coercion ============= What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of two different types? What is the type of the result? Typically, the result is the higher of the two types. For example: :: float32 + float64 -> float64 int8 + int32 -> int32 int16 + float32 -> float32 float32 + complex64 -> complex64 There are some less obvious cases generally involving mixes of types (e.g. uints, ints and floats) where equal bit sizes for each are not capable of saving all the information in a different type of equivalent bit size. Some examples are int32 vs float32 or uint32 vs int32. Generally, the result is the higher type of larger size than both (if available). So: :: int32 + float32 -> float64 uint32 + int32 -> int64 Finally, the type coercion behavior when expressions involve Python scalars is different than that seen for arrays. Since Python has a limited number of types, combining a Python int with a dtype=np.int8 array does not coerce to the higher type but instead, the type of the array prevails. So the rules for Python scalars combined with arrays is that the result will be that of the array equivalent the Python scalar if the Python scalar is of a higher 'kind' than the array (e.g., float vs. int), otherwise the resultant type will be that of the array. For example: :: Python int + int8 -> int8 Python float + int8 -> float64 ufunc methods ============= Binary ufuncs support 4 methods. **.reduce(arr)** applies the binary operator to elements of the array in sequence. For example: :: >>> np.add.reduce(np.arange(10)) # adds all elements of array 45 For multidimensional arrays, the first dimension is reduced by default: :: >>> np.add.reduce(np.arange(10).reshape(2,5)) array([ 5, 7, 9, 11, 13]) The axis keyword can be used to specify different axes to reduce: :: >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) array([10, 35]) **.accumulate(arr)** applies the binary operator and generates an an equivalently shaped array that includes the accumulated amount for each element of the array. A couple examples: :: >>> np.add.accumulate(np.arange(10)) array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) >>> np.multiply.accumulate(np.arange(1,9)) array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) The behavior for multidimensional arrays is the same as for .reduce(), as is the use of the axis keyword). **.reduceat(arr,indices)** allows one to apply reduce to selected parts of an array. It is a difficult method to understand. See the documentation at: **.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and arr2. 
**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
arr2. It will work on multidimensional arrays (the shape of the result is
the concatenation of the two input shapes): ::

 >>> np.multiply.outer(np.arange(3),np.arange(4))
 array([[0, 0, 0, 0],
        [0, 1, 2, 3],
        [0, 2, 4, 6]])

Output arguments
================

All ufuncs accept an optional output array. The array must be of the expected
output shape. Beware that if the type of the output array is of a different
(and lower) type than the output result, the results may be silently
truncated or otherwise corrupted in the downcast to the lower type. This
usage is useful when one wants to avoid creating large temporary arrays and
instead allows one to reuse the same array memory repeatedly (at the expense
of not being able to use more convenient operator notation in expressions).
Note that when the output argument is used, the ufunc still returns a
reference to the result.

 >>> x = np.arange(2)
 >>> np.add(np.arange(2),np.arange(2.),x)
 array([0, 2])
 >>> x
 array([0, 2])

and & or as ufuncs
==================

Invariably people try to use the python 'and' and 'or' as logical operators
(and quite understandably). But these operators do not behave as normal
operators since Python treats these quite differently. They cannot be
overloaded with array equivalents. Thus using 'and' or 'or' with an array
results in an error. There are two alternatives:

 1) use the ufunc functions logical_and() and logical_or().
 2) use the bitwise operators & and \\|. The drawback of these is that if
    the arguments to these operators are not boolean arrays, the result is
    likely incorrect. On the other hand, most usages of logical_and and
    logical_or are with boolean arrays. As long as one is careful, this is
    a convenient way to apply these operators.

"""
from __future__ import division, absolute_import, print_function
5,427
38.05036
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/__init__.py
from __future__ import division, absolute_import, print_function import os ref_dir = os.path.join(os.path.dirname(__file__)) __all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and not f.startswith('__')) for f in __all__: __import__(__name__ + '.' + f) del f, ref_dir __doc__ = """\ Topical documentation ===================== The following topics are available: %s You can view them by >>> help(np.doc.TOPIC) #doctest: +SKIP """ % '\n- '.join([''] + __all__) __all__.extend(['__doc__'])
574
18.827586
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/byteswapping.py
""" ============================= Byteswapping and byte order ============================= Introduction to byte ordering and ndarrays ========================================== The ``ndarray`` is an object that provide a python array interface to data in memory. It often happens that the memory that you want to view with an array is not of the same byte ordering as the computer on which you are running Python. For example, I might be working on a computer with a little-endian CPU - such as an Intel Pentium, but I have loaded some data from a file written by a computer that is big-endian. Let's say I have loaded 4 bytes from a file written by a Sun (big-endian) computer. I know that these 4 bytes represent two 16-bit integers. On a big-endian machine, a two-byte integer is stored with the Most Significant Byte (MSB) first, and then the Least Significant Byte (LSB). Thus the bytes are, in memory order: #. MSB integer 1 #. LSB integer 1 #. MSB integer 2 #. LSB integer 2 Let's say the two integers were in fact 1 and 770. Because 770 = 256 * 3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2. The bytes I have loaded from the file would have these contents: >>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2) >>> big_end_str '\\x00\\x01\\x03\\x02' We might want to use an ``ndarray`` to access these integers. In that case, we can create an array around this memory, and tell numpy that there are two integers, and that they are 16 bit and big-endian: >>> import numpy as np >>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str) >>> big_end_arr[0] 1 >>> big_end_arr[1] 770 Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' (``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For example, if our data represented a single unsigned 4-byte little-endian integer, the dtype string would be ``<u4``. In fact, why don't we try that? >>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str) >>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3 True Returning to our ``big_end_arr`` - in this case our underlying data is big-endian (data endianness) and we've set the dtype to match (the dtype is also big-endian). However, sometimes you need to flip these around. .. warning:: Scalars currently do not include byte order information, so extracting a scalar from an array will return an integer in native byte order. Hence: >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder True Changing byte ordering ====================== As you can imagine from the introduction, there are two ways you can affect the relationship between the byte ordering of the array and the underlying memory it is looking at: * Change the byte-ordering information in the array dtype so that it interprets the underlying data as being in a different byte order. This is the role of ``arr.newbyteorder()`` * Change the byte-ordering of the underlying data, leaving the dtype interpretation as it was. This is what ``arr.byteswap()`` does. The common situations in which you need to change byte ordering are: #. Your data and dtype endianess don't match, and you want to change the dtype so that it matches the data. #. Your data and dtype endianess don't match, and you want to swap the data so that they match the dtype #. 
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------

We make something where they don't match:

>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256

The obvious fix for this situation is to change the dtype so it gives
the correct endianness:

>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1

Note the array has not changed in memory:

>>> fixed_end_dtype_arr.tobytes() == big_end_str
True

Data and dtype endianness don't match, change data to match dtype
------------------------------------------------------------------

You might want to do this if you need the data in memory to be a certain
ordering.  For example you might be writing the memory out to a file
that needs a certain byte ordering.

>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1

Now the array *has* changed in memory:

>>> fixed_end_mem_arr.tobytes() == big_end_str
False

Data and dtype endianness match, swap data and dtype
----------------------------------------------------

You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense.  In this case you just do both of
the previous operations:

>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False

An easier way of casting the data to a specific dtype and byte ordering
can be achieved with the ndarray astype method:

>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tobytes() == big_end_str
False

"""
from __future__ import division, absolute_import, print_function
5,346
33.057325
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/broadcasting.py
""" ======================== Broadcasting over arrays ======================== The term broadcasting describes how numpy treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is "broadcast" across the larger array so that they have compatible shapes. Broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. It does this without making needless copies of data and usually leads to efficient algorithm implementations. There are, however, cases where broadcasting is a bad idea because it leads to inefficient use of memory that slows computation. NumPy operations are usually done on pairs of arrays on an element-by-element basis. In the simplest case, the two arrays must have exactly the same shape, as in the following example: >>> a = np.array([1.0, 2.0, 3.0]) >>> b = np.array([2.0, 2.0, 2.0]) >>> a * b array([ 2., 4., 6.]) NumPy's broadcasting rule relaxes this constraint when the arrays' shapes meet certain constraints. The simplest broadcasting example occurs when an array and a scalar value are combined in an operation: >>> a = np.array([1.0, 2.0, 3.0]) >>> b = 2.0 >>> a * b array([ 2., 4., 6.]) The result is equivalent to the previous example where ``b`` was an array. We can think of the scalar ``b`` being *stretched* during the arithmetic operation into an array with the same shape as ``a``. The new elements in ``b`` are simply copies of the original scalar. The stretching analogy is only conceptual. NumPy is smart enough to use the original scalar value without actually making copies, so that broadcasting operations are as memory and computationally efficient as possible. The code in the second example is more efficient than that in the first because broadcasting moves less memory around during the multiplication (``b`` is a scalar rather than an array). General Broadcasting Rules ========================== When operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when 1) they are equal, or 2) one of them is 1 If these conditions are not met, a ``ValueError: frames are not aligned`` exception is thrown, indicating that the arrays have incompatible shapes. The size of the resulting array is the maximum size along each dimension of the input arrays. Arrays do not need to have the same *number* of dimensions. For example, if you have a ``256x256x3`` array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with 3 values. Lining up the sizes of the trailing axes of these arrays according to the broadcast rules, shows that they are compatible:: Image (3d array): 256 x 256 x 3 Scale (1d array): 3 Result (3d array): 256 x 256 x 3 When either of the dimensions compared is one, the other is used. In other words, dimensions with size 1 are stretched or "copied" to match the other. 
In the following example, both the ``A`` and ``B`` arrays have axes with length one that are expanded to a larger size during the broadcast operation:: A (4d array): 8 x 1 x 6 x 1 B (3d array): 7 x 1 x 5 Result (4d array): 8 x 7 x 6 x 5 Here are some more examples:: A (2d array): 5 x 4 B (1d array): 1 Result (2d array): 5 x 4 A (2d array): 5 x 4 B (1d array): 4 Result (2d array): 5 x 4 A (3d array): 15 x 3 x 5 B (3d array): 15 x 1 x 5 Result (3d array): 15 x 3 x 5 A (3d array): 15 x 3 x 5 B (2d array): 3 x 5 Result (3d array): 15 x 3 x 5 A (3d array): 15 x 3 x 5 B (2d array): 3 x 1 Result (3d array): 15 x 3 x 5 Here are examples of shapes that do not broadcast:: A (1d array): 3 B (1d array): 4 # trailing dimensions do not match A (2d array): 2 x 1 B (3d array): 8 x 4 x 3 # second from last dimensions mismatched An example of broadcasting in practice:: >>> x = np.arange(4) >>> xx = x.reshape(4,1) >>> y = np.ones(5) >>> z = np.ones((3,4)) >>> x.shape (4,) >>> y.shape (5,) >>> x + y <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape >>> xx.shape (4, 1) >>> y.shape (5,) >>> (xx + y).shape (4, 5) >>> xx + y array([[ 1., 1., 1., 1., 1.], [ 2., 2., 2., 2., 2.], [ 3., 3., 3., 3., 3.], [ 4., 4., 4., 4., 4.]]) >>> x.shape (4,) >>> z.shape (3, 4) >>> (x + z).shape (3, 4) >>> x + z array([[ 1., 2., 3., 4.], [ 1., 2., 3., 4.], [ 1., 2., 3., 4.]]) Broadcasting provides a convenient way of taking the outer product (or any other outer operation) of two arrays. The following example shows an outer addition operation of two 1-d arrays:: >>> a = np.array([0.0, 10.0, 20.0, 30.0]) >>> b = np.array([1.0, 2.0, 3.0]) >>> a[:, np.newaxis] + b array([[ 1., 2., 3.], [ 11., 12., 13.], [ 21., 22., 23.], [ 31., 32., 33.]]) Here the ``newaxis`` index operator inserts a new axis into ``a``, making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_ for illustrations of broadcasting concepts. """ from __future__ import division, absolute_import, print_function
5,565
30.094972
94
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/doc/subclassing.py
"""============================= Subclassing ndarray in python ============================= Introduction ------------ Subclassing ndarray is relatively simple, but it has some complications compared to other Python objects. On this page we explain the machinery that allows you to subclass ndarray, and the implications for implementing a subclass. ndarrays and object creation ============================ Subclassing ndarray is complicated by the fact that new instances of ndarray classes can come about in three different ways. These are: #. Explicit constructor call - as in ``MySubClass(params)``. This is the usual route to Python instance creation. #. View casting - casting an existing ndarray as a given subclass #. New from template - creating a new instance from a template instance. Examples include returning slices from a subclassed array, creating return types from ufuncs, and copying arrays. See :ref:`new-from-template` for more details The last two are characteristics of ndarrays - in order to support things like array slicing. The complications of subclassing ndarray are due to the mechanisms numpy has to support these latter two routes of instance creation. .. _view-casting: View casting ------------ *View casting* is the standard ndarray mechanism by which you take an ndarray of any subclass, and return a view of the array as another (specified) subclass: >>> import numpy as np >>> # create a completely useless ndarray subclass >>> class C(np.ndarray): pass >>> # create a standard ndarray >>> arr = np.zeros((3,)) >>> # take a view of it, as our useless subclass >>> c_arr = arr.view(C) >>> type(c_arr) <class 'C'> .. _new-from-template: Creating new from template -------------------------- New instances of an ndarray subclass can also come about by a very similar mechanism to :ref:`view-casting`, when numpy finds it needs to create a new instance from a template instance. The most obvious place this has to happen is when you are taking slices of subclassed arrays. For example: >>> v = c_arr[1:] >>> type(v) # the view is of type 'C' <class 'C'> >>> v is c_arr # but it's a new instance False The slice is a *view* onto the original ``c_arr`` data. So, when we take a view from the ndarray, we return a new ndarray, of the same class, that points to the data in the original. There are other points in the use of ndarrays where we need such views, such as copying arrays (``c_arr.copy()``), creating ufunc output arrays (see also :ref:`array-wrap`), and reducing methods (like ``c_arr.mean()``. Relationship of view casting and new-from-template -------------------------------------------------- These paths both use the same machinery. We make the distinction here, because they result in different input to your methods. Specifically, :ref:`view-casting` means you have created a new instance of your array type from any potential subclass of ndarray. :ref:`new-from-template` means you have created a new instance of your class from a pre-existing instance, allowing you - for example - to copy across attributes that are particular to your subclass. Implications for subclassing ---------------------------- If we subclass ndarray, we need to deal not only with explicit construction of our array type, but also :ref:`view-casting` or :ref:`new-from-template`. NumPy has the machinery to do this, and this machinery that makes subclassing slightly non-standard. There are two aspects to the machinery that ndarray uses to support views and new-from-template in subclasses. 
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method.  The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.

A brief Python primer on ``__new__`` and ``__init__``
=====================================================

``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.

For example, consider the following Python code:

.. testcode::

  class C(object):
      def __new__(cls, *args):
          print('Cls in __new__:', cls)
          print('Args in __new__:', args)
          return object.__new__(cls, *args)

      def __init__(self, *args):
          print('type(self) in __init__:', type(self))
          print('Args in __init__:', args)

meaning that we get:

>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)

When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``.  After python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.

As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.

Why use ``__new__`` rather than just the usual ``__init__``?  Because
in some cases, as for ndarray, we want to be able to return an object
of some other class.  Consider the following:

.. testcode::

  class D(C):
      def __new__(cls, *args):
          print('D cls is:', cls)
          print('D args in __new__:', args)
          return C.__new__(C, *args)

      def __init__(self, *args):
          # we never get here
          print('In D __init__')

meaning that:

>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>

The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``.  Note that the ``__init__`` method of ``D`` does not get
called.  In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.

This is how subclasses of the ndarray class are able to return views
that preserve the class type.  When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::

  obj = ndarray.__new__(subtype, shape, ...

where ``subtype`` is the subclass.  Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.

That solves the problem of returning views of the same type, but now
we have a new problem.  The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on.  (Aside -
why not call ``obj = subtype.__new__(...`` then?  Because we may not
have a ``__new__`` method with the same call signature).
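To make the point above concrete, a tiny sketch (the class name is
arbitrary) showing that ``ndarray.__new__`` already returns an instance of
whatever subclass it is handed, without any ``__init__`` being run: ::

  import numpy as np

  class C(np.ndarray):
      pass

  # the view machinery's call, done by hand:
  obj = np.ndarray.__new__(C, (3,))
  print(type(obj))          # <class '__main__.C'>

Any per-instance setup therefore has to happen somewhere else, which is the
job of ``__array_finalize__`` described next.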
The role of ``__array_finalize__`` ================================== ``__array_finalize__`` is the mechanism that numpy provides to allow subclasses to handle the various ways that new instances get created. Remember that subclass instances can come about in these three ways: #. explicit constructor call (``obj = MySubClass(params)``). This will call the usual sequence of ``MySubClass.__new__`` then (if it exists) ``MySubClass.__init__``. #. :ref:`view-casting` #. :ref:`new-from-template` Our ``MySubClass.__new__`` method only gets called in the case of the explicit constructor call, so we can't rely on ``MySubClass.__new__`` or ``MySubClass.__init__`` to deal with the view casting and new-from-template. It turns out that ``MySubClass.__array_finalize__`` *does* get called for all three methods of object creation, so this is where our object creation housekeeping usually goes. * For the explicit constructor call, our subclass will need to create a new ndarray instance of its own class. In practice this means that we, the authors of the code, will need to make a call to ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an existing array (see below) * For view casting and new-from-template, the equivalent of ``ndarray.__new__(MySubClass,...`` is called, at the C level. The arguments that ``__array_finalize__`` receives differ for the three methods of instance creation above. The following code allows us to look at the call sequences and arguments: .. testcode:: import numpy as np class C(np.ndarray): def __new__(cls, *args, **kwargs): print('In __new__ with class %s' % cls) return super(C, cls).__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ # method for your subclass print('In __init__ with class %s' % self.__class__) def __array_finalize__(self, obj): print('In array_finalize:') print(' self type is %s' % type(self)) print(' obj type is %s' % type(obj)) Now: >>> # Explicit constructor >>> c = C((10,)) In __new__ with class <class 'C'> In array_finalize: self type is <class 'C'> obj type is <type 'NoneType'> In __init__ with class <class 'C'> >>> # View casting >>> a = np.arange(10) >>> cast_a = a.view(C) In array_finalize: self type is <class 'C'> obj type is <type 'numpy.ndarray'> >>> # Slicing (example of new-from-template) >>> cv = c[:1] In array_finalize: self type is <class 'C'> obj type is <class 'C'> The signature of ``__array_finalize__`` is:: def __array_finalize__(self, obj): One sees that the ``super`` call, which goes to ``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our own class (``self``) as well as the object from which the view has been taken (``obj``). As you can see from the output above, the ``self`` is always a newly created instance of our subclass, and the type of ``obj`` differs for the three instance creation methods: * When called from the explicit constructor, ``obj`` is ``None`` * When called from view casting, ``obj`` can be an instance of any subclass of ndarray, including our own. * When called in new-from-template, ``obj`` is another instance of our own subclass, that we might use to update the new ``self`` instance. Because ``__array_finalize__`` is the only method that always sees new instances being created, it is the sensible place to fill in instance defaults for new object attributes, among other tasks. This may be clearer with an example. 
Simple example - adding an extra attribute to ndarray ----------------------------------------------------- .. testcode:: import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # ``self`` is a new object resulting from # ndarray.__new__(InfoArray, ...), therefore it only has # attributes that the ndarray.__new__ constructor gave it - # i.e. those of a standard ndarray. # # We could have got to the ndarray.__new__ call in 3 ways: # From an explicit constructor - e.g. InfoArray(): # obj is None # (we're in the middle of the InfoArray.__new__ # constructor, and self.info will be set when we return to # InfoArray.__new__) if obj is None: return # From view casting - e.g arr.view(InfoArray): # obj is arr # (type(obj) can be InfoArray) # From new-from-template - e.g infoarr[:3] # type(obj) is InfoArray # # Note that it is here, rather than in the __new__ method, # that we set the default value for 'info', because this # method sees all creation of default objects - with the # InfoArray.__new__ constructor, but also with # arr.view(InfoArray). self.info = getattr(obj, 'info', None) # We do not need to return anything Using the object looks like this: >>> obj = InfoArray(shape=(3,)) # explicit constructor >>> type(obj) <class 'InfoArray'> >>> obj.info is None True >>> obj = InfoArray(shape=(3,), info='information') >>> obj.info 'information' >>> v = obj[1:] # new-from-template - here - slicing >>> type(v) <class 'InfoArray'> >>> v.info 'information' >>> arr = np.arange(10) >>> cast_arr = arr.view(InfoArray) # view casting >>> type(cast_arr) <class 'InfoArray'> >>> cast_arr.info is None True This class isn't very useful, because it has the same constructor as the bare ndarray object, including passing in buffers and shapes and so on. We would probably prefer the constructor to be able to take an already formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- Here is a class that takes a standard ndarray that already exists, casts as our type, and adds an extra attribute. .. testcode:: import numpy as np class RealisticInfoArray(np.ndarray): def __new__(cls, input_array, info=None): # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array).view(cls) # add the new attribute to the created instance obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return self.info = getattr(obj, 'info', None) So: >>> arr = np.arange(5) >>> obj = RealisticInfoArray(arr, info='information') >>> type(obj) <class 'RealisticInfoArray'> >>> obj.info 'information' >>> v = obj[1:] >>> type(v) <class 'RealisticInfoArray'> >>> v.info 'information' .. 
_array-ufunc:

``__array_ufunc__`` for ufuncs
------------------------------

.. versionadded:: 1.13

A subclass can override what happens when executing numpy ufuncs on it by
overriding the default ``ndarray.__array_ufunc__`` method. This method is
executed *instead* of the ufunc and should return either the result of the
operation, or :obj:`NotImplemented` if the operation requested is not
implemented.

The signature of ``__array_ufunc__`` is::

    def __array_ufunc__(ufunc, method, *inputs, **kwargs):

- *ufunc* is the ufunc object that was called.
- *method* is a string indicating how the Ufunc was called, either
  ``"__call__"`` to indicate it was called directly, or one of its
  :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
  ``"reduceat"``, ``"outer"``, or ``"at"``.
- *inputs* is a tuple of the input arguments to the ``ufunc``
- *kwargs* contains any optional or keyword arguments passed to the
  function. This includes any ``out`` arguments, which are always
  contained in a tuple.

A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
following.

.. testcode::

    import numpy as np

    class A(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            args = []
            in_no = []
            for i, input_ in enumerate(inputs):
                if isinstance(input_, A):
                    in_no.append(i)
                    args.append(input_.view(np.ndarray))
                else:
                    args.append(input_)

            outputs = kwargs.pop('out', None)
            out_no = []
            if outputs:
                out_args = []
                for j, output in enumerate(outputs):
                    if isinstance(output, A):
                        out_no.append(j)
                        out_args.append(output.view(np.ndarray))
                    else:
                        out_args.append(output)
                kwargs['out'] = tuple(out_args)
            else:
                outputs = (None,) * ufunc.nout

            info = {}
            if in_no:
                info['inputs'] = in_no
            if out_no:
                info['outputs'] = out_no

            results = super(A, self).__array_ufunc__(ufunc, method,
                                                     *args, **kwargs)
            if results is NotImplemented:
                return NotImplemented

            if method == 'at':
                if isinstance(inputs[0], A):
                    inputs[0].info = info
                return

            if ufunc.nout == 1:
                results = (results,)

            results = tuple((np.asarray(result).view(A)
                             if output is None else output)
                            for result, output in zip(results, outputs))
            if results and isinstance(results[0], A):
                results[0].info = info

            return results[0] if len(results) == 1 else results

So, this class does not actually do anything interesting: it just converts
any instances of its own to regular ndarray (otherwise, we'd get infinite
recursion!), and adds an ``info`` dictionary that tells which inputs and
outputs it converted. Hence, e.g.,

>>> a = np.arange(5.).view(A)
>>> b = np.sin(a)
>>> b.info
{'inputs': [0]}
>>> b = np.sin(np.arange(5.), out=(a,))
>>> b.info
{'outputs': [0]}
>>> a = np.arange(5.).view(A)
>>> b = np.ones(1).view(A)
>>> c = a + b
>>> c.info
{'inputs': [0, 1]}
>>> a += b
>>> a.info
{'inputs': [0, 1], 'outputs': [0]}
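A much smaller use of the same hook, noted here as an aside: since numpy
1.13 a class can also opt out of ufuncs entirely by setting
``__array_ufunc__`` to ``None``, in which case ufuncs (and the arithmetic
operators that call them) raise ``TypeError`` rather than coercing: ::

    import numpy as np

    class Opaque(np.ndarray):
        __array_ufunc__ = None      # refuse all ufuncs

    a = np.arange(3).view(Opaque)
    try:
        np.add(a, 1)
    except TypeError as e:
        print('refused:', e)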
Note that another approach would be to use
``getattr(ufunc, method)(*inputs, **kwargs)`` instead of the ``super`` call.
For this example, the result would be identical, but there is a difference
if another operand also defines ``__array_ufunc__``. E.g., let's assume that
we evaluate ``np.add(a, b)``, where ``b`` is an instance of another class
``B`` that has an override.  If you use ``super`` as in the example,
``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
means it cannot evaluate the result itself. Thus, it will return
`NotImplemented` and so will our class ``A``. Then, control will be passed
over to ``b``, which either knows how to deal with us and produces a result,
or does not and returns `NotImplemented`, raising a ``TypeError``.

If instead, we replace our ``super`` call with ``getattr(ufunc, method)``,
we effectively do ``np.add(a.view(np.ndarray), b)``. Again,
``B.__array_ufunc__`` will be called, but now it sees an ``ndarray`` as the
other argument. Likely, it will know how to handle this, and return a new
instance of the ``B`` class to us. Our example class is not set up to handle
this, but it might well be the best approach if, e.g., one were to
re-implement ``MaskedArray`` using ``__array_ufunc__``.

As a final note: if the ``super`` route is suited to a given class, an
advantage of using it is that it helps in constructing class hierarchies.
E.g., suppose that our other class ``B`` also used the ``super`` in its
``__array_ufunc__`` implementation, and we created a class ``C`` that
depended on both, i.e., ``class C(A, B)`` (with, for simplicity, not another
``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.

.. _array-wrap:

``__array_wrap__`` for ufuncs and other functions
-------------------------------------------------

Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
change the output type of a ufunc, but, in contrast to
``__array_ufunc__``, did not allow one to make any changes to the inputs.
It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
used by other numpy functions and methods, such as ``squeeze``, so at the
present time is still needed for full functionality.

Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
allowing a subclass to set the type of the return value and update
attributes and metadata. Let's show how this works with an example. First
we return to the simpler example subclass, but with a different name and
some print statements:
.. testcode::

  import numpy as np

  class MySubClass(np.ndarray):

      def __new__(cls, input_array, info=None):
          obj = np.asarray(input_array).view(cls)
          obj.info = info
          return obj

      def __array_finalize__(self, obj):
          print('In __array_finalize__:')
          print('   self is %s' % repr(self))
          print('   obj is %s' % repr(obj))
          if obj is None: return
          self.info = getattr(obj, 'info', None)

      def __array_wrap__(self, out_arr, context=None):
          print('In __array_wrap__:')
          print('   self is %s' % repr(self))
          print('   arr is %s' % repr(out_arr))
          # then just call the parent; note that the output array, not
          # self, is the first argument here
          return super(MySubClass, self).__array_wrap__(out_arr, context)

We run a ufunc on an instance of our new array:

>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
   self is MySubClass([0, 1, 2, 3, 4])
   obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
   self is MySubClass([0, 1, 2, 3, 4])
   arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
   self is MySubClass([1, 3, 5, 7, 9])
   obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'

Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
of the addition.  In turn, the default ``__array_wrap__``
(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
and called ``__array_finalize__`` - hence the copying of the ``info``
attribute.  This has all happened at the C level.

But, we could do anything we wanted:

.. testcode::

  class SillySubClass(np.ndarray):

      def __array_wrap__(self, arr, context=None):
          return 'I lost your data'

>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'

So, by defining a specific ``__array_wrap__`` method for our subclass, we
can tweak the output from ufuncs. The ``__array_wrap__`` method requires
``self``, then an argument - which is the result of the ufunc - and an
optional parameter *context*. This parameter is returned by ufuncs as a
3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the
ufunc), but is not set by other numpy functions. Though, as seen above, it
is possible to do otherwise, ``__array_wrap__`` should return an instance
of its containing class.  See the masked array subclass for an
implementation.

In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on the
way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing but
pass through the array. ``__array_prepare__`` should not attempt to access
the array data or resize the array, it is intended for setting the output
array type, updating attributes and metadata, and performing any checks
based on the input that may be desired before computation begins. Like
``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.

Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------

One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views.  Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory.
NumPy keeps track of where the data came from for a particular array or view, with the ``base`` attribute: >>> # A normal ndarray, that owns its own data >>> arr = np.zeros((4,)) >>> # In this case, base is None >>> arr.base is None True >>> # We take a view >>> v1 = arr[1:] >>> # base now points to the array that it derived from >>> v1.base is arr True >>> # Take a view of a view >>> v2 = v1[1:] >>> # base points to the view it derived from >>> v2.base is v1 True In general, if the array owns its own memory, as for ``arr`` in this case, then ``arr.base`` will be None - there are some exceptions to this - see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need to know whether or not to do some specific cleanup when the subclassed array is deleted. For example, we may only want to do the cleanup if the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. Subclassing and Downstream Compatibility ---------------------------------------- When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray`` interface, it is your responsibility to decide how aligned your APIs will be with those of numpy. For convenience, many numpy functions that have a corresponding ``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking if the first argument to a function has a method of the same name. If it exists, the method is called instead of coercing the arguments to a numpy array. For example, if you want your sub-class or duck-type to be compatible with numpy's ``sum`` function, the method signature for this object's ``sum`` method should be the following: .. testcode:: def sum(self, axis=None, dtype=None, out=None, keepdims=False): ... This is the exact same method signature for ``np.sum``, so now if a user calls ``np.sum`` on this object, numpy will call the object's own ``sum`` method and pass in these arguments enumerated above in the signature, and no errors will be raised because the signatures are completely compatible with each other. If, however, you decide to deviate from this signature and do something like this: .. testcode:: def sum(self, axis=None, dtype=None): ... This object is no longer compatible with ``np.sum`` because if you call ``np.sum``, it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError to be raised. If you wish to maintain compatibility with numpy and its subsequent versions (which might add new keyword arguments) but do not want to surface all of numpy's arguments, your function's signature should accept ``**kwargs``. For example: .. testcode:: def sum(self, axis=None, dtype=None, **unused_kwargs): ... This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. """ from __future__ import division, absolute_import, print_function
28,560
36.929615
85
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/fft/setup.py
from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fft', parent_package, top_path) config.add_data_dir('tests') # Configure fftpack_lite config.add_extension('fftpack_lite', sources=['fftpack_litemodule.c', 'fftpack.c'] ) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration)
550
26.55
70
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/numpy/fft/helper.py
""" Discrete Fourier Transforms - helper.py """ from __future__ import division, absolute_import, print_function import collections import threading from numpy.compat import integer_types from numpy.core import ( asarray, concatenate, arange, take, integer, empty ) # Created by Pearu Peterson, September 2002 __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] integer_types = integer_types + (integer,) def fftshift(x, axes=None): """ Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. Parameters ---------- x : array_like Input array. axes : int or shape tuple, optional Axes over which to shift. Default is None, which shifts all axes. Returns ------- y : ndarray The shifted array. See Also -------- ifftshift : The inverse of `fftshift`. Examples -------- >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) >>> np.fft.fftshift(freqs) array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) Shift the zero-frequency component only along the second axis: >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.fftshift(freqs, axes=(1,)) array([[ 2., 0., 1.], [-4., 3., 4.], [-1., -3., -2.]]) """ tmp = asarray(x) ndim = tmp.ndim if axes is None: axes = list(range(ndim)) elif isinstance(axes, integer_types): axes = (axes,) y = tmp for k in axes: n = tmp.shape[k] p2 = (n+1)//2 mylist = concatenate((arange(p2, n), arange(p2))) y = take(y, mylist, k) return y def ifftshift(x, axes=None): """ The inverse of `fftshift`. Although identical for even-length `x`, the functions differ by one sample for odd-length `x`. Parameters ---------- x : array_like Input array. axes : int or shape tuple, optional Axes over which to calculate. Defaults to None, which shifts all axes. Returns ------- y : ndarray The shifted array. See Also -------- fftshift : Shift zero-frequency component to the center of the spectrum. Examples -------- >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.ifftshift(np.fft.fftshift(freqs)) array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) """ tmp = asarray(x) ndim = tmp.ndim if axes is None: axes = list(range(ndim)) elif isinstance(axes, integer_types): axes = (axes,) y = tmp for k in axes: n = tmp.shape[k] p2 = n-(n+1)//2 mylist = concatenate((arange(p2, n), arange(p2))) y = take(y, mylist, k) return y def fftfreq(n, d=1.0): """ Return the Discrete Fourier Transform sample frequencies. The returned float array `f` contains the frequency bin centers in cycles per unit of the sample spacing (with zero at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. Given a window length `n` and a sample spacing `d`:: f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd Parameters ---------- n : int Window length. d : scalar, optional Sample spacing (inverse of the sampling rate). Defaults to 1. Returns ------- f : ndarray Array of length `n` containing the sample frequencies. Examples -------- >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 >>> freq = np.fft.fftfreq(n, d=timestep) >>> freq array([ 0. , 1.25, 2.5 , 3.75, -5. 
, -3.75, -2.5 , -1.25])

    """
    if not isinstance(n, integer_types):
        raise ValueError("n should be an integer")
    val = 1.0 / (n * d)
    results = empty(n, int)
    N = (n-1)//2 + 1
    p1 = arange(0, N, dtype=int)
    results[:N] = p1
    p2 = arange(-(n//2), 0, dtype=int)
    results[N:] = p2
    return results * val
    #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)


def rfftfreq(n, d=1.0):
    """
    Return the Discrete Fourier Transform sample frequencies
    (for usage with rfft, irfft).

    The returned float array `f` contains the frequency bin centers in cycles
    per unit of the sample spacing (with zero at the start).  For instance, if
    the sample spacing is in seconds, then the frequency unit is
    cycles/second.

    Given a window length `n` and a sample spacing `d`::

      f = [0, 1, ...,     n/2-1,     n/2] / (d*n)   if n is even
      f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n)   if n is odd

    Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
    the Nyquist frequency component is considered to be positive.

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate). Defaults to 1.

    Returns
    -------
    f : ndarray
        Array of length ``n//2 + 1`` containing the sample frequencies.

    Examples
    --------
    >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
    >>> fourier = np.fft.rfft(signal)
    >>> n = signal.size
    >>> sample_rate = 100
    >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
    >>> freq
    array([  0.,  10.,  20.,  30.,  40., -50., -40., -30., -20., -10.])
    >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
    >>> freq
    array([  0.,  10.,  20.,  30.,  40.,  50.])

    """
    if not isinstance(n, integer_types):
        raise ValueError("n should be an integer")
    val = 1.0/(n*d)
    N = n//2 + 1
    results = arange(0, N, dtype=int)
    return results * val
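# A quick cross-check of the orderings documented above (a sketch added for
# illustration, not part of the original module): `fftfreq` lists positive
# frequencies first and then negative ones, `fftshift` reorders them
# monotonically, and `rfftfreq` is already monotonic:
#
#   >>> import numpy as np
#   >>> np.fft.fftfreq(8, d=0.5)
#   array([ 0.  ,  0.25,  0.5 ,  0.75, -1.  , -0.75, -0.5 , -0.25])
#   >>> np.fft.fftshift(np.fft.fftfreq(8, d=0.5))
#   array([-1.  , -0.75, -0.5 , -0.25,  0.  ,  0.25,  0.5 ,  0.75])
#   >>> np.fft.rfftfreq(8, d=0.5)
#   array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])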
class _FFTCache(object):
    """
    Cache for the FFT twiddle factors as an LRU (least recently used) cache.

    Parameters
    ----------
    max_size_in_mb : int
        Maximum memory usage of the cache before items are being evicted.
    max_item_count : int
        Maximum item count of the cache before items are being evicted.

    Notes
    -----
    Items will be evicted if either limit has been reached upon getting and
    setting. The maximum memory usage is not strictly the given
    ``max_size_in_mb`` but rather
    ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. Thus the cache will
    never be completely cleared - at least one item will remain and a single
    large item can cause the cache to retain several smaller items even if
    the given maximum cache size has been exceeded.
    """
    def __init__(self, max_size_in_mb, max_item_count):
        self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
        self._max_item_count = max_item_count
        self._dict = collections.OrderedDict()
        self._lock = threading.Lock()

    def put_twiddle_factors(self, n, factors):
        """
        Store twiddle factors for an FFT of length n in the cache.

        Putting multiple twiddle factors for a certain n will store them
        multiple times.

        Parameters
        ----------
        n : int
            Data length for the FFT.
        factors : ndarray
            The actual twiddle values.
        """
        with self._lock:
            # Pop + later add to move it to the end for LRU behavior.
            # Internally everything is stored in a dictionary whose values
            # are lists.
            try:
                value = self._dict.pop(n)
            except KeyError:
                value = []
            value.append(factors)
            self._dict[n] = value
            self._prune_cache()

    def pop_twiddle_factors(self, n):
        """
        Pop twiddle factors for an FFT of length n from the cache.

        Will return None if the requested twiddle factors are not available
        in the cache.

        Parameters
        ----------
        n : int
            Data length for the FFT.

        Returns
        -------
        out : ndarray or None
            The retrieved twiddle factors if available, else None.
        """
        with self._lock:
            if n not in self._dict or not self._dict[n]:
                return None
            # Pop + later add to move it to the end for LRU behavior.
            all_values = self._dict.pop(n)
            value = all_values.pop()
            # Only put back if there are still some arrays left in the list.
            if all_values:
                self._dict[n] = all_values
            return value

    def _prune_cache(self):
        # Always keep at least one item.
        while len(self._dict) > 1 and (
                len(self._dict) > self._max_item_count or
                self._check_size()):
            self._dict.popitem(last=False)

    def _check_size(self):
        item_sizes = [sum(_j.nbytes for _j in _i)
                      for _i in self._dict.values() if _i]
        if not item_sizes:
            return False
        max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))
        return sum(item_sizes) > max_size
9,523
28.395062
79
py