Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arraypad.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arraysetops.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arrayterator.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/function_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/histograms.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/index_tricks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/polynomial.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/setup.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/shape_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/twodim_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/type_check.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/ufunclike.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/arrayterator.pyi +49 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/histograms.pyi +47 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/npyio.pyi +330 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/setup.py +12 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/shape_base.pyi +220 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/stride_tricks.py +547 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/stride_tricks.pyi +80 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__datasource.py +350 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__iotools.py +353 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__version.py +64 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arraypad.py +1380 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arraysetops.py +944 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arrayterator.py +46 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_financial_expired.py +11 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_format.py +1028 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_function_base.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_histograms.py +816 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_index_tricks.py +551 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_io.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_loadtxt.py +1048 -0
- env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_mixins.py +216 -0
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc
ADDED
Binary file (20.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc
ADDED
Binary file (25.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc
ADDED
Binary file (4.83 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arraypad.cpython-310.pyc
ADDED
Binary file (22.3 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arraysetops.cpython-310.pyc
ADDED
Binary file (28 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/arrayterator.cpython-310.pyc
ADDED
Binary file (7.03 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc
ADDED
Binary file (27 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/function_base.cpython-310.pyc
ADDED
Binary file (165 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/histograms.cpython-310.pyc
ADDED
Binary file (30.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/index_tricks.cpython-310.pyc
ADDED
Binary file (29.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc
ADDED
Binary file (7.04 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-310.pyc
ADDED
Binary file (59 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc
ADDED
Binary file (74.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/polynomial.cpython-310.pyc
ADDED
Binary file (41.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc
ADDED
Binary file (48.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc
ADDED
Binary file (15.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/setup.cpython-310.pyc
ADDED
Binary file (624 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/shape_base.cpython-310.pyc
ADDED
Binary file (35.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc
ADDED
Binary file (16.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/twodim_base.cpython-310.pyc
ADDED
Binary file (33.1 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/type_check.cpython-310.pyc
ADDED
Binary file (19.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/ufunclike.cpython-310.pyc
ADDED
Binary file (6.27 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc
ADDED
Binary file (10.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (29.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/arrayterator.pyi
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections.abc import Generator
|
2 |
+
from typing import (
|
3 |
+
Any,
|
4 |
+
TypeVar,
|
5 |
+
Union,
|
6 |
+
overload,
|
7 |
+
)
|
8 |
+
|
9 |
+
from numpy import ndarray, dtype, generic
|
10 |
+
from numpy._typing import DTypeLike
|
11 |
+
|
12 |
+
# TODO: Set a shape bound once we've got proper shape support
|
13 |
+
_Shape = TypeVar("_Shape", bound=Any)
|
14 |
+
_DType = TypeVar("_DType", bound=dtype[Any])
|
15 |
+
_ScalarType = TypeVar("_ScalarType", bound=generic)
|
16 |
+
|
17 |
+
_Index = Union[
|
18 |
+
Union[ellipsis, int, slice],
|
19 |
+
tuple[Union[ellipsis, int, slice], ...],
|
20 |
+
]
|
21 |
+
|
22 |
+
__all__: list[str]
|
23 |
+
|
24 |
+
# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
|
25 |
+
# but its ``__getattr__` method does wrap around the former and thus has
|
26 |
+
# access to all its methods
|
27 |
+
|
28 |
+
class Arrayterator(ndarray[_Shape, _DType]):
|
29 |
+
var: ndarray[_Shape, _DType] # type: ignore[assignment]
|
30 |
+
buf_size: None | int
|
31 |
+
start: list[int]
|
32 |
+
stop: list[int]
|
33 |
+
step: list[int]
|
34 |
+
|
35 |
+
@property # type: ignore[misc]
|
36 |
+
def shape(self) -> tuple[int, ...]: ...
|
37 |
+
@property
|
38 |
+
def flat( # type: ignore[override]
|
39 |
+
self: ndarray[Any, dtype[_ScalarType]]
|
40 |
+
) -> Generator[_ScalarType, None, None]: ...
|
41 |
+
def __init__(
|
42 |
+
self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
|
43 |
+
) -> None: ...
|
44 |
+
@overload
|
45 |
+
def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
|
46 |
+
@overload
|
47 |
+
def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
|
48 |
+
def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
|
49 |
+
def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/histograms.pyi
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections.abc import Sequence
|
2 |
+
from typing import (
|
3 |
+
Literal as L,
|
4 |
+
Any,
|
5 |
+
SupportsIndex,
|
6 |
+
)
|
7 |
+
|
8 |
+
from numpy._typing import (
|
9 |
+
NDArray,
|
10 |
+
ArrayLike,
|
11 |
+
)
|
12 |
+
|
13 |
+
_BinKind = L[
|
14 |
+
"stone",
|
15 |
+
"auto",
|
16 |
+
"doane",
|
17 |
+
"fd",
|
18 |
+
"rice",
|
19 |
+
"scott",
|
20 |
+
"sqrt",
|
21 |
+
"sturges",
|
22 |
+
]
|
23 |
+
|
24 |
+
__all__: list[str]
|
25 |
+
|
26 |
+
def histogram_bin_edges(
|
27 |
+
a: ArrayLike,
|
28 |
+
bins: _BinKind | SupportsIndex | ArrayLike = ...,
|
29 |
+
range: None | tuple[float, float] = ...,
|
30 |
+
weights: None | ArrayLike = ...,
|
31 |
+
) -> NDArray[Any]: ...
|
32 |
+
|
33 |
+
def histogram(
|
34 |
+
a: ArrayLike,
|
35 |
+
bins: _BinKind | SupportsIndex | ArrayLike = ...,
|
36 |
+
range: None | tuple[float, float] = ...,
|
37 |
+
density: bool = ...,
|
38 |
+
weights: None | ArrayLike = ...,
|
39 |
+
) -> tuple[NDArray[Any], NDArray[Any]]: ...
|
40 |
+
|
41 |
+
def histogramdd(
|
42 |
+
sample: ArrayLike,
|
43 |
+
bins: SupportsIndex | ArrayLike = ...,
|
44 |
+
range: Sequence[tuple[float, float]] = ...,
|
45 |
+
density: None | bool = ...,
|
46 |
+
weights: None | ArrayLike = ...,
|
47 |
+
) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/npyio.pyi
ADDED
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import zipfile
|
4 |
+
import types
|
5 |
+
from re import Pattern
|
6 |
+
from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
|
7 |
+
from typing import (
|
8 |
+
Literal as L,
|
9 |
+
Any,
|
10 |
+
TypeVar,
|
11 |
+
Generic,
|
12 |
+
IO,
|
13 |
+
overload,
|
14 |
+
Protocol,
|
15 |
+
)
|
16 |
+
|
17 |
+
from numpy import (
|
18 |
+
DataSource as DataSource,
|
19 |
+
ndarray,
|
20 |
+
recarray,
|
21 |
+
dtype,
|
22 |
+
generic,
|
23 |
+
float64,
|
24 |
+
void,
|
25 |
+
record,
|
26 |
+
)
|
27 |
+
|
28 |
+
from numpy.ma.mrecords import MaskedRecords
|
29 |
+
from numpy._typing import (
|
30 |
+
ArrayLike,
|
31 |
+
DTypeLike,
|
32 |
+
NDArray,
|
33 |
+
_DTypeLike,
|
34 |
+
_SupportsArrayFunc,
|
35 |
+
)
|
36 |
+
|
37 |
+
from numpy.core.multiarray import (
|
38 |
+
packbits as packbits,
|
39 |
+
unpackbits as unpackbits,
|
40 |
+
)
|
41 |
+
|
42 |
+
_T = TypeVar("_T")
|
43 |
+
_T_contra = TypeVar("_T_contra", contravariant=True)
|
44 |
+
_T_co = TypeVar("_T_co", covariant=True)
|
45 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
46 |
+
_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
|
47 |
+
_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
|
48 |
+
|
49 |
+
class _SupportsGetItem(Protocol[_T_contra, _T_co]):
|
50 |
+
def __getitem__(self, key: _T_contra, /) -> _T_co: ...
|
51 |
+
|
52 |
+
class _SupportsRead(Protocol[_CharType_co]):
|
53 |
+
def read(self) -> _CharType_co: ...
|
54 |
+
|
55 |
+
class _SupportsReadSeek(Protocol[_CharType_co]):
|
56 |
+
def read(self, n: int, /) -> _CharType_co: ...
|
57 |
+
def seek(self, offset: int, whence: int, /) -> object: ...
|
58 |
+
|
59 |
+
class _SupportsWrite(Protocol[_CharType_contra]):
|
60 |
+
def write(self, s: _CharType_contra, /) -> object: ...
|
61 |
+
|
62 |
+
__all__: list[str]
|
63 |
+
|
64 |
+
class BagObj(Generic[_T_co]):
|
65 |
+
def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
|
66 |
+
def __getattribute__(self, key: str) -> _T_co: ...
|
67 |
+
def __dir__(self) -> list[str]: ...
|
68 |
+
|
69 |
+
class NpzFile(Mapping[str, NDArray[Any]]):
|
70 |
+
zip: zipfile.ZipFile
|
71 |
+
fid: None | IO[str]
|
72 |
+
files: list[str]
|
73 |
+
allow_pickle: bool
|
74 |
+
pickle_kwargs: None | Mapping[str, Any]
|
75 |
+
_MAX_REPR_ARRAY_COUNT: int
|
76 |
+
# Represent `f` as a mutable property so we can access the type of `self`
|
77 |
+
@property
|
78 |
+
def f(self: _T) -> BagObj[_T]: ...
|
79 |
+
@f.setter
|
80 |
+
def f(self: _T, value: BagObj[_T]) -> None: ...
|
81 |
+
def __init__(
|
82 |
+
self,
|
83 |
+
fid: IO[str],
|
84 |
+
own_fid: bool = ...,
|
85 |
+
allow_pickle: bool = ...,
|
86 |
+
pickle_kwargs: None | Mapping[str, Any] = ...,
|
87 |
+
) -> None: ...
|
88 |
+
def __enter__(self: _T) -> _T: ...
|
89 |
+
def __exit__(
|
90 |
+
self,
|
91 |
+
exc_type: None | type[BaseException],
|
92 |
+
exc_value: None | BaseException,
|
93 |
+
traceback: None | types.TracebackType,
|
94 |
+
/,
|
95 |
+
) -> None: ...
|
96 |
+
def close(self) -> None: ...
|
97 |
+
def __del__(self) -> None: ...
|
98 |
+
def __iter__(self) -> Iterator[str]: ...
|
99 |
+
def __len__(self) -> int: ...
|
100 |
+
def __getitem__(self, key: str) -> NDArray[Any]: ...
|
101 |
+
def __contains__(self, key: str) -> bool: ...
|
102 |
+
def __repr__(self) -> str: ...
|
103 |
+
|
104 |
+
# NOTE: Returns a `NpzFile` if file is a zip file;
|
105 |
+
# returns an `ndarray`/`memmap` otherwise
|
106 |
+
def load(
|
107 |
+
file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
|
108 |
+
mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
|
109 |
+
allow_pickle: bool = ...,
|
110 |
+
fix_imports: bool = ...,
|
111 |
+
encoding: L["ASCII", "latin1", "bytes"] = ...,
|
112 |
+
) -> Any: ...
|
113 |
+
|
114 |
+
def save(
|
115 |
+
file: str | os.PathLike[str] | _SupportsWrite[bytes],
|
116 |
+
arr: ArrayLike,
|
117 |
+
allow_pickle: bool = ...,
|
118 |
+
fix_imports: bool = ...,
|
119 |
+
) -> None: ...
|
120 |
+
|
121 |
+
def savez(
|
122 |
+
file: str | os.PathLike[str] | _SupportsWrite[bytes],
|
123 |
+
*args: ArrayLike,
|
124 |
+
**kwds: ArrayLike,
|
125 |
+
) -> None: ...
|
126 |
+
|
127 |
+
def savez_compressed(
|
128 |
+
file: str | os.PathLike[str] | _SupportsWrite[bytes],
|
129 |
+
*args: ArrayLike,
|
130 |
+
**kwds: ArrayLike,
|
131 |
+
) -> None: ...
|
132 |
+
|
133 |
+
# File-like objects only have to implement `__iter__` and,
|
134 |
+
# optionally, `encoding`
|
135 |
+
@overload
|
136 |
+
def loadtxt(
|
137 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
138 |
+
dtype: None = ...,
|
139 |
+
comments: None | str | Sequence[str] = ...,
|
140 |
+
delimiter: None | str = ...,
|
141 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
142 |
+
skiprows: int = ...,
|
143 |
+
usecols: int | Sequence[int] = ...,
|
144 |
+
unpack: bool = ...,
|
145 |
+
ndmin: L[0, 1, 2] = ...,
|
146 |
+
encoding: None | str = ...,
|
147 |
+
max_rows: None | int = ...,
|
148 |
+
*,
|
149 |
+
quotechar: None | str = ...,
|
150 |
+
like: None | _SupportsArrayFunc = ...
|
151 |
+
) -> NDArray[float64]: ...
|
152 |
+
@overload
|
153 |
+
def loadtxt(
|
154 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
155 |
+
dtype: _DTypeLike[_SCT],
|
156 |
+
comments: None | str | Sequence[str] = ...,
|
157 |
+
delimiter: None | str = ...,
|
158 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
159 |
+
skiprows: int = ...,
|
160 |
+
usecols: int | Sequence[int] = ...,
|
161 |
+
unpack: bool = ...,
|
162 |
+
ndmin: L[0, 1, 2] = ...,
|
163 |
+
encoding: None | str = ...,
|
164 |
+
max_rows: None | int = ...,
|
165 |
+
*,
|
166 |
+
quotechar: None | str = ...,
|
167 |
+
like: None | _SupportsArrayFunc = ...
|
168 |
+
) -> NDArray[_SCT]: ...
|
169 |
+
@overload
|
170 |
+
def loadtxt(
|
171 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
172 |
+
dtype: DTypeLike,
|
173 |
+
comments: None | str | Sequence[str] = ...,
|
174 |
+
delimiter: None | str = ...,
|
175 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
176 |
+
skiprows: int = ...,
|
177 |
+
usecols: int | Sequence[int] = ...,
|
178 |
+
unpack: bool = ...,
|
179 |
+
ndmin: L[0, 1, 2] = ...,
|
180 |
+
encoding: None | str = ...,
|
181 |
+
max_rows: None | int = ...,
|
182 |
+
*,
|
183 |
+
quotechar: None | str = ...,
|
184 |
+
like: None | _SupportsArrayFunc = ...
|
185 |
+
) -> NDArray[Any]: ...
|
186 |
+
|
187 |
+
def savetxt(
|
188 |
+
fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
|
189 |
+
X: ArrayLike,
|
190 |
+
fmt: str | Sequence[str] = ...,
|
191 |
+
delimiter: str = ...,
|
192 |
+
newline: str = ...,
|
193 |
+
header: str = ...,
|
194 |
+
footer: str = ...,
|
195 |
+
comments: str = ...,
|
196 |
+
encoding: None | str = ...,
|
197 |
+
) -> None: ...
|
198 |
+
|
199 |
+
@overload
|
200 |
+
def fromregex(
|
201 |
+
file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
|
202 |
+
regexp: str | bytes | Pattern[Any],
|
203 |
+
dtype: _DTypeLike[_SCT],
|
204 |
+
encoding: None | str = ...
|
205 |
+
) -> NDArray[_SCT]: ...
|
206 |
+
@overload
|
207 |
+
def fromregex(
|
208 |
+
file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
|
209 |
+
regexp: str | bytes | Pattern[Any],
|
210 |
+
dtype: DTypeLike,
|
211 |
+
encoding: None | str = ...
|
212 |
+
) -> NDArray[Any]: ...
|
213 |
+
|
214 |
+
@overload
|
215 |
+
def genfromtxt(
|
216 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
217 |
+
dtype: None = ...,
|
218 |
+
comments: str = ...,
|
219 |
+
delimiter: None | str | int | Iterable[int] = ...,
|
220 |
+
skip_header: int = ...,
|
221 |
+
skip_footer: int = ...,
|
222 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
223 |
+
missing_values: Any = ...,
|
224 |
+
filling_values: Any = ...,
|
225 |
+
usecols: None | Sequence[int] = ...,
|
226 |
+
names: L[None, True] | str | Collection[str] = ...,
|
227 |
+
excludelist: None | Sequence[str] = ...,
|
228 |
+
deletechars: str = ...,
|
229 |
+
replace_space: str = ...,
|
230 |
+
autostrip: bool = ...,
|
231 |
+
case_sensitive: bool | L['upper', 'lower'] = ...,
|
232 |
+
defaultfmt: str = ...,
|
233 |
+
unpack: None | bool = ...,
|
234 |
+
usemask: bool = ...,
|
235 |
+
loose: bool = ...,
|
236 |
+
invalid_raise: bool = ...,
|
237 |
+
max_rows: None | int = ...,
|
238 |
+
encoding: str = ...,
|
239 |
+
*,
|
240 |
+
ndmin: L[0, 1, 2] = ...,
|
241 |
+
like: None | _SupportsArrayFunc = ...,
|
242 |
+
) -> NDArray[Any]: ...
|
243 |
+
@overload
|
244 |
+
def genfromtxt(
|
245 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
246 |
+
dtype: _DTypeLike[_SCT],
|
247 |
+
comments: str = ...,
|
248 |
+
delimiter: None | str | int | Iterable[int] = ...,
|
249 |
+
skip_header: int = ...,
|
250 |
+
skip_footer: int = ...,
|
251 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
252 |
+
missing_values: Any = ...,
|
253 |
+
filling_values: Any = ...,
|
254 |
+
usecols: None | Sequence[int] = ...,
|
255 |
+
names: L[None, True] | str | Collection[str] = ...,
|
256 |
+
excludelist: None | Sequence[str] = ...,
|
257 |
+
deletechars: str = ...,
|
258 |
+
replace_space: str = ...,
|
259 |
+
autostrip: bool = ...,
|
260 |
+
case_sensitive: bool | L['upper', 'lower'] = ...,
|
261 |
+
defaultfmt: str = ...,
|
262 |
+
unpack: None | bool = ...,
|
263 |
+
usemask: bool = ...,
|
264 |
+
loose: bool = ...,
|
265 |
+
invalid_raise: bool = ...,
|
266 |
+
max_rows: None | int = ...,
|
267 |
+
encoding: str = ...,
|
268 |
+
*,
|
269 |
+
ndmin: L[0, 1, 2] = ...,
|
270 |
+
like: None | _SupportsArrayFunc = ...,
|
271 |
+
) -> NDArray[_SCT]: ...
|
272 |
+
@overload
|
273 |
+
def genfromtxt(
|
274 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
275 |
+
dtype: DTypeLike,
|
276 |
+
comments: str = ...,
|
277 |
+
delimiter: None | str | int | Iterable[int] = ...,
|
278 |
+
skip_header: int = ...,
|
279 |
+
skip_footer: int = ...,
|
280 |
+
converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
|
281 |
+
missing_values: Any = ...,
|
282 |
+
filling_values: Any = ...,
|
283 |
+
usecols: None | Sequence[int] = ...,
|
284 |
+
names: L[None, True] | str | Collection[str] = ...,
|
285 |
+
excludelist: None | Sequence[str] = ...,
|
286 |
+
deletechars: str = ...,
|
287 |
+
replace_space: str = ...,
|
288 |
+
autostrip: bool = ...,
|
289 |
+
case_sensitive: bool | L['upper', 'lower'] = ...,
|
290 |
+
defaultfmt: str = ...,
|
291 |
+
unpack: None | bool = ...,
|
292 |
+
usemask: bool = ...,
|
293 |
+
loose: bool = ...,
|
294 |
+
invalid_raise: bool = ...,
|
295 |
+
max_rows: None | int = ...,
|
296 |
+
encoding: str = ...,
|
297 |
+
*,
|
298 |
+
ndmin: L[0, 1, 2] = ...,
|
299 |
+
like: None | _SupportsArrayFunc = ...,
|
300 |
+
) -> NDArray[Any]: ...
|
301 |
+
|
302 |
+
@overload
|
303 |
+
def recfromtxt(
|
304 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
305 |
+
*,
|
306 |
+
usemask: L[False] = ...,
|
307 |
+
**kwargs: Any,
|
308 |
+
) -> recarray[Any, dtype[record]]: ...
|
309 |
+
@overload
|
310 |
+
def recfromtxt(
|
311 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
312 |
+
*,
|
313 |
+
usemask: L[True],
|
314 |
+
**kwargs: Any,
|
315 |
+
) -> MaskedRecords[Any, dtype[void]]: ...
|
316 |
+
|
317 |
+
@overload
|
318 |
+
def recfromcsv(
|
319 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
320 |
+
*,
|
321 |
+
usemask: L[False] = ...,
|
322 |
+
**kwargs: Any,
|
323 |
+
) -> recarray[Any, dtype[record]]: ...
|
324 |
+
@overload
|
325 |
+
def recfromcsv(
|
326 |
+
fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
|
327 |
+
*,
|
328 |
+
usemask: L[True],
|
329 |
+
**kwargs: Any,
|
330 |
+
) -> MaskedRecords[Any, dtype[void]]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/setup.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def configuration(parent_package='',top_path=None):
|
2 |
+
from numpy.distutils.misc_util import Configuration
|
3 |
+
|
4 |
+
config = Configuration('lib', parent_package, top_path)
|
5 |
+
config.add_subpackage('tests')
|
6 |
+
config.add_data_dir('tests/data')
|
7 |
+
config.add_data_files('*.pyi')
|
8 |
+
return config
|
9 |
+
|
10 |
+
if __name__ == '__main__':
|
11 |
+
from numpy.distutils.core import setup
|
12 |
+
setup(configuration=configuration)
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/shape_base.pyi
ADDED
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
from collections.abc import Callable, Sequence
|
3 |
+
from typing import TypeVar, Any, overload, SupportsIndex, Protocol
|
4 |
+
|
5 |
+
if sys.version_info >= (3, 10):
|
6 |
+
from typing import ParamSpec, Concatenate
|
7 |
+
else:
|
8 |
+
from typing_extensions import ParamSpec, Concatenate
|
9 |
+
|
10 |
+
from numpy import (
|
11 |
+
generic,
|
12 |
+
integer,
|
13 |
+
ufunc,
|
14 |
+
bool_,
|
15 |
+
unsignedinteger,
|
16 |
+
signedinteger,
|
17 |
+
floating,
|
18 |
+
complexfloating,
|
19 |
+
object_,
|
20 |
+
)
|
21 |
+
|
22 |
+
from numpy._typing import (
|
23 |
+
ArrayLike,
|
24 |
+
NDArray,
|
25 |
+
_ShapeLike,
|
26 |
+
_ArrayLike,
|
27 |
+
_ArrayLikeBool_co,
|
28 |
+
_ArrayLikeUInt_co,
|
29 |
+
_ArrayLikeInt_co,
|
30 |
+
_ArrayLikeFloat_co,
|
31 |
+
_ArrayLikeComplex_co,
|
32 |
+
_ArrayLikeObject_co,
|
33 |
+
)
|
34 |
+
|
35 |
+
from numpy.core.shape_base import vstack
|
36 |
+
|
37 |
+
_P = ParamSpec("_P")
|
38 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
39 |
+
|
40 |
+
# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
|
41 |
+
# give them unique names for the sake of clarity
|
42 |
+
class _ArrayWrap(Protocol):
|
43 |
+
def __call__(
|
44 |
+
self,
|
45 |
+
array: NDArray[Any],
|
46 |
+
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
|
47 |
+
/,
|
48 |
+
) -> Any: ...
|
49 |
+
|
50 |
+
class _ArrayPrepare(Protocol):
|
51 |
+
def __call__(
|
52 |
+
self,
|
53 |
+
array: NDArray[Any],
|
54 |
+
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
|
55 |
+
/,
|
56 |
+
) -> Any: ...
|
57 |
+
|
58 |
+
class _SupportsArrayWrap(Protocol):
|
59 |
+
@property
|
60 |
+
def __array_wrap__(self) -> _ArrayWrap: ...
|
61 |
+
|
62 |
+
class _SupportsArrayPrepare(Protocol):
|
63 |
+
@property
|
64 |
+
def __array_prepare__(self) -> _ArrayPrepare: ...
|
65 |
+
|
66 |
+
__all__: list[str]
|
67 |
+
|
68 |
+
row_stack = vstack
|
69 |
+
|
70 |
+
def take_along_axis(
|
71 |
+
arr: _SCT | NDArray[_SCT],
|
72 |
+
indices: NDArray[integer[Any]],
|
73 |
+
axis: None | int,
|
74 |
+
) -> NDArray[_SCT]: ...
|
75 |
+
|
76 |
+
def put_along_axis(
|
77 |
+
arr: NDArray[_SCT],
|
78 |
+
indices: NDArray[integer[Any]],
|
79 |
+
values: ArrayLike,
|
80 |
+
axis: None | int,
|
81 |
+
) -> None: ...
|
82 |
+
|
83 |
+
@overload
|
84 |
+
def apply_along_axis(
|
85 |
+
func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]],
|
86 |
+
axis: SupportsIndex,
|
87 |
+
arr: ArrayLike,
|
88 |
+
*args: _P.args,
|
89 |
+
**kwargs: _P.kwargs,
|
90 |
+
) -> NDArray[_SCT]: ...
|
91 |
+
@overload
|
92 |
+
def apply_along_axis(
|
93 |
+
func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike],
|
94 |
+
axis: SupportsIndex,
|
95 |
+
arr: ArrayLike,
|
96 |
+
*args: _P.args,
|
97 |
+
**kwargs: _P.kwargs,
|
98 |
+
) -> NDArray[Any]: ...
|
99 |
+
|
100 |
+
def apply_over_axes(
|
101 |
+
func: Callable[[NDArray[Any], int], NDArray[_SCT]],
|
102 |
+
a: ArrayLike,
|
103 |
+
axes: int | Sequence[int],
|
104 |
+
) -> NDArray[_SCT]: ...
|
105 |
+
|
106 |
+
@overload
|
107 |
+
def expand_dims(
|
108 |
+
a: _ArrayLike[_SCT],
|
109 |
+
axis: _ShapeLike,
|
110 |
+
) -> NDArray[_SCT]: ...
|
111 |
+
@overload
|
112 |
+
def expand_dims(
|
113 |
+
a: ArrayLike,
|
114 |
+
axis: _ShapeLike,
|
115 |
+
) -> NDArray[Any]: ...
|
116 |
+
|
117 |
+
# --- Stub declarations for numpy.lib.shape_base stacking/splitting helpers ---
# Each function gets two @overload signatures: a dtype-preserving one (known
# scalar type `_SCT` in -> `NDArray[_SCT]` out) followed by a catch-all
# `ArrayLike` signature returning `NDArray[Any]`.
@overload
def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...

@overload
def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...

@overload
def array_split(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_SCT]]: ...
@overload
def array_split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...

@overload
def split(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_SCT]]: ...
@overload
def split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...

@overload
def hsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def hsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

@overload
def vsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def vsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

@overload
def dsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def dsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...

# `get_array_prepare`/`get_array_wrap` return the highest-priority hook among
# the arguments, or None when no argument provides one.
@overload
def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
@overload
def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...

@overload
def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
@overload
def get_array_wrap(*args: object) -> None | _ArrayWrap: ...

# `kron` follows the usual numeric promotion ladder: bool < unsigned < signed
# < floating < complex, with object arrays taking precedence on either side.
@overload
def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
@overload
def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...

@overload
def tile(
    A: _ArrayLike[_SCT],
    reps: int | Sequence[int],
) -> NDArray[_SCT]: ...
@overload
def tile(
    A: ArrayLike,
    reps: int | Sequence[int],
) -> NDArray[Any]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/stride_tricks.py
ADDED
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Utilities that manipulate strides to achieve desirable effects.
|
3 |
+
|
4 |
+
An explanation of strides can be found in the "ndarray.rst" file in the
|
5 |
+
NumPy reference guide.
|
6 |
+
|
7 |
+
"""
|
8 |
+
import numpy as np
|
9 |
+
from numpy.core.numeric import normalize_axis_tuple
|
10 |
+
from numpy.core.overrides import array_function_dispatch, set_module
|
11 |
+
|
12 |
+
__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
|
13 |
+
|
14 |
+
|
15 |
+
class DummyArray:
    """Minimal carrier for an ``__array_interface__`` dictionary.

    ``np.asarray`` can construct a view from any object exposing
    ``__array_interface__``; this class exists purely to hold such a dict
    and, via ``base``, to keep the source array's memory alive.
    """

    def __init__(self, interface, base=None):
        # The interface dict describes shape/strides/data pointer; `base`
        # pins the original array so its buffer is not freed.
        self.__array_interface__ = interface
        self.base = base
|
23 |
+
|
24 |
+
|
25 |
+
def _maybe_view_as_subclass(original_array, new_array):
|
26 |
+
if type(original_array) is not type(new_array):
|
27 |
+
# if input was an ndarray subclass and subclasses were OK,
|
28 |
+
# then view the result as that subclass.
|
29 |
+
new_array = new_array.view(type=type(original_array))
|
30 |
+
# Since we have done something akin to a view from original_array, we
|
31 |
+
# should let the subclass finalize (if it has it implemented, i.e., is
|
32 |
+
# not None).
|
33 |
+
if new_array.__array_finalize__:
|
34 |
+
new_array.__array_finalize__(original_array)
|
35 |
+
return new_array
|
36 |
+
|
37 |
+
|
38 |
+
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
    """
    Create a view into the array with the given shape and strides.

    .. warning:: This function has to be used with extreme care, see notes.

    Parameters
    ----------
    x : ndarray
        Array from which the new view is created.
    shape : sequence of int, optional
        The shape of the new array. Defaults to ``x.shape``.
    strides : sequence of int, optional
        The strides of the new array. Defaults to ``x.strides``.
    subok : bool, optional
        .. versionadded:: 1.10

        If True, subclasses are preserved.
    writeable : bool, optional
        .. versionadded:: 1.12

        If set to False, the returned array will always be readonly.
        Otherwise it will be writable if the original array was. It
        is advisable to set this to False if possible (see Notes).

    Returns
    -------
    view : ndarray

    See also
    --------
    broadcast_to : broadcast an array to a given shape.
    reshape : reshape an array.
    lib.stride_tricks.sliding_window_view :
        userfriendly and safe function for the creation of sliding window views.

    Notes
    -----
    ``as_strided`` creates a view into the array given the exact strides
    and shape. This means it manipulates the internal data structure of
    ndarray and, if done incorrectly, the array elements can point to
    invalid memory and can corrupt results or crash your program.
    It is advisable to always use the original ``x.strides`` when
    calculating new strides to avoid reliance on a contiguous memory
    layout.

    Furthermore, arrays created with this function often contain self
    overlapping memory, so that two elements are identical.
    Vectorized write operations on such arrays will typically be
    unpredictable. They may even give different results for small, large,
    or transposed arrays.

    Since writing to these arrays has to be tested and done with great
    care, you may want to use ``writeable=False`` to avoid accidental write
    operations.

    For these reasons it is advisable to avoid ``as_strided`` when
    possible.
    """
    # first convert input to array, possibly keeping subclass
    x = np.array(x, copy=False, subok=subok)
    # Start from the input's own interface and override only what was given;
    # unspecified shape/strides thus default to x's.
    interface = dict(x.__array_interface__)
    if shape is not None:
        interface['shape'] = tuple(shape)
    if strides is not None:
        interface['strides'] = tuple(strides)

    # DummyArray exposes the doctored interface; asarray builds a zero-copy
    # view over x's buffer (kept alive via base=x).
    array = np.asarray(DummyArray(interface, base=x))
    # The route via `__interface__` does not preserve structured
    # dtypes. Since dtype should remain unchanged, we set it explicitly.
    array.dtype = x.dtype

    view = _maybe_view_as_subclass(x, array)

    # Only ever *clear* the writeable flag; never grant writability the
    # source did not have.
    if view.flags.writeable and not writeable:
        view.flags.writeable = False

    return view
|
116 |
+
|
117 |
+
|
118 |
+
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
|
119 |
+
subok=None, writeable=None):
|
120 |
+
return (x,)
|
121 |
+
|
122 |
+
|
123 |
+
@array_function_dispatch(_sliding_window_view_dispatcher)
def sliding_window_view(x, window_shape, axis=None, *,
                        subok=False, writeable=False):
    """
    Create a sliding window view into the array with the given window shape.

    Also known as rolling or moving window, the window slides across all
    dimensions of the array and extracts subsets of the array at all window
    positions.

    .. versionadded:: 1.20.0

    Parameters
    ----------
    x : array_like
        Array to create the sliding window view from.
    window_shape : int or tuple of int
        Size of window over each axis that takes part in the sliding window.
        If `axis` is not present, must have same length as the number of
        input array dimensions. Single integers `i` are treated as if they
        were the tuple `(i,)`.
    axis : int or tuple of int, optional
        Axis or axes along which the sliding window is applied.
        By default, the sliding window is applied to all axes and
        `window_shape[i]` will refer to axis `i` of `x`.
        If `axis` is given as a `tuple of int`, `window_shape[i]` will refer
        to the axis `axis[i]` of `x`.
        Single integers `i` are treated as if they were the tuple `(i,)`.
    subok : bool, optional
        If True, sub-classes will be passed-through, otherwise the returned
        array will be forced to be a base-class array (default).
    writeable : bool, optional
        When true, allow writing to the returned view. The default is false,
        as this should be used with caution: the returned view contains the
        same memory location multiple times, so writing to one location will
        cause others to change.

    Returns
    -------
    view : ndarray
        Sliding window view of the array. The sliding window dimensions are
        inserted at the end, and the original dimensions are trimmed as
        required by the size of the sliding window.
        That is, ``view.shape = x_shape_trimmed + window_shape``, where
        ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one
        less than the corresponding window size.

    See Also
    --------
    lib.stride_tricks.as_strided: A lower-level and less safe routine for
        creating arbitrary views from custom shape and strides.
    broadcast_to: broadcast an array to a given shape.

    Notes
    -----
    For many applications using a sliding window view can be convenient, but
    potentially very slow. Often specialized solutions exist, for example
    `scipy.signal.fftconvolve` or the filtering functions in `scipy.ndimage`.
    As a rough estimate, a sliding window approach with an input size of `N`
    and a window size of `W` will scale as `O(N*W)` where frequently a
    special algorithm can achieve `O(N)`.

    Examples
    --------
    >>> x = np.arange(6)
    >>> v = sliding_window_view(x, 3)
    >>> v
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4],
           [3, 4, 5]])
    >>> v.mean(axis=-1)
    array([1., 2., 3., 4.])
    """
    # Normalize the window specification to a tuple.
    if np.iterable(window_shape):
        window_shape = tuple(window_shape)
    else:
        window_shape = (window_shape,)

    # first convert input to array, possibly keeping subclass
    x = np.array(x, copy=False, subok=subok)

    if np.any(np.array(window_shape) < 0):
        raise ValueError('`window_shape` cannot contain negative values')

    if axis is None:
        # Window over every axis: lengths must line up exactly.
        axis = tuple(range(x.ndim))
        if len(window_shape) != len(axis):
            raise ValueError(f'Since axis is `None`, must provide '
                             f'window_shape for all dimensions of `x`; '
                             f'got {len(window_shape)} window_shape elements '
                             f'and `x.ndim` is {x.ndim}.')
    else:
        # Duplicates are allowed: the same axis may be windowed repeatedly.
        axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
        if len(window_shape) != len(axis):
            raise ValueError(f'Must provide matching length window_shape and '
                             f'axis; got {len(window_shape)} window_shape '
                             f'elements and {len(axis)} axes elements.')

    # Window dimensions (appended at the end) reuse the strides of the axes
    # they slide over.
    out_strides = x.strides + tuple(x.strides[ax] for ax in axis)

    # Shrink each windowed axis by (window - 1); a repeated axis shrinks once
    # per occurrence.
    trimmed = list(x.shape)
    for ax, win in zip(axis, window_shape):
        if trimmed[ax] < win:
            raise ValueError(
                'window shape cannot be larger than input array shape')
        trimmed[ax] -= win - 1

    return as_strided(x, strides=out_strides,
                      shape=tuple(trimmed) + window_shape,
                      subok=subok, writeable=writeable)
|
338 |
+
|
339 |
+
|
340 |
+
def _broadcast_to(array, shape, subok, readonly):
    # Core implementation shared by `broadcast_to` (readonly=True) and
    # `broadcast_arrays` (readonly=False, the deprecated writable path).
    # A bare int shape is treated as a 1-tuple.
    shape = tuple(shape) if np.iterable(shape) else (shape,)
    array = np.array(array, copy=False, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-'
                         'negative')
    extras = []
    # nditer broadcasts `array` against `itershape`; `itviews[0]` is then a
    # zero-copy view of `array` with the broadcast shape.
    it = np.nditer(
        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
        op_flags=['readonly'], itershape=shape, order='C')
    with it:
        # never really has writebackifcopy semantics
        broadcast = it.itviews[0]
    result = _maybe_view_as_subclass(array, broadcast)
    # In a future version this will go away
    if not readonly and array.flags._writeable_no_warn:
        # Make the view writable but flagged so that the first write emits a
        # DeprecationWarning rather than failing outright.
        result.flags.writeable = True
        result.flags._warn_on_write = True
    return result
|
361 |
+
|
362 |
+
|
363 |
+
def _broadcast_to_dispatcher(array, shape, subok=None):
|
364 |
+
return (array,)
|
365 |
+
|
366 |
+
|
367 |
+
@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : array_like
        The array to broadcast.
    shape : tuple or int
        The shape of the desired array. A single integer ``i`` is interpreted
        as ``(i,)``.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned array will be forced to be a base-class array (default).

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.

    Raises
    ------
    ValueError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.

    See Also
    --------
    broadcast
    broadcast_arrays
    broadcast_shapes

    Notes
    -----
    .. versionadded:: 1.10.0

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    # Public entry point is always readonly; the writable variant is only
    # used internally by `broadcast_arrays`.
    return _broadcast_to(array, shape, subok=subok, readonly=True)
|
414 |
+
|
415 |
+
|
416 |
+
def _broadcast_shape(*args):
    """Returns the shape of the arrays that would result from broadcasting the
    supplied arrays against each other.
    """
    # use the old-iterator because np.nditer does not handle size 0 arrays
    # consistently
    b = np.broadcast(*args[:32])
    # unfortunately, it cannot handle 32 or more arguments directly
    for pos in range(32, len(args), 31):
        # Fold in 31 new arguments per step: the running result occupies one
        # of np.broadcast's 32 slots.
        # ironically, np.broadcast does not properly handle np.broadcast
        # objects (it treats them as scalars)
        # use broadcasting to avoid allocating the full array
        b = broadcast_to(0, b.shape)
        b = np.broadcast(b, *args[pos:(pos + 31)])
    return b.shape
|
431 |
+
|
432 |
+
|
433 |
+
@set_module('numpy')
def broadcast_shapes(*args):
    """
    Broadcast the input shapes into a single shape.

    :ref:`Learn more about broadcasting here <basics.broadcasting>`.

    .. versionadded:: 1.20.0

    Parameters
    ----------
    `*args` : tuples of ints, or ints
        The shapes to be broadcast against each other.

    Returns
    -------
    tuple
        Broadcasted shape.

    Raises
    ------
    ValueError
        If the shapes are not compatible and cannot be broadcast according
        to NumPy's broadcasting rules.

    See Also
    --------
    broadcast
    broadcast_arrays
    broadcast_to

    Examples
    --------
    >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
    (3, 2)

    >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
    (5, 6, 7)
    """
    # Empty structured dtype (`dtype=[]`) has itemsize 0, so these arrays
    # carry shape information without allocating element storage, however
    # large the requested shapes are.
    arrays = [np.empty(x, dtype=[]) for x in args]
    return _broadcast_shape(*arrays)
|
474 |
+
|
475 |
+
|
476 |
+
def _broadcast_arrays_dispatcher(*args, subok=None):
|
477 |
+
return args
|
478 |
+
|
479 |
+
|
480 |
+
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, subok=False):
    """
    Broadcast any number of arrays against each other.

    Parameters
    ----------
    `*args` : array_likes
        The arrays to broadcast.

    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned arrays will be forced to be a base-class array (default).

    Returns
    -------
    broadcasted : list of arrays
        These arrays are views on the original arrays. They are typically
        not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location. If you need
        to write to the arrays, make copies first. While you can set the
        ``writable`` flag True, writing to a single output value may end up
        changing more than one location in the output array.

        .. deprecated:: 1.17
            The output is currently marked so that if written to, a deprecation
            warning will be emitted. A future version will set the
            ``writable`` flag False so writing to it will raise an error.

    See Also
    --------
    broadcast
    broadcast_to
    broadcast_shapes

    Examples
    --------
    >>> x = np.array([[1,2,3]])
    >>> y = np.array([[4],[5]])
    >>> np.broadcast_arrays(x, y)
    [array([[1, 2, 3],
           [1, 2, 3]]), array([[4, 4, 4],
           [5, 5, 5]])]

    Here is a useful idiom for getting contiguous copies instead of
    non-contiguous views.

    >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
    [array([[1, 2, 3],
           [1, 2, 3]]), array([[4, 4, 4],
           [5, 5, 5]])]

    """
    # nditer is not used here to avoid the limit of 32 arrays.
    # Otherwise, something like the following one-liner would suffice:
    # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
    #                  order='C').itviews

    args = [np.array(_m, copy=False, subok=subok) for _m in args]

    shape = _broadcast_shape(*args)

    if all(array.shape == shape for array in args):
        # Common case where nothing needs to be broadcasted.
        return args

    # readonly=False keeps the deprecated writable-with-warning behavior.
    return [_broadcast_to(array, shape, subok=subok, readonly=False)
            for array in args]
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/stride_tricks.pyi
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Type stubs for `numpy.lib.stride_tricks`. As elsewhere in the numpy stubs,
# each array-returning function has a dtype-preserving overload followed by a
# catch-all `ArrayLike` overload.
from collections.abc import Iterable
from typing import Any, TypeVar, overload, SupportsIndex

from numpy import generic
from numpy._typing import (
    NDArray,
    ArrayLike,
    _ShapeLike,
    _Shape,
    _ArrayLike
)

# Scalar-type variable used to propagate the input dtype to the output.
_SCT = TypeVar("_SCT", bound=generic)

__all__: list[str]

class DummyArray:
    __array_interface__: dict[str, Any]
    base: None | NDArray[Any]
    def __init__(
        self,
        interface: dict[str, Any],
        base: None | NDArray[Any] = ...,
    ) -> None: ...

@overload
def as_strided(
    x: _ArrayLike[_SCT],
    shape: None | Iterable[int] = ...,
    strides: None | Iterable[int] = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def as_strided(
    x: ArrayLike,
    shape: None | Iterable[int] = ...,
    strides: None | Iterable[int] = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

@overload
def sliding_window_view(
    x: _ArrayLike[_SCT],
    window_shape: int | Iterable[int],
    axis: None | SupportsIndex = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def sliding_window_view(
    x: ArrayLike,
    window_shape: int | Iterable[int],
    axis: None | SupportsIndex = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

@overload
def broadcast_to(
    array: _ArrayLike[_SCT],
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def broadcast_to(
    array: ArrayLike,
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[Any]: ...

def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...

def broadcast_arrays(
    *args: ArrayLike,
    subok: bool = ...,
) -> list[NDArray[Any]]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-310.pyc
ADDED
Binary file (12.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-310.pyc
ADDED
Binary file (1.64 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-310.pyc
ADDED
Binary file (96.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__datasource.py
ADDED
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import pytest
|
3 |
+
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
|
4 |
+
from shutil import rmtree
|
5 |
+
|
6 |
+
import numpy.lib._datasource as datasource
|
7 |
+
from numpy.testing import assert_, assert_equal, assert_raises
|
8 |
+
|
9 |
+
import urllib.request as urllib_request
|
10 |
+
from urllib.parse import urlparse
|
11 |
+
from urllib.error import URLError
|
12 |
+
|
13 |
+
|
14 |
+
def urlopen_stub(url, data=None):
    '''Stub to replace urlopen for testing.

    Returns a fresh temporary file for the one known-valid URL and raises
    URLError for everything else, so tests never touch the network.
    '''
    if url != valid_httpurl():
        raise URLError('Name or service not known')
    return NamedTemporaryFile(prefix='urltmp_')
|
21 |
+
|
22 |
+
# setup and teardown
old_urlopen = None  # saved original urllib_request.urlopen, restored on teardown


def setup_module():
    global old_urlopen

    # Monkeypatch urlopen module-wide so no test in this file hits the
    # real network.
    old_urlopen = urllib_request.urlopen
    urllib_request.urlopen = urlopen_stub


def teardown_module():
    # Restore the genuine urlopen saved in setup_module().
    urllib_request.urlopen = old_urlopen
|
35 |
+
|
36 |
+
# A valid website for more robust testing
http_path = 'http://www.google.com/'
http_file = 'index.html'

# An unreachable URL/file pair used to exercise failure paths
# (urlopen_stub raises URLError for anything but the valid URL).
http_fakepath = 'http://fake.abc.web/site/'
http_fakefile = 'fake.txt'

# NOTE(review): presumably used by tests to check handling of suspicious /
# traversal-style filenames — confirm against the test methods using it.
malicious_files = ['/etc/shadow', '../../shadow',
                   '..\\system.dat', 'c:\\windows\\system.dat']

# Sentinel content written into generated test files and asserted on read.
magic_line = b'three is the magic number'
|
47 |
+
|
48 |
+
|
49 |
+
# Utility functions used by many tests
def valid_textfile(filedir):
    """Create an empty, closed ``.txt`` temp file in *filedir*; return its path."""
    handle, fname = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir,
                            text=True)
    os.close(handle)
    return fname
|
55 |
+
|
56 |
+
|
57 |
+
def invalid_textfile(filedir):
    """Return a plausible ``.txt`` path in *filedir* that does not exist.

    Creates a real temp file and immediately deletes it, so the name is
    unique but guaranteed invalid.
    """
    handle, fname = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
    os.close(handle)
    os.remove(fname)
    return fname
|
63 |
+
|
64 |
+
|
65 |
+
def valid_httpurl():
    # Full URL that urlopen_stub accepts.
    return http_path+http_file


def invalid_httpurl():
    # Full URL that urlopen_stub rejects with URLError.
    return http_fakepath+http_fakefile


def valid_baseurl():
    # Base (directory) part of the valid URL.
    return http_path


def invalid_baseurl():
    # Base (directory) part of the invalid URL.
    return http_fakepath


def valid_httpfile():
    # Filename part of the valid URL.
    return http_file


def invalid_httpfile():
    # Filename part of the invalid URL.
    return http_fakefile
|
87 |
+
|
88 |
+
|
89 |
+
class TestDataSourceOpen:
    """Tests for ``DataSource.open`` over HTTP URLs, local files and
    compressed (gzip/bz2) files."""

    def setup_method(self):
        # Fresh destination directory and DataSource per test.
        self.tmpdir = mkdtemp()
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        # Opening a valid URL returns a usable file-like object.
        fh = self.ds.open(valid_httpurl())
        assert_(fh)
        fh.close()

    def test_InvalidHTTP(self):
        # Opening a bad URL raises OSError with errno left as None.
        url = invalid_httpurl()
        assert_raises(OSError, self.ds.open, url)
        try:
            self.ds.open(url)
        except OSError as e:
            # Regression test for bug fixed in r4342.
            assert_(e.errno is None)

    def test_InvalidHTTPCacheURLError(self):
        # The internal _cache helper surfaces URLError for bad URLs.
        assert_raises(URLError, self.ds._cache, invalid_httpurl())

    def test_ValidFile(self):
        # A plain local file opens successfully.
        local_file = valid_textfile(self.tmpdir)
        fh = self.ds.open(local_file)
        assert_(fh)
        fh.close()

    def test_InvalidFile(self):
        # A missing local file raises OSError.
        invalid_file = invalid_textfile(self.tmpdir)
        assert_raises(OSError, self.ds.open, invalid_file)

    def test_ValidGzipFile(self):
        try:
            import gzip
        except ImportError:
            # We don't have the gzip capabilities to test.
            pytest.skip()
        # Test datasource's internal file_opener for Gzip files:
        # write magic_line compressed, read it back through DataSource.open.
        filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
        fp = gzip.open(filepath, 'w')
        fp.write(magic_line)
        fp.close()
        fp = self.ds.open(filepath)
        result = fp.readline()
        fp.close()
        assert_equal(magic_line, result)

    def test_ValidBz2File(self):
        try:
            import bz2
        except ImportError:
            # We don't have the bz2 capabilities to test.
            pytest.skip()
        # Test datasource's internal file_opener for BZip2 files:
        # write magic_line compressed, read it back through DataSource.open.
        filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
        fp = bz2.BZ2File(filepath, 'w')
        fp.write(magic_line)
        fp.close()
        fp = self.ds.open(filepath)
        result = fp.readline()
        fp.close()
        assert_equal(magic_line, result)
|
156 |
+
|
157 |
+
|
158 |
+
class TestDataSourceExists:
    """Tests for ``DataSource.exists`` on URLs and local files."""

    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        # A reachable URL reports as existing.
        assert_(self.ds.exists(valid_httpurl()))

    def test_InvalidHTTP(self):
        # An unreachable URL reports False (no exception).
        assert_equal(self.ds.exists(invalid_httpurl()), False)

    def test_ValidFile(self):
        # Test valid file in destpath
        tmpfile = valid_textfile(self.tmpdir)
        assert_(self.ds.exists(tmpfile))
        # Test valid local file not in destpath
        localdir = mkdtemp()
        tmpfile = valid_textfile(localdir)
        assert_(self.ds.exists(tmpfile))
        rmtree(localdir)

    def test_InvalidFile(self):
        # A non-existent local file reports False.
        tmpfile = invalid_textfile(self.tmpdir)
        assert_equal(self.ds.exists(tmpfile), False)
|
186 |
+
|
187 |
+
|
188 |
+
class TestDataSourceAbspath:
    """Tests for ``DataSource.abspath``: URL-to-local-path mapping and
    sandboxing of hostile paths inside the destination directory."""

    def setup_method(self):
        # abspath comparisons require an absolute destination directory.
        self.tmpdir = os.path.abspath(mkdtemp())
        self.ds = datasource.DataSource(self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.ds

    def test_ValidHTTP(self):
        # The expected local path is destpath/netloc/<url path>.
        scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
        local_path = os.path.join(self.tmpdir, netloc,
                                  upath.strip(os.sep).strip('/'))
        assert_equal(local_path, self.ds.abspath(valid_httpurl()))

    def test_ValidFile(self):
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]
        # Test with filename only
        assert_equal(tmpfile, self.ds.abspath(tmpfilename))
        # Test filename with complete path
        assert_equal(tmpfile, self.ds.abspath(tmpfile))

    def test_InvalidHTTP(self):
        # abspath of the fake URL must differ from that of the valid URL.
        scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
        invalidhttp = os.path.join(self.tmpdir, netloc,
                                   upath.strip(os.sep).strip('/'))
        assert_(invalidhttp != self.ds.abspath(valid_httpurl()))

    def test_InvalidFile(self):
        # Two distinct temp files must map to distinct abspaths.
        invalidfile = valid_textfile(self.tmpdir)
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]
        # Test with filename only
        assert_(invalidfile != self.ds.abspath(tmpfilename))
        # Test filename with complete path
        assert_(invalidfile != self.ds.abspath(tmpfile))

    def test_sandboxing(self):
        # Every input — URLs, plain names and the malicious_files list of
        # traversal attempts — must resolve inside the destination dir.
        tmpfile = valid_textfile(self.tmpdir)
        tmpfilename = os.path.split(tmpfile)[-1]

        tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))

        assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
        assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
        assert_(tmp_path(tmpfile).startswith(self.tmpdir))
        assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
        for fn in malicious_files:
            assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
            assert_(tmp_path(fn).startswith(self.tmpdir))

    def test_windows_os_sep(self):
        # Re-run the path tests with os.sep forced to the Windows separator;
        # restore the real value no matter what.
        orig_os_sep = os.sep
        try:
            os.sep = '\\'
            self.test_ValidHTTP()
            self.test_ValidFile()
            self.test_InvalidHTTP()
            self.test_InvalidFile()
            self.test_sandboxing()
        finally:
            os.sep = orig_os_sep
|
251 |
+
|
252 |
+
|
253 |
+
class TestRepositoryAbspath:
    """Tests for ``Repository.abspath`` (base-URL relative) and sandboxing."""

    def setup_method(self):
        self.tmpdir = os.path.abspath(mkdtemp())
        self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.repos

    def test_ValidHTTP(self):
        # A bare file name resolves to destpath/netloc/<url path>.
        scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
        local_path = os.path.join(self.repos._destpath, netloc,
                                  upath.strip(os.sep).strip('/'))
        filepath = self.repos.abspath(valid_httpfile())
        assert_equal(local_path, filepath)

    def test_sandboxing(self):
        # Hostile relative paths must stay inside the destination dir.
        tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
        assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
        for fn in malicious_files:
            assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
            assert_(tmp_path(fn).startswith(self.tmpdir))

    def test_windows_os_sep(self):
        # Same checks with os.sep forced to '\\'; always restore it.
        orig_os_sep = os.sep
        try:
            os.sep = '\\'
            self.test_ValidHTTP()
            self.test_sandboxing()
        finally:
            os.sep = orig_os_sep
|
284 |
+
|
285 |
+
|
286 |
+
class TestRepositoryExists:
    """Tests for ``Repository.exists`` on local, remote and cached files."""

    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)

    def teardown_method(self):
        rmtree(self.tmpdir)
        del self.repos

    def test_ValidFile(self):
        # Create local temp file
        tmpfile = valid_textfile(self.tmpdir)
        assert_(self.repos.exists(tmpfile))

    def test_InvalidFile(self):
        # A non-existent local file reports False.
        tmpfile = invalid_textfile(self.tmpdir)
        assert_equal(self.repos.exists(tmpfile), False)

    def test_RemoveHTTPFile(self):
        # A full URL (not just a repo-relative name) is also accepted.
        assert_(self.repos.exists(valid_httpurl()))

    def test_CachedHTTPFile(self):
        localfile = valid_httpurl()
        # Create a locally cached temp file with an URL based
        # directory structure. This is similar to what Repository.open
        # would do.
        scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
        local_path = os.path.join(self.repos._destpath, netloc)
        os.mkdir(local_path, 0o0700)
        tmpfile = valid_textfile(local_path)
        assert_(self.repos.exists(tmpfile))
|
317 |
+
|
318 |
+
|
319 |
+
class TestOpenFunc:
    """Tests for the module-level ``datasource.open`` convenience function."""

    def setup_method(self):
        self.tmpdir = mkdtemp()

    def teardown_method(self):
        rmtree(self.tmpdir)

    def test_DataSourceOpen(self):
        local_file = valid_textfile(self.tmpdir)
        # Test case where destpath is passed in
        fp = datasource.open(local_file, destpath=self.tmpdir)
        assert_(fp)
        fp.close()
        # Test case where default destpath is used
        fp = datasource.open(local_file)
        assert_(fp)
        fp.close()
|
336 |
+
|
337 |
+
def test_del_attr_handling():
    # DataSource __del__ can be called
    # even if __init__ fails when the
    # Exception object is caught by the
    # caller as happens in refguide_check
    # is_deprecated() function

    ds = datasource.DataSource()
    # simulate failed __init__ by removing key attribute
    # produced within __init__ and expected by __del__
    del ds._istmpdest
    # should not raise an AttributeError if __del__
    # gracefully handles failed __init__:
    ds.__del__()
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__iotools.py
ADDED
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
from datetime import date
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.testing import (
|
6 |
+
assert_, assert_equal, assert_allclose, assert_raises,
|
7 |
+
)
|
8 |
+
from numpy.lib._iotools import (
|
9 |
+
LineSplitter, NameValidator, StringConverter,
|
10 |
+
has_nested_fields, easy_dtype, flatten_dtype
|
11 |
+
)
|
12 |
+
|
13 |
+
|
14 |
+
class TestLineSplitter:
    "Tests the LineSplitter class."

    def test_no_delimiter(self):
        "Test LineSplitter w/o delimiter"
        # Default / empty delimiter splits on runs of whitespace and
        # strips the trailing '#' comment.
        strg = " 1 2 3 4  5 # test"
        test = LineSplitter()(strg)
        assert_equal(test, ['1', '2', '3', '4', '5'])
        test = LineSplitter('')(strg)
        assert_equal(test, ['1', '2', '3', '4', '5'])

    def test_space_delimiter(self):
        "Test space delimiter"
        # A single space keeps empty fields; a double space groups words.
        strg = " 1 2 3 4  5 # test"
        test = LineSplitter(' ')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])
        test = LineSplitter('  ')(strg)
        assert_equal(test, ['1 2 3 4', '5'])

    def test_tab_delimiter(self):
        "Test tab delimiter"
        strg = " 1\t 2\t 3\t 4\t 5  6"
        test = LineSplitter('\t')(strg)
        assert_equal(test, ['1', '2', '3', '4', '5  6'])
        strg = " 1  2\t 3  4\t 5  6"
        test = LineSplitter('\t')(strg)
        assert_equal(test, ['1  2', '3  4', '5  6'])

    def test_other_delimiter(self):
        "Test LineSplitter on delimiter"
        strg = "1,2,3,4,,5"
        test = LineSplitter(',')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])
        #
        strg = " 1,2,3,4,,5 # test"
        test = LineSplitter(',')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])

        # gh-11028 bytes comment/delimiters should get encoded
        strg = b" 1,2,3,4,,5 % test"
        test = LineSplitter(delimiter=b',', comments=b'%')(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5'])

    def test_constant_fixed_width(self):
        "Test LineSplitter w/ fixed-width fields"
        # An int delimiter means fixed-width columns of that many chars.
        strg = "  1  2  3  4     5   # test"
        test = LineSplitter(3)(strg)
        assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
        #
        strg = "  1     3  4  5  6# test"
        test = LineSplitter(20)(strg)
        assert_equal(test, ['1     3  4  5  6'])
        #
        strg = "  1     3  4  5  6# test"
        test = LineSplitter(30)(strg)
        assert_equal(test, ['1     3  4  5  6'])

    def test_variable_fixed_width(self):
        # A tuple delimiter gives per-column widths.
        strg = "  1     3  4  5  6# test"
        test = LineSplitter((3, 6, 6, 3))(strg)
        assert_equal(test, ['1', '3', '4  5', '6'])
        #
        strg = "  1     3  4  5  6# test"
        test = LineSplitter((6, 6, 9))(strg)
        assert_equal(test, ['1', '3  4', '5  6'])
|
79 |
+
|
80 |
+
# -----------------------------------------------------------------------------
|
81 |
+
|
82 |
+
|
83 |
+
class TestNameValidator:
    """Tests for NameValidator: case handling, exclusion lists and the
    generation of default field names."""

    def test_case_sensitivity(self):
        "Test case sensitivity"
        # case_sensitive=False/'upper'/'lower' fold names and deduplicate
        # collisions with a numeric suffix.
        names = ['A', 'a', 'b', 'c']
        test = NameValidator().validate(names)
        assert_equal(test, ['A', 'a', 'b', 'c'])
        test = NameValidator(case_sensitive=False).validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='upper').validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='lower').validate(names)
        assert_equal(test, ['a', 'a_1', 'b', 'c'])

        # check exceptions
        assert_raises(ValueError, NameValidator, case_sensitive='foobar')

    def test_excludelist(self):
        "Test excludelist"
        # Excluded names get a trailing underscore; spaces become underscores.
        names = ['dates', 'data', 'Other Data', 'mask']
        validator = NameValidator(excludelist=['dates', 'data', 'mask'])
        test = validator.validate(names)
        assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])

    def test_missing_names(self):
        "Test validate missing names"
        # Empty entries are replaced with f0, f1, ... skipping taken names.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist), ['a', 'b', 'c'])
        namelist = ('', 'b', 'c')
        assert_equal(validator(namelist), ['f0', 'b', 'c'])
        namelist = ('a', 'b', '')
        assert_equal(validator(namelist), ['a', 'b', 'f0'])
        namelist = ('', 'f0', '')
        assert_equal(validator(namelist), ['f1', 'f0', 'f2'])

    def test_validate_nb_names(self):
        "Test validate nb names"
        # nbfields truncates or pads (using defaultfmt) the name list.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist, nbfields=1), ('a',))
        assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
                     ['a', 'b', 'c', 'g0', 'g1'])

    def test_validate_wo_names(self):
        "Test validate no names"
        # None passes through unless nbfields forces generated names.
        namelist = None
        validator = NameValidator()
        assert_(validator(namelist) is None)
        assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
|
133 |
+
|
134 |
+
# -----------------------------------------------------------------------------
|
135 |
+
|
136 |
+
|
137 |
+
def _bytes_to_date(s):
|
138 |
+
return date(*time.strptime(s, "%Y-%m-%d")[:3])
|
139 |
+
|
140 |
+
|
141 |
+
class TestStringConverter:
    "Test StringConverter"

    def test_creation(self):
        "Test creation of a StringConverter"
        converter = StringConverter(int, -99999)
        assert_equal(converter._status, 1)
        assert_equal(converter.default, -99999)

    def test_upgrade(self):
        "Tests the upgrade method."

        converter = StringConverter()
        assert_equal(converter._status, 0)

        # test int
        assert_equal(converter.upgrade('0'), 0)
        assert_equal(converter._status, 1)

        # On systems where long defaults to 32-bit, the statuses will be
        # offset by one, so we check for this here.
        import numpy.core.numeric as nx
        status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)

        # test int > 2**32
        assert_equal(converter.upgrade('17179869184'), 17179869184)
        assert_equal(converter._status, 1 + status_offset)

        # test float
        assert_allclose(converter.upgrade('0.'), 0.0)
        assert_equal(converter._status, 2 + status_offset)

        # test complex
        assert_equal(converter.upgrade('0j'), complex('0j'))
        assert_equal(converter._status, 3 + status_offset)

        # test str
        # note that the longdouble type has been skipped, so the
        # _status increases by 2. Everything should succeed with
        # unicode conversion (8).
        for s in ['a', b'a']:
            res = converter.upgrade(s)
            assert_(type(res) is str)
            assert_equal(res, 'a')
            assert_equal(converter._status, 8 + status_offset)

    def test_missing(self):
        "Tests the use of missing values."
        converter = StringConverter(missing_values=('missing',
                                                    'missed'))
        converter.upgrade('0')
        assert_equal(converter('0'), 0)
        assert_equal(converter(''), converter.default)
        assert_equal(converter('missing'), converter.default)
        assert_equal(converter('missed'), converter.default)
        # BUG FIX: the original wrapped this call in ``try/except ValueError:
        # pass``, which passed silently even when no exception was raised.
        # 'miss' is neither parseable as an int nor a declared missing value,
        # so the converter must reject it.
        assert_raises(ValueError, converter, 'miss')

    def test_upgrademapper(self):
        "Tests updatemapper"
        # Register a custom converter in the class-level mapper and restore
        # the original mapper afterwards so other tests are unaffected.
        dateparser = _bytes_to_date
        _original_mapper = StringConverter._mapper[:]
        try:
            StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
            convert = StringConverter(dateparser, date(2000, 1, 1))
            test = convert('2001-01-01')
            assert_equal(test, date(2001, 1, 1))
            test = convert('2009-01-01')
            assert_equal(test, date(2009, 1, 1))
            # An empty field falls back to the declared default.
            test = convert('')
            assert_equal(test, date(2000, 1, 1))
        finally:
            StringConverter._mapper = _original_mapper

    def test_string_to_object(self):
        "Make sure that string-to-object functions are properly recognized"
        old_mapper = StringConverter._mapper[:]  # copy of list
        conv = StringConverter(_bytes_to_date)
        # Recognizing the function must not mutate the shared mapper.
        assert_equal(conv._mapper, old_mapper)
        assert_(hasattr(conv, 'default'))

    def test_keep_default(self):
        "Make sure we don't lose an explicit default"
        converter = StringConverter(None, missing_values='',
                                    default=-999)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, -999)
        assert_equal(converter.type, np.dtype(float))
        #
        converter = StringConverter(
            None, missing_values='', default=0)
        converter.upgrade('3.14159265')
        assert_equal(converter.default, 0)
        assert_equal(converter.type, np.dtype(float))

    def test_keep_default_zero(self):
        "Check that we don't lose a default of 0"
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(converter.default, 0)

    def test_keep_missing_values(self):
        "Check that we're not losing missing values"
        # The empty string is always treated as missing, in addition to
        # any explicitly declared values.
        converter = StringConverter(int, default=0,
                                    missing_values="N/A")
        assert_equal(
            converter.missing_values, {'', 'N/A'})

    def test_int64_dtype(self):
        "Check that int64 integer types can be specified"
        converter = StringConverter(np.int64, default=0)
        val = "-9223372036854775807"
        assert_(converter(val) == -9223372036854775807)
        val = "9223372036854775807"
        assert_(converter(val) == 9223372036854775807)

    def test_uint64_dtype(self):
        "Check that uint64 integer types can be specified"
        converter = StringConverter(np.uint64, default=0)
        val = "9223372043271415339"
        assert_(converter(val) == 9223372043271415339)
|
264 |
+
|
265 |
+
|
266 |
+
class TestMiscFunctions:
    """Tests for the helper functions has_nested_fields, easy_dtype and
    flatten_dtype."""

    def test_has_nested_dtype(self):
        "Test has_nested_dtype"
        # Only dtypes whose fields themselves have fields count as nested.
        ndtype = np.dtype(float)
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        assert_equal(has_nested_fields(ndtype), True)

    def test_easy_dtype(self):
        "Test ndtype on dtypes"
        # Simple case
        ndtype = float
        assert_equal(easy_dtype(ndtype), np.dtype(float))
        # As string w/o names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', "i4"), ('f1', "f8")]))
        # As string w/o names but different default format
        assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
                     np.dtype([('field_000', "i4"), ('field_001', "f8")]))
        # As string w/ names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (too many)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (not enough)
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names=", b"),
                     np.dtype([('f0', "i4"), ('b', "f8")]))
        # ... (with different default format)
        assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
                     np.dtype([('a', "i4"), ('f00', "f8")]))
        # As list of tuples w/o names
        ndtype = [('A', int), ('B', float)]
        assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
        # As list of tuples w/ names
        assert_equal(easy_dtype(ndtype, names="a,b"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of tuples w/ not enough names
        assert_equal(easy_dtype(ndtype, names="a"),
                     np.dtype([('a', int), ('f0', float)]))
        # As list of tuples w/ too many names
        assert_equal(easy_dtype(ndtype, names="a,b,c"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of types w/o names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', int), ('f1', float), ('f2', float)]))
        # As list of types w names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', int), ('b', float), ('c', float)]))
        # As simple dtype w/ names
        ndtype = np.dtype(float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
        # As simple dtype w/o names (but multiple fields)
        ndtype = np.dtype(float)
        assert_equal(
            easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
            np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))

    def test_flatten_dtype(self):
        "Testing flatten_dtype"
        # Standard dtype
        dt = np.dtype([("a", "f8"), ("b", "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
        # Recursive dtype
        dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
        # dtype with shaped fields
        dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, int])
        # flatten_ndarray=True repeats each type per element of its shape.
        dt_flat = flatten_dtype(dt, True)
        assert_equal(dt_flat, [float] * 2 + [int] * 3)
        # dtype w/ titles
        dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test__version.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tests for the NumpyVersion class.
|
2 |
+
|
3 |
+
"""
|
4 |
+
from numpy.testing import assert_, assert_raises
|
5 |
+
from numpy.lib import NumpyVersion
|
6 |
+
|
7 |
+
|
8 |
+
def test_main_versions():
    """Plain X.Y.Z release strings order correctly around '1.8.0'."""
    base = NumpyVersion('1.8.0')
    assert_(base == '1.8.0')
    # Strictly newer releases compare greater than the base.
    for newer in ('1.9.0', '2.0.0', '1.8.1', '10.0.1'):
        assert_(base < newer)
    # Strictly older releases compare smaller than the base.
    for older in ('1.7.0', '1.7.1', '0.9.9'):
        assert_(base > older)
|
15 |
+
|
16 |
+
|
17 |
+
def test_version_1_point_10():
    # regression test for gh-2998: two-digit minor versions must sort
    # numerically, not lexicographically.
    ordered_pairs = (
        ('1.9.0', '1.10.0'),
        ('1.11.0', '1.11.1'),
        ('1.99.11', '1.99.12'),
    )
    for smaller, larger in ordered_pairs:
        assert_(NumpyVersion(smaller) < larger)
    assert_(NumpyVersion('1.11.0') == '1.11.0')
|
23 |
+
|
24 |
+
|
25 |
+
def test_alpha_beta_rc():
    """Ordering of alpha/beta/release-candidate suffixes."""
    rc1 = NumpyVersion('1.8.0rc1')
    assert_(rc1 == '1.8.0rc1')
    # The final release and a later rc are both newer than rc1.
    for newer in ('1.8.0', '1.8.0rc2'):
        assert_(rc1 < newer)
    # Alphas, betas and rcs of earlier versions are older.
    for older in ('1.8.0a2', '1.8.0b3', '1.7.2rc4'):
        assert_(rc1 > older)
    assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
|
34 |
+
|
35 |
+
|
36 |
+
def test_dev_version():
    """Old-style '.dev-hash' versions sort before releases and pre-releases."""
    assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
    dev = NumpyVersion('1.9.0.dev-f16acvda')
    for newer in ('1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff'):
        assert_(dev < newer)
    # The commit hash is ignored when comparing dev versions.
    assert_(dev == '1.9.0.dev-11111111')
|
42 |
+
|
43 |
+
|
44 |
+
def test_dev_a_b_rc_mixed():
    """Dev builds of a pre-release: hash is ignored, and the dev build
    sorts before the pre-release itself."""
    a2_dev = NumpyVersion('1.9.0a2.dev-f16acvda')
    assert_(a2_dev == '1.9.0a2.dev-11111111')
    assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
|
47 |
+
|
48 |
+
|
49 |
+
def test_dev0_version():
    """PEP 440 style '.dev0+hash' versions sort before real releases."""
    assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
    dev0 = NumpyVersion('1.9.0.dev0+f16acvda')
    for newer in ('1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff'):
        assert_(dev0 < newer)
    # The local-version (hash) part is ignored in comparisons.
    assert_(dev0 == '1.9.0.dev0+11111111')
|
55 |
+
|
56 |
+
|
57 |
+
def test_dev0_a_b_rc_mixed():
    """'.dev0+hash' builds of a pre-release: hash ignored, dev sorts first."""
    a2_dev0 = NumpyVersion('1.9.0a2.dev0+f16acvda')
    assert_(a2_dev0 == '1.9.0a2.dev0+11111111')
    assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
|
60 |
+
|
61 |
+
|
62 |
+
def test_raises():
    """Malformed version strings are rejected with ValueError."""
    for bad in ('1.9', '1,9.0', '1.7.x'):
        assert_raises(ValueError, NumpyVersion, bad)
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arraypad.py
ADDED
@@ -0,0 +1,1380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tests for the array padding functions.
|
2 |
+
|
3 |
+
"""
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
|
8 |
+
from numpy.lib.arraypad import _as_pairs
|
9 |
+
|
10 |
+
|
11 |
+
_numeric_dtypes = (
|
12 |
+
np.sctypes["uint"]
|
13 |
+
+ np.sctypes["int"]
|
14 |
+
+ np.sctypes["float"]
|
15 |
+
+ np.sctypes["complex"]
|
16 |
+
)
|
17 |
+
_all_modes = {
|
18 |
+
'constant': {'constant_values': 0},
|
19 |
+
'edge': {},
|
20 |
+
'linear_ramp': {'end_values': 0},
|
21 |
+
'maximum': {'stat_length': None},
|
22 |
+
'mean': {'stat_length': None},
|
23 |
+
'median': {'stat_length': None},
|
24 |
+
'minimum': {'stat_length': None},
|
25 |
+
'reflect': {'reflect_type': 'even'},
|
26 |
+
'symmetric': {'reflect_type': 'even'},
|
27 |
+
'wrap': {},
|
28 |
+
'empty': {}
|
29 |
+
}
|
30 |
+
|
31 |
+
|
32 |
+
class TestAsPairs:
|
33 |
+
def test_single_value(self):
|
34 |
+
"""Test casting for a single value."""
|
35 |
+
expected = np.array([[3, 3]] * 10)
|
36 |
+
for x in (3, [3], [[3]]):
|
37 |
+
result = _as_pairs(x, 10)
|
38 |
+
assert_equal(result, expected)
|
39 |
+
# Test with dtype=object
|
40 |
+
obj = object()
|
41 |
+
assert_equal(
|
42 |
+
_as_pairs(obj, 10),
|
43 |
+
np.array([[obj, obj]] * 10)
|
44 |
+
)
|
45 |
+
|
46 |
+
def test_two_values(self):
|
47 |
+
"""Test proper casting for two different values."""
|
48 |
+
# Broadcasting in the first dimension with numbers
|
49 |
+
expected = np.array([[3, 4]] * 10)
|
50 |
+
for x in ([3, 4], [[3, 4]]):
|
51 |
+
result = _as_pairs(x, 10)
|
52 |
+
assert_equal(result, expected)
|
53 |
+
# and with dtype=object
|
54 |
+
obj = object()
|
55 |
+
assert_equal(
|
56 |
+
_as_pairs(["a", obj], 10),
|
57 |
+
np.array([["a", obj]] * 10)
|
58 |
+
)
|
59 |
+
|
60 |
+
# Broadcasting in the second / last dimension with numbers
|
61 |
+
assert_equal(
|
62 |
+
_as_pairs([[3], [4]], 2),
|
63 |
+
np.array([[3, 3], [4, 4]])
|
64 |
+
)
|
65 |
+
# and with dtype=object
|
66 |
+
assert_equal(
|
67 |
+
_as_pairs([["a"], [obj]], 2),
|
68 |
+
np.array([["a", "a"], [obj, obj]])
|
69 |
+
)
|
70 |
+
|
71 |
+
def test_with_none(self):
|
72 |
+
expected = ((None, None), (None, None), (None, None))
|
73 |
+
assert_equal(
|
74 |
+
_as_pairs(None, 3, as_index=False),
|
75 |
+
expected
|
76 |
+
)
|
77 |
+
assert_equal(
|
78 |
+
_as_pairs(None, 3, as_index=True),
|
79 |
+
expected
|
80 |
+
)
|
81 |
+
|
82 |
+
def test_pass_through(self):
|
83 |
+
"""Test if `x` already matching desired output are passed through."""
|
84 |
+
expected = np.arange(12).reshape((6, 2))
|
85 |
+
assert_equal(
|
86 |
+
_as_pairs(expected, 6),
|
87 |
+
expected
|
88 |
+
)
|
89 |
+
|
90 |
+
def test_as_index(self):
|
91 |
+
"""Test results if `as_index=True`."""
|
92 |
+
assert_equal(
|
93 |
+
_as_pairs([2.6, 3.3], 10, as_index=True),
|
94 |
+
np.array([[3, 3]] * 10, dtype=np.intp)
|
95 |
+
)
|
96 |
+
assert_equal(
|
97 |
+
_as_pairs([2.6, 4.49], 10, as_index=True),
|
98 |
+
np.array([[3, 4]] * 10, dtype=np.intp)
|
99 |
+
)
|
100 |
+
for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
|
101 |
+
[[1, 2]] * 9 + [[1, -2]]):
|
102 |
+
with pytest.raises(ValueError, match="negative values"):
|
103 |
+
_as_pairs(x, 10, as_index=True)
|
104 |
+
|
105 |
+
def test_exceptions(self):
|
106 |
+
"""Ensure faulty usage is discovered."""
|
107 |
+
with pytest.raises(ValueError, match="more dimensions than allowed"):
|
108 |
+
_as_pairs([[[3]]], 10)
|
109 |
+
with pytest.raises(ValueError, match="could not be broadcast"):
|
110 |
+
_as_pairs([[1, 2], [3, 4]], 3)
|
111 |
+
with pytest.raises(ValueError, match="could not be broadcast"):
|
112 |
+
_as_pairs(np.ones((2, 3)), 3)
|
113 |
+
|
114 |
+
|
115 |
+
class TestConditionalShortcuts:
    """Checks for the fast paths taken by ``np.pad`` in trivial cases."""

    @pytest.mark.parametrize("mode", _all_modes.keys())
    def test_zero_padding_shortcuts(self, mode):
        # Padding every axis by zero must return the input unchanged.
        arr = np.arange(120).reshape(4, 5, 6)
        zero_pad = [(0, 0)] * arr.ndim
        assert_array_equal(arr, np.pad(arr, zero_pad, mode=mode))

    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_shallow_statistic_range(self, mode):
        # With stat_length=1 every statistic collapses to the edge value.
        arr = np.arange(120).reshape(4, 5, 6)
        one_pad = [(1, 1)] * arr.ndim
        assert_array_equal(np.pad(arr, one_pad, mode='edge'),
                           np.pad(arr, one_pad, mode=mode, stat_length=1))

    @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
    def test_clip_statistic_range(self, mode):
        # A stat_length larger than the axis behaves like the default
        # (statistic over the whole axis).
        arr = np.arange(30).reshape(5, 6)
        wide_pad = [(3, 3)] * arr.ndim
        assert_array_equal(np.pad(arr, wide_pad, mode=mode),
                           np.pad(arr, wide_pad, mode=mode, stat_length=30))
|
135 |
+
|
136 |
+
|
137 |
+
class TestStatistic:
    """Tests for the statistic-based padding modes:
    'mean', 'maximum', 'minimum' and 'median'.
    """

    def test_check_mean_stat_length(self):
        # Left pad is the mean of the first 2 samples (0.5); right pad is
        # the mean of the last 3 samples (98.).
        a = np.arange(100).astype('f')
        a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
        b = np.array(
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5,

             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
             ])
        assert_array_equal(a, b)

    def test_check_maximum_1(self):
        # Both pads are the maximum of the full array (99).
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'maximum')
        b = np.array(
            [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
            )
        assert_array_equal(a, b)

    def test_check_maximum_2(self):
        # Same as above but shifted by one; maximum is 100.
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'maximum')
        b = np.array(
            [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100,

             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_maximum_stat_length(self):
        # Left pad is the maximum of the first 10 samples (10); right pad
        # is the maximum of the last 10 samples (100).
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'maximum', stat_length=10)
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,

             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_minimum_1(self):
        # Both pads are the minimum of the full array (0).
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'minimum')
        b = np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)

    def test_check_minimum_2(self):
        # Shifted by two; minimum is 2.
        a = np.arange(100) + 2
        a = np.pad(a, (25, 20), 'minimum')
        b = np.array(
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2,

             2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
             12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
             22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
             32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
             42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
             52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
             62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
             72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
             82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
             92, 93, 94, 95, 96, 97, 98, 99, 100, 101,

             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
            )
        assert_array_equal(a, b)

    def test_check_minimum_stat_length(self):
        # Left pad is the minimum of the first 10 samples (1); right pad
        # is the minimum of the last 10 samples (91).
        a = np.arange(100) + 1
        a = np.pad(a, (25, 20), 'minimum', stat_length=10)
        b = np.array(
            [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,

              1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,

             91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
            )
        assert_array_equal(a, b)

    def test_check_median(self):
        # Both pads are the median of the full array (49.5).
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'median')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,

             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)

    def test_check_median_01(self):
        # 2-D case: the median is taken per axis.
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = np.pad(a, 1, 'median')
        b = np.array(
            [[4, 4, 5, 4, 4],

             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],

             [4, 4, 5, 4, 4]]
            )
        assert_array_equal(a, b)

    def test_check_median_02(self):
        # Same input padded along the transposed axes.
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = np.pad(a.T, 1, 'median').T
        b = np.array(
            [[5, 4, 5, 4, 5],

             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],

             [5, 4, 5, 4, 5]]
            )
        assert_array_equal(a, b)

    def test_check_median_stat_length(self):
        # Left pad is the median of the first 3 samples [0, 2, 2] -> 2;
        # right pad is the median of the last 5 [95, 96, 96, 98, 99] -> 96.
        a = np.arange(100).astype('f')
        a[1] = 2.
        a[97] = 96.
        a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
        b = np.array(
            [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2.,

              0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,

             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
            )
        assert_array_equal(a, b)

    def test_check_mean_shape_one(self):
        # Degenerate first axis: every output row is a padded copy of the
        # single input row.
        a = [[4, 5, 6]]
        a = np.pad(a, (5, 7), 'mean', stat_length=2)
        b = np.array(
            [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],

             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],

             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
            )
        assert_array_equal(a, b)

    def test_check_mean_2(self):
        # Both pads are the mean of the full array (49.5).
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'mean')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,

             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,

             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)

    @pytest.mark.parametrize("mode", [
        "mean",
        "median",
        "minimum",
        "maximum"
    ])
    def test_same_prepend_append(self, mode):
        """ Test that appended and prepended values are equal """
        # This test is constructed to trigger floating point rounding errors in
        # a way that caused gh-11216 for mode=='mean'
        a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
        a = np.pad(a, (1, 1), mode)
        assert_equal(a[0], a[-1])

    @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
    @pytest.mark.parametrize(
        "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
    )
    def test_check_negative_stat_length(self, mode, stat_length):
        # A negative stat_length anywhere in the structure must raise.
        arr = np.arange(30).reshape((6, 5))
        match = "index can't contain negative values"
        with pytest.raises(ValueError, match=match):
            np.pad(arr, 2, mode, stat_length=stat_length)

    def test_simple_stat_length(self):
        # 2-D mean padding with a single-element stat_length broadcast to
        # both axes.
        a = np.arange(30)
        a = np.reshape(a, (6, 5))
        a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
        b = np.array(
            [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],

             [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
             [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],

             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
            )
        assert_array_equal(a, b)

    @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
    @pytest.mark.filterwarnings(
        "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning"
    )
    @pytest.mark.parametrize("mode", ["mean", "median"])
    def test_zero_stat_length_valid(self, mode):
        # mean/median over zero samples yields NaN in the padded area.
        arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
        expected = np.array([np.nan, 1., 2., np.nan, np.nan])
        assert_equal(arr, expected)

    @pytest.mark.parametrize("mode", ["minimum", "maximum"])
    def test_zero_stat_length_invalid(self, mode):
        # min/max over zero samples is undefined and must raise, even when
        # the pad width itself is zero.
        match = "stat_length of 0 yields no value for padding"
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 0, mode, stat_length=0)
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 0, mode, stat_length=(1, 0))
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 1, mode, stat_length=0)
        with pytest.raises(ValueError, match=match):
            np.pad([1., 2.], 1, mode, stat_length=(1, 0))
|
496 |
+
|
497 |
+
|
498 |
+
class TestConstant:
    """Tests for ``np.pad`` with mode='constant'."""

    def test_check_constant(self):
        # Distinct constants for the left (10) and right (20) pads.
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
            )
        assert_array_equal(a, b)

    def test_check_constant_zeros(self):
        # Default constant_values is 0.
        a = np.arange(100)
        a = np.pad(a, (25, 20), 'constant')
        b = np.array(
            [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0,

              0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)

    def test_check_constant_float(self):
        # If input array is int, but constant_values are float, the dtype of
        # the array to be padded is kept (1.1 is truncated to 1).
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, (1, 2), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1, 1, 1, 1, 1, 1, 1, 1, 1],

             [ 1, 0, 1, 2, 3, 4, 5, 1, 1],
             [ 1, 6, 7, 8, 9, 10, 11, 1, 1],
             [ 1, 12, 13, 14, 15, 16, 17, 1, 1],
             [ 1, 18, 19, 20, 21, 22, 23, 1, 1],
             [ 1, 24, 25, 26, 27, 28, 29, 1, 1],

             [ 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
            )
        assert_allclose(test, expected)

    def test_check_constant_float2(self):
        # If input array is float, and constant_values are float, the dtype of
        # the array to be padded is kept - here retaining the float constants
        arr = np.arange(30).reshape(5, 6)
        arr_float = arr.astype(np.float64)
        test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
                      constant_values=1.1)
        expected = np.array(
            [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],

             [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
             [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
             [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
             [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
             [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],

             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
            )
        assert_allclose(test, expected)

    def test_check_constant_float3(self):
        # Float array with distinct float constants on each side.
        a = np.arange(100, dtype=float)
        a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
        b = np.array(
            [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1,

             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,

             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
            )
        assert_allclose(a, b)

    def test_check_constant_odd_pad_amount(self):
        # A single-element pad tuple per axis applies to both sides.
        arr = np.arange(30).reshape(5, 6)
        test = np.pad(arr, ((1,), (2,)), mode='constant',
                      constant_values=3)
        expected = np.array(
            [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],

             [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
             [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
             [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
             [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
             [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],

             [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
            )
        assert_allclose(test, expected)

    def test_check_constant_pad_2d(self):
        # Per-axis, per-side constants.  Uses np.pad directly: the old
        # ``np.lib.pad`` alias was removed in NumPy 2.0 (same function).
        arr = np.arange(4).reshape(2, 2)
        test = np.pad(arr, ((1, 2), (1, 3)), mode='constant',
                      constant_values=((1, 2), (3, 4)))
        expected = np.array(
            [[3, 1, 1, 4, 4, 4],
             [3, 0, 1, 4, 4, 4],
             [3, 2, 3, 4, 4, 4],
             [3, 2, 2, 4, 4, 4],
             [3, 2, 2, 4, 4, 4]]
            )
        assert_allclose(test, expected)

    def test_check_large_integers(self):
        # Padding must not overflow at the extremes of (u)int64.
        uint64_max = 2 ** 64 - 1
        arr = np.full(5, uint64_max, dtype=np.uint64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, uint64_max, dtype=np.uint64)
        assert_array_equal(test, expected)

        int64_max = 2 ** 63 - 1
        arr = np.full(5, int64_max, dtype=np.int64)
        test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
        expected = np.full(7, int64_max, dtype=np.int64)
        assert_array_equal(test, expected)

    def test_check_object_array(self):
        # Object identity must be preserved through padding.
        arr = np.empty(1, dtype=object)
        obj_a = object()
        arr[0] = obj_a
        obj_b = object()
        obj_c = object()
        arr = np.pad(arr, pad_width=1, mode='constant',
                     constant_values=(obj_b, obj_c))

        expected = np.empty((3,), dtype=object)
        expected[0] = obj_b
        expected[1] = obj_a
        expected[2] = obj_c

        assert_array_equal(arr, expected)

    def test_pad_empty_dimension(self):
        # Padding an empty axis grows it from its pad widths alone.
        arr = np.zeros((3, 0, 2))
        result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
        assert result.shape == (3, 4, 4)
|
675 |
+
|
676 |
+
|
677 |
+
class TestLinearRamp:
    """Tests for ``np.pad`` with mode='linear_ramp'."""

    def test_check_simple(self):
        # Ramps linearly from end_values (4 and 5) to the array's edges.
        a = np.arange(100).astype('f')
        a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
        b = np.array(
            [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
             2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
             0.80, 0.64, 0.48, 0.32, 0.16,

             0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
             10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
             20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
             30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
             40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
             50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
             60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
             70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
             80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
             90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,

             94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
             47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
            )
        assert_allclose(a, b, rtol=1e-5, atol=1e-5)

    def test_check_2d(self):
        # 2-D ramp down to 0 on every border.
        arr = np.arange(20).reshape(4, 5).astype(np.float64)
        test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
             [0., 0., 0., 1., 2., 3., 4., 2., 0.],
             [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
             [0., 5., 10., 11., 12., 13., 14., 7., 0.],
             [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
             [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        assert_allclose(test, expected)

    @pytest.mark.xfail(exceptions=(AssertionError,))
    def test_object_array(self):
        from fractions import Fraction
        arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
        actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)

        # deliberately chosen to have a non-power-of-2 denominator such that
        # rounding to floats causes a failure.
        expected = np.array([
            Fraction( 0, 12),
            Fraction( 3, 12),
            Fraction( 6, 12),
            Fraction(-6, 12),
            Fraction(-4, 12),
            Fraction(-2, 12),
            Fraction(-0, 12),
        ])
        assert_equal(actual, expected)

    def test_end_values(self):
        """Ensure that end values are exact."""
        # Even after long ramps the border must be exactly 0 (no float
        # accumulation error at the outermost entries).
        a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
        assert_equal(a[:, 0], 0.)
        assert_equal(a[:, -1], 0.)
        assert_equal(a[0, :], 0.)
        assert_equal(a[-1, :], 0.)

    @pytest.mark.parametrize("dtype", _numeric_dtypes)
    def test_negative_difference(self, dtype):
        """
        Check correct behavior of unsigned dtypes if there is a negative
        difference between the edge to pad and `end_values`. Check both cases
        to be independent of implementation. Test behavior for all other dtypes
        in case dtype casting interferes with complex dtypes. See gh-14191.
        """
        x = np.array([3], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=0)
        expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
        assert_equal(result, expected)

        x = np.array([0], dtype=dtype)
        result = np.pad(x, 3, mode="linear_ramp", end_values=3)
        expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
        assert_equal(result, expected)
|
760 |
+
|
761 |
+
|
762 |
+
class TestReflect:
|
763 |
+
def test_check_simple(self):
|
764 |
+
a = np.arange(100)
|
765 |
+
a = np.pad(a, (25, 20), 'reflect')
|
766 |
+
b = np.array(
|
767 |
+
[25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
|
768 |
+
15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
|
769 |
+
5, 4, 3, 2, 1,
|
770 |
+
|
771 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
772 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
773 |
+
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
774 |
+
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
775 |
+
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
776 |
+
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
777 |
+
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
778 |
+
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
779 |
+
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
780 |
+
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
|
781 |
+
|
782 |
+
98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
|
783 |
+
88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
|
784 |
+
)
|
785 |
+
assert_array_equal(a, b)
|
786 |
+
|
787 |
+
def test_check_odd_method(self):
|
788 |
+
a = np.arange(100)
|
789 |
+
a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
|
790 |
+
b = np.array(
|
791 |
+
[-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
|
792 |
+
-15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
|
793 |
+
-5, -4, -3, -2, -1,
|
794 |
+
|
795 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
796 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
797 |
+
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
798 |
+
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
799 |
+
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
800 |
+
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
801 |
+
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
802 |
+
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
803 |
+
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
804 |
+
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
|
805 |
+
|
806 |
+
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
|
807 |
+
110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
|
808 |
+
)
|
809 |
+
assert_array_equal(a, b)
|
810 |
+
|
811 |
+
def test_check_large_pad(self):
|
812 |
+
a = [[4, 5, 6], [6, 7, 8]]
|
813 |
+
a = np.pad(a, (5, 7), 'reflect')
|
814 |
+
b = np.array(
|
815 |
+
[[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
816 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
817 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
818 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
819 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
820 |
+
|
821 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
822 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
823 |
+
|
824 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
825 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
826 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
827 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
828 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
829 |
+
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
|
830 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
|
831 |
+
)
|
832 |
+
assert_array_equal(a, b)
|
833 |
+
|
834 |
+
def test_check_shape(self):
|
835 |
+
a = [[4, 5, 6]]
|
836 |
+
a = np.pad(a, (5, 7), 'reflect')
|
837 |
+
b = np.array(
|
838 |
+
[[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
839 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
840 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
841 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
842 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
843 |
+
|
844 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
845 |
+
|
846 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
847 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
848 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
849 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
850 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
851 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
|
852 |
+
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
|
853 |
+
)
|
854 |
+
assert_array_equal(a, b)
|
855 |
+
|
856 |
+
def test_check_01(self):
|
857 |
+
a = np.pad([1, 2, 3], 2, 'reflect')
|
858 |
+
b = np.array([3, 2, 1, 2, 3, 2, 1])
|
859 |
+
assert_array_equal(a, b)
|
860 |
+
|
861 |
+
def test_check_02(self):
|
862 |
+
a = np.pad([1, 2, 3], 3, 'reflect')
|
863 |
+
b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
|
864 |
+
assert_array_equal(a, b)
|
865 |
+
|
866 |
+
def test_check_03(self):
|
867 |
+
a = np.pad([1, 2, 3], 4, 'reflect')
|
868 |
+
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
|
869 |
+
assert_array_equal(a, b)
|
870 |
+
|
871 |
+
|
872 |
+
class TestEmptyArray:
|
873 |
+
"""Check how padding behaves on arrays with an empty dimension."""
|
874 |
+
|
875 |
+
@pytest.mark.parametrize(
|
876 |
+
# Keep parametrization ordered, otherwise pytest-xdist might believe
|
877 |
+
# that different tests were collected during parallelization
|
878 |
+
"mode", sorted(_all_modes.keys() - {"constant", "empty"})
|
879 |
+
)
|
880 |
+
def test_pad_empty_dimension(self, mode):
|
881 |
+
match = ("can't extend empty axis 0 using modes other than 'constant' "
|
882 |
+
"or 'empty'")
|
883 |
+
with pytest.raises(ValueError, match=match):
|
884 |
+
np.pad([], 4, mode=mode)
|
885 |
+
with pytest.raises(ValueError, match=match):
|
886 |
+
np.pad(np.ndarray(0), 4, mode=mode)
|
887 |
+
with pytest.raises(ValueError, match=match):
|
888 |
+
np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
|
889 |
+
|
890 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
891 |
+
def test_pad_non_empty_dimension(self, mode):
|
892 |
+
result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
|
893 |
+
assert result.shape == (8, 0, 4)
|
894 |
+
|
895 |
+
|
896 |
+
class TestSymmetric:
|
897 |
+
def test_check_simple(self):
|
898 |
+
a = np.arange(100)
|
899 |
+
a = np.pad(a, (25, 20), 'symmetric')
|
900 |
+
b = np.array(
|
901 |
+
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
|
902 |
+
14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
|
903 |
+
4, 3, 2, 1, 0,
|
904 |
+
|
905 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
906 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
907 |
+
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
908 |
+
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
909 |
+
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
910 |
+
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
911 |
+
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
912 |
+
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
913 |
+
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
914 |
+
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
|
915 |
+
|
916 |
+
99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
|
917 |
+
89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
|
918 |
+
)
|
919 |
+
assert_array_equal(a, b)
|
920 |
+
|
921 |
+
def test_check_odd_method(self):
|
922 |
+
a = np.arange(100)
|
923 |
+
a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
|
924 |
+
b = np.array(
|
925 |
+
[-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
|
926 |
+
-14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
|
927 |
+
-4, -3, -2, -1, 0,
|
928 |
+
|
929 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
930 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
931 |
+
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
932 |
+
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
933 |
+
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
934 |
+
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
935 |
+
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
936 |
+
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
937 |
+
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
938 |
+
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
|
939 |
+
|
940 |
+
99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
|
941 |
+
109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
|
942 |
+
)
|
943 |
+
assert_array_equal(a, b)
|
944 |
+
|
945 |
+
def test_check_large_pad(self):
|
946 |
+
a = [[4, 5, 6], [6, 7, 8]]
|
947 |
+
a = np.pad(a, (5, 7), 'symmetric')
|
948 |
+
b = np.array(
|
949 |
+
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
950 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
951 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
952 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
953 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
954 |
+
|
955 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
956 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
957 |
+
|
958 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
959 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
960 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
961 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
962 |
+
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
|
963 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
964 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
|
965 |
+
)
|
966 |
+
|
967 |
+
assert_array_equal(a, b)
|
968 |
+
|
969 |
+
def test_check_large_pad_odd(self):
|
970 |
+
a = [[4, 5, 6], [6, 7, 8]]
|
971 |
+
a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
|
972 |
+
b = np.array(
|
973 |
+
[[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
|
974 |
+
[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
|
975 |
+
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
|
976 |
+
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
|
977 |
+
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
|
978 |
+
|
979 |
+
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
|
980 |
+
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
|
981 |
+
|
982 |
+
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
|
983 |
+
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
|
984 |
+
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
|
985 |
+
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
|
986 |
+
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
|
987 |
+
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
|
988 |
+
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
|
989 |
+
)
|
990 |
+
assert_array_equal(a, b)
|
991 |
+
|
992 |
+
def test_check_shape(self):
|
993 |
+
a = [[4, 5, 6]]
|
994 |
+
a = np.pad(a, (5, 7), 'symmetric')
|
995 |
+
b = np.array(
|
996 |
+
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
997 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
998 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
999 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1000 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1001 |
+
|
1002 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1003 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1004 |
+
|
1005 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1006 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1007 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1008 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1009 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
|
1010 |
+
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
|
1011 |
+
)
|
1012 |
+
assert_array_equal(a, b)
|
1013 |
+
|
1014 |
+
def test_check_01(self):
|
1015 |
+
a = np.pad([1, 2, 3], 2, 'symmetric')
|
1016 |
+
b = np.array([2, 1, 1, 2, 3, 3, 2])
|
1017 |
+
assert_array_equal(a, b)
|
1018 |
+
|
1019 |
+
def test_check_02(self):
|
1020 |
+
a = np.pad([1, 2, 3], 3, 'symmetric')
|
1021 |
+
b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
|
1022 |
+
assert_array_equal(a, b)
|
1023 |
+
|
1024 |
+
def test_check_03(self):
|
1025 |
+
a = np.pad([1, 2, 3], 6, 'symmetric')
|
1026 |
+
b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
|
1027 |
+
assert_array_equal(a, b)
|
1028 |
+
|
1029 |
+
|
1030 |
+
class TestWrap:
|
1031 |
+
def test_check_simple(self):
|
1032 |
+
a = np.arange(100)
|
1033 |
+
a = np.pad(a, (25, 20), 'wrap')
|
1034 |
+
b = np.array(
|
1035 |
+
[75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
|
1036 |
+
85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
|
1037 |
+
95, 96, 97, 98, 99,
|
1038 |
+
|
1039 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
1040 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
1041 |
+
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
|
1042 |
+
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
|
1043 |
+
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
|
1044 |
+
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
|
1045 |
+
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
1046 |
+
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
|
1047 |
+
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
|
1048 |
+
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
|
1049 |
+
|
1050 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
|
1051 |
+
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
|
1052 |
+
)
|
1053 |
+
assert_array_equal(a, b)
|
1054 |
+
|
1055 |
+
def test_check_large_pad(self):
|
1056 |
+
a = np.arange(12)
|
1057 |
+
a = np.reshape(a, (3, 4))
|
1058 |
+
a = np.pad(a, (10, 12), 'wrap')
|
1059 |
+
b = np.array(
|
1060 |
+
[[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1061 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1062 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1063 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1064 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1065 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1066 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1067 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1068 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1069 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1070 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1071 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1072 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1073 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1074 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1075 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1076 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1077 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1078 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1079 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1080 |
+
|
1081 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1082 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1083 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1084 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1085 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1086 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1087 |
+
|
1088 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1089 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1090 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1091 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1092 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1093 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1094 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1095 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1096 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1097 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1098 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1099 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1100 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1101 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1102 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1103 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1104 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1105 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11],
|
1106 |
+
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
|
1107 |
+
3, 0, 1, 2, 3, 0, 1, 2, 3],
|
1108 |
+
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
|
1109 |
+
7, 4, 5, 6, 7, 4, 5, 6, 7],
|
1110 |
+
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
|
1111 |
+
11, 8, 9, 10, 11, 8, 9, 10, 11]]
|
1112 |
+
)
|
1113 |
+
assert_array_equal(a, b)
|
1114 |
+
|
1115 |
+
def test_check_01(self):
|
1116 |
+
a = np.pad([1, 2, 3], 3, 'wrap')
|
1117 |
+
b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
|
1118 |
+
assert_array_equal(a, b)
|
1119 |
+
|
1120 |
+
def test_check_02(self):
|
1121 |
+
a = np.pad([1, 2, 3], 4, 'wrap')
|
1122 |
+
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
|
1123 |
+
assert_array_equal(a, b)
|
1124 |
+
|
1125 |
+
def test_pad_with_zero(self):
|
1126 |
+
a = np.ones((3, 5))
|
1127 |
+
b = np.pad(a, (0, 5), mode="wrap")
|
1128 |
+
assert_array_equal(a, b[:-5, :-5])
|
1129 |
+
|
1130 |
+
def test_repeated_wrapping(self):
|
1131 |
+
"""
|
1132 |
+
Check wrapping on each side individually if the wrapped area is longer
|
1133 |
+
than the original array.
|
1134 |
+
"""
|
1135 |
+
a = np.arange(5)
|
1136 |
+
b = np.pad(a, (12, 0), mode="wrap")
|
1137 |
+
assert_array_equal(np.r_[a, a, a, a][3:], b)
|
1138 |
+
|
1139 |
+
a = np.arange(5)
|
1140 |
+
b = np.pad(a, (0, 12), mode="wrap")
|
1141 |
+
assert_array_equal(np.r_[a, a, a, a][:-3], b)
|
1142 |
+
|
1143 |
+
def test_repeated_wrapping_multiple_origin(self):
|
1144 |
+
"""
|
1145 |
+
Assert that 'wrap' pads only with multiples of the original area if
|
1146 |
+
the pad width is larger than the original array.
|
1147 |
+
"""
|
1148 |
+
a = np.arange(4).reshape(2, 2)
|
1149 |
+
a = np.pad(a, [(1, 3), (3, 1)], mode='wrap')
|
1150 |
+
b = np.array(
|
1151 |
+
[[3, 2, 3, 2, 3, 2],
|
1152 |
+
[1, 0, 1, 0, 1, 0],
|
1153 |
+
[3, 2, 3, 2, 3, 2],
|
1154 |
+
[1, 0, 1, 0, 1, 0],
|
1155 |
+
[3, 2, 3, 2, 3, 2],
|
1156 |
+
[1, 0, 1, 0, 1, 0]]
|
1157 |
+
)
|
1158 |
+
assert_array_equal(a, b)
|
1159 |
+
|
1160 |
+
|
1161 |
+
class TestEdge:
|
1162 |
+
def test_check_simple(self):
|
1163 |
+
a = np.arange(12)
|
1164 |
+
a = np.reshape(a, (4, 3))
|
1165 |
+
a = np.pad(a, ((2, 3), (3, 2)), 'edge')
|
1166 |
+
b = np.array(
|
1167 |
+
[[0, 0, 0, 0, 1, 2, 2, 2],
|
1168 |
+
[0, 0, 0, 0, 1, 2, 2, 2],
|
1169 |
+
|
1170 |
+
[0, 0, 0, 0, 1, 2, 2, 2],
|
1171 |
+
[3, 3, 3, 3, 4, 5, 5, 5],
|
1172 |
+
[6, 6, 6, 6, 7, 8, 8, 8],
|
1173 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1174 |
+
|
1175 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1176 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1177 |
+
[9, 9, 9, 9, 10, 11, 11, 11]]
|
1178 |
+
)
|
1179 |
+
assert_array_equal(a, b)
|
1180 |
+
|
1181 |
+
def test_check_width_shape_1_2(self):
|
1182 |
+
# Check a pad_width of the form ((1, 2),).
|
1183 |
+
# Regression test for issue gh-7808.
|
1184 |
+
a = np.array([1, 2, 3])
|
1185 |
+
padded = np.pad(a, ((1, 2),), 'edge')
|
1186 |
+
expected = np.array([1, 1, 2, 3, 3, 3])
|
1187 |
+
assert_array_equal(padded, expected)
|
1188 |
+
|
1189 |
+
a = np.array([[1, 2, 3], [4, 5, 6]])
|
1190 |
+
padded = np.pad(a, ((1, 2),), 'edge')
|
1191 |
+
expected = np.pad(a, ((1, 2), (1, 2)), 'edge')
|
1192 |
+
assert_array_equal(padded, expected)
|
1193 |
+
|
1194 |
+
a = np.arange(24).reshape(2, 3, 4)
|
1195 |
+
padded = np.pad(a, ((1, 2),), 'edge')
|
1196 |
+
expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
|
1197 |
+
assert_array_equal(padded, expected)
|
1198 |
+
|
1199 |
+
|
1200 |
+
class TestEmpty:
|
1201 |
+
def test_simple(self):
|
1202 |
+
arr = np.arange(24).reshape(4, 6)
|
1203 |
+
result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
|
1204 |
+
assert result.shape == (9, 10)
|
1205 |
+
assert_equal(arr, result[2:-3, 3:-1])
|
1206 |
+
|
1207 |
+
def test_pad_empty_dimension(self):
|
1208 |
+
arr = np.zeros((3, 0, 2))
|
1209 |
+
result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
|
1210 |
+
assert result.shape == (3, 4, 4)
|
1211 |
+
|
1212 |
+
|
1213 |
+
def test_legacy_vector_functionality():
|
1214 |
+
def _padwithtens(vector, pad_width, iaxis, kwargs):
|
1215 |
+
vector[:pad_width[0]] = 10
|
1216 |
+
vector[-pad_width[1]:] = 10
|
1217 |
+
|
1218 |
+
a = np.arange(6).reshape(2, 3)
|
1219 |
+
a = np.pad(a, 2, _padwithtens)
|
1220 |
+
b = np.array(
|
1221 |
+
[[10, 10, 10, 10, 10, 10, 10],
|
1222 |
+
[10, 10, 10, 10, 10, 10, 10],
|
1223 |
+
|
1224 |
+
[10, 10, 0, 1, 2, 10, 10],
|
1225 |
+
[10, 10, 3, 4, 5, 10, 10],
|
1226 |
+
|
1227 |
+
[10, 10, 10, 10, 10, 10, 10],
|
1228 |
+
[10, 10, 10, 10, 10, 10, 10]]
|
1229 |
+
)
|
1230 |
+
assert_array_equal(a, b)
|
1231 |
+
|
1232 |
+
|
1233 |
+
def test_unicode_mode():
|
1234 |
+
a = np.pad([1], 2, mode='constant')
|
1235 |
+
b = np.array([0, 0, 1, 0, 0])
|
1236 |
+
assert_array_equal(a, b)
|
1237 |
+
|
1238 |
+
|
1239 |
+
@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
|
1240 |
+
def test_object_input(mode):
|
1241 |
+
# Regression test for issue gh-11395.
|
1242 |
+
a = np.full((4, 3), fill_value=None)
|
1243 |
+
pad_amt = ((2, 3), (3, 2))
|
1244 |
+
b = np.full((9, 8), fill_value=None)
|
1245 |
+
assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
|
1246 |
+
|
1247 |
+
|
1248 |
+
class TestPadWidth:
|
1249 |
+
@pytest.mark.parametrize("pad_width", [
|
1250 |
+
(4, 5, 6, 7),
|
1251 |
+
((1,), (2,), (3,)),
|
1252 |
+
((1, 2), (3, 4), (5, 6)),
|
1253 |
+
((3, 4, 5), (0, 1, 2)),
|
1254 |
+
])
|
1255 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1256 |
+
def test_misshaped_pad_width(self, pad_width, mode):
|
1257 |
+
arr = np.arange(30).reshape((6, 5))
|
1258 |
+
match = "operands could not be broadcast together"
|
1259 |
+
with pytest.raises(ValueError, match=match):
|
1260 |
+
np.pad(arr, pad_width, mode)
|
1261 |
+
|
1262 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1263 |
+
def test_misshaped_pad_width_2(self, mode):
|
1264 |
+
arr = np.arange(30).reshape((6, 5))
|
1265 |
+
match = ("input operand has more dimensions than allowed by the axis "
|
1266 |
+
"remapping")
|
1267 |
+
with pytest.raises(ValueError, match=match):
|
1268 |
+
np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)
|
1269 |
+
|
1270 |
+
@pytest.mark.parametrize(
|
1271 |
+
"pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
|
1272 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1273 |
+
def test_negative_pad_width(self, pad_width, mode):
|
1274 |
+
arr = np.arange(30).reshape((6, 5))
|
1275 |
+
match = "index can't contain negative values"
|
1276 |
+
with pytest.raises(ValueError, match=match):
|
1277 |
+
np.pad(arr, pad_width, mode)
|
1278 |
+
|
1279 |
+
@pytest.mark.parametrize("pad_width, dtype", [
|
1280 |
+
("3", None),
|
1281 |
+
("word", None),
|
1282 |
+
(None, None),
|
1283 |
+
(object(), None),
|
1284 |
+
(3.4, None),
|
1285 |
+
(((2, 3, 4), (3, 2)), object),
|
1286 |
+
(complex(1, -1), None),
|
1287 |
+
(((-2.1, 3), (3, 2)), None),
|
1288 |
+
])
|
1289 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1290 |
+
def test_bad_type(self, pad_width, dtype, mode):
|
1291 |
+
arr = np.arange(30).reshape((6, 5))
|
1292 |
+
match = "`pad_width` must be of integral type."
|
1293 |
+
if dtype is not None:
|
1294 |
+
# avoid DeprecationWarning when not specifying dtype
|
1295 |
+
with pytest.raises(TypeError, match=match):
|
1296 |
+
np.pad(arr, np.array(pad_width, dtype=dtype), mode)
|
1297 |
+
else:
|
1298 |
+
with pytest.raises(TypeError, match=match):
|
1299 |
+
np.pad(arr, pad_width, mode)
|
1300 |
+
with pytest.raises(TypeError, match=match):
|
1301 |
+
np.pad(arr, np.array(pad_width), mode)
|
1302 |
+
|
1303 |
+
def test_pad_width_as_ndarray(self):
|
1304 |
+
a = np.arange(12)
|
1305 |
+
a = np.reshape(a, (4, 3))
|
1306 |
+
a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
|
1307 |
+
b = np.array(
|
1308 |
+
[[0, 0, 0, 0, 1, 2, 2, 2],
|
1309 |
+
[0, 0, 0, 0, 1, 2, 2, 2],
|
1310 |
+
|
1311 |
+
[0, 0, 0, 0, 1, 2, 2, 2],
|
1312 |
+
[3, 3, 3, 3, 4, 5, 5, 5],
|
1313 |
+
[6, 6, 6, 6, 7, 8, 8, 8],
|
1314 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1315 |
+
|
1316 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1317 |
+
[9, 9, 9, 9, 10, 11, 11, 11],
|
1318 |
+
[9, 9, 9, 9, 10, 11, 11, 11]]
|
1319 |
+
)
|
1320 |
+
assert_array_equal(a, b)
|
1321 |
+
|
1322 |
+
@pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
|
1323 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1324 |
+
def test_zero_pad_width(self, pad_width, mode):
|
1325 |
+
arr = np.arange(30).reshape(6, 5)
|
1326 |
+
assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
|
1327 |
+
|
1328 |
+
|
1329 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1330 |
+
def test_kwargs(mode):
|
1331 |
+
"""Test behavior of pad's kwargs for the given mode."""
|
1332 |
+
allowed = _all_modes[mode]
|
1333 |
+
not_allowed = {}
|
1334 |
+
for kwargs in _all_modes.values():
|
1335 |
+
if kwargs != allowed:
|
1336 |
+
not_allowed.update(kwargs)
|
1337 |
+
# Test if allowed keyword arguments pass
|
1338 |
+
np.pad([1, 2, 3], 1, mode, **allowed)
|
1339 |
+
# Test if prohibited keyword arguments of other modes raise an error
|
1340 |
+
for key, value in not_allowed.items():
|
1341 |
+
match = "unsupported keyword arguments for mode '{}'".format(mode)
|
1342 |
+
with pytest.raises(ValueError, match=match):
|
1343 |
+
np.pad([1, 2, 3], 1, mode, **{key: value})
|
1344 |
+
|
1345 |
+
|
1346 |
+
def test_constant_zero_default():
|
1347 |
+
arr = np.array([1, 1])
|
1348 |
+
assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])
|
1349 |
+
|
1350 |
+
|
1351 |
+
@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
|
1352 |
+
def test_unsupported_mode(mode):
|
1353 |
+
match= "mode '{}' is not supported".format(mode)
|
1354 |
+
with pytest.raises(ValueError, match=match):
|
1355 |
+
np.pad([1, 2, 3], 4, mode=mode)
|
1356 |
+
|
1357 |
+
|
1358 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1359 |
+
def test_non_contiguous_array(mode):
|
1360 |
+
arr = np.arange(24).reshape(4, 6)[::2, ::2]
|
1361 |
+
result = np.pad(arr, (2, 3), mode)
|
1362 |
+
assert result.shape == (7, 8)
|
1363 |
+
assert_equal(result[2:-3, 2:-3], arr)
|
1364 |
+
|
1365 |
+
|
1366 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1367 |
+
def test_memory_layout_persistence(mode):
|
1368 |
+
"""Test if C and F order is preserved for all pad modes."""
|
1369 |
+
x = np.ones((5, 10), order='C')
|
1370 |
+
assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
|
1371 |
+
x = np.ones((5, 10), order='F')
|
1372 |
+
assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
|
1373 |
+
|
1374 |
+
|
1375 |
+
@pytest.mark.parametrize("dtype", _numeric_dtypes)
|
1376 |
+
@pytest.mark.parametrize("mode", _all_modes.keys())
|
1377 |
+
def test_dtype_persistence(dtype, mode):
|
1378 |
+
arr = np.zeros((3, 2, 1), dtype=dtype)
|
1379 |
+
result = np.pad(arr, 1, mode=mode)
|
1380 |
+
assert result.dtype == dtype
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arraysetops.py
ADDED
@@ -0,0 +1,944 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Test functions for 1D array set operations.
|
2 |
+
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
from numpy.testing import (assert_array_equal, assert_equal,
|
7 |
+
assert_raises, assert_raises_regex)
|
8 |
+
from numpy.lib.arraysetops import (
|
9 |
+
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
|
10 |
+
)
|
11 |
+
import pytest
|
12 |
+
|
13 |
+
|
14 |
+
class TestSetOps:
|
15 |
+
|
16 |
+
def test_intersect1d(self):
|
17 |
+
# unique inputs
|
18 |
+
a = np.array([5, 7, 1, 2])
|
19 |
+
b = np.array([2, 4, 3, 1, 5])
|
20 |
+
|
21 |
+
ec = np.array([1, 2, 5])
|
22 |
+
c = intersect1d(a, b, assume_unique=True)
|
23 |
+
assert_array_equal(c, ec)
|
24 |
+
|
25 |
+
# non-unique inputs
|
26 |
+
a = np.array([5, 5, 7, 1, 2])
|
27 |
+
b = np.array([2, 1, 4, 3, 3, 1, 5])
|
28 |
+
|
29 |
+
ed = np.array([1, 2, 5])
|
30 |
+
c = intersect1d(a, b)
|
31 |
+
assert_array_equal(c, ed)
|
32 |
+
assert_array_equal([], intersect1d([], []))
|
33 |
+
|
34 |
+
def test_intersect1d_array_like(self):
|
35 |
+
# See gh-11772
|
36 |
+
class Test:
|
37 |
+
def __array__(self):
|
38 |
+
return np.arange(3)
|
39 |
+
|
40 |
+
a = Test()
|
41 |
+
res = intersect1d(a, a)
|
42 |
+
assert_array_equal(res, a)
|
43 |
+
res = intersect1d([1, 2, 3], [1, 2, 3])
|
44 |
+
assert_array_equal(res, [1, 2, 3])
|
45 |
+
|
46 |
+
def test_intersect1d_indices(self):
|
47 |
+
# unique inputs
|
48 |
+
a = np.array([1, 2, 3, 4])
|
49 |
+
b = np.array([2, 1, 4, 6])
|
50 |
+
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
51 |
+
ee = np.array([1, 2, 4])
|
52 |
+
assert_array_equal(c, ee)
|
53 |
+
assert_array_equal(a[i1], ee)
|
54 |
+
assert_array_equal(b[i2], ee)
|
55 |
+
|
56 |
+
# non-unique inputs
|
57 |
+
a = np.array([1, 2, 2, 3, 4, 3, 2])
|
58 |
+
b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
|
59 |
+
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
60 |
+
ef = np.array([1, 2, 3, 4])
|
61 |
+
assert_array_equal(c, ef)
|
62 |
+
assert_array_equal(a[i1], ef)
|
63 |
+
assert_array_equal(b[i2], ef)
|
64 |
+
|
65 |
+
# non1d, unique inputs
|
66 |
+
a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
|
67 |
+
b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
|
68 |
+
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
69 |
+
ui1 = np.unravel_index(i1, a.shape)
|
70 |
+
ui2 = np.unravel_index(i2, b.shape)
|
71 |
+
ea = np.array([2, 6, 7, 8])
|
72 |
+
assert_array_equal(ea, a[ui1])
|
73 |
+
assert_array_equal(ea, b[ui2])
|
74 |
+
|
75 |
+
# non1d, not assumed to be uniqueinputs
|
76 |
+
a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
|
77 |
+
b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
|
78 |
+
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
79 |
+
ui1 = np.unravel_index(i1, a.shape)
|
80 |
+
ui2 = np.unravel_index(i2, b.shape)
|
81 |
+
ea = np.array([2, 7, 8])
|
82 |
+
assert_array_equal(ea, a[ui1])
|
83 |
+
assert_array_equal(ea, b[ui2])
|
84 |
+
|
85 |
+
def test_setxor1d(self):
|
86 |
+
a = np.array([5, 7, 1, 2])
|
87 |
+
b = np.array([2, 4, 3, 1, 5])
|
88 |
+
|
89 |
+
ec = np.array([3, 4, 7])
|
90 |
+
c = setxor1d(a, b)
|
91 |
+
assert_array_equal(c, ec)
|
92 |
+
|
93 |
+
a = np.array([1, 2, 3])
|
94 |
+
b = np.array([6, 5, 4])
|
95 |
+
|
96 |
+
ec = np.array([1, 2, 3, 4, 5, 6])
|
97 |
+
c = setxor1d(a, b)
|
98 |
+
assert_array_equal(c, ec)
|
99 |
+
|
100 |
+
a = np.array([1, 8, 2, 3])
|
101 |
+
b = np.array([6, 5, 4, 8])
|
102 |
+
|
103 |
+
ec = np.array([1, 2, 3, 4, 5, 6])
|
104 |
+
c = setxor1d(a, b)
|
105 |
+
assert_array_equal(c, ec)
|
106 |
+
|
107 |
+
assert_array_equal([], setxor1d([], []))
|
108 |
+
|
109 |
+
def test_ediff1d(self):
|
110 |
+
zero_elem = np.array([])
|
111 |
+
one_elem = np.array([1])
|
112 |
+
two_elem = np.array([1, 2])
|
113 |
+
|
114 |
+
assert_array_equal([], ediff1d(zero_elem))
|
115 |
+
assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
|
116 |
+
assert_array_equal([0], ediff1d(zero_elem, to_end=0))
|
117 |
+
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
|
118 |
+
assert_array_equal([], ediff1d(one_elem))
|
119 |
+
assert_array_equal([1], ediff1d(two_elem))
|
120 |
+
assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
|
121 |
+
assert_array_equal([5, 6, 1, 7, 8],
|
122 |
+
ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
|
123 |
+
assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
|
124 |
+
assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
|
125 |
+
assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
|
126 |
+
assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
|
127 |
+
|
128 |
+
    @pytest.mark.parametrize("ary, prepend, append, expected", [
        # should fail because trying to cast
        # np.nan standard floating point value
        # into an integer array:
        (np.array([1, 2, 3], dtype=np.int64),
         None,
         np.nan,
         'to_end'),
        # should fail because attempting
        # to downcast to int type:
        (np.array([1, 2, 3], dtype=np.int64),
         np.array([5, 7, 2], dtype=np.float32),
         None,
         'to_begin'),
        # should fail because attempting to cast
        # two special floating point values
        # to integers (on both sides of ary),
        # `to_begin` is in the error message as the impl checks this first:
        (np.array([1., 3., 9.], dtype=np.int8),
         np.nan,
         np.nan,
         'to_begin'),
        ])
    def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
        """ediff1d must raise TypeError naming the offending keyword
        (`expected` is 'to_begin' or 'to_end') when the padding cannot be
        safely cast to the dtype of `ary`."""
        # verify resolution of gh-11490

        # specifically, raise an appropriate
        # Exception when attempting to append or
        # prepend with an incompatible type
        msg = 'dtype of `{}` must be compatible'.format(expected)
        with assert_raises_regex(TypeError, msg):
            ediff1d(ary=ary,
                    to_end=append,
                    to_begin=prepend)
    @pytest.mark.parametrize(
        "ary,prepend,append,expected",
        [
         # NOTE: out-of-range scalar pads wrap under the same-kind cast
         # rule (e.g. 2**16 -> 0 in int16), matching pre-gh-11490 behavior.
         (np.array([1, 2, 3], dtype=np.int16),
          2**16,  # will be cast to int16 under same kind rule.
          2**16 + 4,
          np.array([0, 1, 1, 4], dtype=np.int16)),
         (np.array([1, 2, 3], dtype=np.float32),
          np.array([5], dtype=np.float64),
          None,
          np.array([5, 1, 1], dtype=np.float32)),
         (np.array([1, 2, 3], dtype=np.int32),
          0,
          0,
          np.array([0, 1, 1, 0], dtype=np.int32)),
         (np.array([1, 2, 3], dtype=np.int64),
          3,
          -9,
          np.array([3, 1, 1, -9], dtype=np.int64)),
        ]
    )
    def test_ediff1d_scalar_handling(self,
                                     ary,
                                     prepend,
                                     append,
                                     expected):
        """Scalar to_begin/to_end values keep the dtype of `ary`."""
        # maintain backwards-compatibility
        # of scalar prepend / append behavior
        # in ediff1d following fix for gh-11490
        actual = np.ediff1d(ary=ary,
                            to_end=append,
                            to_begin=prepend)
        assert_equal(actual, expected)
        # The result dtype must match `ary`'s dtype, not the pad's.
        assert actual.dtype == expected.dtype
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_isin(self, kind):
        """isin agrees with a naive per-element membership check across
        multidimensional arrays, array-likes, 0-d arrays, scalars and
        empty inputs, for every `kind` algorithm."""
        # the tests for in1d cover most of isin's behavior
        # if in1d is removed, would need to change those tests to test
        # isin instead.
        def _isin_slow(a, b):
            # Reference implementation: plain Python `in` over b's elements.
            b = np.asarray(b).flatten().tolist()
            return a in b
        isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})

        def assert_isin_equal(a, b):
            # isin must match the element-wise reference result exactly.
            x = isin(a, b, kind=kind)
            y = isin_slow(a, b)
            assert_array_equal(x, y)

        # multidimensional arrays in both arguments
        a = np.arange(24).reshape([2, 3, 4])
        b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
        assert_isin_equal(a, b)

        # array-likes as both arguments
        c = [(9, 8), (7, 6)]
        d = (9, 7)
        assert_isin_equal(c, d)

        # zero-d array:
        f = np.array(3)
        assert_isin_equal(f, b)
        assert_isin_equal(a, f)
        assert_isin_equal(f, f)

        # scalar:
        assert_isin_equal(5, b)
        assert_isin_equal(a, 6)
        assert_isin_equal(5, 6)

        # empty array-like:
        if kind != "table":
            # An empty list will become float64,
            # which is invalid for kind="table"
            x = []
            assert_isin_equal(x, b)
            assert_isin_equal(a, x)
            assert_isin_equal(x, x)

        # empty array with various types:
        for dtype in [bool, np.int64, np.float64]:
            if kind == "table" and dtype == np.float64:
                # kind="table" only supports integer-like dtypes.
                continue

            if dtype in {np.int64, np.float64}:
                ar = np.array([10, 20, 30], dtype=dtype)
            elif dtype in {bool}:
                ar = np.array([True, False, False])

            empty_array = np.array([], dtype=dtype)

            assert_isin_equal(empty_array, ar)
            assert_isin_equal(ar, empty_array)
            assert_isin_equal(empty_array, empty_array)
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d(self, kind):
        """Core in1d behavior: list and ndarray inputs, assume_unique,
        duplicated values, and both the small-b and large-b code paths."""
        # we use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            # One check without np.array to make sure lists are handled correct
            a = [5, 7, 1, 2]
            b = [2, 4, 3, 1, 5] * mult
            ec = np.array([True, False, True, True])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            # Mutate a's first element: 8 is not in b.
            a[0] = 8
            ec = np.array([False, False, True, True])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            # 4 is in b, 8 is not.
            a[0], a[3] = 4, 8
            ec = np.array([True, False, True, False])
            c = in1d(a, b, assume_unique=True, kind=kind)
            assert_array_equal(c, ec)

            # Non-unique `a` without assume_unique.
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            ec = [False, True, False, True, True, True, True, True, True,
                  False, True, False, False, False]
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            # Extend b so 5 is also found (1 remains absent).
            b = b + [5, 5, 4] * mult
            ec = [True, True, True, True, True, True, True, True, True, True,
                  True, False, True, True]
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            a = np.array([5, 7, 1, 2])
            b = np.array([2, 4, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            # Duplicates in both a and b.
            a = np.array([5, 7, 1, 1, 2])
            b = np.array([2, 4, 3, 3, 1, 5] * mult)
            ec = np.array([True, False, True, True, True])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            # All-duplicate a with no matches.
            a = np.array([5, 5])
            b = np.array([2, 2] * mult)
            ec = np.array([False, False])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

            # Single-element inputs.
            a = np.array([5])
            b = np.array([2])
            ec = np.array([False])
            c = in1d(a, b, kind=kind)
            assert_array_equal(c, ec)

        if kind in {None, "sort"}:
            # Empty float64 inputs are not valid for kind="table".
            assert_array_equal(in1d([], [], kind=kind), [])
def test_in1d_char_array(self):
|
322 |
+
a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
|
323 |
+
b = np.array(['a', 'c'])
|
324 |
+
|
325 |
+
ec = np.array([True, False, True, False, False, True, False, False])
|
326 |
+
c = in1d(a, b)
|
327 |
+
|
328 |
+
assert_array_equal(c, ec)
|
329 |
+
|
330 |
+
@pytest.mark.parametrize("kind", [None, "sort", "table"])
|
331 |
+
def test_in1d_invert(self, kind):
|
332 |
+
"Test in1d's invert parameter"
|
333 |
+
# We use two different sizes for the b array here to test the
|
334 |
+
# two different paths in in1d().
|
335 |
+
for mult in (1, 10):
|
336 |
+
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
|
337 |
+
b = [2, 3, 4] * mult
|
338 |
+
assert_array_equal(np.invert(in1d(a, b, kind=kind)),
|
339 |
+
in1d(a, b, invert=True, kind=kind))
|
340 |
+
|
341 |
+
# float:
|
342 |
+
if kind in {None, "sort"}:
|
343 |
+
for mult in (1, 10):
|
344 |
+
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],
|
345 |
+
dtype=np.float32)
|
346 |
+
b = [2, 3, 4] * mult
|
347 |
+
b = np.array(b, dtype=np.float32)
|
348 |
+
assert_array_equal(np.invert(in1d(a, b, kind=kind)),
|
349 |
+
in1d(a, b, invert=True, kind=kind))
|
350 |
+
|
351 |
+
@pytest.mark.parametrize("kind", [None, "sort", "table"])
|
352 |
+
def test_in1d_ravel(self, kind):
|
353 |
+
# Test that in1d ravels its input arrays. This is not documented
|
354 |
+
# behavior however. The test is to ensure consistentency.
|
355 |
+
a = np.arange(6).reshape(2, 3)
|
356 |
+
b = np.arange(3, 9).reshape(3, 2)
|
357 |
+
long_b = np.arange(3, 63).reshape(30, 2)
|
358 |
+
ec = np.array([False, False, False, True, True, True])
|
359 |
+
|
360 |
+
assert_array_equal(in1d(a, b, assume_unique=True, kind=kind),
|
361 |
+
ec)
|
362 |
+
assert_array_equal(in1d(a, b, assume_unique=False,
|
363 |
+
kind=kind),
|
364 |
+
ec)
|
365 |
+
assert_array_equal(in1d(a, long_b, assume_unique=True,
|
366 |
+
kind=kind),
|
367 |
+
ec)
|
368 |
+
assert_array_equal(in1d(a, long_b, assume_unique=False,
|
369 |
+
kind=kind),
|
370 |
+
ec)
|
371 |
+
|
372 |
+
def test_in1d_hit_alternate_algorithm(self):
|
373 |
+
"""Hit the standard isin code with integers"""
|
374 |
+
# Need extreme range to hit standard code
|
375 |
+
# This hits it without the use of kind='table'
|
376 |
+
a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)
|
377 |
+
b = np.array([2, 3, 4, 1e9], dtype=np.int64)
|
378 |
+
expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)
|
379 |
+
assert_array_equal(expected, in1d(a, b))
|
380 |
+
assert_array_equal(np.invert(expected), in1d(a, b, invert=True))
|
381 |
+
|
382 |
+
a = np.array([5, 7, 1, 2], dtype=np.int64)
|
383 |
+
b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)
|
384 |
+
ec = np.array([True, False, True, True])
|
385 |
+
c = in1d(a, b, assume_unique=True)
|
386 |
+
assert_array_equal(c, ec)
|
387 |
+
|
388 |
+
@pytest.mark.parametrize("kind", [None, "sort", "table"])
|
389 |
+
def test_in1d_boolean(self, kind):
|
390 |
+
"""Test that in1d works for boolean input"""
|
391 |
+
a = np.array([True, False])
|
392 |
+
b = np.array([False, False, False])
|
393 |
+
expected = np.array([False, True])
|
394 |
+
assert_array_equal(expected,
|
395 |
+
in1d(a, b, kind=kind))
|
396 |
+
assert_array_equal(np.invert(expected),
|
397 |
+
in1d(a, b, invert=True, kind=kind))
|
398 |
+
|
399 |
+
@pytest.mark.parametrize("kind", [None, "sort"])
|
400 |
+
def test_in1d_timedelta(self, kind):
|
401 |
+
"""Test that in1d works for timedelta input"""
|
402 |
+
rstate = np.random.RandomState(0)
|
403 |
+
a = rstate.randint(0, 100, size=10)
|
404 |
+
b = rstate.randint(0, 100, size=10)
|
405 |
+
truth = in1d(a, b)
|
406 |
+
a_timedelta = a.astype("timedelta64[s]")
|
407 |
+
b_timedelta = b.astype("timedelta64[s]")
|
408 |
+
assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind))
|
409 |
+
|
410 |
+
def test_in1d_table_timedelta_fails(self):
|
411 |
+
a = np.array([0, 1, 2], dtype="timedelta64[s]")
|
412 |
+
b = a
|
413 |
+
# Make sure it raises a value error:
|
414 |
+
with pytest.raises(ValueError):
|
415 |
+
in1d(a, b, kind="table")
|
416 |
+
|
417 |
+
    @pytest.mark.parametrize(
        "dtype1,dtype2",
        [
            (np.int8, np.int16),
            (np.int16, np.int8),
            (np.uint8, np.uint16),
            (np.uint16, np.uint8),
            (np.uint8, np.int16),
            (np.int16, np.uint8),
        ]
    )
    @pytest.mark.parametrize("kind", [None, "sort", "table"])
    def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
        """Test that in1d works as expected for mixed dtype input."""
        is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
        ar1 = np.array([0, 0, 1, 1], dtype=dtype1)

        # Use the extreme values representable in dtype2.
        if is_dtype2_signed:
            ar2 = np.array([-128, 0, 127], dtype=dtype2)
        else:
            ar2 = np.array([127, 0, 255], dtype=dtype2)

        expected = np.array([True, True, False, False])

        # Mixing int8 and int16 makes the combined value range too large
        # for the table lookup, which must raise rather than mis-answer.
        expect_failure = kind == "table" and any((
            dtype1 == np.int8 and dtype2 == np.int16,
            dtype1 == np.int16 and dtype2 == np.int8
        ))

        if expect_failure:
            with pytest.raises(RuntimeError, match="exceed the maximum"):
                in1d(ar1, ar2, kind=kind)
        else:
            assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
@pytest.mark.parametrize("kind", [None, "sort", "table"])
|
453 |
+
def test_in1d_mixed_boolean(self, kind):
|
454 |
+
"""Test that in1d works as expected for bool/int input."""
|
455 |
+
for dtype in np.typecodes["AllInteger"]:
|
456 |
+
a = np.array([True, False, False], dtype=bool)
|
457 |
+
b = np.array([0, 0, 0, 0], dtype=dtype)
|
458 |
+
expected = np.array([False, True, True], dtype=bool)
|
459 |
+
assert_array_equal(in1d(a, b, kind=kind), expected)
|
460 |
+
|
461 |
+
a, b = b, a
|
462 |
+
expected = np.array([True, True, True, True], dtype=bool)
|
463 |
+
assert_array_equal(in1d(a, b, kind=kind), expected)
|
464 |
+
|
465 |
+
def test_in1d_first_array_is_object(self):
|
466 |
+
ar1 = [None]
|
467 |
+
ar2 = np.array([1]*10)
|
468 |
+
expected = np.array([False])
|
469 |
+
result = np.in1d(ar1, ar2)
|
470 |
+
assert_array_equal(result, expected)
|
471 |
+
|
472 |
+
def test_in1d_second_array_is_object(self):
|
473 |
+
ar1 = 1
|
474 |
+
ar2 = np.array([None]*10)
|
475 |
+
expected = np.array([False])
|
476 |
+
result = np.in1d(ar1, ar2)
|
477 |
+
assert_array_equal(result, expected)
|
478 |
+
|
479 |
+
def test_in1d_both_arrays_are_object(self):
|
480 |
+
ar1 = [None]
|
481 |
+
ar2 = np.array([None]*10)
|
482 |
+
expected = np.array([True])
|
483 |
+
result = np.in1d(ar1, ar2)
|
484 |
+
assert_array_equal(result, expected)
|
485 |
+
|
486 |
+
def test_in1d_both_arrays_have_structured_dtype(self):
|
487 |
+
# Test arrays of a structured data type containing an integer field
|
488 |
+
# and a field of dtype `object` allowing for arbitrary Python objects
|
489 |
+
dt = np.dtype([('field1', int), ('field2', object)])
|
490 |
+
ar1 = np.array([(1, None)], dtype=dt)
|
491 |
+
ar2 = np.array([(1, None)]*10, dtype=dt)
|
492 |
+
expected = np.array([True])
|
493 |
+
result = np.in1d(ar1, ar2)
|
494 |
+
assert_array_equal(result, expected)
|
495 |
+
|
496 |
+
    def test_in1d_with_arrays_containing_tuples(self):
        """Object arrays whose elements are tuples compare by value in
        in1d, both directly and with invert=True."""
        ar1 = np.array([(1,), 2], dtype=object)
        ar2 = np.array([(1,), 2], dtype=object)
        expected = np.array([True, True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))

        # An integer is added at the end of the array to make sure
        # that the array builder will create the array with tuples
        # and after it's created the integer is removed.
        # There's a bug in the array constructor that doesn't handle
        # tuples properly and adding the integer fixes that.
        ar1 = np.array([(1,), (2, 1), 1], dtype=object)
        ar1 = ar1[:-1]
        ar2 = np.array([(1,), (2, 1), 1], dtype=object)
        ar2 = ar2[:-1]
        expected = np.array([True, True])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))

        # Mixed-length tuples: (2, 3) is not in ar2.
        ar1 = np.array([(1,), (2, 3), 1], dtype=object)
        ar1 = ar1[:-1]
        ar2 = np.array([(1,), 2], dtype=object)
        expected = np.array([True, False])
        result = np.in1d(ar1, ar2)
        assert_array_equal(result, expected)
        result = np.in1d(ar1, ar2, invert=True)
        assert_array_equal(result, np.invert(expected))
    def test_in1d_errors(self):
        """Test that in1d raises expected errors."""

        # Error 1: `kind` is not one of 'sort' 'table' or None.
        ar1 = np.array([1, 2, 3, 4, 5])
        ar2 = np.array([2, 4, 6, 8, 10])
        assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')

        # Error 2: `kind="table"` does not work for non-integral arrays.
        obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)
        obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)
        assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')

        for dtype in [np.int32, np.int64]:
            ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
            # The range of this array will overflow:
            overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)

            # Error 3: `kind="table"` will trigger a runtime error
            # if there is an integer overflow expected when computing the
            # range of ar2
            assert_raises(
                RuntimeError,
                in1d, ar1, overflow_ar2, kind='table'
            )

            # Non-error: `kind=None` will *not* trigger a runtime error
            # if there is an integer overflow, it will switch to
            # the `sort` algorithm.
            result = np.in1d(ar1, overflow_ar2, kind=None)
            assert_array_equal(result, [True] + [False] * 4)
            result = np.in1d(ar1, overflow_ar2, kind='sort')
            assert_array_equal(result, [True] + [False] * 4)
def test_union1d(self):
|
564 |
+
a = np.array([5, 4, 7, 1, 2])
|
565 |
+
b = np.array([2, 4, 3, 3, 2, 1, 5])
|
566 |
+
|
567 |
+
ec = np.array([1, 2, 3, 4, 5, 7])
|
568 |
+
c = union1d(a, b)
|
569 |
+
assert_array_equal(c, ec)
|
570 |
+
|
571 |
+
# Tests gh-10340, arguments to union1d should be
|
572 |
+
# flattened if they are not already 1D
|
573 |
+
x = np.array([[0, 1, 2], [3, 4, 5]])
|
574 |
+
y = np.array([0, 1, 2, 3, 4])
|
575 |
+
ez = np.array([0, 1, 2, 3, 4, 5])
|
576 |
+
z = union1d(x, y)
|
577 |
+
assert_array_equal(z, ez)
|
578 |
+
|
579 |
+
assert_array_equal([], union1d([], []))
|
580 |
+
|
581 |
+
def test_setdiff1d(self):
|
582 |
+
a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
|
583 |
+
b = np.array([2, 4, 3, 3, 2, 1, 5])
|
584 |
+
|
585 |
+
ec = np.array([6, 7])
|
586 |
+
c = setdiff1d(a, b)
|
587 |
+
assert_array_equal(c, ec)
|
588 |
+
|
589 |
+
a = np.arange(21)
|
590 |
+
b = np.arange(19)
|
591 |
+
ec = np.array([19, 20])
|
592 |
+
c = setdiff1d(a, b)
|
593 |
+
assert_array_equal(c, ec)
|
594 |
+
|
595 |
+
assert_array_equal([], setdiff1d([], []))
|
596 |
+
a = np.array((), np.uint32)
|
597 |
+
assert_equal(setdiff1d(a, []).dtype, np.uint32)
|
598 |
+
|
599 |
+
def test_setdiff1d_unique(self):
|
600 |
+
a = np.array([3, 2, 1])
|
601 |
+
b = np.array([7, 5, 2])
|
602 |
+
expected = np.array([3, 1])
|
603 |
+
actual = setdiff1d(a, b, assume_unique=True)
|
604 |
+
assert_equal(actual, expected)
|
605 |
+
|
606 |
+
def test_setdiff1d_char_array(self):
|
607 |
+
a = np.array(['a', 'b', 'c'])
|
608 |
+
b = np.array(['a', 'b', 's'])
|
609 |
+
assert_array_equal(setdiff1d(a, b), np.array(['c']))
|
610 |
+
|
611 |
+
def test_manyways(self):
|
612 |
+
a = np.array([5, 7, 1, 2, 8])
|
613 |
+
b = np.array([9, 8, 2, 4, 3, 1, 5])
|
614 |
+
|
615 |
+
c1 = setxor1d(a, b)
|
616 |
+
aux1 = intersect1d(a, b)
|
617 |
+
aux2 = union1d(a, b)
|
618 |
+
c2 = setdiff1d(aux2, aux1)
|
619 |
+
assert_array_equal(c1, c2)
|
620 |
+
|
621 |
+
|
622 |
+
class TestUnique:
|
623 |
+
|
624 |
+
    def test_unique_1d(self):
        """unique on 1-D input: values, return_index/inverse/counts in every
        combination, across numeric/object/structured dtypes, plus a series
        of regression tests (NaN, NaT, complex NaN ordering)."""

        def check_all(a, b, i1, i2, c, dt):
            # Verify every combination of the three optional returns:
            # b = expected uniques, i1 = first indices, i2 = inverse,
            # c = counts; dt only labels the failure message.
            base_msg = 'check {0} failed for type {1}'

            msg = base_msg.format('values', dt)
            v = unique(a)
            assert_array_equal(v, b, msg)

            msg = base_msg.format('return_index', dt)
            v, j = unique(a, True, False, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i1, msg)

            msg = base_msg.format('return_inverse', dt)
            v, j = unique(a, False, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i2, msg)

            msg = base_msg.format('return_counts', dt)
            v, j = unique(a, False, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, c, msg)

            msg = base_msg.format('return_index and return_inverse', dt)
            v, j1, j2 = unique(a, True, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)

            msg = base_msg.format('return_index and return_counts', dt)
            v, j1, j2 = unique(a, True, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format('return_inverse and return_counts', dt)
            v, j1, j2 = unique(a, False, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i2, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format(('return_index, return_inverse '
                                   'and return_counts'), dt)
            v, j1, j2, j3 = unique(a, True, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)
            assert_array_equal(j3, c, msg)

        a = [5, 7, 1, 2, 1, 5, 7]*10
        b = [1, 2, 5, 7]
        i1 = [2, 3, 0, 1]
        i2 = [2, 3, 0, 1, 0, 2, 3]*10
        c = np.multiply([2, 1, 2, 2], 10)

        # test for numeric arrays
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        for dt in types:
            aa = np.array(a, dt)
            bb = np.array(b, dt)
            check_all(aa, bb, i1, i2, c, dt)

        # test for object arrays
        dt = 'O'
        aa = np.empty(len(a), dt)
        aa[:] = a
        bb = np.empty(len(b), dt)
        bb[:] = b
        check_all(aa, bb, i1, i2, c, dt)

        # test for structured arrays
        dt = [('', 'i'), ('', 'i')]
        aa = np.array(list(zip(a, a)), dt)
        bb = np.array(list(zip(b, b)), dt)
        check_all(aa, bb, i1, i2, c, dt)

        # test for ticket #2799
        aa = [1. + 0.j, 1 - 1.j, 1]
        assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])

        # test for ticket #4785
        a = [(1, 2), (1, 2), (2, 3)]
        unq = [1, 2, 3]
        inv = [0, 1, 0, 1, 1, 2]
        a1 = unique(a)
        assert_array_equal(a1, unq)
        a2, a2_inv = unique(a, return_inverse=True)
        assert_array_equal(a2, unq)
        assert_array_equal(a2_inv, inv)

        # test for chararrays with return_inverse (gh-5099)
        a = np.chararray(5)
        a[...] = ''
        a2, a2_inv = np.unique(a, return_inverse=True)
        assert_array_equal(a2_inv, np.zeros(5))

        # test for ticket #9137
        a = []
        a1_idx = np.unique(a, return_index=True)[1]
        a2_inv = np.unique(a, return_inverse=True)[1]
        a3_idx, a3_inv = np.unique(a, return_index=True,
                                   return_inverse=True)[1:]
        assert_equal(a1_idx.dtype, np.intp)
        assert_equal(a2_inv.dtype, np.intp)
        assert_equal(a3_idx.dtype, np.intp)
        assert_equal(a3_inv.dtype, np.intp)

        # test for ticket 2111 - float
        a = [2.0, np.nan, 1.0, np.nan]
        ua = [1.0, 2.0, np.nan]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket 2111 - complex
        a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)]
        ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)]
        ua_idx = [2, 0, 3]
        ua_inv = [1, 2, 0, 2, 2]
        ua_cnt = [1, 1, 3]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket 2111 - datetime64
        nat = np.datetime64('nat')
        a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat]
        ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for ticket 2111 - timedelta
        nat = np.timedelta64('nat')
        a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]
        ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]
        ua_idx = [2, 0, 1]
        ua_inv = [1, 2, 0, 2]
        ua_cnt = [1, 1, 2]
        assert_equal(np.unique(a), ua)
        assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))

        # test for gh-19300
        all_nans = [np.nan] * 4
        ua = [np.nan]
        ua_idx = [0]
        ua_inv = [0, 0, 0, 0]
        ua_cnt = [4]
        assert_equal(np.unique(all_nans), ua)
        assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))
        assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))
        assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))
def test_unique_axis_errors(self):
|
794 |
+
assert_raises(TypeError, self._run_axis_tests, object)
|
795 |
+
assert_raises(TypeError, self._run_axis_tests,
|
796 |
+
[('a', int), ('b', object)])
|
797 |
+
|
798 |
+
assert_raises(np.AxisError, unique, np.arange(10), axis=2)
|
799 |
+
assert_raises(np.AxisError, unique, np.arange(10), axis=-2)
|
800 |
+
|
801 |
+
def test_unique_axis_list(self):
|
802 |
+
msg = "Unique failed on list of lists"
|
803 |
+
inp = [[0, 1, 0], [0, 1, 0]]
|
804 |
+
inp_arr = np.asarray(inp)
|
805 |
+
assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
|
806 |
+
assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)
|
807 |
+
|
808 |
+
def test_unique_axis(self):
|
809 |
+
types = []
|
810 |
+
types.extend(np.typecodes['AllInteger'])
|
811 |
+
types.extend(np.typecodes['AllFloat'])
|
812 |
+
types.append('datetime64[D]')
|
813 |
+
types.append('timedelta64[D]')
|
814 |
+
types.append([('a', int), ('b', int)])
|
815 |
+
types.append([('a', int), ('b', float)])
|
816 |
+
|
817 |
+
for dtype in types:
|
818 |
+
self._run_axis_tests(dtype)
|
819 |
+
|
820 |
+
msg = 'Non-bitwise-equal booleans test failed'
|
821 |
+
data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
|
822 |
+
result = np.array([[False, True], [True, True]], dtype=bool)
|
823 |
+
assert_array_equal(unique(data, axis=0), result, msg)
|
824 |
+
|
825 |
+
msg = 'Negative zero equality test failed'
|
826 |
+
data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
|
827 |
+
result = np.array([[-0.0, 0.0]])
|
828 |
+
assert_array_equal(unique(data, axis=0), result, msg)
|
829 |
+
|
830 |
+
@pytest.mark.parametrize("axis", [0, -1])
|
831 |
+
def test_unique_1d_with_axis(self, axis):
|
832 |
+
x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
|
833 |
+
uniq = unique(x, axis=axis)
|
834 |
+
assert_array_equal(uniq, [1, 2, 3, 4])
|
835 |
+
|
836 |
+
    def test_unique_axis_zeros(self):
        """unique with an axis on zero-sized arrays (gh-15559): the result
        keeps the dtype and has the expected (possibly empty) shape."""
        # issue 15559
        single_zero = np.empty(shape=(2, 0), dtype=np.int8)
        uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)

        # there's 1 element of shape (0,) along axis 0
        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(1, 0)))
        assert_array_equal(idx, np.array([0]))
        assert_array_equal(inv, np.array([0, 0]))
        assert_array_equal(cnt, np.array([2]))

        # there's 0 elements of shape (2,) along axis 1
        uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)

        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(2, 0)))
        assert_array_equal(idx, np.array([]))
        assert_array_equal(inv, np.array([]))
        assert_array_equal(cnt, np.array([]))

        # test a "complicated" shape
        shape = (0, 2, 0, 3, 0, 4, 0)
        multiple_zeros = np.empty(shape=shape)
        for axis in range(len(shape)):
            expected_shape = list(shape)
            if shape[axis] == 0:
                # zero elements along the reduced axis stay zero
                expected_shape[axis] = 0
            else:
                # all slices along a non-empty axis are (empty and) equal,
                # so they collapse to a single unique slice
                expected_shape[axis] = 1

            assert_array_equal(unique(multiple_zeros, axis=axis),
                               np.empty(shape=expected_shape))
def test_unique_masked(self):
|
873 |
+
# issue 8664
|
874 |
+
x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
|
875 |
+
dtype='uint8')
|
876 |
+
y = np.ma.masked_equal(x, 0)
|
877 |
+
|
878 |
+
v = np.unique(y)
|
879 |
+
v2, i, c = np.unique(y, return_index=True, return_counts=True)
|
880 |
+
|
881 |
+
msg = 'Unique returned different results when asked for index'
|
882 |
+
assert_array_equal(v.data, v2.data, msg)
|
883 |
+
assert_array_equal(v.mask, v2.mask, msg)
|
884 |
+
|
885 |
+
def test_unique_sort_order_with_axis(self):
|
886 |
+
# These tests fail if sorting along axis is done by treating subarrays
|
887 |
+
# as unsigned byte strings. See gh-10495.
|
888 |
+
fmt = "sort order incorrect for integer type '%s'"
|
889 |
+
for dt in 'bhilq':
|
890 |
+
a = np.array([[-1], [0]], dt)
|
891 |
+
b = np.unique(a, axis=0)
|
892 |
+
assert_array_equal(a, b, fmt % dt)
|
893 |
+
|
894 |
+
def _run_axis_tests(self, dtype):
|
895 |
+
data = np.array([[0, 1, 0, 0],
|
896 |
+
[1, 0, 0, 0],
|
897 |
+
[0, 1, 0, 0],
|
898 |
+
[1, 0, 0, 0]]).astype(dtype)
|
899 |
+
|
900 |
+
msg = 'Unique with 1d array and axis=0 failed'
|
901 |
+
result = np.array([0, 1])
|
902 |
+
assert_array_equal(unique(data), result.astype(dtype), msg)
|
903 |
+
|
904 |
+
msg = 'Unique with 2d array and axis=0 failed'
|
905 |
+
result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
|
906 |
+
assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)
|
907 |
+
|
908 |
+
msg = 'Unique with 2d array and axis=1 failed'
|
909 |
+
result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
|
910 |
+
assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
|
911 |
+
|
912 |
+
msg = 'Unique with 3d array and axis=2 failed'
|
913 |
+
data3d = np.array([[[1, 1],
|
914 |
+
[1, 0]],
|
915 |
+
[[0, 1],
|
916 |
+
[0, 0]]]).astype(dtype)
|
917 |
+
result = np.take(data3d, [1, 0], axis=2)
|
918 |
+
assert_array_equal(unique(data3d, axis=2), result, msg)
|
919 |
+
|
920 |
+
uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
|
921 |
+
return_inverse=True, return_counts=True)
|
922 |
+
msg = "Unique's return_index=True failed with axis=0"
|
923 |
+
assert_array_equal(data[idx], uniq, msg)
|
924 |
+
msg = "Unique's return_inverse=True failed with axis=0"
|
925 |
+
assert_array_equal(uniq[inv], data)
|
926 |
+
msg = "Unique's return_counts=True failed with axis=0"
|
927 |
+
assert_array_equal(cnt, np.array([2, 2]), msg)
|
928 |
+
|
929 |
+
uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
|
930 |
+
return_inverse=True, return_counts=True)
|
931 |
+
msg = "Unique's return_index=True failed with axis=1"
|
932 |
+
assert_array_equal(data[:, idx], uniq)
|
933 |
+
msg = "Unique's return_inverse=True failed with axis=1"
|
934 |
+
assert_array_equal(uniq[:, inv], data)
|
935 |
+
msg = "Unique's return_counts=True failed with axis=1"
|
936 |
+
assert_array_equal(cnt, np.array([2, 1, 1]), msg)
|
937 |
+
|
938 |
+
def test_unique_nanequals(self):
|
939 |
+
# issue 20326
|
940 |
+
a = np.array([1, 1, np.nan, np.nan, np.nan])
|
941 |
+
unq = np.unique(a)
|
942 |
+
not_unq = np.unique(a, equal_nan=False)
|
943 |
+
assert_array_equal(unq, np.array([1, np.nan]))
|
944 |
+
assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan]))
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_arrayterator.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from operator import mul
|
2 |
+
from functools import reduce
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.random import randint
|
6 |
+
from numpy.lib import Arrayterator
|
7 |
+
from numpy.testing import assert_
|
8 |
+
|
9 |
+
|
10 |
+
def test():
|
11 |
+
np.random.seed(np.arange(10))
|
12 |
+
|
13 |
+
# Create a random array
|
14 |
+
ndims = randint(5)+1
|
15 |
+
shape = tuple(randint(10)+1 for dim in range(ndims))
|
16 |
+
els = reduce(mul, shape)
|
17 |
+
a = np.arange(els)
|
18 |
+
a.shape = shape
|
19 |
+
|
20 |
+
buf_size = randint(2*els)
|
21 |
+
b = Arrayterator(a, buf_size)
|
22 |
+
|
23 |
+
# Check that each block has at most ``buf_size`` elements
|
24 |
+
for block in b:
|
25 |
+
assert_(len(block.flat) <= (buf_size or els))
|
26 |
+
|
27 |
+
# Check that all elements are iterated correctly
|
28 |
+
assert_(list(b.flat) == list(a.flat))
|
29 |
+
|
30 |
+
# Slice arrayterator
|
31 |
+
start = [randint(dim) for dim in shape]
|
32 |
+
stop = [randint(dim)+1 for dim in shape]
|
33 |
+
step = [randint(dim)+1 for dim in shape]
|
34 |
+
slice_ = tuple(slice(*t) for t in zip(start, stop, step))
|
35 |
+
c = b[slice_]
|
36 |
+
d = a[slice_]
|
37 |
+
|
38 |
+
# Check that each block has at most ``buf_size`` elements
|
39 |
+
for block in c:
|
40 |
+
assert_(len(block.flat) <= (buf_size or els))
|
41 |
+
|
42 |
+
# Check that the arrayterator is sliced correctly
|
43 |
+
assert_(np.all(c.__array__() == d))
|
44 |
+
|
45 |
+
# Check that all elements are iterated correctly
|
46 |
+
assert_(list(c.flat) == list(d.flat))
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_financial_expired.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import pytest
|
3 |
+
import numpy as np
|
4 |
+
|
5 |
+
|
6 |
+
def test_financial_expired():
|
7 |
+
match = 'NEP 32'
|
8 |
+
with pytest.warns(DeprecationWarning, match=match):
|
9 |
+
func = np.fv
|
10 |
+
with pytest.raises(RuntimeError, match=match):
|
11 |
+
func(1, 2, 3)
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_format.py
ADDED
@@ -0,0 +1,1028 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# doctest
|
2 |
+
r''' Test the .npy file format.
|
3 |
+
|
4 |
+
Set up:
|
5 |
+
|
6 |
+
>>> import sys
|
7 |
+
>>> from io import BytesIO
|
8 |
+
>>> from numpy.lib import format
|
9 |
+
>>>
|
10 |
+
>>> scalars = [
|
11 |
+
... np.uint8,
|
12 |
+
... np.int8,
|
13 |
+
... np.uint16,
|
14 |
+
... np.int16,
|
15 |
+
... np.uint32,
|
16 |
+
... np.int32,
|
17 |
+
... np.uint64,
|
18 |
+
... np.int64,
|
19 |
+
... np.float32,
|
20 |
+
... np.float64,
|
21 |
+
... np.complex64,
|
22 |
+
... np.complex128,
|
23 |
+
... object,
|
24 |
+
... ]
|
25 |
+
>>>
|
26 |
+
>>> basic_arrays = []
|
27 |
+
>>>
|
28 |
+
>>> for scalar in scalars:
|
29 |
+
... for endian in '<>':
|
30 |
+
... dtype = np.dtype(scalar).newbyteorder(endian)
|
31 |
+
... basic = np.arange(15).astype(dtype)
|
32 |
+
... basic_arrays.extend([
|
33 |
+
... np.array([], dtype=dtype),
|
34 |
+
... np.array(10, dtype=dtype),
|
35 |
+
... basic,
|
36 |
+
... basic.reshape((3,5)),
|
37 |
+
... basic.reshape((3,5)).T,
|
38 |
+
... basic.reshape((3,5))[::-1,::2],
|
39 |
+
... ])
|
40 |
+
...
|
41 |
+
>>>
|
42 |
+
>>> Pdescr = [
|
43 |
+
... ('x', 'i4', (2,)),
|
44 |
+
... ('y', 'f8', (2, 2)),
|
45 |
+
... ('z', 'u1')]
|
46 |
+
>>>
|
47 |
+
>>>
|
48 |
+
>>> PbufferT = [
|
49 |
+
... ([3,2], [[6.,4.],[6.,4.]], 8),
|
50 |
+
... ([4,3], [[7.,5.],[7.,5.]], 9),
|
51 |
+
... ]
|
52 |
+
>>>
|
53 |
+
>>>
|
54 |
+
>>> Ndescr = [
|
55 |
+
... ('x', 'i4', (2,)),
|
56 |
+
... ('Info', [
|
57 |
+
... ('value', 'c16'),
|
58 |
+
... ('y2', 'f8'),
|
59 |
+
... ('Info2', [
|
60 |
+
... ('name', 'S2'),
|
61 |
+
... ('value', 'c16', (2,)),
|
62 |
+
... ('y3', 'f8', (2,)),
|
63 |
+
... ('z3', 'u4', (2,))]),
|
64 |
+
... ('name', 'S2'),
|
65 |
+
... ('z2', 'b1')]),
|
66 |
+
... ('color', 'S2'),
|
67 |
+
... ('info', [
|
68 |
+
... ('Name', 'U8'),
|
69 |
+
... ('Value', 'c16')]),
|
70 |
+
... ('y', 'f8', (2, 2)),
|
71 |
+
... ('z', 'u1')]
|
72 |
+
>>>
|
73 |
+
>>>
|
74 |
+
>>> NbufferT = [
|
75 |
+
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
|
76 |
+
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
|
77 |
+
... ]
|
78 |
+
>>>
|
79 |
+
>>>
|
80 |
+
>>> record_arrays = [
|
81 |
+
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
|
82 |
+
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
|
83 |
+
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
|
84 |
+
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
|
85 |
+
... ]
|
86 |
+
|
87 |
+
Test the magic string writing.
|
88 |
+
|
89 |
+
>>> format.magic(1, 0)
|
90 |
+
'\x93NUMPY\x01\x00'
|
91 |
+
>>> format.magic(0, 0)
|
92 |
+
'\x93NUMPY\x00\x00'
|
93 |
+
>>> format.magic(255, 255)
|
94 |
+
'\x93NUMPY\xff\xff'
|
95 |
+
>>> format.magic(2, 5)
|
96 |
+
'\x93NUMPY\x02\x05'
|
97 |
+
|
98 |
+
Test the magic string reading.
|
99 |
+
|
100 |
+
>>> format.read_magic(BytesIO(format.magic(1, 0)))
|
101 |
+
(1, 0)
|
102 |
+
>>> format.read_magic(BytesIO(format.magic(0, 0)))
|
103 |
+
(0, 0)
|
104 |
+
>>> format.read_magic(BytesIO(format.magic(255, 255)))
|
105 |
+
(255, 255)
|
106 |
+
>>> format.read_magic(BytesIO(format.magic(2, 5)))
|
107 |
+
(2, 5)
|
108 |
+
|
109 |
+
Test the header writing.
|
110 |
+
|
111 |
+
>>> for arr in basic_arrays + record_arrays:
|
112 |
+
... f = BytesIO()
|
113 |
+
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
|
114 |
+
... print(repr(f.getvalue()))
|
115 |
+
...
|
116 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
|
117 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
|
118 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
|
119 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
120 |
+
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
121 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
122 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
|
123 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
|
124 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
|
125 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
126 |
+
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
127 |
+
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
128 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
|
129 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
|
130 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
|
131 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
132 |
+
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
133 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
134 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
|
135 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
|
136 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
|
137 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
138 |
+
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
139 |
+
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
140 |
+
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
|
141 |
+
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
|
142 |
+
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
|
143 |
+
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
144 |
+
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
145 |
+
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
146 |
+
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
|
147 |
+
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
|
148 |
+
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
|
149 |
+
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
150 |
+
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
151 |
+
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
152 |
+
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
|
153 |
+
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
|
154 |
+
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
|
155 |
+
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
156 |
+
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
157 |
+
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
158 |
+
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
|
159 |
+
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
|
160 |
+
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
|
161 |
+
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
162 |
+
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
163 |
+
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
164 |
+
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
|
165 |
+
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
|
166 |
+
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
|
167 |
+
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
168 |
+
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
169 |
+
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
170 |
+
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
|
171 |
+
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
|
172 |
+
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
|
173 |
+
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
174 |
+
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
175 |
+
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
176 |
+
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
|
177 |
+
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
|
178 |
+
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
|
179 |
+
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
180 |
+
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
181 |
+
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
182 |
+
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
|
183 |
+
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
|
184 |
+
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
|
185 |
+
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
186 |
+
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
187 |
+
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
188 |
+
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
|
189 |
+
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
|
190 |
+
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
|
191 |
+
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
192 |
+
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
193 |
+
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
194 |
+
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
|
195 |
+
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
|
196 |
+
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
|
197 |
+
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
198 |
+
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
199 |
+
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
200 |
+
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
|
201 |
+
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
|
202 |
+
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
|
203 |
+
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
204 |
+
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
205 |
+
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
206 |
+
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
|
207 |
+
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
|
208 |
+
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
|
209 |
+
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
210 |
+
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
211 |
+
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
212 |
+
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
|
213 |
+
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
|
214 |
+
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
|
215 |
+
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
216 |
+
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
217 |
+
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
218 |
+
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
|
219 |
+
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
|
220 |
+
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
|
221 |
+
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
222 |
+
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
223 |
+
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
224 |
+
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
|
225 |
+
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
|
226 |
+
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
|
227 |
+
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
228 |
+
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
229 |
+
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
230 |
+
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
|
231 |
+
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
|
232 |
+
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
|
233 |
+
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
234 |
+
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
235 |
+
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
236 |
+
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
|
237 |
+
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
|
238 |
+
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
|
239 |
+
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
240 |
+
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
241 |
+
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
242 |
+
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
|
243 |
+
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
|
244 |
+
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
|
245 |
+
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
246 |
+
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
247 |
+
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
248 |
+
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
|
249 |
+
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
|
250 |
+
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
|
251 |
+
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
|
252 |
+
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
|
253 |
+
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
|
254 |
+
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
|
255 |
+
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
|
256 |
+
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
|
257 |
+
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
|
258 |
+
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
|
259 |
+
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
|
260 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
|
261 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
|
262 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
|
263 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
|
264 |
+
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
|
265 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
|
266 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
|
267 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
|
268 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
|
269 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
|
270 |
+
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
|
271 |
+
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
|
272 |
+
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
273 |
+
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
274 |
+
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
275 |
+
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
276 |
+
'''
|
277 |
+
import sys
|
278 |
+
import os
|
279 |
+
import warnings
|
280 |
+
import pytest
|
281 |
+
from io import BytesIO
|
282 |
+
|
283 |
+
import numpy as np
|
284 |
+
from numpy.testing import (
|
285 |
+
assert_, assert_array_equal, assert_raises, assert_raises_regex,
|
286 |
+
assert_warns, IS_PYPY, IS_WASM
|
287 |
+
)
|
288 |
+
from numpy.testing._private.utils import requires_memory
|
289 |
+
from numpy.lib import format
|
290 |
+
|
291 |
+
|
292 |
+
# Generate some basic arrays to test with.
|
293 |
+
scalars = [
|
294 |
+
np.uint8,
|
295 |
+
np.int8,
|
296 |
+
np.uint16,
|
297 |
+
np.int16,
|
298 |
+
np.uint32,
|
299 |
+
np.int32,
|
300 |
+
np.uint64,
|
301 |
+
np.int64,
|
302 |
+
np.float32,
|
303 |
+
np.float64,
|
304 |
+
np.complex64,
|
305 |
+
np.complex128,
|
306 |
+
object,
|
307 |
+
]
|
308 |
+
basic_arrays = []
|
309 |
+
for scalar in scalars:
|
310 |
+
for endian in '<>':
|
311 |
+
dtype = np.dtype(scalar).newbyteorder(endian)
|
312 |
+
basic = np.arange(1500).astype(dtype)
|
313 |
+
basic_arrays.extend([
|
314 |
+
# Empty
|
315 |
+
np.array([], dtype=dtype),
|
316 |
+
# Rank-0
|
317 |
+
np.array(10, dtype=dtype),
|
318 |
+
# 1-D
|
319 |
+
basic,
|
320 |
+
# 2-D C-contiguous
|
321 |
+
basic.reshape((30, 50)),
|
322 |
+
# 2-D F-contiguous
|
323 |
+
basic.reshape((30, 50)).T,
|
324 |
+
# 2-D non-contiguous
|
325 |
+
basic.reshape((30, 50))[::-1, ::2],
|
326 |
+
])
|
327 |
+
|
328 |
+
# More complicated record arrays.
|
329 |
+
# This is the structure of the table used for plain objects:
|
330 |
+
#
|
331 |
+
# +-+-+-+
|
332 |
+
# |x|y|z|
|
333 |
+
# +-+-+-+
|
334 |
+
|
335 |
+
# Structure of a plain array description:
|
336 |
+
Pdescr = [
|
337 |
+
('x', 'i4', (2,)),
|
338 |
+
('y', 'f8', (2, 2)),
|
339 |
+
('z', 'u1')]
|
340 |
+
|
341 |
+
# A plain list of tuples with values for testing:
|
342 |
+
PbufferT = [
|
343 |
+
# x y z
|
344 |
+
([3, 2], [[6., 4.], [6., 4.]], 8),
|
345 |
+
([4, 3], [[7., 5.], [7., 5.]], 9),
|
346 |
+
]
|
347 |
+
|
348 |
+
|
349 |
+
# This is the structure of the table used for nested objects (DON'T PANIC!):
|
350 |
+
#
|
351 |
+
# +-+---------------------------------+-----+----------+-+-+
|
352 |
+
# |x|Info |color|info |y|z|
|
353 |
+
# | +-----+--+----------------+----+--+ +----+-----+ | |
|
354 |
+
# | |value|y2|Info2 |name|z2| |Name|Value| | |
|
355 |
+
# | | | +----+-----+--+--+ | | | | | | |
|
356 |
+
# | | | |name|value|y3|z3| | | | | | | |
|
357 |
+
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
|
358 |
+
#
|
359 |
+
|
360 |
+
# The corresponding nested array description:
|
361 |
+
Ndescr = [
|
362 |
+
('x', 'i4', (2,)),
|
363 |
+
('Info', [
|
364 |
+
('value', 'c16'),
|
365 |
+
('y2', 'f8'),
|
366 |
+
('Info2', [
|
367 |
+
('name', 'S2'),
|
368 |
+
('value', 'c16', (2,)),
|
369 |
+
('y3', 'f8', (2,)),
|
370 |
+
('z3', 'u4', (2,))]),
|
371 |
+
('name', 'S2'),
|
372 |
+
('z2', 'b1')]),
|
373 |
+
('color', 'S2'),
|
374 |
+
('info', [
|
375 |
+
('Name', 'U8'),
|
376 |
+
('Value', 'c16')]),
|
377 |
+
('y', 'f8', (2, 2)),
|
378 |
+
('z', 'u1')]
|
379 |
+
|
380 |
+
NbufferT = [
|
381 |
+
# x Info color info y z
|
382 |
+
# value y2 Info2 name z2 Name Value
|
383 |
+
# name value y3 z3
|
384 |
+
([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
|
385 |
+
'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
|
386 |
+
([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
|
387 |
+
'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
|
388 |
+
]
|
389 |
+
|
390 |
+
record_arrays = [
|
391 |
+
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
|
392 |
+
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
|
393 |
+
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
|
394 |
+
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
|
395 |
+
np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
|
396 |
+
]
|
397 |
+
|
398 |
+
|
399 |
+
#BytesIO that reads a random number of bytes at a time
|
400 |
+
class BytesIOSRandomSize(BytesIO):
|
401 |
+
def read(self, size=None):
|
402 |
+
import random
|
403 |
+
size = random.randint(1, size)
|
404 |
+
return super().read(size)
|
405 |
+
|
406 |
+
|
407 |
+
def roundtrip(arr):
|
408 |
+
f = BytesIO()
|
409 |
+
format.write_array(f, arr)
|
410 |
+
f2 = BytesIO(f.getvalue())
|
411 |
+
arr2 = format.read_array(f2, allow_pickle=True)
|
412 |
+
return arr2
|
413 |
+
|
414 |
+
|
415 |
+
def roundtrip_randsize(arr):
|
416 |
+
f = BytesIO()
|
417 |
+
format.write_array(f, arr)
|
418 |
+
f2 = BytesIOSRandomSize(f.getvalue())
|
419 |
+
arr2 = format.read_array(f2)
|
420 |
+
return arr2
|
421 |
+
|
422 |
+
|
423 |
+
def roundtrip_truncated(arr):
|
424 |
+
f = BytesIO()
|
425 |
+
format.write_array(f, arr)
|
426 |
+
#BytesIO is one byte short
|
427 |
+
f2 = BytesIO(f.getvalue()[0:-1])
|
428 |
+
arr2 = format.read_array(f2)
|
429 |
+
return arr2
|
430 |
+
|
431 |
+
|
432 |
+
def assert_equal_(o1, o2):
|
433 |
+
assert_(o1 == o2)
|
434 |
+
|
435 |
+
|
436 |
+
def test_roundtrip():
|
437 |
+
for arr in basic_arrays + record_arrays:
|
438 |
+
arr2 = roundtrip(arr)
|
439 |
+
assert_array_equal(arr, arr2)
|
440 |
+
|
441 |
+
|
442 |
+
def test_roundtrip_randsize():
|
443 |
+
for arr in basic_arrays + record_arrays:
|
444 |
+
if arr.dtype != object:
|
445 |
+
arr2 = roundtrip_randsize(arr)
|
446 |
+
assert_array_equal(arr, arr2)
|
447 |
+
|
448 |
+
|
449 |
+
def test_roundtrip_truncated():
|
450 |
+
for arr in basic_arrays:
|
451 |
+
if arr.dtype != object:
|
452 |
+
assert_raises(ValueError, roundtrip_truncated, arr)
|
453 |
+
|
454 |
+
|
455 |
+
def test_long_str():
|
456 |
+
# check items larger than internal buffer size, gh-4027
|
457 |
+
long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
|
458 |
+
long_str_arr2 = roundtrip(long_str_arr)
|
459 |
+
assert_array_equal(long_str_arr, long_str_arr2)
|
460 |
+
|
461 |
+
|
462 |
+
@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
|
463 |
+
@pytest.mark.slow
|
464 |
+
def test_memmap_roundtrip(tmpdir):
|
465 |
+
for i, arr in enumerate(basic_arrays + record_arrays):
|
466 |
+
if arr.dtype.hasobject:
|
467 |
+
# Skip these since they can't be mmap'ed.
|
468 |
+
continue
|
469 |
+
# Write it out normally and through mmap.
|
470 |
+
nfn = os.path.join(tmpdir, f'normal{i}.npy')
|
471 |
+
mfn = os.path.join(tmpdir, f'memmap{i}.npy')
|
472 |
+
with open(nfn, 'wb') as fp:
|
473 |
+
format.write_array(fp, arr)
|
474 |
+
|
475 |
+
fortran_order = (
|
476 |
+
arr.flags.f_contiguous and not arr.flags.c_contiguous)
|
477 |
+
ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
|
478 |
+
shape=arr.shape, fortran_order=fortran_order)
|
479 |
+
ma[...] = arr
|
480 |
+
ma.flush()
|
481 |
+
|
482 |
+
# Check that both of these files' contents are the same.
|
483 |
+
with open(nfn, 'rb') as fp:
|
484 |
+
normal_bytes = fp.read()
|
485 |
+
with open(mfn, 'rb') as fp:
|
486 |
+
memmap_bytes = fp.read()
|
487 |
+
assert_equal_(normal_bytes, memmap_bytes)
|
488 |
+
|
489 |
+
# Check that reading the file using memmap works.
|
490 |
+
ma = format.open_memmap(nfn, mode='r')
|
491 |
+
ma.flush()
|
492 |
+
|
493 |
+
|
494 |
+
def test_compressed_roundtrip(tmpdir):
|
495 |
+
arr = np.random.rand(200, 200)
|
496 |
+
npz_file = os.path.join(tmpdir, 'compressed.npz')
|
497 |
+
np.savez_compressed(npz_file, arr=arr)
|
498 |
+
with np.load(npz_file) as npz:
|
499 |
+
arr1 = npz['arr']
|
500 |
+
assert_array_equal(arr, arr1)
|
501 |
+
|
502 |
+
|
503 |
+
# aligned
|
504 |
+
dt1 = np.dtype('i1, i4, i1', align=True)
|
505 |
+
# non-aligned, explicit offsets
|
506 |
+
dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
|
507 |
+
'offsets': [1, 6]})
|
508 |
+
# nested struct-in-struct
|
509 |
+
dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
|
510 |
+
# field with '' name
|
511 |
+
dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
|
512 |
+
# titles
|
513 |
+
dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
|
514 |
+
'offsets': [1, 6], 'titles': ['aa', 'bb']})
|
515 |
+
# empty
|
516 |
+
dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
|
517 |
+
|
518 |
+
@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
|
519 |
+
def test_load_padded_dtype(tmpdir, dt):
|
520 |
+
arr = np.zeros(3, dt)
|
521 |
+
for i in range(3):
|
522 |
+
arr[i] = i + 5
|
523 |
+
npz_file = os.path.join(tmpdir, 'aligned.npz')
|
524 |
+
np.savez(npz_file, arr=arr)
|
525 |
+
with np.load(npz_file) as npz:
|
526 |
+
arr1 = npz['arr']
|
527 |
+
assert_array_equal(arr, arr1)
|
528 |
+
|
529 |
+
|
530 |
+
@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988")
|
531 |
+
@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")
|
532 |
+
def test_python2_python3_interoperability():
|
533 |
+
fname = 'win64python2.npy'
|
534 |
+
path = os.path.join(os.path.dirname(__file__), 'data', fname)
|
535 |
+
with pytest.warns(UserWarning, match="Reading.*this warning\\."):
|
536 |
+
data = np.load(path)
|
537 |
+
assert_array_equal(data, np.ones(2))
|
538 |
+
|
539 |
+
def test_pickle_python2_python3():
|
540 |
+
# Test that loading object arrays saved on Python 2 works both on
|
541 |
+
# Python 2 and Python 3 and vice versa
|
542 |
+
data_dir = os.path.join(os.path.dirname(__file__), 'data')
|
543 |
+
|
544 |
+
expected = np.array([None, range, '\u512a\u826f',
|
545 |
+
b'\xe4\xb8\x8d\xe8\x89\xaf'],
|
546 |
+
dtype=object)
|
547 |
+
|
548 |
+
for fname in ['py2-objarr.npy', 'py2-objarr.npz',
|
549 |
+
'py3-objarr.npy', 'py3-objarr.npz']:
|
550 |
+
path = os.path.join(data_dir, fname)
|
551 |
+
|
552 |
+
for encoding in ['bytes', 'latin1']:
|
553 |
+
data_f = np.load(path, allow_pickle=True, encoding=encoding)
|
554 |
+
if fname.endswith('.npz'):
|
555 |
+
data = data_f['x']
|
556 |
+
data_f.close()
|
557 |
+
else:
|
558 |
+
data = data_f
|
559 |
+
|
560 |
+
if encoding == 'latin1' and fname.startswith('py2'):
|
561 |
+
assert_(isinstance(data[3], str))
|
562 |
+
assert_array_equal(data[:-1], expected[:-1])
|
563 |
+
# mojibake occurs
|
564 |
+
assert_array_equal(data[-1].encode(encoding), expected[-1])
|
565 |
+
else:
|
566 |
+
assert_(isinstance(data[3], bytes))
|
567 |
+
assert_array_equal(data, expected)
|
568 |
+
|
569 |
+
if fname.startswith('py2'):
|
570 |
+
if fname.endswith('.npz'):
|
571 |
+
data = np.load(path, allow_pickle=True)
|
572 |
+
assert_raises(UnicodeError, data.__getitem__, 'x')
|
573 |
+
data.close()
|
574 |
+
data = np.load(path, allow_pickle=True, fix_imports=False,
|
575 |
+
encoding='latin1')
|
576 |
+
assert_raises(ImportError, data.__getitem__, 'x')
|
577 |
+
data.close()
|
578 |
+
else:
|
579 |
+
assert_raises(UnicodeError, np.load, path,
|
580 |
+
allow_pickle=True)
|
581 |
+
assert_raises(ImportError, np.load, path,
|
582 |
+
allow_pickle=True, fix_imports=False,
|
583 |
+
encoding='latin1')
|
584 |
+
|
585 |
+
|
586 |
+
def test_pickle_disallow(tmpdir):
|
587 |
+
data_dir = os.path.join(os.path.dirname(__file__), 'data')
|
588 |
+
|
589 |
+
path = os.path.join(data_dir, 'py2-objarr.npy')
|
590 |
+
assert_raises(ValueError, np.load, path,
|
591 |
+
allow_pickle=False, encoding='latin1')
|
592 |
+
|
593 |
+
path = os.path.join(data_dir, 'py2-objarr.npz')
|
594 |
+
with np.load(path, allow_pickle=False, encoding='latin1') as f:
|
595 |
+
assert_raises(ValueError, f.__getitem__, 'x')
|
596 |
+
|
597 |
+
path = os.path.join(tmpdir, 'pickle-disabled.npy')
|
598 |
+
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
|
599 |
+
allow_pickle=False)
|
600 |
+
|
601 |
+
@pytest.mark.parametrize('dt', [
|
602 |
+
np.dtype(np.dtype([('a', np.int8),
|
603 |
+
('b', np.int16),
|
604 |
+
('c', np.int32),
|
605 |
+
], align=True),
|
606 |
+
(3,)),
|
607 |
+
np.dtype([('x', np.dtype({'names':['a','b'],
|
608 |
+
'formats':['i1','i1'],
|
609 |
+
'offsets':[0,4],
|
610 |
+
'itemsize':8,
|
611 |
+
},
|
612 |
+
(3,)),
|
613 |
+
(4,),
|
614 |
+
)]),
|
615 |
+
np.dtype([('x',
|
616 |
+
('<f8', (5,)),
|
617 |
+
(2,),
|
618 |
+
)]),
|
619 |
+
np.dtype([('x', np.dtype((
|
620 |
+
np.dtype((
|
621 |
+
np.dtype({'names':['a','b'],
|
622 |
+
'formats':['i1','i1'],
|
623 |
+
'offsets':[0,4],
|
624 |
+
'itemsize':8}),
|
625 |
+
(3,)
|
626 |
+
)),
|
627 |
+
(4,)
|
628 |
+
)))
|
629 |
+
]),
|
630 |
+
np.dtype([
|
631 |
+
('a', np.dtype((
|
632 |
+
np.dtype((
|
633 |
+
np.dtype((
|
634 |
+
np.dtype([
|
635 |
+
('a', int),
|
636 |
+
('b', np.dtype({'names':['a','b'],
|
637 |
+
'formats':['i1','i1'],
|
638 |
+
'offsets':[0,4],
|
639 |
+
'itemsize':8})),
|
640 |
+
]),
|
641 |
+
(3,),
|
642 |
+
)),
|
643 |
+
(4,),
|
644 |
+
)),
|
645 |
+
(5,),
|
646 |
+
)))
|
647 |
+
]),
|
648 |
+
])
|
649 |
+
|
650 |
+
def test_descr_to_dtype(dt):
|
651 |
+
dt1 = format.descr_to_dtype(dt.descr)
|
652 |
+
assert_equal_(dt1, dt)
|
653 |
+
arr1 = np.zeros(3, dt)
|
654 |
+
arr2 = roundtrip(arr1)
|
655 |
+
assert_array_equal(arr1, arr2)
|
656 |
+
|
657 |
+
def test_version_2_0():
|
658 |
+
f = BytesIO()
|
659 |
+
# requires more than 2 byte for header
|
660 |
+
dt = [(("%d" % i) * 100, float) for i in range(500)]
|
661 |
+
d = np.ones(1000, dtype=dt)
|
662 |
+
|
663 |
+
format.write_array(f, d, version=(2, 0))
|
664 |
+
with warnings.catch_warnings(record=True) as w:
|
665 |
+
warnings.filterwarnings('always', '', UserWarning)
|
666 |
+
format.write_array(f, d)
|
667 |
+
assert_(w[0].category is UserWarning)
|
668 |
+
|
669 |
+
# check alignment of data portion
|
670 |
+
f.seek(0)
|
671 |
+
header = f.readline()
|
672 |
+
assert_(len(header) % format.ARRAY_ALIGN == 0)
|
673 |
+
|
674 |
+
f.seek(0)
|
675 |
+
n = format.read_array(f, max_header_size=200000)
|
676 |
+
assert_array_equal(d, n)
|
677 |
+
|
678 |
+
# 1.0 requested but data cannot be saved this way
|
679 |
+
assert_raises(ValueError, format.write_array, f, d, (1, 0))
|
680 |
+
|
681 |
+
|
682 |
+
@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
|
683 |
+
def test_version_2_0_memmap(tmpdir):
|
684 |
+
# requires more than 2 byte for header
|
685 |
+
dt = [(("%d" % i) * 100, float) for i in range(500)]
|
686 |
+
d = np.ones(1000, dtype=dt)
|
687 |
+
tf1 = os.path.join(tmpdir, f'version2_01.npy')
|
688 |
+
tf2 = os.path.join(tmpdir, f'version2_02.npy')
|
689 |
+
|
690 |
+
# 1.0 requested but data cannot be saved this way
|
691 |
+
assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype,
|
692 |
+
shape=d.shape, version=(1, 0))
|
693 |
+
|
694 |
+
ma = format.open_memmap(tf1, mode='w+', dtype=d.dtype,
|
695 |
+
shape=d.shape, version=(2, 0))
|
696 |
+
ma[...] = d
|
697 |
+
ma.flush()
|
698 |
+
ma = format.open_memmap(tf1, mode='r', max_header_size=200000)
|
699 |
+
assert_array_equal(ma, d)
|
700 |
+
|
701 |
+
with warnings.catch_warnings(record=True) as w:
|
702 |
+
warnings.filterwarnings('always', '', UserWarning)
|
703 |
+
ma = format.open_memmap(tf2, mode='w+', dtype=d.dtype,
|
704 |
+
shape=d.shape, version=None)
|
705 |
+
assert_(w[0].category is UserWarning)
|
706 |
+
ma[...] = d
|
707 |
+
ma.flush()
|
708 |
+
|
709 |
+
ma = format.open_memmap(tf2, mode='r', max_header_size=200000)
|
710 |
+
|
711 |
+
assert_array_equal(ma, d)
|
712 |
+
|
713 |
+
@pytest.mark.parametrize("mmap_mode", ["r", None])
|
714 |
+
def test_huge_header(tmpdir, mmap_mode):
|
715 |
+
f = os.path.join(tmpdir, f'large_header.npy')
|
716 |
+
arr = np.array(1, dtype="i,"*10000+"i")
|
717 |
+
|
718 |
+
with pytest.warns(UserWarning, match=".*format 2.0"):
|
719 |
+
np.save(f, arr)
|
720 |
+
|
721 |
+
with pytest.raises(ValueError, match="Header.*large"):
|
722 |
+
np.load(f, mmap_mode=mmap_mode)
|
723 |
+
|
724 |
+
with pytest.raises(ValueError, match="Header.*large"):
|
725 |
+
np.load(f, mmap_mode=mmap_mode, max_header_size=20000)
|
726 |
+
|
727 |
+
res = np.load(f, mmap_mode=mmap_mode, allow_pickle=True)
|
728 |
+
assert_array_equal(res, arr)
|
729 |
+
|
730 |
+
res = np.load(f, mmap_mode=mmap_mode, max_header_size=180000)
|
731 |
+
assert_array_equal(res, arr)
|
732 |
+
|
733 |
+
def test_huge_header_npz(tmpdir):
|
734 |
+
f = os.path.join(tmpdir, f'large_header.npz')
|
735 |
+
arr = np.array(1, dtype="i,"*10000+"i")
|
736 |
+
|
737 |
+
with pytest.warns(UserWarning, match=".*format 2.0"):
|
738 |
+
np.savez(f, arr=arr)
|
739 |
+
|
740 |
+
# Only getting the array from the file actually reads it
|
741 |
+
with pytest.raises(ValueError, match="Header.*large"):
|
742 |
+
np.load(f)["arr"]
|
743 |
+
|
744 |
+
with pytest.raises(ValueError, match="Header.*large"):
|
745 |
+
np.load(f, max_header_size=20000)["arr"]
|
746 |
+
|
747 |
+
res = np.load(f, allow_pickle=True)["arr"]
|
748 |
+
assert_array_equal(res, arr)
|
749 |
+
|
750 |
+
res = np.load(f, max_header_size=180000)["arr"]
|
751 |
+
assert_array_equal(res, arr)
|
752 |
+
|
753 |
+
def test_write_version():
|
754 |
+
f = BytesIO()
|
755 |
+
arr = np.arange(1)
|
756 |
+
# These should pass.
|
757 |
+
format.write_array(f, arr, version=(1, 0))
|
758 |
+
format.write_array(f, arr)
|
759 |
+
|
760 |
+
format.write_array(f, arr, version=None)
|
761 |
+
format.write_array(f, arr)
|
762 |
+
|
763 |
+
format.write_array(f, arr, version=(2, 0))
|
764 |
+
format.write_array(f, arr)
|
765 |
+
|
766 |
+
# These should all fail.
|
767 |
+
bad_versions = [
|
768 |
+
(1, 1),
|
769 |
+
(0, 0),
|
770 |
+
(0, 1),
|
771 |
+
(2, 2),
|
772 |
+
(255, 255),
|
773 |
+
]
|
774 |
+
for version in bad_versions:
|
775 |
+
with assert_raises_regex(ValueError,
|
776 |
+
'we only support format version.*'):
|
777 |
+
format.write_array(f, arr, version=version)
|
778 |
+
|
779 |
+
|
780 |
+
bad_version_magic = [
|
781 |
+
b'\x93NUMPY\x01\x01',
|
782 |
+
b'\x93NUMPY\x00\x00',
|
783 |
+
b'\x93NUMPY\x00\x01',
|
784 |
+
b'\x93NUMPY\x02\x00',
|
785 |
+
b'\x93NUMPY\x02\x02',
|
786 |
+
b'\x93NUMPY\xff\xff',
|
787 |
+
]
|
788 |
+
malformed_magic = [
|
789 |
+
b'\x92NUMPY\x01\x00',
|
790 |
+
b'\x00NUMPY\x01\x00',
|
791 |
+
b'\x93numpy\x01\x00',
|
792 |
+
b'\x93MATLB\x01\x00',
|
793 |
+
b'\x93NUMPY\x01',
|
794 |
+
b'\x93NUMPY',
|
795 |
+
b'',
|
796 |
+
]
|
797 |
+
|
798 |
+
def test_read_magic():
|
799 |
+
s1 = BytesIO()
|
800 |
+
s2 = BytesIO()
|
801 |
+
|
802 |
+
arr = np.ones((3, 6), dtype=float)
|
803 |
+
|
804 |
+
format.write_array(s1, arr, version=(1, 0))
|
805 |
+
format.write_array(s2, arr, version=(2, 0))
|
806 |
+
|
807 |
+
s1.seek(0)
|
808 |
+
s2.seek(0)
|
809 |
+
|
810 |
+
version1 = format.read_magic(s1)
|
811 |
+
version2 = format.read_magic(s2)
|
812 |
+
|
813 |
+
assert_(version1 == (1, 0))
|
814 |
+
assert_(version2 == (2, 0))
|
815 |
+
|
816 |
+
assert_(s1.tell() == format.MAGIC_LEN)
|
817 |
+
assert_(s2.tell() == format.MAGIC_LEN)
|
818 |
+
|
819 |
+
def test_read_magic_bad_magic():
|
820 |
+
for magic in malformed_magic:
|
821 |
+
f = BytesIO(magic)
|
822 |
+
assert_raises(ValueError, format.read_array, f)
|
823 |
+
|
824 |
+
|
825 |
+
def test_read_version_1_0_bad_magic():
|
826 |
+
for magic in bad_version_magic + malformed_magic:
|
827 |
+
f = BytesIO(magic)
|
828 |
+
assert_raises(ValueError, format.read_array, f)
|
829 |
+
|
830 |
+
|
831 |
+
def test_bad_magic_args():
|
832 |
+
assert_raises(ValueError, format.magic, -1, 1)
|
833 |
+
assert_raises(ValueError, format.magic, 256, 1)
|
834 |
+
assert_raises(ValueError, format.magic, 1, -1)
|
835 |
+
assert_raises(ValueError, format.magic, 1, 256)
|
836 |
+
|
837 |
+
|
838 |
+
def test_large_header():
|
839 |
+
s = BytesIO()
|
840 |
+
d = {'shape': tuple(), 'fortran_order': False, 'descr': '<i8'}
|
841 |
+
format.write_array_header_1_0(s, d)
|
842 |
+
|
843 |
+
s = BytesIO()
|
844 |
+
d['descr'] = [('x'*256*256, '<i8')]
|
845 |
+
assert_raises(ValueError, format.write_array_header_1_0, s, d)
|
846 |
+
|
847 |
+
|
848 |
+
def test_read_array_header_1_0():
|
849 |
+
s = BytesIO()
|
850 |
+
|
851 |
+
arr = np.ones((3, 6), dtype=float)
|
852 |
+
format.write_array(s, arr, version=(1, 0))
|
853 |
+
|
854 |
+
s.seek(format.MAGIC_LEN)
|
855 |
+
shape, fortran, dtype = format.read_array_header_1_0(s)
|
856 |
+
|
857 |
+
assert_(s.tell() % format.ARRAY_ALIGN == 0)
|
858 |
+
assert_((shape, fortran, dtype) == ((3, 6), False, float))
|
859 |
+
|
860 |
+
|
861 |
+
def test_read_array_header_2_0():
|
862 |
+
s = BytesIO()
|
863 |
+
|
864 |
+
arr = np.ones((3, 6), dtype=float)
|
865 |
+
format.write_array(s, arr, version=(2, 0))
|
866 |
+
|
867 |
+
s.seek(format.MAGIC_LEN)
|
868 |
+
shape, fortran, dtype = format.read_array_header_2_0(s)
|
869 |
+
|
870 |
+
assert_(s.tell() % format.ARRAY_ALIGN == 0)
|
871 |
+
assert_((shape, fortran, dtype) == ((3, 6), False, float))
|
872 |
+
|
873 |
+
|
874 |
+
def test_bad_header():
|
875 |
+
# header of length less than 2 should fail
|
876 |
+
s = BytesIO()
|
877 |
+
assert_raises(ValueError, format.read_array_header_1_0, s)
|
878 |
+
s = BytesIO(b'1')
|
879 |
+
assert_raises(ValueError, format.read_array_header_1_0, s)
|
880 |
+
|
881 |
+
# header shorter than indicated size should fail
|
882 |
+
s = BytesIO(b'\x01\x00')
|
883 |
+
assert_raises(ValueError, format.read_array_header_1_0, s)
|
884 |
+
|
885 |
+
# headers without the exact keys required should fail
|
886 |
+
# d = {"shape": (1, 2),
|
887 |
+
# "descr": "x"}
|
888 |
+
s = BytesIO(
|
889 |
+
b"\x93NUMPY\x01\x006\x00{'descr': 'x', 'shape': (1, 2), }" +
|
890 |
+
b" \n"
|
891 |
+
)
|
892 |
+
assert_raises(ValueError, format.read_array_header_1_0, s)
|
893 |
+
|
894 |
+
d = {"shape": (1, 2),
|
895 |
+
"fortran_order": False,
|
896 |
+
"descr": "x",
|
897 |
+
"extrakey": -1}
|
898 |
+
s = BytesIO()
|
899 |
+
format.write_array_header_1_0(s, d)
|
900 |
+
assert_raises(ValueError, format.read_array_header_1_0, s)
|
901 |
+
|
902 |
+
|
903 |
+
def test_large_file_support(tmpdir):
|
904 |
+
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
|
905 |
+
pytest.skip("Unknown if Windows has sparse filesystems")
|
906 |
+
# try creating a large sparse file
|
907 |
+
tf_name = os.path.join(tmpdir, 'sparse_file')
|
908 |
+
try:
|
909 |
+
# seek past end would work too, but linux truncate somewhat
|
910 |
+
# increases the chances that we have a sparse filesystem and can
|
911 |
+
# avoid actually writing 5GB
|
912 |
+
import subprocess as sp
|
913 |
+
sp.check_call(["truncate", "-s", "5368709120", tf_name])
|
914 |
+
except Exception:
|
915 |
+
pytest.skip("Could not create 5GB large file")
|
916 |
+
# write a small array to the end
|
917 |
+
with open(tf_name, "wb") as f:
|
918 |
+
f.seek(5368709120)
|
919 |
+
d = np.arange(5)
|
920 |
+
np.save(f, d)
|
921 |
+
# read it back
|
922 |
+
with open(tf_name, "rb") as f:
|
923 |
+
f.seek(5368709120)
|
924 |
+
r = np.load(f)
|
925 |
+
assert_array_equal(r, d)
|
926 |
+
|
927 |
+
|
928 |
+
@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy")
|
929 |
+
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
|
930 |
+
reason="test requires 64-bit system")
|
931 |
+
@pytest.mark.slow
|
932 |
+
@requires_memory(free_bytes=2 * 2**30)
|
933 |
+
def test_large_archive(tmpdir):
|
934 |
+
# Regression test for product of saving arrays with dimensions of array
|
935 |
+
# having a product that doesn't fit in int32. See gh-7598 for details.
|
936 |
+
shape = (2**30, 2)
|
937 |
+
try:
|
938 |
+
a = np.empty(shape, dtype=np.uint8)
|
939 |
+
except MemoryError:
|
940 |
+
pytest.skip("Could not create large file")
|
941 |
+
|
942 |
+
fname = os.path.join(tmpdir, "large_archive")
|
943 |
+
|
944 |
+
with open(fname, "wb") as f:
|
945 |
+
np.savez(f, arr=a)
|
946 |
+
|
947 |
+
del a
|
948 |
+
|
949 |
+
with open(fname, "rb") as f:
|
950 |
+
new_a = np.load(f)["arr"]
|
951 |
+
|
952 |
+
assert new_a.shape == shape
|
953 |
+
|
954 |
+
|
955 |
+
def test_empty_npz(tmpdir):
|
956 |
+
# Test for gh-9989
|
957 |
+
fname = os.path.join(tmpdir, "nothing.npz")
|
958 |
+
np.savez(fname)
|
959 |
+
with np.load(fname) as nps:
|
960 |
+
pass
|
961 |
+
|
962 |
+
|
963 |
+
def test_unicode_field_names(tmpdir):
|
964 |
+
# gh-7391
|
965 |
+
arr = np.array([
|
966 |
+
(1, 3),
|
967 |
+
(1, 2),
|
968 |
+
(1, 3),
|
969 |
+
(1, 2)
|
970 |
+
], dtype=[
|
971 |
+
('int', int),
|
972 |
+
('\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
|
973 |
+
])
|
974 |
+
fname = os.path.join(tmpdir, "unicode.npy")
|
975 |
+
with open(fname, 'wb') as f:
|
976 |
+
format.write_array(f, arr, version=(3, 0))
|
977 |
+
with open(fname, 'rb') as f:
|
978 |
+
arr2 = format.read_array(f)
|
979 |
+
assert_array_equal(arr, arr2)
|
980 |
+
|
981 |
+
# notifies the user that 3.0 is selected
|
982 |
+
with open(fname, 'wb') as f:
|
983 |
+
with assert_warns(UserWarning):
|
984 |
+
format.write_array(f, arr, version=None)
|
985 |
+
|
986 |
+
def test_header_growth_axis():
|
987 |
+
for is_fortran_array, dtype_space, expected_header_length in [
|
988 |
+
[False, 22, 128], [False, 23, 192], [True, 23, 128], [True, 24, 192]
|
989 |
+
]:
|
990 |
+
for size in [10**i for i in range(format.GROWTH_AXIS_MAX_DIGITS)]:
|
991 |
+
fp = BytesIO()
|
992 |
+
format.write_array_header_1_0(fp, {
|
993 |
+
'shape': (2, size) if is_fortran_array else (size, 2),
|
994 |
+
'fortran_order': is_fortran_array,
|
995 |
+
'descr': np.dtype([(' '*dtype_space, int)])
|
996 |
+
})
|
997 |
+
|
998 |
+
assert len(fp.getvalue()) == expected_header_length
|
999 |
+
|
1000 |
+
@pytest.mark.parametrize('dt, fail', [
|
1001 |
+
(np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
|
1002 |
+
metadata={'some': 'stuff'})]}), True),
|
1003 |
+
(np.dtype(int, metadata={'some': 'stuff'}), False),
|
1004 |
+
(np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
|
1005 |
+
# recursive: metadata on the field of a dtype
|
1006 |
+
(np.dtype({'names': ['a', 'b'], 'formats': [
|
1007 |
+
float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
|
1008 |
+
]}), False)
|
1009 |
+
])
|
1010 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
1011 |
+
reason="PyPy bug in error formatting")
|
1012 |
+
def test_metadata_dtype(dt, fail):
|
1013 |
+
# gh-14142
|
1014 |
+
arr = np.ones(10, dtype=dt)
|
1015 |
+
buf = BytesIO()
|
1016 |
+
with assert_warns(UserWarning):
|
1017 |
+
np.save(buf, arr)
|
1018 |
+
buf.seek(0)
|
1019 |
+
if fail:
|
1020 |
+
with assert_raises(ValueError):
|
1021 |
+
np.load(buf)
|
1022 |
+
else:
|
1023 |
+
arr2 = np.load(buf)
|
1024 |
+
# BUG: assert_array_equal does not check metadata
|
1025 |
+
from numpy.lib.utils import drop_metadata
|
1026 |
+
assert_array_equal(arr, arr2)
|
1027 |
+
assert drop_metadata(arr.dtype) is not arr.dtype
|
1028 |
+
assert drop_metadata(arr2.dtype) is arr2.dtype
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_function_base.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_histograms.py
ADDED
@@ -0,0 +1,816 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
|
4 |
+
from numpy.testing import (
|
5 |
+
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
6 |
+
assert_array_almost_equal, assert_raises, assert_allclose,
|
7 |
+
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
|
8 |
+
)
|
9 |
+
from numpy.testing._private.utils import requires_memory
|
10 |
+
import pytest
|
11 |
+
|
12 |
+
|
13 |
+
class TestHistogram:
|
14 |
+
|
15 |
+
def setup_method(self):
|
16 |
+
pass
|
17 |
+
|
18 |
+
def teardown_method(self):
|
19 |
+
pass
|
20 |
+
|
21 |
+
def test_simple(self):
|
22 |
+
n = 100
|
23 |
+
v = np.random.rand(n)
|
24 |
+
(a, b) = histogram(v)
|
25 |
+
# check if the sum of the bins equals the number of samples
|
26 |
+
assert_equal(np.sum(a, axis=0), n)
|
27 |
+
# check that the bin counts are evenly spaced when the data is from
|
28 |
+
# a linear function
|
29 |
+
(a, b) = histogram(np.linspace(0, 10, 100))
|
30 |
+
assert_array_equal(a, 10)
|
31 |
+
|
32 |
+
def test_one_bin(self):
|
33 |
+
# Ticket 632
|
34 |
+
hist, edges = histogram([1, 2, 3, 4], [1, 2])
|
35 |
+
assert_array_equal(hist, [2, ])
|
36 |
+
assert_array_equal(edges, [1, 2])
|
37 |
+
assert_raises(ValueError, histogram, [1, 2], bins=0)
|
38 |
+
h, e = histogram([1, 2], bins=1)
|
39 |
+
assert_equal(h, np.array([2]))
|
40 |
+
assert_allclose(e, np.array([1., 2.]))
|
41 |
+
|
42 |
+
def test_density(self):
|
43 |
+
# Check that the integral of the density equals 1.
|
44 |
+
n = 100
|
45 |
+
v = np.random.rand(n)
|
46 |
+
a, b = histogram(v, density=True)
|
47 |
+
area = np.sum(a * np.diff(b))
|
48 |
+
assert_almost_equal(area, 1)
|
49 |
+
|
50 |
+
# Check with non-constant bin widths
|
51 |
+
v = np.arange(10)
|
52 |
+
bins = [0, 1, 3, 6, 10]
|
53 |
+
a, b = histogram(v, bins, density=True)
|
54 |
+
assert_array_equal(a, .1)
|
55 |
+
assert_equal(np.sum(a * np.diff(b)), 1)
|
56 |
+
|
57 |
+
# Test that passing False works too
|
58 |
+
a, b = histogram(v, bins, density=False)
|
59 |
+
assert_array_equal(a, [1, 2, 3, 4])
|
60 |
+
|
61 |
+
# Variable bin widths are especially useful to deal with
|
62 |
+
# infinities.
|
63 |
+
v = np.arange(10)
|
64 |
+
bins = [0, 1, 3, 6, np.inf]
|
65 |
+
a, b = histogram(v, bins, density=True)
|
66 |
+
assert_array_equal(a, [.1, .1, .1, 0.])
|
67 |
+
|
68 |
+
# Taken from a bug report from N. Becker on the numpy-discussion
|
69 |
+
# mailing list Aug. 6, 2010.
|
70 |
+
counts, dmy = np.histogram(
|
71 |
+
[1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
|
72 |
+
assert_equal(counts, [.25, 0])
|
73 |
+
|
74 |
+
def test_outliers(self):
|
75 |
+
# Check that outliers are not tallied
|
76 |
+
a = np.arange(10) + .5
|
77 |
+
|
78 |
+
# Lower outliers
|
79 |
+
h, b = histogram(a, range=[0, 9])
|
80 |
+
assert_equal(h.sum(), 9)
|
81 |
+
|
82 |
+
# Upper outliers
|
83 |
+
h, b = histogram(a, range=[1, 10])
|
84 |
+
assert_equal(h.sum(), 9)
|
85 |
+
|
86 |
+
# Normalization
|
87 |
+
h, b = histogram(a, range=[1, 9], density=True)
|
88 |
+
assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
|
89 |
+
|
90 |
+
# Weights
|
91 |
+
w = np.arange(10) + .5
|
92 |
+
h, b = histogram(a, range=[1, 9], weights=w, density=True)
|
93 |
+
assert_equal((h * np.diff(b)).sum(), 1)
|
94 |
+
|
95 |
+
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
|
96 |
+
assert_equal(h, w[1:-1])
|
97 |
+
|
98 |
+
def test_arr_weights_mismatch(self):
|
99 |
+
a = np.arange(10) + .5
|
100 |
+
w = np.arange(11) + .5
|
101 |
+
with assert_raises_regex(ValueError, "same shape as"):
|
102 |
+
h, b = histogram(a, range=[1, 9], weights=w, density=True)
|
103 |
+
|
104 |
+
|
105 |
+
def test_type(self):
|
106 |
+
# Check the type of the returned histogram
|
107 |
+
a = np.arange(10) + .5
|
108 |
+
h, b = histogram(a)
|
109 |
+
assert_(np.issubdtype(h.dtype, np.integer))
|
110 |
+
|
111 |
+
h, b = histogram(a, density=True)
|
112 |
+
assert_(np.issubdtype(h.dtype, np.floating))
|
113 |
+
|
114 |
+
h, b = histogram(a, weights=np.ones(10, int))
|
115 |
+
assert_(np.issubdtype(h.dtype, np.integer))
|
116 |
+
|
117 |
+
h, b = histogram(a, weights=np.ones(10, float))
|
118 |
+
assert_(np.issubdtype(h.dtype, np.floating))
|
119 |
+
|
120 |
+
def test_f32_rounding(self):
|
121 |
+
# gh-4799, check that the rounding of the edges works with float32
|
122 |
+
x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
|
123 |
+
y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
|
124 |
+
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
|
125 |
+
assert_equal(counts_hist.sum(), 3.)
|
126 |
+
|
127 |
+
def test_bool_conversion(self):
|
128 |
+
# gh-12107
|
129 |
+
# Reference integer histogram
|
130 |
+
a = np.array([1, 1, 0], dtype=np.uint8)
|
131 |
+
int_hist, int_edges = np.histogram(a)
|
132 |
+
|
133 |
+
# Should raise an warning on booleans
|
134 |
+
# Ensure that the histograms are equivalent, need to suppress
|
135 |
+
# the warnings to get the actual outputs
|
136 |
+
with suppress_warnings() as sup:
|
137 |
+
rec = sup.record(RuntimeWarning, 'Converting input from .*')
|
138 |
+
hist, edges = np.histogram([True, True, False])
|
139 |
+
# A warning should be issued
|
140 |
+
assert_equal(len(rec), 1)
|
141 |
+
assert_array_equal(hist, int_hist)
|
142 |
+
assert_array_equal(edges, int_edges)
|
143 |
+
|
144 |
+
def test_weights(self):
|
145 |
+
v = np.random.rand(100)
|
146 |
+
w = np.ones(100) * 5
|
147 |
+
a, b = histogram(v)
|
148 |
+
na, nb = histogram(v, density=True)
|
149 |
+
wa, wb = histogram(v, weights=w)
|
150 |
+
nwa, nwb = histogram(v, weights=w, density=True)
|
151 |
+
assert_array_almost_equal(a * 5, wa)
|
152 |
+
assert_array_almost_equal(na, nwa)
|
153 |
+
|
154 |
+
# Check weights are properly applied.
|
155 |
+
v = np.linspace(0, 10, 10)
|
156 |
+
w = np.concatenate((np.zeros(5), np.ones(5)))
|
157 |
+
wa, wb = histogram(v, bins=np.arange(11), weights=w)
|
158 |
+
assert_array_almost_equal(wa, w)
|
159 |
+
|
160 |
+
# Check with integer weights
|
161 |
+
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
|
162 |
+
assert_array_equal(wa, [4, 5, 0, 1])
|
163 |
+
wa, wb = histogram(
|
164 |
+
[1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
|
165 |
+
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
|
166 |
+
|
167 |
+
# Check weights with non-uniform bin widths
|
168 |
+
a, b = histogram(
|
169 |
+
np.arange(9), [0, 1, 3, 6, 10],
|
170 |
+
weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
|
171 |
+
assert_almost_equal(a, [.2, .1, .1, .075])
|
172 |
+
|
173 |
+
def test_exotic_weights(self):
|
174 |
+
|
175 |
+
# Test the use of weights that are not integer or floats, but e.g.
|
176 |
+
# complex numbers or object types.
|
177 |
+
|
178 |
+
# Complex weights
|
179 |
+
values = np.array([1.3, 2.5, 2.3])
|
180 |
+
weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
|
181 |
+
|
182 |
+
# Check with custom bins
|
183 |
+
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
|
184 |
+
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
|
185 |
+
|
186 |
+
# Check with even bins
|
187 |
+
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
|
188 |
+
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
|
189 |
+
|
190 |
+
# Decimal weights
|
191 |
+
from decimal import Decimal
|
192 |
+
values = np.array([1.3, 2.5, 2.3])
|
193 |
+
weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
|
194 |
+
|
195 |
+
# Check with custom bins
|
196 |
+
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
|
197 |
+
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
|
198 |
+
|
199 |
+
# Check with even bins
|
200 |
+
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
|
201 |
+
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
|
202 |
+
|
203 |
+
def test_no_side_effects(self):
|
204 |
+
# This is a regression test that ensures that values passed to
|
205 |
+
# ``histogram`` are unchanged.
|
206 |
+
values = np.array([1.3, 2.5, 2.3])
|
207 |
+
np.histogram(values, range=[-10, 10], bins=100)
|
208 |
+
assert_array_almost_equal(values, [1.3, 2.5, 2.3])
|
209 |
+
|
210 |
+
def test_empty(self):
|
211 |
+
a, b = histogram([], bins=([0, 1]))
|
212 |
+
assert_array_equal(a, np.array([0]))
|
213 |
+
assert_array_equal(b, np.array([0, 1]))
|
214 |
+
|
215 |
+
def test_error_binnum_type (self):
|
216 |
+
# Tests if right Error is raised if bins argument is float
|
217 |
+
vals = np.linspace(0.0, 1.0, num=100)
|
218 |
+
histogram(vals, 5)
|
219 |
+
assert_raises(TypeError, histogram, vals, 2.4)
|
220 |
+
|
221 |
+
def test_finite_range(self):
|
222 |
+
# Normal ranges should be fine
|
223 |
+
vals = np.linspace(0.0, 1.0, num=100)
|
224 |
+
histogram(vals, range=[0.25,0.75])
|
225 |
+
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
|
226 |
+
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
|
227 |
+
|
228 |
+
def test_invalid_range(self):
|
229 |
+
# start of range must be < end of range
|
230 |
+
vals = np.linspace(0.0, 1.0, num=100)
|
231 |
+
with assert_raises_regex(ValueError, "max must be larger than"):
|
232 |
+
np.histogram(vals, range=[0.1, 0.01])
|
233 |
+
|
234 |
+
def test_bin_edge_cases(self):
|
235 |
+
# Ensure that floating-point computations correctly place edge cases.
|
236 |
+
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
|
237 |
+
hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
|
238 |
+
mask = hist > 0
|
239 |
+
left_edges = edges[:-1][mask]
|
240 |
+
right_edges = edges[1:][mask]
|
241 |
+
for x, left, right in zip(arr, left_edges, right_edges):
|
242 |
+
assert_(x >= left)
|
243 |
+
assert_(x < right)
|
244 |
+
|
245 |
+
def test_last_bin_inclusive_range(self):
|
246 |
+
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
|
247 |
+
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
|
248 |
+
assert_equal(hist[-1], 1)
|
249 |
+
|
250 |
+
def test_bin_array_dims(self):
|
251 |
+
# gracefully handle bins object > 1 dimension
|
252 |
+
vals = np.linspace(0.0, 1.0, num=100)
|
253 |
+
bins = np.array([[0, 0.5], [0.6, 1.0]])
|
254 |
+
with assert_raises_regex(ValueError, "must be 1d"):
|
255 |
+
np.histogram(vals, bins=bins)
|
256 |
+
|
257 |
+
def test_unsigned_monotonicity_check(self):
|
258 |
+
# Ensures ValueError is raised if bins not increasing monotonically
|
259 |
+
# when bins contain unsigned values (see #9222)
|
260 |
+
arr = np.array([2])
|
261 |
+
bins = np.array([1, 3, 1], dtype='uint64')
|
262 |
+
with assert_raises(ValueError):
|
263 |
+
hist, edges = np.histogram(arr, bins=bins)
|
264 |
+
|
265 |
+
def test_object_array_of_0d(self):
|
266 |
+
# gh-7864
|
267 |
+
assert_raises(ValueError,
|
268 |
+
histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
|
269 |
+
assert_raises(ValueError,
|
270 |
+
histogram, [np.array(0.4) for i in range(10)] + [np.inf])
|
271 |
+
|
272 |
+
# these should not crash
|
273 |
+
np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
|
274 |
+
np.histogram([np.array(0.5) for i in range(10)] + [.5])
|
275 |
+
|
276 |
+
def test_some_nan_values(self):
|
277 |
+
# gh-7503
|
278 |
+
one_nan = np.array([0, 1, np.nan])
|
279 |
+
all_nan = np.array([np.nan, np.nan])
|
280 |
+
|
281 |
+
# the internal comparisons with NaN give warnings
|
282 |
+
sup = suppress_warnings()
|
283 |
+
sup.filter(RuntimeWarning)
|
284 |
+
with sup:
|
285 |
+
# can't infer range with nan
|
286 |
+
assert_raises(ValueError, histogram, one_nan, bins='auto')
|
287 |
+
assert_raises(ValueError, histogram, all_nan, bins='auto')
|
288 |
+
|
289 |
+
# explicit range solves the problem
|
290 |
+
h, b = histogram(one_nan, bins='auto', range=(0, 1))
|
291 |
+
assert_equal(h.sum(), 2) # nan is not counted
|
292 |
+
h, b = histogram(all_nan, bins='auto', range=(0, 1))
|
293 |
+
assert_equal(h.sum(), 0) # nan is not counted
|
294 |
+
|
295 |
+
# as does an explicit set of bins
|
296 |
+
h, b = histogram(one_nan, bins=[0, 1])
|
297 |
+
assert_equal(h.sum(), 2) # nan is not counted
|
298 |
+
h, b = histogram(all_nan, bins=[0, 1])
|
299 |
+
assert_equal(h.sum(), 0) # nan is not counted
|
300 |
+
|
301 |
+
def test_datetime(self):
    """Histogram datetime64/timedelta64 data against explicit bin edges.

    The counts must match the equivalent integer-offset histogram, and
    the returned edges must preserve the input dtype.
    """
    begin = np.datetime64('2000-01-01', 'D')
    offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
    bins = np.array([0, 2, 7, 20])
    dates = begin + offsets
    date_bins = begin + bins

    td = np.dtype('timedelta64[D]')

    # Results should be the same for integer offsets or datetime values.
    # For now, only explicit bins are supported, since linspace does not
    # work on datetimes or timedeltas
    d_count, d_edge = histogram(dates, bins=date_bins)
    t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
    i_count, i_edge = histogram(offsets, bins=bins)

    assert_equal(d_count, i_count)
    assert_equal(t_count, i_count)

    # Edges agree once converted back to integer day offsets.
    assert_equal((d_edge - begin).astype(int), i_edge)
    assert_equal(t_edge.astype(int), i_edge)

    # Edge dtype follows the input dtype.
    assert_equal(d_edge.dtype, dates.dtype)
    assert_equal(t_edge.dtype, td)
|
325 |
+
|
326 |
+
def do_signed_overflow_bounds(self, dtype):
    """Bin samples sitting near the extremes of a signed integer dtype.

    The two samples lie 4 away from each signed bound; computing the
    midpoint edge must not overflow the dtype.
    """
    nbits = 8 * np.dtype(dtype).itemsize - 1
    lo, hi = -2**nbits + 4, 2**nbits - 4
    counts, edges = histogram(np.array([lo, hi], dtype=dtype), bins=2)
    # The midpoint edge is exactly zero; one sample lands in each bin.
    assert_equal(edges, [lo, 0, hi])
    assert_equal(counts, [1, 1])
|
332 |
+
|
333 |
+
def test_signed_overflow_bounds(self):
    """Run the overflow-bounds check for every signed integer width."""
    for dtype in (np.byte, np.short, np.intc, np.int_, np.longlong):
        self.do_signed_overflow_bounds(dtype)
|
339 |
+
|
340 |
+
def do_precision_lower_bound(self, float_small, float_large):
    """Lower range bound just above the sample in a higher precision.

    When casting the bound down to ``float_small`` rounds it back onto
    the sample, histogram must still place the sample (it previously
    crashed). Also renames the local away from the builtin ``range``.
    """
    eps = np.finfo(float_large).eps
    sample = np.array([1.0], float_small)
    hist_range = np.array([1.0 + eps, 2.0], float_large)

    # Only meaningful when the downcast collapses the bound onto the sample.
    if hist_range.astype(float_small)[0] != 1:
        return

    # previously crashed
    count, x_loc = np.histogram(sample, bins=1, range=hist_range)
    assert_equal(count, [1])

    # gh-10322: the edge dtype currently follows the sample array - may change.
    assert_equal(x_loc.dtype, float_small)
|
356 |
+
|
357 |
+
def do_precision_upper_bound(self, float_small, float_large):
    """Upper range bound just below the sample in a higher precision.

    Mirror image of the lower-bound check: the downcast rounds the upper
    bound up onto the sample, which previously crashed.
    """
    eps = np.finfo(float_large).eps
    sample = np.array([1.0], float_small)
    hist_range = np.array([0.0, 1.0 - eps], float_large)

    # Only meaningful when the downcast collapses the bound onto the sample.
    if hist_range.astype(float_small)[-1] != 1:
        return

    # previously crashed
    count, x_loc = np.histogram(sample, bins=1, range=hist_range)
    assert_equal(count, [1])

    # gh-10322: the edge dtype currently follows the sample array - may change.
    assert_equal(x_loc.dtype, float_small)
|
373 |
+
|
374 |
+
def do_precision(self, float_small, float_large):
    """Exercise both the lower- and upper-bound precision checks."""
    self.do_precision_lower_bound(float_small, float_large)
    self.do_precision_upper_bound(float_small, float_large)
|
377 |
+
|
378 |
+
def test_precision(self):
    """Run the precision checks for every small/large float pairing."""
    # not looping results in a useful stack trace upon failure
    self.do_precision(np.half, np.single)
    self.do_precision(np.half, np.double)
    self.do_precision(np.half, np.longdouble)
    self.do_precision(np.single, np.double)
    self.do_precision(np.single, np.longdouble)
    self.do_precision(np.double, np.longdouble)
|
386 |
+
|
387 |
+
def test_histogram_bin_edges(self):
|
388 |
+
hist, e = histogram([1, 2, 3, 4], [1, 2])
|
389 |
+
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
|
390 |
+
assert_array_equal(edges, e)
|
391 |
+
|
392 |
+
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
|
393 |
+
hist, e = histogram(arr, bins=30, range=(-0.5, 5))
|
394 |
+
edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
|
395 |
+
assert_array_equal(edges, e)
|
396 |
+
|
397 |
+
hist, e = histogram(arr, bins='auto', range=(0, 1))
|
398 |
+
edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
|
399 |
+
assert_array_equal(edges, e)
|
400 |
+
|
401 |
+
# @requires_memory(free_bytes=1e10)
# @pytest.mark.slow
@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
def test_big_arrays(self):
    """Smoke-test histogramdd on a very large sample (skipped in CI).

    Only asserts that a plain tuple is returned; the interesting part is
    surviving the ~2.4 GB allocation without error.
    """
    sample = np.zeros([100000000, 3])
    xbins = 400
    ybins = 400
    zbins = np.arange(16000)
    hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
    # histogramdd returns a plain (hist, edges) tuple.
    assert_equal(type(hist), type((1, 2)))
|
411 |
+
|
412 |
+
def test_gh_23110(self):
|
413 |
+
hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),
|
414 |
+
bins=2,
|
415 |
+
range=(-1e-308, -2e-313))
|
416 |
+
expected_hist = np.array([1, 0])
|
417 |
+
assert_array_equal(hist, expected_hist)
|
418 |
+
|
419 |
+
|
420 |
+
class TestHistogramOptimBinNums:
    """
    Provide test coverage when using provided estimators for optimal number of
    bins
    """

    def test_empty(self):
        # Every estimator must fall back to a single [0, 1] bin with a
        # zero count when given empty data.
        estimator_list = ['fd', 'scott', 'rice', 'sturges',
                          'doane', 'sqrt', 'auto', 'stone']
        # check it can deal with empty data
        for estimator in estimator_list:
            a, b = histogram([], bins=estimator)
            assert_array_equal(a, np.array([0]))
            assert_array_equal(b, np.array([0, 1]))

    def test_simple(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). All test values have been precomputed and the values
        shouldn't change
        """
        # Some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}

        for testlen, expectedResults in basic_test.items():
            # Create some sort of non uniform data to test with
            # (2 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x = np.concatenate((x1, x2))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator)
                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))

    def test_small(self):
        """
        Smaller datasets have the potential to cause issues with the data
        adaptive methods, especially the FD method. All bin numbers have been
        precalculated.
        """
        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                         'doane': 1, 'sqrt': 1, 'stone': 1},
                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
                         'doane': 1, 'sqrt': 2, 'stone': 1},
                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
                         'doane': 3, 'sqrt': 2, 'stone': 1}}

        for testlen, expectedResults in small_dat.items():
            testdat = np.arange(testlen)
            for estimator, expbins in expectedResults.items():
                a, b = np.histogram(testdat, estimator)
                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))

    def test_incorrect_methods(self):
        """
        Check a Value Error is thrown when an unknown string is passed in
        """
        check_list = ['mad', 'freeman', 'histograms', 'IQR']
        for estimator in check_list:
            assert_raises(ValueError, histogram, [1, 2, 3], estimator)

    def test_novariance(self):
        """
        Check that methods handle no variance in data
        Primarily for Scott and FD as the SD and IQR are both 0 in this case
        """
        novar_dataset = np.ones(100)
        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}

        for estimator, numbins in novar_resultdict.items():
            a, b = np.histogram(novar_dataset, estimator)
            assert_equal(len(a), numbins, err_msg="{0} estimator, "
                         "No Variance test".format(estimator))

    def test_limited_variance(self):
        """
        Check when IQR is 0, but variance exists, we return the sturges value
        and not the fd value.
        """
        lim_var_data = np.ones(1000)
        lim_var_data[:3] = 0
        lim_var_data[-4:] = 100

        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
        assert_equal(edges_auto, np.linspace(0, 100, 12))

        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
        assert_equal(edges_fd, np.array([0, 100]))

        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
        assert_equal(edges_sturges, np.linspace(0, 100, 12))

    def test_outlier(self):
        """
        Check the FD, Scott and Doane with outliers.

        The FD estimates a smaller binwidth since it's less affected by
        outliers. Since the range is so (artificially) large, this means more
        bins, most of which will be empty, but the data of interest usually is
        unaffected. The Scott estimator is more affected and returns fewer bins,
        despite most of the variance being in one area of the data. The Doane
        estimator lies somewhere between the other two.
        """
        xcenter = np.linspace(-10, 10, 50)
        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))

        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}

        for estimator, numbins in outlier_resultdict.items():
            a, b = np.histogram(outlier_dataset, estimator)
            assert_equal(len(a), numbins)

    def test_scott_vs_stone(self):
        """Verify that Scott's rule and Stone's rule converges for normally distributed data"""

        def nbins_ratio(seed, size):
            # Fraction of total bins contributed by the 'stone' estimator
            # for one seeded normal sample.
            rng = np.random.RandomState(seed)
            x = rng.normal(loc=0, scale=2, size=size)
            a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
            return a / (a + b)

        ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
              for seed in range(10)]

        # the average difference between the two methods decreases as the dataset size increases.
        avg = abs(np.mean(ll, axis=0) - 0.5)
        assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)

    def test_simple_range(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). Adding in a 3rd mixture that will then be
        completely ignored. All test values have been precomputed and
        the shouldn't change.
        """
        # some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {
                      50: {'fd': 8, 'scott': 8, 'rice': 15,
                           'sturges': 14, 'auto': 14, 'stone': 8},
                      500: {'fd': 15, 'scott': 16, 'rice': 32,
                            'sturges': 20, 'auto': 20, 'stone': 80},
                      5000: {'fd': 33, 'scott': 33, 'rice': 69,
                             'sturges': 27, 'auto': 33, 'stone': 80}
                     }

        for testlen, expectedResults in basic_test.items():
            # create some sort of non uniform data to test with
            # (3 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x3 = np.linspace(-100, -50, testlen)
            x = np.hstack((x1, x2, x3))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator, range = (-20, 20))
                msg = "For the {0} estimator".format(estimator)
                msg += " with datasize of {0}".format(testlen)
                assert_equal(len(a), numbins, err_msg=msg)

    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
                                      'stone', 'rice', 'sturges'])
    def test_signed_integer_data(self, bins):
        # Regression test for gh-14379.
        a = np.array([-2, 0, 127], dtype=np.int8)
        hist, edges = np.histogram(a, bins=bins)
        hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
        # Estimates must not depend on the integer width of the input.
        assert_array_equal(hist, hist32)
        assert_array_equal(edges, edges32)

    def test_simple_weighted(self):
        """
        Check that weighted data raises a TypeError
        """
        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
        for estimator in estimator_list:
            assert_raises(TypeError, histogram, [1, 2, 3],
                          estimator, weights=[1, 2, 3])
|
606 |
+
|
607 |
+
|
608 |
+
class TestHistogramdd:
    # Tests for the N-dimensional histogram function ``np.histogramdd``.

    def test_simple(self):
        # Six 3-d points counted into small explicit grids.
        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3),
                               range=[[-1, 1], [0, 3], [0, 3]])
        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
        assert_array_equal(H, answer)

        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, density=True)
        assert_(np.all(H == answer / 12.))

        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4),
                               range=[[-1, 1], [0, 3], [0, 4]],
                               density=True)
        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)
        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
        H, edges = histogramdd(
            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = np.array([[[0, 0], [0, 0], [0, 0]],
                           [[0, 1], [0, 0], [1, 0]],
                           [[0, 1], [0, 0], [0, 0]],
                           [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)

        # Diagonal samples land on the histogram's diagonal.
        Z = np.zeros((5, 5, 5))
        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
        assert_array_equal(H, Z)

    def test_shape_3d(self):
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = np.random.rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_shape_4d(self):
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))

        r = np.random.rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_weights(self):
        # Uniform weights match unweighted counts; weights scale linearly.
        v = np.random.rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, density=True)
        w_hist, edges = histogramdd(v, weights=np.ones(100))
        assert_array_equal(w_hist, hist)
        # Density output is invariant to a constant weight factor.
        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)

    def test_identical_samples(self):
        # Zero-range data must still produce sensible, centered edges.
        x = np.zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))

    def test_empty(self):
        # Empty samples give all-zero histograms of the expected shape.
        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, np.array([[0.]]))
        a, b = np.histogramdd([[], [], []], bins=2)
        assert_array_max_ulp(a, np.zeros((2, 2, 2)))

    def test_bins_errors(self):
        # There are two ways to specify bins. Check for the right errors
        # when mixing those.
        x = np.arange(8).reshape(2, 4)
        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))

    def test_inf_edges(self):
        # Test using +/-inf bin edges works. See #1788.
        with np.errstate(invalid='ignore'):
            x = np.arange(6).reshape(3, 2)
            expected = np.array([[1, 0], [0, 1], [0, 1]])
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
            assert_allclose(h, expected)

    def test_rightmost_binedge(self):
        # Test event very close to rightmost binedge. See Github issue #4266
        x = [0.9999999995]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # Exactly on the last edge: counted in the final bin.
        x = [1.0]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # Just past the last edge: not counted at all.
        x = [1.0000000001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)
        x = [1.0001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)

    def test_finite_range(self):
        # Non-finite range bounds must raise ValueError.
        vals = np.random.random((100, 3))
        histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])

    def test_equal_edges(self):
        """ Test that adjacent entries in an edge array can be equal """
        x = np.array([0, 1, 2])
        y = np.array([0, 1, 2])
        x_edges = np.array([0, 2, 2])
        y_edges = 1
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        hist_expected = np.array([
            [2.],
            [1.],  # x == 2 falls in the final bin
        ])
        assert_equal(hist, hist_expected)

    def test_edge_dtype(self):
        """ Test that if an edge array is input, its type is preserved """
        x = np.array([0, 10, 20])
        y = x / 10
        x_edges = np.array([0, 5, 15, 20])
        y_edges = x_edges / 10
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(edges[0].dtype, x_edges.dtype)
        assert_equal(edges[1].dtype, y_edges.dtype)

    def test_large_integers(self):
        big = 2**60  # Too large to represent with a full precision float

        x = np.array([0], np.int64)
        x_edges = np.array([-1, +1], np.int64)
        y = big + x
        y_edges = big + x_edges

        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(hist[0, 0], 1)

    def test_density_non_uniform_2d(self):
        # Defines the following grid:
        #
        #    0 2     8
        #   0+-+-----+
        #    + |     +
        #    + |     +
        #   6+-+-----+
        #   8+-+-----+
        x_edges = np.array([0, 2, 8])
        y_edges = np.array([0, 6, 8])
        relative_areas = np.array([
            [3, 9],
            [1, 3]])

        # ensure the number of points in each region is proportional to its area
        x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
        y = np.array([7] + [1]*3 + [7]*3 + [1]*9)

        # sanity check that the above worked as intended
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
        assert_equal(hist, relative_areas)

        # resulting histogram should be uniform, since counts and areas are proportional
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
        assert_equal(hist, 1 / (8*8))

    def test_density_non_uniform_1d(self):
        # compare to histogram to show the results are the same
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_index_tricks.py
ADDED
@@ -0,0 +1,551 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import (
|
5 |
+
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
6 |
+
assert_array_almost_equal, assert_raises, assert_raises_regex,
|
7 |
+
)
|
8 |
+
from numpy.lib.index_tricks import (
|
9 |
+
mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
|
10 |
+
index_exp, ndindex, r_, s_, ix_
|
11 |
+
)
|
12 |
+
|
13 |
+
|
14 |
+
class TestRavelUnravelIndex:
|
15 |
+
def test_basic(self):
    # Round-trip between flat indices and multi-index coordinates.
    assert_equal(np.unravel_index(2, (2, 2)), (1, 0))

    # test that new shape argument works properly
    assert_equal(np.unravel_index(indices=2,
                                  shape=(2, 2)),
                 (1, 0))

    # test that an invalid second keyword argument
    # is properly handled, including the old name `dims`.
    # NOTE: `hape` and `ims` below are deliberately misspelled keywords.
    with assert_raises(TypeError):
        np.unravel_index(indices=2, hape=(2, 2))

    with assert_raises(TypeError):
        np.unravel_index(2, hape=(2, 2))

    with assert_raises(TypeError):
        np.unravel_index(254, ims=(17, 94))

    with assert_raises(TypeError):
        np.unravel_index(254, dims=(17, 94))

    assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
    assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
    assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
    # Out-of-range or non-integer indices must raise.
    assert_raises(ValueError, np.unravel_index, -1, (2, 2))
    assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
    assert_raises(ValueError, np.unravel_index, 4, (2, 2))
    assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
    assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
    assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
    assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
    assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))

    assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
    assert_equal(
        np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)

    # Array-valued indices, C and Fortran order, and clip/wrap modes.
    arr = np.array([[3, 6, 6], [4, 5, 1]])
    assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
    assert_equal(
        np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
    assert_equal(
        np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
    assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
                 [12, 13, 13])
    assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)

    assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
                 [[3, 6, 6], [4, 5, 1]])
    assert_equal(
        np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
        [[3, 6, 6], [4, 5, 1]])
    assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
|
69 |
+
|
70 |
+
def test_empty_indices(self):
|
71 |
+
msg1 = 'indices must be integral: the provided empty sequence was'
|
72 |
+
msg2 = 'only int indices permitted'
|
73 |
+
assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
|
74 |
+
assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
|
75 |
+
assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
|
76 |
+
(10, 3, 5))
|
77 |
+
assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)),
|
78 |
+
[[], [], []])
|
79 |
+
assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
|
80 |
+
(10, 3))
|
81 |
+
assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
|
82 |
+
(10, 3))
|
83 |
+
assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
|
84 |
+
(np.array([]), np.array([])), (5, 3))
|
85 |
+
assert_equal(np.ravel_multi_index(
|
86 |
+
(np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
|
87 |
+
assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
|
88 |
+
(5, 3)), [])
|
89 |
+
|
90 |
+
def test_big_indices(self):
|
91 |
+
# ravel_multi_index for big indices (issue #7546)
|
92 |
+
if np.intp == np.int64:
|
93 |
+
arr = ([1, 29], [3, 5], [3, 117], [19, 2],
|
94 |
+
[2379, 1284], [2, 2], [0, 1])
|
95 |
+
assert_equal(
|
96 |
+
np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
|
97 |
+
[5627771580, 117259570957])
|
98 |
+
|
99 |
+
# test unravel_index for big indices (issue #9538)
|
100 |
+
assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1))
|
101 |
+
|
102 |
+
# test overflow checking for too big array (issue #7546)
|
103 |
+
dummy_arr = ([0],[0])
|
104 |
+
half_max = np.iinfo(np.intp).max // 2
|
105 |
+
assert_equal(
|
106 |
+
np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
|
107 |
+
assert_raises(ValueError,
|
108 |
+
np.ravel_multi_index, dummy_arr, (half_max+1, 2))
|
109 |
+
assert_equal(
|
110 |
+
np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
|
111 |
+
assert_raises(ValueError,
|
112 |
+
np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F')
|
113 |
+
|
114 |
+
def test_dtypes(self):
|
115 |
+
# Test with different data types
|
116 |
+
for dtype in [np.int16, np.uint16, np.int32,
|
117 |
+
np.uint32, np.int64, np.uint64]:
|
118 |
+
coords = np.array(
|
119 |
+
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
|
120 |
+
shape = (5, 8)
|
121 |
+
uncoords = 8*coords[0]+coords[1]
|
122 |
+
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
|
123 |
+
assert_equal(coords, np.unravel_index(uncoords, shape))
|
124 |
+
uncoords = coords[0]+5*coords[1]
|
125 |
+
assert_equal(
|
126 |
+
np.ravel_multi_index(coords, shape, order='F'), uncoords)
|
127 |
+
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
|
128 |
+
|
129 |
+
coords = np.array(
|
130 |
+
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
|
131 |
+
dtype=dtype)
|
132 |
+
shape = (5, 8, 10)
|
133 |
+
uncoords = 10*(8*coords[0]+coords[1])+coords[2]
|
134 |
+
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
|
135 |
+
assert_equal(coords, np.unravel_index(uncoords, shape))
|
136 |
+
uncoords = coords[0]+5*(coords[1]+8*coords[2])
|
137 |
+
assert_equal(
|
138 |
+
np.ravel_multi_index(coords, shape, order='F'), uncoords)
|
139 |
+
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
|
140 |
+
|
141 |
+
def test_clipmodes(self):
|
142 |
+
# Test clipmodes
|
143 |
+
assert_equal(
|
144 |
+
np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
|
145 |
+
np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
|
146 |
+
assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
|
147 |
+
mode=(
|
148 |
+
'wrap', 'raise', 'clip', 'raise')),
|
149 |
+
np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
|
150 |
+
assert_raises(
|
151 |
+
ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
|
152 |
+
|
153 |
+
def test_writeability(self):
|
154 |
+
# See gh-7269
|
155 |
+
x, y = np.unravel_index([1, 2, 3], (4, 5))
|
156 |
+
assert_(x.flags.writeable)
|
157 |
+
assert_(y.flags.writeable)
|
158 |
+
|
159 |
+
def test_0d(self):
|
160 |
+
# gh-580
|
161 |
+
x = np.unravel_index(0, ())
|
162 |
+
assert_equal(x, ())
|
163 |
+
|
164 |
+
assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
|
165 |
+
assert_raises_regex(
|
166 |
+
ValueError, "out of bounds", np.unravel_index, [1], ())
|
167 |
+
|
168 |
+
@pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
|
169 |
+
def test_empty_array_ravel(self, mode):
|
170 |
+
res = np.ravel_multi_index(
|
171 |
+
np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
|
172 |
+
assert(res.shape == (0,))
|
173 |
+
|
174 |
+
with assert_raises(ValueError):
|
175 |
+
np.ravel_multi_index(
|
176 |
+
np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
|
177 |
+
|
178 |
+
def test_empty_array_unravel(self):
|
179 |
+
res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
|
180 |
+
# res is a tuple of three empty arrays
|
181 |
+
assert(len(res) == 3)
|
182 |
+
assert(all(a.shape == (0,) for a in res))
|
183 |
+
|
184 |
+
with assert_raises(ValueError):
|
185 |
+
np.unravel_index([1], (2, 1, 0))
|
186 |
+
|
187 |
+
class TestGrid:
|
188 |
+
def test_basic(self):
|
189 |
+
a = mgrid[-1:1:10j]
|
190 |
+
b = mgrid[-1:1:0.1]
|
191 |
+
assert_(a.shape == (10,))
|
192 |
+
assert_(b.shape == (20,))
|
193 |
+
assert_(a[0] == -1)
|
194 |
+
assert_almost_equal(a[-1], 1)
|
195 |
+
assert_(b[0] == -1)
|
196 |
+
assert_almost_equal(b[1]-b[0], 0.1, 11)
|
197 |
+
assert_almost_equal(b[-1], b[0]+19*0.1, 11)
|
198 |
+
assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
|
199 |
+
|
200 |
+
def test_linspace_equivalence(self):
|
201 |
+
y, st = np.linspace(2, 10, retstep=True)
|
202 |
+
assert_almost_equal(st, 8/49.0)
|
203 |
+
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
|
204 |
+
|
205 |
+
def test_nd(self):
|
206 |
+
c = mgrid[-1:1:10j, -2:2:10j]
|
207 |
+
d = mgrid[-1:1:0.1, -2:2:0.2]
|
208 |
+
assert_(c.shape == (2, 10, 10))
|
209 |
+
assert_(d.shape == (2, 20, 20))
|
210 |
+
assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
|
211 |
+
assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
|
212 |
+
assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
|
213 |
+
assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
|
214 |
+
assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
|
215 |
+
0.1*np.ones(20, 'd'), 11)
|
216 |
+
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
|
217 |
+
0.2*np.ones(20, 'd'), 11)
|
218 |
+
|
219 |
+
def test_sparse(self):
|
220 |
+
grid_full = mgrid[-1:1:10j, -2:2:10j]
|
221 |
+
grid_sparse = ogrid[-1:1:10j, -2:2:10j]
|
222 |
+
|
223 |
+
# sparse grids can be made dense by broadcasting
|
224 |
+
grid_broadcast = np.broadcast_arrays(*grid_sparse)
|
225 |
+
for f, b in zip(grid_full, grid_broadcast):
|
226 |
+
assert_equal(f, b)
|
227 |
+
|
228 |
+
@pytest.mark.parametrize("start, stop, step, expected", [
|
229 |
+
(None, 10, 10j, (200, 10)),
|
230 |
+
(-10, 20, None, (1800, 30)),
|
231 |
+
])
|
232 |
+
def test_mgrid_size_none_handling(self, start, stop, step, expected):
|
233 |
+
# regression test None value handling for
|
234 |
+
# start and step values used by mgrid;
|
235 |
+
# internally, this aims to cover previously
|
236 |
+
# unexplored code paths in nd_grid()
|
237 |
+
grid = mgrid[start:stop:step, start:stop:step]
|
238 |
+
# need a smaller grid to explore one of the
|
239 |
+
# untested code paths
|
240 |
+
grid_small = mgrid[start:stop:step]
|
241 |
+
assert_equal(grid.size, expected[0])
|
242 |
+
assert_equal(grid_small.size, expected[1])
|
243 |
+
|
244 |
+
def test_accepts_npfloating(self):
|
245 |
+
# regression test for #16466
|
246 |
+
grid64 = mgrid[0.1:0.33:0.1, ]
|
247 |
+
grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]
|
248 |
+
assert_(grid32.dtype == np.float64)
|
249 |
+
assert_array_almost_equal(grid64, grid32)
|
250 |
+
|
251 |
+
# different code path for single slice
|
252 |
+
grid64 = mgrid[0.1:0.33:0.1]
|
253 |
+
grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]
|
254 |
+
assert_(grid32.dtype == np.float64)
|
255 |
+
assert_array_almost_equal(grid64, grid32)
|
256 |
+
|
257 |
+
def test_accepts_longdouble(self):
|
258 |
+
# regression tests for #16945
|
259 |
+
grid64 = mgrid[0.1:0.33:0.1, ]
|
260 |
+
grid128 = mgrid[
|
261 |
+
np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),
|
262 |
+
]
|
263 |
+
assert_(grid128.dtype == np.longdouble)
|
264 |
+
assert_array_almost_equal(grid64, grid128)
|
265 |
+
|
266 |
+
grid128c_a = mgrid[0:np.longdouble(1):3.4j]
|
267 |
+
grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]
|
268 |
+
assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)
|
269 |
+
assert_array_equal(grid128c_a, grid128c_b[0])
|
270 |
+
|
271 |
+
# different code path for single slice
|
272 |
+
grid64 = mgrid[0.1:0.33:0.1]
|
273 |
+
grid128 = mgrid[
|
274 |
+
np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)
|
275 |
+
]
|
276 |
+
assert_(grid128.dtype == np.longdouble)
|
277 |
+
assert_array_almost_equal(grid64, grid128)
|
278 |
+
|
279 |
+
def test_accepts_npcomplexfloating(self):
|
280 |
+
# Related to #16466
|
281 |
+
assert_array_almost_equal(
|
282 |
+
mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]
|
283 |
+
)
|
284 |
+
|
285 |
+
# different code path for single slice
|
286 |
+
assert_array_almost_equal(
|
287 |
+
mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]
|
288 |
+
)
|
289 |
+
|
290 |
+
# Related to #16945
|
291 |
+
grid64_a = mgrid[0.1:0.3:3.3j]
|
292 |
+
grid64_b = mgrid[0.1:0.3:3.3j, ][0]
|
293 |
+
assert_(grid64_a.dtype == grid64_b.dtype == np.float64)
|
294 |
+
assert_array_equal(grid64_a, grid64_b)
|
295 |
+
|
296 |
+
grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]
|
297 |
+
grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]
|
298 |
+
assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)
|
299 |
+
assert_array_equal(grid64_a, grid64_b)
|
300 |
+
|
301 |
+
|
302 |
+
class TestConcatenator:
    """Tests for the ``r_`` concatenation helper."""

    def test_1d(self):
        """r_ concatenates scalars and 1-d arrays end to end."""
        assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
        ones = np.ones(5)
        joined = r_[ones, 0, 0, ones]
        assert_array_equal(joined, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])

    def test_mixed_type(self):
        """A float scalar with an integer range promotes to float64."""
        out = r_[10.1, 1:10]
        assert_(out.dtype == 'f8')

    def test_more_mixed_type(self):
        """Float scalars plus integer arrays still promote to float64."""
        out = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
        assert_(out.dtype == 'f8')

    def test_complex_step(self):
        """A complex step means number-of-points (gh-12262, gh-16466)."""
        assert_(r_[0:36:100j].shape == (100,))
        # np.complex64 steps must behave like Python complex.
        assert_(r_[0:36:np.complex64(100j)].shape == (100,))

    def test_2d(self):
        """A leading string directive selects the concatenation axis."""
        left = np.random.rand(5, 5)
        right = np.random.rand(5, 5)
        by_cols = r_['1', left, right]  # append columns
        assert_(by_cols.shape == (5, 10))
        assert_array_equal(by_cols[:, :5], left)
        assert_array_equal(by_cols[:, 5:], right)
        by_rows = r_[left, right]
        assert_(by_rows.shape == (10, 5))
        assert_array_equal(by_rows[:5, :], left)
        assert_array_equal(by_rows[5:, :], right)

    def test_0d(self):
        """0-d arrays concatenate like scalars."""
        assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
        assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
        assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
|
342 |
+
|
343 |
+
|
344 |
+
class TestNdenumerate:
    """Tests for ``ndenumerate``."""

    def test_basic(self):
        """ndenumerate yields (index, value) pairs in C order."""
        mat = np.array([[1, 2], [3, 4]])
        pairs = list(ndenumerate(mat))
        assert_equal(pairs,
                     [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
|
349 |
+
|
350 |
+
|
351 |
+
class TestIndexExpression:
    """Tests for ``index_exp`` and ``s_`` slice builders."""

    def test_regression_1(self):
        """Slices built via s_/index_exp equal direct slicing (ticket #1196)."""
        arr = np.arange(2)
        assert_equal(arr[:-1], arr[s_[:-1]])
        assert_equal(arr[:-1], arr[index_exp[:-1]])

    def test_simple_1(self):
        """Mixed slice/fancy expressions round-trip through both builders."""
        arr = np.random.rand(4, 5, 6)
        assert_equal(arr[:, :3, [1, 2]], arr[index_exp[:, :3, [1, 2]]])
        assert_equal(arr[:, :3, [1, 2]], arr[s_[:, :3, [1, 2]]])
|
363 |
+
|
364 |
+
|
365 |
+
class TestIx_:
    """Tests for the open-mesh builder ``np.ix_``."""

    def test_regression_1(self):
        """Empty untyped inputs create indexing-dtype outputs (gh-5804)."""
        out, = np.ix_(range(0))
        assert_equal(out.dtype, np.intp)

        out, = np.ix_([])
        assert_equal(out.dtype, np.intp)

        # An explicitly typed empty input keeps its dtype.
        out, = np.ix_(np.array([], dtype=np.float32))
        assert_equal(out.dtype, np.float32)

    def test_shape_and_dtype(self):
        """Each output has its length on its own axis and 1 elsewhere."""
        sizes = (4, 5, 3, 2)
        # Exercise both plain sequences and ndarrays.
        for factory in (range, np.arange):
            opened = np.ix_(*[factory(sz) for sz in sizes])
            for axis, (arr, sz) in enumerate(zip(opened, sizes)):
                assert_equal(arr.shape[axis], sz)
                assert_(all(dim == 1
                            for j, dim in enumerate(arr.shape) if j != axis))
                assert_(np.issubdtype(arr.dtype, np.integer))

    def test_bool(self):
        """Boolean masks convert to their nonzero integer indices."""
        mask = [True, False, True, True]
        nz, = np.nonzero(mask)
        assert_equal(np.ix_(mask)[0], nz)

    def test_1d_only(self):
        """Multidimensional inputs are rejected."""
        assert_raises(ValueError, np.ix_, [[1, 2, 3], [4, 5, 6]])

    def test_repeated_input(self):
        """Passing the same vector twice must not mutate its shape."""
        n = 5
        vec = np.arange(n)
        opened = ix_(vec, vec)
        assert_equal(opened[0].shape, (n, 1))
        assert_equal(opened[1].shape, (1, n))
        # check that input shape is not modified
        assert_equal(vec.shape, (n,))
|
405 |
+
|
406 |
+
|
407 |
+
def test_c_():
    """np.c_ stacks its arguments column-wise, broadcasting scalars."""
    joined = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
    assert_equal(joined, [[1, 2, 3, 0, 0, 4, 5, 6]])
|
410 |
+
|
411 |
+
|
412 |
+
class TestFillDiagonal:
    """Tests for ``np.fill_diagonal`` on square, non-square and N-d arrays."""

    def test_basic(self):
        # Square case: every (i, i) entry becomes 5, everything else is
        # untouched.
        a = np.zeros((3, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5]])
        )

    def test_tall_matrix(self):
        # Tall matrix, wrap defaults to False: filling stops when the last
        # column is reached, leaving the remaining rows zero.
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0]])
        )

    def test_tall_matrix_wrap(self):
        # wrap=True: after each full diagonal (plus one blank row) the
        # filling restarts from column 0.
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5, True)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0]])
        )

    def test_wide_matrix(self):
        # Wide matrix: only the first min(rows, cols) entries are filled.
        a = np.zeros((3, 10), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
        )

    def test_operate_4d_array(self):
        # N-d case: only the "hyper-diagonal" entries (i, i, i, i) are set.
        a = np.zeros((3, 3, 3, 3), int)
        fill_diagonal(a, 4)
        i = np.array([0, 1, 2])
        assert_equal(np.where(a != 0), (i, i, i, i))

    def test_low_dim_handling(self):
        # raise error with low dimensionality
        a = np.zeros(3, int)
        with assert_raises_regex(ValueError, "at least 2-d"):
            fill_diagonal(a, 5)

    def test_hetero_shape_handling(self):
        # raise error with high dimensionality and
        # shape mismatch
        a = np.zeros((3,3,7,3), int)
        with assert_raises_regex(ValueError, "equal length"):
            fill_diagonal(a, 2)
|
481 |
+
|
482 |
+
|
483 |
+
def test_diag_indices():
    """diag_indices addresses the main diagonal of square (hyper)cubes."""
    idx = diag_indices(4)
    mat = np.array([[1, 2, 3, 4],
                    [5, 6, 7, 8],
                    [9, 10, 11, 12],
                    [13, 14, 15, 16]])
    mat[idx] = 100
    assert_array_equal(
        mat, np.array([[100, 2, 3, 4],
                       [5, 100, 7, 8],
                       [9, 10, 100, 12],
                       [13, 14, 15, 100]])
    )

    # Indices to manipulate the diagonal of a 3-d (2, 2, 2) array:
    idx3 = diag_indices(2, 3)

    # Setting that diagonal of a zeros array to 1 touches only (i, i, i).
    cube = np.zeros((2, 2, 2), int)
    cube[idx3] = 1
    assert_array_equal(
        cube, np.array([[[1, 0],
                         [0, 0]],
                        [[0, 0],
                         [0, 1]]])
    )
|
509 |
+
|
510 |
+
|
511 |
+
class TestDiagIndicesFrom:
    """Tests for ``diag_indices_from``."""

    def test_diag_indices_from(self):
        """Indices derived from a square array address its main diagonal."""
        square = np.random.random((4, 4))
        rows, cols = diag_indices_from(square)
        assert_array_equal(rows, np.arange(4))
        assert_array_equal(cols, np.arange(4))

    def test_error_small_input(self):
        """1-d inputs are rejected."""
        with assert_raises_regex(ValueError, "at least 2-d"):
            diag_indices_from(np.ones(7))

    def test_error_shape_mismatch(self):
        """All dimensions of the input must be equal."""
        with assert_raises_regex(ValueError, "equal length"):
            diag_indices_from(np.zeros((3, 3, 2, 3), int))
|
528 |
+
|
529 |
+
|
530 |
+
def test_ndindex():
    """ndindex iterates all indices of a shape, matching ndenumerate order."""
    expected = [ix for ix, _ in ndenumerate(np.zeros((1, 2, 3)))]
    assert_array_equal(list(ndindex(1, 2, 3)), expected)
    assert_array_equal(list(ndindex((1, 2, 3))), expected)

    # Scalar and tuple spellings agree.
    assert_array_equal(list(ndindex((3,))), list(ndindex(3)))

    # The size argument is optional: no args yields one empty index.
    assert_equal(list(ndindex()), [()])
    assert_equal(list(ndindex(())), [()])

    # A zero-length axis yields no indices at all.
    assert_equal(list(ndindex(*[0])), [])
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_io.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_loadtxt.py
ADDED
@@ -0,0 +1,1048 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests specific to `np.loadtxt` added during the move of loadtxt to be backed
|
3 |
+
by C code.
|
4 |
+
These tests complement those found in `test_io.py`.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import sys
|
8 |
+
import os
|
9 |
+
import pytest
|
10 |
+
from tempfile import NamedTemporaryFile, mkstemp
|
11 |
+
from io import StringIO
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
from numpy.ma.testutils import assert_equal
|
15 |
+
from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY
|
16 |
+
|
17 |
+
|
18 |
+
def test_scientific_notation():
    """Both 'e' and 'E' exponent markers are parsed identically."""
    data = StringIO(
        "1.0e-1,2.0E1,3.0\n"
        "4.0e-2,5.0E-1,6.0\n"
        "7.0e-3,8.0E1,9.0\n"
        "0.0e-4,1.0E-1,2.0"
    )
    expected = np.array(
        [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]]
    )
    assert_array_equal(np.loadtxt(data, delimiter=","), expected)
|
32 |
+
|
33 |
+
|
34 |
+
@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"])
def test_comment_multiple_chars(comment):
    """Multi-character comment tokens strip full-line and trailing comments."""
    template = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n"
    stream = StringIO(template.replace("#", comment))
    parsed = np.loadtxt(stream, delimiter=",", comments=comment)
    assert_equal(parsed, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]])
|
40 |
+
|
41 |
+
|
42 |
+
@pytest.fixture
def mixed_types_structured():
    """
    Fixture providing heterogeneous input data with a structured dtype, along
    with the associated structured array.
    """
    # Semicolon-delimited rows: uint16; float64; 7-byte string; int8.
    data = StringIO(
        (
            "1000;2.4;alpha;-34\n"
            "2000;3.1;beta;29\n"
            "3500;9.9;gamma;120\n"
            "4090;8.1;delta;0\n"
            "5001;4.4;epsilon;-99\n"
            "6543;7.8;omega;-1\n"
        )
    )
    dtype = np.dtype(
        [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
    )
    # The array a correct parse of ``data`` must produce.
    expected = np.array(
        [
            (1000, 2.4, "alpha", -34),
            (2000, 3.1, "beta", 29),
            (3500, 9.9, "gamma", 120),
            (4090, 8.1, "delta", 0),
            (5001, 4.4, "epsilon", -99),
            (6543, 7.8, "omega", -1)
        ],
        dtype=dtype
    )
    return data, dtype, expected
|
73 |
+
|
74 |
+
|
75 |
+
@pytest.mark.parametrize('skiprows', [0, 1, 2, 3])
|
76 |
+
def test_structured_dtype_and_skiprows_no_empty_lines(
|
77 |
+
skiprows, mixed_types_structured):
|
78 |
+
data, dtype, expected = mixed_types_structured
|
79 |
+
a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows)
|
80 |
+
assert_array_equal(a, expected[skiprows:])
|
81 |
+
|
82 |
+
|
83 |
+
def test_unpack_structured(mixed_types_structured):
|
84 |
+
data, dtype, expected = mixed_types_structured
|
85 |
+
|
86 |
+
a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True)
|
87 |
+
assert_array_equal(a, expected["f0"])
|
88 |
+
assert_array_equal(b, expected["f1"])
|
89 |
+
assert_array_equal(c, expected["f2"])
|
90 |
+
assert_array_equal(d, expected["f3"])
|
91 |
+
|
92 |
+
|
93 |
+
def test_structured_dtype_with_shape():
    """Subarray fields consume consecutive columns."""
    dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)])
    stream = StringIO("0,1,2,3\n6,7,8,9\n")
    expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype)
    assert_array_equal(np.loadtxt(stream, delimiter=",", dtype=dtype), expected)
|
98 |
+
|
99 |
+
|
100 |
+
def test_structured_dtype_with_multi_shape():
    """A multi-dimensional subarray field is filled row-major from columns."""
    dtype = np.dtype([("a", "u1", (2, 2))])
    stream = StringIO("0 1 2 3\n")
    expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype)
    assert_array_equal(np.loadtxt(stream, dtype=dtype), expected)
|
105 |
+
|
106 |
+
|
107 |
+
def test_nested_structured_subarray():
    """Nested structured subarray dtypes parse correctly (gh-16678)."""
    point = np.dtype([('x', float), ('y', float)])
    record = np.dtype([('code', int), ('points', point, (2,))])
    stream = StringIO("100,1,2,3,4\n200,5,6,7,8\n")
    expected = np.array(
        [
            (100, [(1., 2.), (3., 4.)]),
            (200, [(5., 6.), (7., 8.)]),
        ],
        dtype=record
    )
    assert_array_equal(np.loadtxt(stream, dtype=record, delimiter=","),
                       expected)
|
120 |
+
|
121 |
+
|
122 |
+
def test_structured_dtype_offsets():
    """Aligned structured dtypes (with padding offsets) load correctly."""
    # An aligned structured dtype will have additional padding.
    aligned = np.dtype("i1, i4, i1, i4, i1, i4", align=True)
    stream = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n")
    expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)],
                        dtype=aligned)
    assert_array_equal(np.loadtxt(stream, delimiter=",", dtype=aligned),
                       expected)
|
128 |
+
|
129 |
+
|
130 |
+
@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
def test_exception_negative_row_limits(param):
    """skiprows and max_rows should raise for negative parameters."""
    with pytest.raises(ValueError, match="argument must be nonnegative"):
        np.loadtxt("foo.bar", **{param: -3})
|
135 |
+
|
136 |
+
|
137 |
+
@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
def test_exception_noninteger_row_limits(param):
    """skiprows and max_rows must be integers, not floats."""
    with pytest.raises(TypeError, match="argument must be an integer"):
        np.loadtxt("foo.bar", **{param: 1.0})
|
141 |
+
|
142 |
+
|
143 |
+
@pytest.mark.parametrize(
    "data, shape",
    [
        ("1 2 3 4 5\n", (1, 5)),  # Single row
        ("1\n2\n3\n4\n5\n", (5, 1)),  # Single column
    ]
)
def test_ndmin_single_row_or_col(data, shape):
    """ndmin=2 promotes single-row/column input; lower ndmin stays 1-d."""
    flat = np.array([1, 2, 3, 4, 5])
    boxed = flat.reshape(shape)

    assert_array_equal(np.loadtxt(StringIO(data), dtype=int), flat)
    assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), flat)
    assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), flat)
    assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), boxed)
|
158 |
+
|
159 |
+
|
160 |
+
@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"])
def test_bad_ndmin(badval):
    """ndmin outside {0, 1, 2} is rejected before any file access."""
    with pytest.raises(ValueError, match="Illegal value of ndmin keyword"):
        np.loadtxt("foo.bar", ndmin=badval)
|
164 |
+
|
165 |
+
|
166 |
+
@pytest.mark.parametrize(
    "ws",
    (
        " ",  # space
        "\t",  # tab
        "\u2003",  # em
        "\u00A0",  # non-break
        "\u3000",  # ideographic space
    )
)
def test_blank_lines_spaces_delimit(ws):
    """delimiter=None: any whitespace delimits; whitespace-only lines skip."""
    stream = StringIO(
        f"1 2{ws}30\n\n{ws}\n"
        f"4 5 60{ws}\n {ws} \n"
        f"7 8 {ws} 90\n # comment\n"
        f"3 2 1"
    )
    # NOTE: It is unclear that the ` # comment` should succeed. Except
    # for delimiter=None, which should use any whitespace (and maybe
    # should just be implemented closer to Python
    expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
    assert_equal(
        np.loadtxt(stream, dtype=int, delimiter=None, comments="#"), expected
    )
|
190 |
+
|
191 |
+
|
192 |
+
def test_blank_lines_normal_delimiter():
    """Blank lines and comment lines are skipped with an explicit delimiter."""
    stream = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1')
    expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
    assert_equal(
        np.loadtxt(stream, dtype=int, delimiter=',', comments="#"), expected
    )
|
198 |
+
|
199 |
+
|
200 |
+
@pytest.mark.parametrize("dtype", (float, object))
def test_maxrows_no_blank_lines(dtype):
    """max_rows stops reading after the requested number of data rows."""
    stream = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0")
    result = np.loadtxt(stream, dtype=dtype, delimiter=",", max_rows=2)
    assert_equal(result.dtype, dtype)
    assert_equal(result,
                 np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype))
|
206 |
+
|
207 |
+
|
208 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
                    reason="PyPy bug in error formatting")
@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))
def test_exception_message_bad_values(dtype):
    """Conversion errors report the bad token and its row/column."""
    stream = StringIO("1,2\n3,XXX\n5,6")
    msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2"
    with pytest.raises(ValueError, match=msg):
        np.loadtxt(stream, dtype=dtype, delimiter=",")
|
216 |
+
|
217 |
+
|
218 |
+
def test_converters_negative_indices():
    """A converter keyed on -1 applies to the last column."""
    stream = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0')
    conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
    expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])
    result = np.loadtxt(
        stream, dtype=np.float64, delimiter=",", converters=conv,
        encoding=None
    )
    assert_equal(result, expected)
|
226 |
+
|
227 |
+
|
228 |
+
def test_converters_negative_indices_with_usecols():
    """Negative converter keys resolve against the selected usecols."""
    stream = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n')
    conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
    expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]])
    result = np.loadtxt(
        stream,
        dtype=np.float64,
        delimiter=",",
        converters=conv,
        usecols=[0, -1],
        encoding=None,
    )
    assert_equal(result, expected)

    # Second test with variable number of rows:
    result = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",",
                        usecols=[0, -1], converters={-1: (lambda x: -1)})
    assert_array_equal(result, [[0, -1], [0, -1]])
|
246 |
+
|
247 |
+
|
248 |
+
def test_ragged_error():
|
249 |
+
rows = ["1,2,3", "1,2,3", "4,3,2,1"]
|
250 |
+
with pytest.raises(ValueError,
|
251 |
+
match="the number of columns changed from 3 to 4 at row 3"):
|
252 |
+
np.loadtxt(rows, delimiter=",")
|
253 |
+
|
254 |
+
|
255 |
+
def test_ragged_usecols():
|
256 |
+
# usecols, and negative ones, work even with varying number of columns.
|
257 |
+
txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
|
258 |
+
expected = np.array([[0, 0], [0, 0], [0, 0]])
|
259 |
+
res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
|
260 |
+
assert_equal(res, expected)
|
261 |
+
|
262 |
+
txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n")
|
263 |
+
with pytest.raises(ValueError,
|
264 |
+
match="invalid column index -2 at row 2 with 1 columns"):
|
265 |
+
# There is no -2 column in the second row:
|
266 |
+
np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
|
267 |
+
|
268 |
+
|
269 |
+
def test_empty_usecols():
|
270 |
+
txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
|
271 |
+
res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[])
|
272 |
+
assert res.shape == (3,)
|
273 |
+
assert res.dtype == np.dtype([])
|
274 |
+
|
275 |
+
|
276 |
+
@pytest.mark.parametrize("c1", ["a", "の", "🫕"])
|
277 |
+
@pytest.mark.parametrize("c2", ["a", "の", "🫕"])
|
278 |
+
def test_large_unicode_characters(c1, c2):
|
279 |
+
# c1 and c2 span ascii, 16bit and 32bit range.
|
280 |
+
txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g")
|
281 |
+
res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",")
|
282 |
+
expected = np.array(
|
283 |
+
[f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")],
|
284 |
+
dtype=np.dtype('U12')
|
285 |
+
)
|
286 |
+
assert_equal(res, expected)
|
287 |
+
|
288 |
+
|
289 |
+
def test_unicode_with_converter():
|
290 |
+
txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n")
|
291 |
+
conv = {0: lambda s: s.upper()}
|
292 |
+
res = np.loadtxt(
|
293 |
+
txt,
|
294 |
+
dtype=np.dtype("U12"),
|
295 |
+
converters=conv,
|
296 |
+
delimiter=",",
|
297 |
+
encoding=None
|
298 |
+
)
|
299 |
+
expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']])
|
300 |
+
assert_equal(res, expected)
|
301 |
+
|
302 |
+
|
303 |
+
def test_converter_with_structured_dtype():
|
304 |
+
txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n')
|
305 |
+
dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')])
|
306 |
+
conv = {0: lambda s: int(10*float(s)), -1: lambda s: s.upper()}
|
307 |
+
res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv)
|
308 |
+
expected = np.array(
|
309 |
+
[(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt
|
310 |
+
)
|
311 |
+
assert_equal(res, expected)
|
312 |
+
|
313 |
+
|
314 |
+
def test_converter_with_unicode_dtype():
|
315 |
+
"""
|
316 |
+
With the default 'bytes' encoding, tokens are encoded prior to being
|
317 |
+
passed to the converter. This means that the output of the converter may
|
318 |
+
be bytes instead of unicode as expected by `read_rows`.
|
319 |
+
|
320 |
+
This test checks that outputs from the above scenario are properly decoded
|
321 |
+
prior to parsing by `read_rows`.
|
322 |
+
"""
|
323 |
+
txt = StringIO('abc,def\nrst,xyz')
|
324 |
+
conv = bytes.upper
|
325 |
+
res = np.loadtxt(
|
326 |
+
txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
|
327 |
+
expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
|
328 |
+
assert_equal(res, expected)
|
329 |
+
|
330 |
+
|
331 |
+
def test_read_huge_row():
|
332 |
+
row = "1.5, 2.5," * 50000
|
333 |
+
row = row[:-1] + "\n"
|
334 |
+
txt = StringIO(row * 2)
|
335 |
+
res = np.loadtxt(txt, delimiter=",", dtype=float)
|
336 |
+
assert_equal(res, np.tile([1.5, 2.5], (2, 50000)))
|
337 |
+
|
338 |
+
|
339 |
+
@pytest.mark.parametrize("dtype", "edfgFDG")
|
340 |
+
def test_huge_float(dtype):
|
341 |
+
# Covers a non-optimized path that is rarely taken:
|
342 |
+
field = "0" * 1000 + ".123456789"
|
343 |
+
dtype = np.dtype(dtype)
|
344 |
+
value = np.loadtxt([field], dtype=dtype)[()]
|
345 |
+
assert value == dtype.type("0.123456789")
|
346 |
+
|
347 |
+
|
348 |
+
@pytest.mark.parametrize(
|
349 |
+
("given_dtype", "expected_dtype"),
|
350 |
+
[
|
351 |
+
("S", np.dtype("S5")),
|
352 |
+
("U", np.dtype("U5")),
|
353 |
+
],
|
354 |
+
)
|
355 |
+
def test_string_no_length_given(given_dtype, expected_dtype):
|
356 |
+
"""
|
357 |
+
The given dtype is just 'S' or 'U' with no length. In these cases, the
|
358 |
+
length of the resulting dtype is determined by the longest string found
|
359 |
+
in the file.
|
360 |
+
"""
|
361 |
+
txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n")
|
362 |
+
res = np.loadtxt(txt, dtype=given_dtype, delimiter=",")
|
363 |
+
expected = np.array(
|
364 |
+
[['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype
|
365 |
+
)
|
366 |
+
assert_equal(res, expected)
|
367 |
+
assert_equal(res.dtype, expected_dtype)
|
368 |
+
|
369 |
+
|
370 |
+
def test_float_conversion():
|
371 |
+
"""
|
372 |
+
Some tests that the conversion to float64 works as accurately as the
|
373 |
+
Python built-in `float` function. In a naive version of the float parser,
|
374 |
+
these strings resulted in values that were off by an ULP or two.
|
375 |
+
"""
|
376 |
+
strings = [
|
377 |
+
'0.9999999999999999',
|
378 |
+
'9876543210.123456',
|
379 |
+
'5.43215432154321e+300',
|
380 |
+
'0.901',
|
381 |
+
'0.333',
|
382 |
+
]
|
383 |
+
txt = StringIO('\n'.join(strings))
|
384 |
+
res = np.loadtxt(txt)
|
385 |
+
expected = np.array([float(s) for s in strings])
|
386 |
+
assert_equal(res, expected)
|
387 |
+
|
388 |
+
|
389 |
+
def test_bool():
|
390 |
+
# Simple test for bool via integer
|
391 |
+
txt = StringIO("1, 0\n10, -1")
|
392 |
+
res = np.loadtxt(txt, dtype=bool, delimiter=",")
|
393 |
+
assert res.dtype == bool
|
394 |
+
assert_array_equal(res, [[True, False], [True, True]])
|
395 |
+
# Make sure we use only 1 and 0 on the byte level:
|
396 |
+
assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]])
|
397 |
+
|
398 |
+
|
399 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
400 |
+
reason="PyPy bug in error formatting")
|
401 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
|
402 |
+
@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
|
403 |
+
def test_integer_signs(dtype):
|
404 |
+
dtype = np.dtype(dtype)
|
405 |
+
assert np.loadtxt(["+2"], dtype=dtype) == 2
|
406 |
+
if dtype.kind == "u":
|
407 |
+
with pytest.raises(ValueError):
|
408 |
+
np.loadtxt(["-1\n"], dtype=dtype)
|
409 |
+
else:
|
410 |
+
assert np.loadtxt(["-2\n"], dtype=dtype) == -2
|
411 |
+
|
412 |
+
for sign in ["++", "+-", "--", "-+"]:
|
413 |
+
with pytest.raises(ValueError):
|
414 |
+
np.loadtxt([f"{sign}2\n"], dtype=dtype)
|
415 |
+
|
416 |
+
|
417 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
418 |
+
reason="PyPy bug in error formatting")
|
419 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
|
420 |
+
@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
|
421 |
+
def test_implicit_cast_float_to_int_fails(dtype):
|
422 |
+
txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6")
|
423 |
+
with pytest.raises(ValueError):
|
424 |
+
np.loadtxt(txt, dtype=dtype, delimiter=",")
|
425 |
+
|
426 |
+
@pytest.mark.parametrize("dtype", (np.complex64, np.complex128))
|
427 |
+
@pytest.mark.parametrize("with_parens", (False, True))
|
428 |
+
def test_complex_parsing(dtype, with_parens):
|
429 |
+
s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)"
|
430 |
+
if not with_parens:
|
431 |
+
s = s.replace("(", "").replace(")", "")
|
432 |
+
|
433 |
+
res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",")
|
434 |
+
expected = np.array(
|
435 |
+
[[1.0-2.5j, 3.75, 7-5j], [4.0, -1900j, 0]], dtype=dtype
|
436 |
+
)
|
437 |
+
assert_equal(res, expected)
|
438 |
+
|
439 |
+
|
440 |
+
def test_read_from_generator():
|
441 |
+
def gen():
|
442 |
+
for i in range(4):
|
443 |
+
yield f"{i},{2*i},{i**2}"
|
444 |
+
|
445 |
+
res = np.loadtxt(gen(), dtype=int, delimiter=",")
|
446 |
+
expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]])
|
447 |
+
assert_equal(res, expected)
|
448 |
+
|
449 |
+
|
450 |
+
def test_read_from_generator_multitype():
|
451 |
+
def gen():
|
452 |
+
for i in range(3):
|
453 |
+
yield f"{i} {i / 4}"
|
454 |
+
|
455 |
+
res = np.loadtxt(gen(), dtype="i, d", delimiter=" ")
|
456 |
+
expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d")
|
457 |
+
assert_equal(res, expected)
|
458 |
+
|
459 |
+
|
460 |
+
def test_read_from_bad_generator():
|
461 |
+
def gen():
|
462 |
+
for entry in ["1,2", b"3, 5", 12738]:
|
463 |
+
yield entry
|
464 |
+
|
465 |
+
with pytest.raises(
|
466 |
+
TypeError, match=r"non-string returned while reading data"):
|
467 |
+
np.loadtxt(gen(), dtype="i, i", delimiter=",")
|
468 |
+
|
469 |
+
|
470 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
471 |
+
def test_object_cleanup_on_read_error():
|
472 |
+
sentinel = object()
|
473 |
+
already_read = 0
|
474 |
+
|
475 |
+
def conv(x):
|
476 |
+
nonlocal already_read
|
477 |
+
if already_read > 4999:
|
478 |
+
raise ValueError("failed half-way through!")
|
479 |
+
already_read += 1
|
480 |
+
return sentinel
|
481 |
+
|
482 |
+
txt = StringIO("x\n" * 10000)
|
483 |
+
|
484 |
+
with pytest.raises(ValueError, match="at row 5000, column 1"):
|
485 |
+
np.loadtxt(txt, dtype=object, converters={0: conv})
|
486 |
+
|
487 |
+
assert sys.getrefcount(sentinel) == 2
|
488 |
+
|
489 |
+
|
490 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
491 |
+
reason="PyPy bug in error formatting")
|
492 |
+
def test_character_not_bytes_compatible():
|
493 |
+
"""Test exception when a character cannot be encoded as 'S'."""
|
494 |
+
data = StringIO("–") # == \u2013
|
495 |
+
with pytest.raises(ValueError):
|
496 |
+
np.loadtxt(data, dtype="S5")
|
497 |
+
|
498 |
+
|
499 |
+
@pytest.mark.parametrize("conv", (0, [float], ""))
|
500 |
+
def test_invalid_converter(conv):
|
501 |
+
msg = (
|
502 |
+
"converters must be a dictionary mapping columns to converter "
|
503 |
+
"functions or a single callable."
|
504 |
+
)
|
505 |
+
with pytest.raises(TypeError, match=msg):
|
506 |
+
np.loadtxt(StringIO("1 2\n3 4"), converters=conv)
|
507 |
+
|
508 |
+
|
509 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
510 |
+
reason="PyPy bug in error formatting")
|
511 |
+
def test_converters_dict_raises_non_integer_key():
|
512 |
+
with pytest.raises(TypeError, match="keys of the converters dict"):
|
513 |
+
np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int})
|
514 |
+
with pytest.raises(TypeError, match="keys of the converters dict"):
|
515 |
+
np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0)
|
516 |
+
|
517 |
+
|
518 |
+
@pytest.mark.parametrize("bad_col_ind", (3, -3))
|
519 |
+
def test_converters_dict_raises_non_col_key(bad_col_ind):
|
520 |
+
data = StringIO("1 2\n3 4")
|
521 |
+
with pytest.raises(ValueError, match="converter specified for column"):
|
522 |
+
np.loadtxt(data, converters={bad_col_ind: int})
|
523 |
+
|
524 |
+
|
525 |
+
def test_converters_dict_raises_val_not_callable():
|
526 |
+
with pytest.raises(TypeError,
|
527 |
+
match="values of the converters dictionary must be callable"):
|
528 |
+
np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1})
|
529 |
+
|
530 |
+
|
531 |
+
@pytest.mark.parametrize("q", ('"', "'", "`"))
|
532 |
+
def test_quoted_field(q):
|
533 |
+
txt = StringIO(
|
534 |
+
f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n"
|
535 |
+
)
|
536 |
+
dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
|
537 |
+
expected = np.array(
|
538 |
+
[("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
|
539 |
+
)
|
540 |
+
|
541 |
+
res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q)
|
542 |
+
assert_array_equal(res, expected)
|
543 |
+
|
544 |
+
|
545 |
+
@pytest.mark.parametrize("q", ('"', "'", "`"))
|
546 |
+
def test_quoted_field_with_whitepace_delimiter(q):
|
547 |
+
txt = StringIO(
|
548 |
+
f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"
|
549 |
+
)
|
550 |
+
dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
|
551 |
+
expected = np.array(
|
552 |
+
[("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
|
553 |
+
)
|
554 |
+
|
555 |
+
res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)
|
556 |
+
assert_array_equal(res, expected)
|
557 |
+
|
558 |
+
|
559 |
+
def test_quote_support_default():
|
560 |
+
"""Support for quoted fields is disabled by default."""
|
561 |
+
txt = StringIO('"lat,long", 45, 30\n')
|
562 |
+
dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
|
563 |
+
|
564 |
+
with pytest.raises(ValueError,
|
565 |
+
match="the dtype passed requires 3 columns but 4 were"):
|
566 |
+
np.loadtxt(txt, dtype=dtype, delimiter=",")
|
567 |
+
|
568 |
+
# Enable quoting support with non-None value for quotechar param
|
569 |
+
txt.seek(0)
|
570 |
+
expected = np.array([("lat,long", 45., 30.)], dtype=dtype)
|
571 |
+
|
572 |
+
res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
|
573 |
+
assert_array_equal(res, expected)
|
574 |
+
|
575 |
+
|
576 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
577 |
+
reason="PyPy bug in error formatting")
|
578 |
+
def test_quotechar_multichar_error():
|
579 |
+
txt = StringIO("1,2\n3,4")
|
580 |
+
msg = r".*must be a single unicode character or None"
|
581 |
+
with pytest.raises(TypeError, match=msg):
|
582 |
+
np.loadtxt(txt, delimiter=",", quotechar="''")
|
583 |
+
|
584 |
+
|
585 |
+
def test_comment_multichar_error_with_quote():
|
586 |
+
txt = StringIO("1,2\n3,4")
|
587 |
+
msg = (
|
588 |
+
"when multiple comments or a multi-character comment is given, "
|
589 |
+
"quotes are not supported."
|
590 |
+
)
|
591 |
+
with pytest.raises(ValueError, match=msg):
|
592 |
+
np.loadtxt(txt, delimiter=",", comments="123", quotechar='"')
|
593 |
+
with pytest.raises(ValueError, match=msg):
|
594 |
+
np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"')
|
595 |
+
|
596 |
+
# A single character string in a tuple is unpacked though:
|
597 |
+
res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'")
|
598 |
+
assert_equal(res, [[1, 2], [3, 4]])
|
599 |
+
|
600 |
+
|
601 |
+
def test_structured_dtype_with_quotes():
|
602 |
+
data = StringIO(
|
603 |
+
(
|
604 |
+
"1000;2.4;'alpha';-34\n"
|
605 |
+
"2000;3.1;'beta';29\n"
|
606 |
+
"3500;9.9;'gamma';120\n"
|
607 |
+
"4090;8.1;'delta';0\n"
|
608 |
+
"5001;4.4;'epsilon';-99\n"
|
609 |
+
"6543;7.8;'omega';-1\n"
|
610 |
+
)
|
611 |
+
)
|
612 |
+
dtype = np.dtype(
|
613 |
+
[('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
|
614 |
+
)
|
615 |
+
expected = np.array(
|
616 |
+
[
|
617 |
+
(1000, 2.4, "alpha", -34),
|
618 |
+
(2000, 3.1, "beta", 29),
|
619 |
+
(3500, 9.9, "gamma", 120),
|
620 |
+
(4090, 8.1, "delta", 0),
|
621 |
+
(5001, 4.4, "epsilon", -99),
|
622 |
+
(6543, 7.8, "omega", -1)
|
623 |
+
],
|
624 |
+
dtype=dtype
|
625 |
+
)
|
626 |
+
res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'")
|
627 |
+
assert_array_equal(res, expected)
|
628 |
+
|
629 |
+
|
630 |
+
def test_quoted_field_is_not_empty():
|
631 |
+
txt = StringIO('1\n\n"4"\n""')
|
632 |
+
expected = np.array(["1", "4", ""], dtype="U1")
|
633 |
+
res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
|
634 |
+
assert_equal(res, expected)
|
635 |
+
|
636 |
+
def test_quoted_field_is_not_empty_nonstrict():
|
637 |
+
# Same as test_quoted_field_is_not_empty but check that we are not strict
|
638 |
+
# about missing closing quote (this is the `csv.reader` default also)
|
639 |
+
txt = StringIO('1\n\n"4"\n"')
|
640 |
+
expected = np.array(["1", "4", ""], dtype="U1")
|
641 |
+
res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
|
642 |
+
assert_equal(res, expected)
|
643 |
+
|
644 |
+
def test_consecutive_quotechar_escaped():
|
645 |
+
txt = StringIO('"Hello, my name is ""Monty""!"')
|
646 |
+
expected = np.array('Hello, my name is "Monty"!', dtype="U40")
|
647 |
+
res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"')
|
648 |
+
assert_equal(res, expected)
|
649 |
+
|
650 |
+
|
651 |
+
@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n"))
|
652 |
+
@pytest.mark.parametrize("ndmin", (0, 1, 2))
|
653 |
+
@pytest.mark.parametrize("usecols", [None, (1, 2, 3)])
|
654 |
+
def test_warn_on_no_data(data, ndmin, usecols):
|
655 |
+
"""Check that a UserWarning is emitted when no data is read from input."""
|
656 |
+
if usecols is not None:
|
657 |
+
expected_shape = (0, 3)
|
658 |
+
elif ndmin == 2:
|
659 |
+
expected_shape = (0, 1) # guess a single column?!
|
660 |
+
else:
|
661 |
+
expected_shape = (0,)
|
662 |
+
|
663 |
+
txt = StringIO(data)
|
664 |
+
with pytest.warns(UserWarning, match="input contained no data"):
|
665 |
+
res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
|
666 |
+
assert res.shape == expected_shape
|
667 |
+
|
668 |
+
with NamedTemporaryFile(mode="w") as fh:
|
669 |
+
fh.write(data)
|
670 |
+
fh.seek(0)
|
671 |
+
with pytest.warns(UserWarning, match="input contained no data"):
|
672 |
+
res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
|
673 |
+
assert res.shape == expected_shape
|
674 |
+
|
675 |
+
@pytest.mark.parametrize("skiprows", (2, 3))
|
676 |
+
def test_warn_on_skipped_data(skiprows):
|
677 |
+
data = "1 2 3\n4 5 6"
|
678 |
+
txt = StringIO(data)
|
679 |
+
with pytest.warns(UserWarning, match="input contained no data"):
|
680 |
+
np.loadtxt(txt, skiprows=skiprows)
|
681 |
+
|
682 |
+
|
683 |
+
@pytest.mark.parametrize(["dtype", "value"], [
|
684 |
+
("i2", 0x0001), ("u2", 0x0001),
|
685 |
+
("i4", 0x00010203), ("u4", 0x00010203),
|
686 |
+
("i8", 0x0001020304050607), ("u8", 0x0001020304050607),
|
687 |
+
# The following values are constructed to lead to unique bytes:
|
688 |
+
("float16", 3.07e-05),
|
689 |
+
("float32", 9.2557e-41), ("complex64", 9.2557e-41+2.8622554e-29j),
|
690 |
+
("float64", -1.758571353180402e-24),
|
691 |
+
# Here and below, the repr side-steps a small loss of precision in
|
692 |
+
# complex `str` in PyPy (which is probably fine, as repr works):
|
693 |
+
("complex128", repr(5.406409232372729e-29-1.758571353180402e-24j)),
|
694 |
+
# Use integer values that fit into double. Everything else leads to
|
695 |
+
# problems due to longdoubles going via double and decimal strings
|
696 |
+
# causing rounding errors.
|
697 |
+
("longdouble", 0x01020304050607),
|
698 |
+
("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),
|
699 |
+
("U2", "\U00010203\U000a0b0c")])
|
700 |
+
@pytest.mark.parametrize("swap", [True, False])
|
701 |
+
def test_byteswapping_and_unaligned(dtype, value, swap):
|
702 |
+
# Try to create "interesting" values within the valid unicode range:
|
703 |
+
dtype = np.dtype(dtype)
|
704 |
+
data = [f"x,{value}\n"] # repr as PyPy `str` truncates some
|
705 |
+
if swap:
|
706 |
+
dtype = dtype.newbyteorder()
|
707 |
+
full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)
|
708 |
+
# The above ensures that the interesting "b" field is unaligned:
|
709 |
+
assert full_dt.fields["b"][1] == 1
|
710 |
+
res = np.loadtxt(data, dtype=full_dt, delimiter=",", encoding=None,
|
711 |
+
max_rows=1) # max-rows prevents over-allocation
|
712 |
+
assert res["b"] == dtype.type(value)
|
713 |
+
|
714 |
+
|
715 |
+
@pytest.mark.parametrize("dtype",
|
716 |
+
np.typecodes["AllInteger"] + "efdFD" + "?")
|
717 |
+
def test_unicode_whitespace_stripping(dtype):
|
718 |
+
# Test that all numeric types (and bool) strip whitespace correctly
|
719 |
+
# \u202F is a narrow no-break space, `\n` is just a whitespace if quoted.
|
720 |
+
# Currently, skip float128 as it did not always support this and has no
|
721 |
+
# "custom" parsing:
|
722 |
+
txt = StringIO(' 3 ,"\u202F2\n"')
|
723 |
+
res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
|
724 |
+
assert_array_equal(res, np.array([3, 2]).astype(dtype))
|
725 |
+
|
726 |
+
|
727 |
+
@pytest.mark.parametrize("dtype", "FD")
|
728 |
+
def test_unicode_whitespace_stripping_complex(dtype):
|
729 |
+
# Complex has a few extra cases since it has two components and
|
730 |
+
# parentheses
|
731 |
+
line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n"
|
732 |
+
data = [line, line.replace(" ", "\u202F")]
|
733 |
+
res = np.loadtxt(data, dtype=dtype, delimiter=',')
|
734 |
+
assert_array_equal(res, np.array([[1, 2+3j, 4+5j, 6-7j, 8j, 9j]] * 2))
|
735 |
+
|
736 |
+
|
737 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
738 |
+
reason="PyPy bug in error formatting")
|
739 |
+
@pytest.mark.parametrize("dtype", "FD")
|
740 |
+
@pytest.mark.parametrize("field",
|
741 |
+
["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"])
|
742 |
+
def test_bad_complex(dtype, field):
|
743 |
+
with pytest.raises(ValueError):
|
744 |
+
np.loadtxt([field + "\n"], dtype=dtype, delimiter=",")
|
745 |
+
|
746 |
+
|
747 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
748 |
+
reason="PyPy bug in error formatting")
|
749 |
+
@pytest.mark.parametrize("dtype",
|
750 |
+
np.typecodes["AllInteger"] + "efgdFDG" + "?")
|
751 |
+
def test_nul_character_error(dtype):
|
752 |
+
# Test that a \0 character is correctly recognized as an error even if
|
753 |
+
# what comes before is valid (not everything gets parsed internally).
|
754 |
+
if dtype.lower() == "g":
|
755 |
+
pytest.xfail("longdouble/clongdouble assignment may misbehave.")
|
756 |
+
with pytest.raises(ValueError):
|
757 |
+
np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"')
|
758 |
+
|
759 |
+
|
760 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
761 |
+
reason="PyPy bug in error formatting")
|
762 |
+
@pytest.mark.parametrize("dtype",
|
763 |
+
np.typecodes["AllInteger"] + "efgdFDG" + "?")
|
764 |
+
def test_no_thousands_support(dtype):
|
765 |
+
# Mainly to document behaviour, Python supports thousands like 1_1.
|
766 |
+
# (e and G may end up using different conversion and support it, this is
|
767 |
+
# a bug but happens...)
|
768 |
+
if dtype == "e":
|
769 |
+
pytest.skip("half assignment currently uses Python float converter")
|
770 |
+
if dtype in "eG":
|
771 |
+
pytest.xfail("clongdouble assignment is buggy (uses `complex`?).")
|
772 |
+
|
773 |
+
assert int("1_1") == float("1_1") == complex("1_1") == 11
|
774 |
+
with pytest.raises(ValueError):
|
775 |
+
np.loadtxt(["1_1\n"], dtype=dtype)
|
776 |
+
|
777 |
+
|
778 |
+
@pytest.mark.parametrize("data", [
|
779 |
+
["1,2\n", "2\n,3\n"],
|
780 |
+
["1,2\n", "2\r,3\n"]])
|
781 |
+
def test_bad_newline_in_iterator(data):
|
782 |
+
# In NumPy <=1.22 this was accepted, because newlines were completely
|
783 |
+
# ignored when the input was an iterable. This could be changed, but right
|
784 |
+
# now, we raise an error.
|
785 |
+
msg = "Found an unquoted embedded newline within a single line"
|
786 |
+
with pytest.raises(ValueError, match=msg):
|
787 |
+
np.loadtxt(data, delimiter=",")
|
788 |
+
|
789 |
+
|
790 |
+
@pytest.mark.parametrize("data", [
|
791 |
+
["1,2\n", "2,3\r\n"], # a universal newline
|
792 |
+
["1,2\n", "'2\n',3\n"], # a quoted newline
|
793 |
+
["1,2\n", "'2\r',3\n"],
|
794 |
+
["1,2\n", "'2\r\n',3\n"],
|
795 |
+
])
|
796 |
+
def test_good_newline_in_iterator(data):
|
797 |
+
# The quoted newlines will be untransformed here, but are just whitespace.
|
798 |
+
res = np.loadtxt(data, delimiter=",", quotechar="'")
|
799 |
+
assert_array_equal(res, [[1., 2.], [2., 3.]])
|
800 |
+
|
801 |
+
|
802 |
+
@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"])
|
803 |
+
def test_universal_newlines_quoted(newline):
|
804 |
+
# Check that universal newline support within the tokenizer is not applied
|
805 |
+
# to quoted fields. (note that lines must end in newline or quoted
|
806 |
+
# fields will not include a newline at all)
|
807 |
+
data = ['1,"2\n"\n', '3,"4\n', '1"\n']
|
808 |
+
data = [row.replace("\n", newline) for row in data]
|
809 |
+
res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"')
|
810 |
+
assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']])
|
811 |
+
|
812 |
+
|
813 |
+
def test_null_character():
|
814 |
+
# Basic tests to check that the NUL character is not special:
|
815 |
+
res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000")
|
816 |
+
assert_array_equal(res, [[1, 2, 3], [4, 5, 6]])
|
817 |
+
|
818 |
+
# Also not as part of a field (avoid unicode/arrays as unicode strips \0)
|
819 |
+
res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"],
|
820 |
+
delimiter=",", dtype=object)
|
821 |
+
assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]]
|
822 |
+
|
823 |
+
|
824 |
+
def test_iterator_fails_getting_next_line():
|
825 |
+
class BadSequence:
|
826 |
+
def __len__(self):
|
827 |
+
return 100
|
828 |
+
|
829 |
+
def __getitem__(self, item):
|
830 |
+
if item == 50:
|
831 |
+
raise RuntimeError("Bad things happened!")
|
832 |
+
return f"{item}, {item+1}"
|
833 |
+
|
834 |
+
with pytest.raises(RuntimeError, match="Bad things happened!"):
|
835 |
+
np.loadtxt(BadSequence(), dtype=int, delimiter=",")
|
836 |
+
|
837 |
+
|
838 |
+
class TestCReaderUnitTests:
|
839 |
+
# These are internal tests for path that should not be possible to hit
|
840 |
+
# unless things go very very wrong somewhere.
|
841 |
+
def test_not_an_filelike(self):
|
842 |
+
with pytest.raises(AttributeError, match=".*read"):
|
843 |
+
np.core._multiarray_umath._load_from_filelike(
|
844 |
+
object(), dtype=np.dtype("i"), filelike=True)
|
845 |
+
|
846 |
+
def test_filelike_read_fails(self):
|
847 |
+
# Can only be reached if loadtxt opens the file, so it is hard to do
|
848 |
+
# via the public interface (although maybe not impossible considering
|
849 |
+
# the current "DataClass" backing).
|
850 |
+
class BadFileLike:
|
851 |
+
counter = 0
|
852 |
+
|
853 |
+
def read(self, size):
|
854 |
+
self.counter += 1
|
855 |
+
if self.counter > 20:
|
856 |
+
raise RuntimeError("Bad bad bad!")
|
857 |
+
return "1,2,3\n"
|
858 |
+
|
859 |
+
with pytest.raises(RuntimeError, match="Bad bad bad!"):
|
860 |
+
np.core._multiarray_umath._load_from_filelike(
|
861 |
+
BadFileLike(), dtype=np.dtype("i"), filelike=True)
|
862 |
+
|
863 |
+
def test_filelike_bad_read(self):
|
864 |
+
# Can only be reached if loadtxt opens the file, so it is hard to do
|
865 |
+
# via the public interface (although maybe not impossible considering
|
866 |
+
# the current "DataClass" backing).
|
867 |
+
|
868 |
+
class BadFileLike:
|
869 |
+
counter = 0
|
870 |
+
|
871 |
+
def read(self, size):
|
872 |
+
return 1234 # not a string!
|
873 |
+
|
874 |
+
with pytest.raises(TypeError,
|
875 |
+
match="non-string returned while reading data"):
|
876 |
+
np.core._multiarray_umath._load_from_filelike(
|
877 |
+
BadFileLike(), dtype=np.dtype("i"), filelike=True)
|
878 |
+
|
879 |
+
def test_not_an_iter(self):
|
880 |
+
with pytest.raises(TypeError,
|
881 |
+
match="error reading from object, expected an iterable"):
|
882 |
+
np.core._multiarray_umath._load_from_filelike(
|
883 |
+
object(), dtype=np.dtype("i"), filelike=False)
|
884 |
+
|
885 |
+
def test_bad_type(self):
|
886 |
+
with pytest.raises(TypeError, match="internal error: dtype must"):
|
887 |
+
np.core._multiarray_umath._load_from_filelike(
|
888 |
+
object(), dtype="i", filelike=False)
|
889 |
+
|
890 |
+
def test_bad_encoding(self):
|
891 |
+
with pytest.raises(TypeError, match="encoding must be a unicode"):
|
892 |
+
np.core._multiarray_umath._load_from_filelike(
|
893 |
+
object(), dtype=np.dtype("i"), filelike=False, encoding=123)
|
894 |
+
|
895 |
+
@pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"])
|
896 |
+
def test_manual_universal_newlines(self, newline):
|
897 |
+
# This is currently not available to users, because we should always
|
898 |
+
# open files with universal newlines enabled `newlines=None`.
|
899 |
+
# (And reading from an iterator uses slightly different code paths.)
|
900 |
+
# We have no real support for `newline="\r"` or `newline="\n" as the
|
901 |
+
# user cannot specify those options.
|
902 |
+
data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),
|
903 |
+
newline="")
|
904 |
+
|
905 |
+
res = np.core._multiarray_umath._load_from_filelike(
|
906 |
+
data, dtype=np.dtype("U10"), filelike=True,
|
907 |
+
quote='"', comment="#", skiplines=1)
|
908 |
+
assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])
|
909 |
+
|
910 |
+
|
911 |
+
def test_delimiter_comment_collision_raises():
|
912 |
+
with pytest.raises(TypeError, match=".*control characters.*incompatible"):
|
913 |
+
np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")
|
914 |
+
|
915 |
+
|
916 |
+
def test_delimiter_quotechar_collision_raises():
|
917 |
+
with pytest.raises(TypeError, match=".*control characters.*incompatible"):
|
918 |
+
np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
|
919 |
+
|
920 |
+
|
921 |
+
def test_comment_quotechar_collision_raises():
|
922 |
+
with pytest.raises(TypeError, match=".*control characters.*incompatible"):
|
923 |
+
np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")
|
924 |
+
|
925 |
+
|
926 |
+
def test_delimiter_and_multiple_comments_collision_raises():
|
927 |
+
with pytest.raises(
|
928 |
+
TypeError, match="Comment characters.*cannot include the delimiter"
|
929 |
+
):
|
930 |
+
np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])
|
931 |
+
|
932 |
+
|
933 |
+
@pytest.mark.parametrize(
|
934 |
+
"ws",
|
935 |
+
(
|
936 |
+
" ", # space
|
937 |
+
"\t", # tab
|
938 |
+
"\u2003", # em
|
939 |
+
"\u00A0", # non-break
|
940 |
+
"\u3000", # ideographic space
|
941 |
+
)
|
942 |
+
)
|
943 |
+
def test_collision_with_default_delimiter_raises(ws):
|
944 |
+
with pytest.raises(TypeError, match=".*control characters.*incompatible"):
|
945 |
+
np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)
|
946 |
+
with pytest.raises(TypeError, match=".*control characters.*incompatible"):
|
947 |
+
np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)
|
948 |
+
|
949 |
+
|
950 |
+
@pytest.mark.parametrize("nl", ("\n", "\r"))
|
951 |
+
def test_control_character_newline_raises(nl):
|
952 |
+
txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")
|
953 |
+
msg = "control character.*cannot be a newline"
|
954 |
+
with pytest.raises(TypeError, match=msg):
|
955 |
+
np.loadtxt(txt, delimiter=nl)
|
956 |
+
with pytest.raises(TypeError, match=msg):
|
957 |
+
np.loadtxt(txt, comments=nl)
|
958 |
+
with pytest.raises(TypeError, match=msg):
|
959 |
+
np.loadtxt(txt, quotechar=nl)
|
960 |
+
|
961 |
+
|
962 |
+
@pytest.mark.parametrize(
|
963 |
+
("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),
|
964 |
+
[
|
965 |
+
("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes
|
966 |
+
("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str
|
967 |
+
],
|
968 |
+
)
|
969 |
+
@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize
|
970 |
+
def test_parametric_unit_discovery(
|
971 |
+
generic_data, long_datum, unitless_dtype, expected_dtype, nrows
|
972 |
+
):
|
973 |
+
"""Check that the correct unit (e.g. month, day, second) is discovered from
|
974 |
+
the data when a user specifies a unitless datetime."""
|
975 |
+
# Unit should be "D" (days) due to last entry
|
976 |
+
data = [generic_data] * 50000 + [long_datum]
|
977 |
+
expected = np.array(data, dtype=expected_dtype)
|
978 |
+
|
979 |
+
# file-like path
|
980 |
+
txt = StringIO("\n".join(data))
|
981 |
+
a = np.loadtxt(txt, dtype=unitless_dtype)
|
982 |
+
assert a.dtype == expected.dtype
|
983 |
+
assert_equal(a, expected)
|
984 |
+
|
985 |
+
# file-obj path
|
986 |
+
fd, fname = mkstemp()
|
987 |
+
os.close(fd)
|
988 |
+
with open(fname, "w") as fh:
|
989 |
+
fh.write("\n".join(data))
|
990 |
+
a = np.loadtxt(fname, dtype=unitless_dtype)
|
991 |
+
os.remove(fname)
|
992 |
+
assert a.dtype == expected.dtype
|
993 |
+
assert_equal(a, expected)
|
994 |
+
|
995 |
+
|
996 |
+
def test_str_dtype_unit_discovery_with_converter():
|
997 |
+
data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"]
|
998 |
+
expected = np.array(
|
999 |
+
["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17"
|
1000 |
+
)
|
1001 |
+
conv = lambda s: s.strip("XXX")
|
1002 |
+
|
1003 |
+
# file-like path
|
1004 |
+
txt = StringIO("\n".join(data))
|
1005 |
+
a = np.loadtxt(txt, dtype="U", converters=conv, encoding=None)
|
1006 |
+
assert a.dtype == expected.dtype
|
1007 |
+
assert_equal(a, expected)
|
1008 |
+
|
1009 |
+
# file-obj path
|
1010 |
+
fd, fname = mkstemp()
|
1011 |
+
os.close(fd)
|
1012 |
+
with open(fname, "w") as fh:
|
1013 |
+
fh.write("\n".join(data))
|
1014 |
+
a = np.loadtxt(fname, dtype="U", converters=conv, encoding=None)
|
1015 |
+
os.remove(fname)
|
1016 |
+
assert a.dtype == expected.dtype
|
1017 |
+
assert_equal(a, expected)
|
1018 |
+
|
1019 |
+
|
1020 |
+
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
|
1021 |
+
reason="PyPy bug in error formatting")
|
1022 |
+
def test_control_character_empty():
|
1023 |
+
with pytest.raises(TypeError, match="Text reading control character must"):
|
1024 |
+
np.loadtxt(StringIO("1 2 3"), delimiter="")
|
1025 |
+
with pytest.raises(TypeError, match="Text reading control character must"):
|
1026 |
+
np.loadtxt(StringIO("1 2 3"), quotechar="")
|
1027 |
+
with pytest.raises(ValueError, match="comments cannot be an empty string"):
|
1028 |
+
np.loadtxt(StringIO("1 2 3"), comments="")
|
1029 |
+
with pytest.raises(ValueError, match="comments cannot be an empty string"):
|
1030 |
+
np.loadtxt(StringIO("1 2 3"), comments=["#", ""])
|
1031 |
+
|
1032 |
+
|
1033 |
+
def test_control_characters_as_bytes():
|
1034 |
+
"""Byte control characters (comments, delimiter) are supported."""
|
1035 |
+
a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
|
1036 |
+
assert_equal(a, [1, 2, 3])
|
1037 |
+
|
1038 |
+
|
1039 |
+
@pytest.mark.filterwarnings('ignore::UserWarning')
|
1040 |
+
def test_field_growing_cases():
|
1041 |
+
# Test empty field appending/growing (each field still takes 1 character)
|
1042 |
+
# to see if the final field appending does not create issues.
|
1043 |
+
res = np.loadtxt([""], delimiter=",", dtype=bytes)
|
1044 |
+
assert len(res) == 0
|
1045 |
+
|
1046 |
+
for i in range(1, 1024):
|
1047 |
+
res = np.loadtxt(["," * i], delimiter=",", dtype=bytes)
|
1048 |
+
assert len(res) == i+1
|
env-llmeval/lib/python3.10/site-packages/numpy/lib/tests/test_mixins.py
ADDED
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numbers
|
2 |
+
import operator
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.testing import assert_, assert_equal, assert_raises
|
6 |
+
|
7 |
+
|
8 |
+
# NOTE: This class should be kept as an exact copy of the example from the
|
9 |
+
# docstring for NDArrayOperatorsMixin.
|
10 |
+
|
11 |
+
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
|
12 |
+
def __init__(self, value):
|
13 |
+
self.value = np.asarray(value)
|
14 |
+
|
15 |
+
# One might also consider adding the built-in list type to this
|
16 |
+
# list, to support operations like np.add(array_like, list)
|
17 |
+
_HANDLED_TYPES = (np.ndarray, numbers.Number)
|
18 |
+
|
19 |
+
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
|
20 |
+
out = kwargs.get('out', ())
|
21 |
+
for x in inputs + out:
|
22 |
+
# Only support operations with instances of _HANDLED_TYPES.
|
23 |
+
# Use ArrayLike instead of type(self) for isinstance to
|
24 |
+
# allow subclasses that don't override __array_ufunc__ to
|
25 |
+
# handle ArrayLike objects.
|
26 |
+
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
|
27 |
+
return NotImplemented
|
28 |
+
|
29 |
+
# Defer to the implementation of the ufunc on unwrapped values.
|
30 |
+
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
|
31 |
+
for x in inputs)
|
32 |
+
if out:
|
33 |
+
kwargs['out'] = tuple(
|
34 |
+
x.value if isinstance(x, ArrayLike) else x
|
35 |
+
for x in out)
|
36 |
+
result = getattr(ufunc, method)(*inputs, **kwargs)
|
37 |
+
|
38 |
+
if type(result) is tuple:
|
39 |
+
# multiple return values
|
40 |
+
return tuple(type(self)(x) for x in result)
|
41 |
+
elif method == 'at':
|
42 |
+
# no return value
|
43 |
+
return None
|
44 |
+
else:
|
45 |
+
# one return value
|
46 |
+
return type(self)(result)
|
47 |
+
|
48 |
+
def __repr__(self):
|
49 |
+
return '%s(%r)' % (type(self).__name__, self.value)
|
50 |
+
|
51 |
+
|
52 |
+
def wrap_array_like(result):
|
53 |
+
if type(result) is tuple:
|
54 |
+
return tuple(ArrayLike(r) for r in result)
|
55 |
+
else:
|
56 |
+
return ArrayLike(result)
|
57 |
+
|
58 |
+
|
59 |
+
def _assert_equal_type_and_value(result, expected, err_msg=None):
|
60 |
+
assert_equal(type(result), type(expected), err_msg=err_msg)
|
61 |
+
if isinstance(result, tuple):
|
62 |
+
assert_equal(len(result), len(expected), err_msg=err_msg)
|
63 |
+
for result_item, expected_item in zip(result, expected):
|
64 |
+
_assert_equal_type_and_value(result_item, expected_item, err_msg)
|
65 |
+
else:
|
66 |
+
assert_equal(result.value, expected.value, err_msg=err_msg)
|
67 |
+
assert_equal(getattr(result.value, 'dtype', None),
|
68 |
+
getattr(expected.value, 'dtype', None), err_msg=err_msg)
|
69 |
+
|
70 |
+
|
71 |
+
_ALL_BINARY_OPERATORS = [
|
72 |
+
operator.lt,
|
73 |
+
operator.le,
|
74 |
+
operator.eq,
|
75 |
+
operator.ne,
|
76 |
+
operator.gt,
|
77 |
+
operator.ge,
|
78 |
+
operator.add,
|
79 |
+
operator.sub,
|
80 |
+
operator.mul,
|
81 |
+
operator.truediv,
|
82 |
+
operator.floordiv,
|
83 |
+
operator.mod,
|
84 |
+
divmod,
|
85 |
+
pow,
|
86 |
+
operator.lshift,
|
87 |
+
operator.rshift,
|
88 |
+
operator.and_,
|
89 |
+
operator.xor,
|
90 |
+
operator.or_,
|
91 |
+
]
|
92 |
+
|
93 |
+
|
94 |
+
class TestNDArrayOperatorsMixin:
|
95 |
+
|
96 |
+
def test_array_like_add(self):
|
97 |
+
|
98 |
+
def check(result):
|
99 |
+
_assert_equal_type_and_value(result, ArrayLike(0))
|
100 |
+
|
101 |
+
check(ArrayLike(0) + 0)
|
102 |
+
check(0 + ArrayLike(0))
|
103 |
+
|
104 |
+
check(ArrayLike(0) + np.array(0))
|
105 |
+
check(np.array(0) + ArrayLike(0))
|
106 |
+
|
107 |
+
check(ArrayLike(np.array(0)) + 0)
|
108 |
+
check(0 + ArrayLike(np.array(0)))
|
109 |
+
|
110 |
+
check(ArrayLike(np.array(0)) + np.array(0))
|
111 |
+
check(np.array(0) + ArrayLike(np.array(0)))
|
112 |
+
|
113 |
+
def test_inplace(self):
|
114 |
+
array_like = ArrayLike(np.array([0]))
|
115 |
+
array_like += 1
|
116 |
+
_assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
|
117 |
+
|
118 |
+
array = np.array([0])
|
119 |
+
array += ArrayLike(1)
|
120 |
+
_assert_equal_type_and_value(array, ArrayLike(np.array([1])))
|
121 |
+
|
122 |
+
def test_opt_out(self):
|
123 |
+
|
124 |
+
class OptOut:
|
125 |
+
"""Object that opts out of __array_ufunc__."""
|
126 |
+
__array_ufunc__ = None
|
127 |
+
|
128 |
+
def __add__(self, other):
|
129 |
+
return self
|
130 |
+
|
131 |
+
def __radd__(self, other):
|
132 |
+
return self
|
133 |
+
|
134 |
+
array_like = ArrayLike(1)
|
135 |
+
opt_out = OptOut()
|
136 |
+
|
137 |
+
# supported operations
|
138 |
+
assert_(array_like + opt_out is opt_out)
|
139 |
+
assert_(opt_out + array_like is opt_out)
|
140 |
+
|
141 |
+
# not supported
|
142 |
+
with assert_raises(TypeError):
|
143 |
+
# don't use the Python default, array_like = array_like + opt_out
|
144 |
+
array_like += opt_out
|
145 |
+
with assert_raises(TypeError):
|
146 |
+
array_like - opt_out
|
147 |
+
with assert_raises(TypeError):
|
148 |
+
opt_out - array_like
|
149 |
+
|
150 |
+
def test_subclass(self):
|
151 |
+
|
152 |
+
class SubArrayLike(ArrayLike):
|
153 |
+
"""Should take precedence over ArrayLike."""
|
154 |
+
|
155 |
+
x = ArrayLike(0)
|
156 |
+
y = SubArrayLike(1)
|
157 |
+
_assert_equal_type_and_value(x + y, y)
|
158 |
+
_assert_equal_type_and_value(y + x, y)
|
159 |
+
|
160 |
+
def test_object(self):
|
161 |
+
x = ArrayLike(0)
|
162 |
+
obj = object()
|
163 |
+
with assert_raises(TypeError):
|
164 |
+
x + obj
|
165 |
+
with assert_raises(TypeError):
|
166 |
+
obj + x
|
167 |
+
with assert_raises(TypeError):
|
168 |
+
x += obj
|
169 |
+
|
170 |
+
def test_unary_methods(self):
|
171 |
+
array = np.array([-1, 0, 1, 2])
|
172 |
+
array_like = ArrayLike(array)
|
173 |
+
for op in [operator.neg,
|
174 |
+
operator.pos,
|
175 |
+
abs,
|
176 |
+
operator.invert]:
|
177 |
+
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
|
178 |
+
|
179 |
+
def test_forward_binary_methods(self):
|
180 |
+
array = np.array([-1, 0, 1, 2])
|
181 |
+
array_like = ArrayLike(array)
|
182 |
+
for op in _ALL_BINARY_OPERATORS:
|
183 |
+
expected = wrap_array_like(op(array, 1))
|
184 |
+
actual = op(array_like, 1)
|
185 |
+
err_msg = 'failed for operator {}'.format(op)
|
186 |
+
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
|
187 |
+
|
188 |
+
def test_reflected_binary_methods(self):
|
189 |
+
for op in _ALL_BINARY_OPERATORS:
|
190 |
+
expected = wrap_array_like(op(2, 1))
|
191 |
+
actual = op(2, ArrayLike(1))
|
192 |
+
err_msg = 'failed for operator {}'.format(op)
|
193 |
+
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
|
194 |
+
|
195 |
+
def test_matmul(self):
|
196 |
+
array = np.array([1, 2], dtype=np.float64)
|
197 |
+
array_like = ArrayLike(array)
|
198 |
+
expected = ArrayLike(np.float64(5))
|
199 |
+
_assert_equal_type_and_value(expected, np.matmul(array_like, array))
|
200 |
+
_assert_equal_type_and_value(
|
201 |
+
expected, operator.matmul(array_like, array))
|
202 |
+
_assert_equal_type_and_value(
|
203 |
+
expected, operator.matmul(array, array_like))
|
204 |
+
|
205 |
+
def test_ufunc_at(self):
|
206 |
+
array = ArrayLike(np.array([1, 2, 3, 4]))
|
207 |
+
assert_(np.negative.at(array, np.array([0, 1])) is None)
|
208 |
+
_assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
|
209 |
+
|
210 |
+
def test_ufunc_two_outputs(self):
|
211 |
+
mantissa, exponent = np.frexp(2 ** -3)
|
212 |
+
expected = (ArrayLike(mantissa), ArrayLike(exponent))
|
213 |
+
_assert_equal_type_and_value(
|
214 |
+
np.frexp(ArrayLike(2 ** -3)), expected)
|
215 |
+
_assert_equal_type_and_value(
|
216 |
+
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
|