Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +2 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/__pycache__/arrayprint.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/__pycache__/numeric.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so +3 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so +3 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_arrayprint.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_errstate.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_half.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_records.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_shape_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test__exceptions.py +88 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_abc.py +54 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_api.py +615 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_argparse.py +62 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py +898 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_array_interface.py +219 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_arraymethod.py +85 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_casting_unittests.py +819 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cpu_features.py +404 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py +253 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_datetime.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_defchararray.py +686 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_deprecations.py +817 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_dlpack.py +124 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_dtype.py +1906 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_einsum.py +1248 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_extint128.py +219 -0
.gitattributes
CHANGED
@@ -120,3 +120,5 @@ env-llmeval/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/bin/python filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/numpy/core/__pycache__/arrayprint.cpython-310.pyc
ADDED
Binary file (52.4 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/__pycache__/numeric.cpython-310.pyc
ADDED
Binary file (73 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe5efe31c55326b072c8fb239a225819211826cb45cd3c74ed0af0030e70f3a1
size 7426841
env-llmeval/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b2c96bec20e3c7a59f8f78b30e7fd5142d015e42f2cbd27223c3e862c53e4113
size 3527040
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (181 Bytes)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-310.pyc
ADDED
Binary file (2.01 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_api.cpython-310.pyc
ADDED
Binary file (15.8 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-310.pyc
ADDED
Binary file (2.49 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-310.pyc
ADDED
Binary file (6.04 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-310.pyc
ADDED
Binary file (3.14 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_arrayprint.cpython-310.pyc
ADDED
Binary file (37.2 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-310.pyc
ADDED
Binary file (5.54 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-310.pyc
ADDED
Binary file (20.5 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-310.pyc
ADDED
Binary file (8.31 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-310.pyc
ADDED
Binary file (3.59 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-310.pyc
ADDED
Binary file (65.3 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-310.pyc
ADDED
Binary file (39.1 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-310.pyc
ADDED
Binary file (64 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-310.pyc
ADDED
Binary file (35 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_errstate.cpython-310.pyc
ADDED
Binary file (2.63 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-310.pyc
ADDED
Binary file (6.6 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_half.cpython-310.pyc
ADDED
Binary file (16.8 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-310.pyc
ADDED
Binary file (1.1 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-310.pyc
ADDED
Binary file (13 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-310.pyc
ADDED
Binary file (1.41 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_records.cpython-310.pyc
ADDED
Binary file (19.7 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_shape_base.cpython-310.pyc
ADDED
Binary file (26.1 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc
ADDED
Binary file (4.03 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-310.pyc
ADDED
Binary file (98.9 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-310.pyc
ADDED
Binary file (158 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-310.pyc
ADDED
Binary file (17.1 kB)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test__exceptions.py
ADDED
@@ -0,0 +1,88 @@
"""
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
"""

import pickle

import pytest
import numpy as np

_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError

class TestArrayMemoryError:
    def test_pickling(self):
        """ Test that _ArrayMemoryError can be pickled """
        error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        res = pickle.loads(pickle.dumps(error))
        assert res._total_size == error._total_size

    def test_str(self):
        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        str(e)  # not crashing is enough

    # testing these properties is easier than testing the full string repr
    def test__size_to_string(self):
        """ Test e._size_to_string """
        f = _ArrayMemoryError._size_to_string
        Ki = 1024
        assert f(0) == '0 bytes'
        assert f(1) == '1 bytes'
        assert f(1023) == '1023 bytes'
        assert f(Ki) == '1.00 KiB'
        assert f(Ki+1) == '1.00 KiB'
        assert f(10*Ki) == '10.0 KiB'
        assert f(int(999.4*Ki)) == '999. KiB'
        assert f(int(1023.4*Ki)) == '1023. KiB'
        assert f(int(1023.5*Ki)) == '1.00 MiB'
        assert f(Ki*Ki) == '1.00 MiB'

        # 1023.9999 Mib should round to 1 GiB
        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
        # larger than sys.maxsize, adding larger prefixes isn't going to help
        # anyway.
        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'

    def test__total_size(self):
        """ Test e._total_size """
        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
        assert e._total_size == 1

        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
        assert e._total_size == 1024


class TestUFuncNoLoopError:
    def test_pickling(self):
        """ Test that _UFuncNoLoopError can be pickled """
        assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)


@pytest.mark.parametrize("args", [
    (2, 1, None),
    (2, 1, "test_prefix"),
    ("test message",),
])
class TestAxisError:
    def test_attr(self, args):
        """Validate attribute types."""
        exc = np.AxisError(*args)
        if len(args) == 1:
            assert exc.axis is None
            assert exc.ndim is None
        else:
            axis, ndim, *_ = args
            assert exc.axis == axis
            assert exc.ndim == ndim

    def test_pickling(self, args):
        """Test that `AxisError` can be pickled."""
        exc = np.AxisError(*args)
        exc2 = pickle.loads(pickle.dumps(exc))

        assert type(exc) is type(exc2)
        for name in ("axis", "ndim", "args"):
            attr1 = getattr(exc, name)
            attr2 = getattr(exc2, name)
            assert attr1 == attr2, name
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_abc.py
ADDED
@@ -0,0 +1,54 @@
from numpy.testing import assert_

import numbers

import numpy as np
from numpy.core.numerictypes import sctypes

class TestABC:
    def test_abstract(self):
        assert_(issubclass(np.number, numbers.Number))

        assert_(issubclass(np.inexact, numbers.Complex))
        assert_(issubclass(np.complexfloating, numbers.Complex))
        assert_(issubclass(np.floating, numbers.Real))

        assert_(issubclass(np.integer, numbers.Integral))
        assert_(issubclass(np.signedinteger, numbers.Integral))
        assert_(issubclass(np.unsignedinteger, numbers.Integral))

    def test_floats(self):
        for t in sctypes['float']:
            assert_(isinstance(t(), numbers.Real),
                    f"{t.__name__} is not instance of Real")
            assert_(issubclass(t, numbers.Real),
                    f"{t.__name__} is not subclass of Real")
            assert_(not isinstance(t(), numbers.Rational),
                    f"{t.__name__} is instance of Rational")
            assert_(not issubclass(t, numbers.Rational),
                    f"{t.__name__} is subclass of Rational")

    def test_complex(self):
        for t in sctypes['complex']:
            assert_(isinstance(t(), numbers.Complex),
                    f"{t.__name__} is not instance of Complex")
            assert_(issubclass(t, numbers.Complex),
                    f"{t.__name__} is not subclass of Complex")
            assert_(not isinstance(t(), numbers.Real),
                    f"{t.__name__} is instance of Real")
            assert_(not issubclass(t, numbers.Real),
                    f"{t.__name__} is subclass of Real")

    def test_int(self):
        for t in sctypes['int']:
            assert_(isinstance(t(), numbers.Integral),
                    f"{t.__name__} is not instance of Integral")
            assert_(issubclass(t, numbers.Integral),
                    f"{t.__name__} is not subclass of Integral")

    def test_uint(self):
        for t in sctypes['uint']:
            assert_(isinstance(t(), numbers.Integral),
                    f"{t.__name__} is not instance of Integral")
            assert_(issubclass(t, numbers.Integral),
                    f"{t.__name__} is not subclass of Integral")
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_api.py
ADDED
@@ -0,0 +1,615 @@
import sys

import numpy as np
from numpy.core._rational_tests import rational
import pytest
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
    HAS_REFCOUNT
    )


def test_array_array():
    tobj = type(object)
    ones11 = np.ones((1, 1), np.float64)
    tndarray = type(ones11)
    # Test is_ndarray
    assert_equal(np.array(ones11, dtype=np.float64), ones11)
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tndarray)
        np.array(ones11)
        assert_equal(old_refcount, sys.getrefcount(tndarray))

    # test None
    assert_equal(np.array(None, dtype=np.float64),
                 np.array(np.nan, dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tobj)
        np.array(None, dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(tobj))

    # test scalar
    assert_equal(np.array(1.0, dtype=np.float64),
                 np.ones((), dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(np.float64)
        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(np.float64))

    # test string
    S2 = np.dtype((bytes, 2))
    S3 = np.dtype((bytes, 3))
    S5 = np.dtype((bytes, 5))
    assert_equal(np.array(b"1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array(b"1.0").dtype, S3)
    assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
    assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
    assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))

    # test string
    U2 = np.dtype((str, 2))
    U3 = np.dtype((str, 3))
    U5 = np.dtype((str, 5))
    assert_equal(np.array("1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array("1.0").dtype, U3)
    assert_equal(np.array("1.0", dtype=str).dtype, U3)
    assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
    assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))

    builtins = getattr(__builtins__, '__dict__', __builtins__)
    assert_(hasattr(builtins, 'get'))

    # test memoryview
    dat = np.array(memoryview(b'1.0'), dtype=np.float64)
    assert_equal(dat, [49.0, 46.0, 48.0])
    assert_(dat.dtype.type is np.float64)

    dat = np.array(memoryview(b'1.0'))
    assert_equal(dat, [49, 46, 48])
    assert_(dat.dtype.type is np.uint8)

    # test array interface
    a = np.array(100.0, dtype=np.float64)
    o = type("o", (object,),
             dict(__array_interface__=a.__array_interface__))
    assert_equal(np.array(o, dtype=np.float64), a)

    # test array_struct interface
    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
                 dtype=[('f0', int), ('f1', float), ('f2', str)])
    o = type("o", (object,),
             dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a ?
    ## instead we get a array([...], dtype=">V18")
    assert_equal(bytes(np.array(o).data), bytes(a.data))

    # test array
    o = type("o", (object,),
             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))

    # test recursion
    nested = 1.5
    for i in range(np.MAXDIMS):
        nested = [nested]

    # no error
    np.array(nested)

    # Exceeds recursion limit
    assert_raises(ValueError, np.array, [nested], dtype=np.float64)

    # Try with lists...
    # float32
    assert_equal(np.array([None] * 10, dtype=np.float32),
                 np.full((10,), np.nan, dtype=np.float32))
    assert_equal(np.array([[None]] * 10, dtype=np.float32),
                 np.full((10, 1), np.nan, dtype=np.float32))
    assert_equal(np.array([[None] * 10], dtype=np.float32),
                 np.full((1, 10), np.nan, dtype=np.float32))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float32),
                 np.full((10, 10), np.nan, dtype=np.float32))
    # float64
    assert_equal(np.array([None] * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([[None]] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array([1.0] * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))

    # Try with tuples
    assert_equal(np.array((None,) * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))

@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
    # All builtin types can be forcibly cast, at least theoretically,
    # but user dtypes cannot necessarily.
    rt = rational(1, 2)
    if array:
        rt = np.array(rt)
    with assert_raises(TypeError):
        np.array(rt, dtype="M8")


# TODO: remove when fastCopyAndTranspose deprecation expires
@pytest.mark.parametrize("a",
        (
         np.array(2),  # 0D array
         np.array([3, 2, 7, 0]),  # 1D array
         np.arange(6).reshape(2, 3)  # 2D array
        ),
)
def test_fastCopyAndTranspose(a):
    with pytest.deprecated_call():
        b = np.fastCopyAndTranspose(a)
        assert_equal(b, a.T)
        assert b.flags.owndata


def test_array_astype():
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    b = a.astype('i4')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.strides, b.strides)
    b = a.T.astype('i4')
    assert_equal(a.T, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.T.strides, b.strides)
    b = a.astype('f4')
    assert_equal(a, b)
    assert_(not (a is b))

    # copy=False parameter can sometimes skip a copy
    b = a.astype('f4', copy=False)
    assert_(a is b)

    # order parameter allows overriding of the memory layout,
    # forcing a copy if the layout is wrong
    b = a.astype('f4', order='F', copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(b.flags.f_contiguous)

    b = a.astype('f4', order='C', copy=False)
    assert_equal(a, b)
    assert_(a is b)
    assert_(b.flags.c_contiguous)

    # casting parameter allows catching bad casts
    b = a.astype('c8', casting='safe')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('c8'))

    assert_raises(TypeError, a.astype, 'i4', casting='safe')

    # subok=False passes through a non-subclassed array
    b = a.astype('f4', subok=0, copy=False)
    assert_(a is b)

    class MyNDArray(np.ndarray):
        pass

    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)

    # subok=True passes through a subclass
    b = a.astype('f4', subok=True, copy=False)
    assert_(a is b)

    # subok=True is default, and creates a subtype on a cast
    b = a.astype('i4', copy=False)
    assert_equal(a, b)
    assert_equal(type(b), MyNDArray)

    # subok=False never returns a subclass
    b = a.astype('f4', subok=False, copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(type(b) is not MyNDArray)

    # Make sure converting from string object to fixed length string
    # does not truncate.
    a = np.array([b'a'*100], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S100'))
    a = np.array(['a'*100], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U100'))

    # Same test as above but for strings shorter than 64 characters
    a = np.array([b'a'*10], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S10'))
    a = np.array(['a'*10], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U10'))

    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array('a\u0140', dtype='U')
    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
    assert_(b.size == 2)

    a = np.array([1000], dtype='i4')
    assert_raises(TypeError, a.astype, 'S1', casting='safe')

    a = np.array(1000, dtype='i4')
    assert_raises(TypeError, a.astype, 'U1', casting='safe')

    # gh-24023
    assert_raises(TypeError, a.astype)

@pytest.mark.parametrize("dt", ["S", "U"])
def test_array_astype_to_string_discovery_empty(dt):
    # See also gh-19085
    arr = np.array([""], dtype=object)
    # Note, the itemsize is the `0 -> 1` logic, which should change.
    # The important part the test is rather that it does not error.
    assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize

    # check the same thing for `np.can_cast` (since it accepts arrays)
    assert np.can_cast(arr, dt, casting="unsafe")
    assert not np.can_cast(arr, dt, casting="same_kind")
    # as well as for the object as a descriptor:
    assert np.can_cast("O", dt, casting="unsafe")

@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
    dt = np.dtype(dt)
    arr = np.array([], dtype=dt)
    assert arr.astype("V").dtype.itemsize == dt.itemsize

def test_object_array_astype_to_void():
    # This is different to `test_array_astype_to_void` as object arrays
    # are inspected. The default void is "V8" (8 is the length of double)
    arr = np.array([], dtype="O").astype("V")
    assert arr.dtype == "V8"

@pytest.mark.parametrize("t",
        np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
)
def test_array_astype_warning(t):
    # test ComplexWarning when casting from complex to float or int
    a = np.array(10, dtype=np.complex_)
    assert_warns(np.ComplexWarning, a.astype, t)

@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.str_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
    """
    Currently, for `astype` strings are cast to booleans effectively by
    calling `bool(int(string)`. This is not consistent (see gh-9875) and
    will eventually be deprecated.
    """
    arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
    expected = np.array([True, True, False, False], dtype=out_dtype)
    assert_array_equal(arr.astype(out_dtype), expected)

@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.str_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
    """
    These currently error out, since cast to integers fails, but should not
    error out in the future.
    """
    for invalid in ["False", "True", "", "\0", "non-empty"]:
        arr = np.array([invalid], dtype=dtype)
        with assert_raises(ValueError):
            arr.astype(out_dtype)

@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
@pytest.mark.parametrize("scalar_type",
        [np.complex64, np.complex128, np.clongdouble])
def test_string_to_complex_cast(str_type, scalar_type):
    value = scalar_type(b"1+3j")
    assert scalar_type(value) == 1+3j
    assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
    assert np.array(value).astype(scalar_type)[()] == 1+3j
    arr = np.zeros(1, dtype=scalar_type)
    arr[0] = value
    assert arr[0] == 1+3j

@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_none_to_nan_cast(dtype):
    # Note that at the time of writing this test, the scalar constructors
    # reject None
    arr = np.zeros(1, dtype=dtype)
    arr[0] = None
    assert np.isnan(arr)[0]
    assert np.isnan(np.array(None, dtype=dtype))[()]
    assert np.isnan(np.array([None], dtype=dtype))[0]
    assert np.isnan(np.array(None).astype(dtype))[()]

def test_copyto_fromscalar():
    a = np.arange(6, dtype='f4').reshape(2, 3)

    # Simple copy
    np.copyto(a, 1.5)
    assert_equal(a, 1.5)
    np.copyto(a.T, 2.5)
    assert_equal(a, 2.5)

    # Where-masked copy
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(a, 3.5, where=mask)
    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(a.T, 4.5, where=mask)
    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])

def test_copyto():
    a = np.arange(6, dtype='i4').reshape(2, 3)

    # Simple copy
    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
    assert_equal(a, [[3, 1, 5], [6, 2, 1]])

    # Overlapping copy should work
    np.copyto(a[:, :2], a[::-1, 1::-1])
    assert_equal(a, [[2, 6, 5], [1, 3, 1]])

    # Defaults to 'same_kind' casting
    assert_raises(TypeError, np.copyto, a, 1.5)

    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
    np.copyto(a, 1.5, casting='unsafe')
    assert_equal(a, 1)

    # Copying with a mask
    np.copyto(a, 3, where=[True, False, True])
    assert_equal(a, [[3, 1, 3], [3, 1, 3]])

    # Casting rule still applies with a mask
    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])

    # Lists of integer 0's and 1's is ok too
    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(a, [[3, 4, 4], [4, 1, 3]])

    # Overlapping copy with mask should work
    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(a, [[3, 4, 4], [4, 3, 3]])

    # 'dst' must be an array
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])

def test_copyto_permut():
    # test explicit overflow case
    pad = 500
    l = [True] * pad + [True, True, True, True]
    r = np.zeros(len(l)-pad)
    d = np.ones(len(l)-pad)
    mask = np.array(l)[pad:]
    np.copyto(r, d, where=mask[::-1])

    # test all permutation of possible masks, 9 should be sufficient for
    # current 4 byte unrolled code
    power = 9
    d = np.ones(power)
    for i in range(2**power):
        r = np.zeros(power)
        l = [(i & x) != 0 for x in range(power)]
        mask = np.array(l)
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r, d, where=mask[::-1])
        assert_array_equal(r == 1, l[::-1])
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::2])
        assert_array_equal(r[::2] == 1, l[::2])
        assert_equal(r[::2].sum(), sum(l[::2]))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::-2])
        assert_array_equal(r[::2] == 1, l[::-2])
        assert_equal(r[::2].sum(), sum(l[::-2]))

        for c in [0xFF, 0x7F, 0x02, 0x10]:
            r = np.zeros(power)
            mask = np.array(l)
            imask = np.array(l).view(np.uint8)
            imask[mask != 0] = c
            np.copyto(r, d, where=mask)
            assert_array_equal(r == 1, l)
            assert_equal(r.sum(), sum(l))

    r = np.zeros(power)
    np.copyto(r, d, where=True)
    assert_equal(r.sum(), r.size)
    r = np.ones(power)
    d = np.zeros(power)
    np.copyto(r, d, where=False)
    assert_equal(r.sum(), r.size)

def test_copy_order():
    a = np.arange(24).reshape(2, 1, 3, 4)
    b = a.copy(order='F')
    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)

    def check_copy_result(x, y, ccontig, fcontig, strides=False):
        assert_(not (x is y))
        assert_equal(x, y)
        assert_equal(res.flags.c_contiguous, ccontig)
        assert_equal(res.flags.f_contiguous, fcontig)

    # Validate the initial state of a, b, and c
    assert_(a.flags.c_contiguous)
    assert_(not a.flags.f_contiguous)
    assert_(not b.flags.c_contiguous)
    assert_(b.flags.f_contiguous)
    assert_(not c.flags.c_contiguous)
    assert_(not c.flags.f_contiguous)

    # Copy with order='C'
    res = a.copy(order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = c.copy(order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
    res = np.copy(a, order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = np.copy(c, order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)

    # Copy with order='F'
    res = a.copy(order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = b.copy(order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
    res = np.copy(a, order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = np.copy(b, order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)

    # Copy with order='K'
    res = a.copy(order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
    res = np.copy(a, order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)

def test_contiguous_flags():
    a = np.ones((4, 4, 1))[::2,:,:]
    a.strides = a.strides[:2] + (-123,)
    b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)

    def check_contig(a, ccontig, fcontig):
        assert_(a.flags.c_contiguous == ccontig)
        assert_(a.flags.f_contiguous == fcontig)

    # Check if new arrays are correct:
    check_contig(a, False, False)
    check_contig(b, False, False)
    check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
    check_contig(np.array([[[1], [2]]], order='F'), True, True)
    check_contig(np.empty((2, 2)), True, False)
    check_contig(np.empty((2, 2), order='F'), False, True)

    # Check that np.array creates correct contiguous flags:
    check_contig(np.array(a, copy=False), False, False)
    check_contig(np.array(a, copy=False, order='C'), True, False)
    check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)

    # Check slicing update of flags and :
    check_contig(a[0], True, True)
    check_contig(a[None, ::4, ..., None], True, True)
    check_contig(b[0, 0, ...], False, True)
    check_contig(b[:, :, 0:0, :, :], True, True)

    # Test ravel and squeeze.
    check_contig(a.ravel(), True, True)
    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)

def test_broadcast_arrays():
    # Test user defined dtypes
    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
    result = np.broadcast_arrays(a, b)
    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))

@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
        [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
         ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
def test_full_from_list(shape, fill_value, expected_output):
    output = np.full(shape, fill_value)
    assert_equal(output, expected_output)

def test_astype_copyflag():
    # test the various copyflag options
    arr = np.arange(10, dtype=np.intp)

    res_true = arr.astype(np.intp, copy=True)
    assert not np.may_share_memory(arr, res_true)
    res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
    assert not np.may_share_memory(arr, res_always)

    res_false = arr.astype(np.intp, copy=False)
    # `res_false is arr` currently, but check `may_share_memory`.
    assert np.may_share_memory(arr, res_false)
    res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
    # `res_if_needed is arr` currently, but check `may_share_memory`.
    assert np.may_share_memory(arr, res_if_needed)

    res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
    assert np.may_share_memory(arr, res_never)

    # Simple tests for when a copy is necessary:
    res_false = arr.astype(np.float64, copy=False)
    assert_array_equal(res_false, arr)
    res_if_needed = arr.astype(np.float64,
                               copy=np._CopyMode.IF_NEEDED)
    assert_array_equal(res_if_needed, arr)
    assert_raises(ValueError, arr.astype, np.float64,
                  copy=np._CopyMode.NEVER)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_argparse.py
ADDED
@@ -0,0 +1,62 @@
"""
Tests for the private NumPy argument parsing functionality.
They mainly exists to ensure good test coverage without having to try the
weirder cases on actual numpy functions but test them in one place.

The test function is defined in C to be equivalent to (errors may not always
match exactly, and could be adjusted):

def func(arg1, /, arg2, *, arg3):
    i = integer(arg1)  # reproducing the 'i' parsing in Python.
    return None
"""

import pytest

import numpy as np
from numpy.core._multiarray_tests import argparse_example_function as func


def test_invalid_integers():
    with pytest.raises(TypeError,
            match="integer argument expected, got float"):
        func(1.)
    with pytest.raises(OverflowError):
        func(2**100)


def test_missing_arguments():
    with pytest.raises(TypeError,
            match="missing required positional argument 0"):
        func()
    with pytest.raises(TypeError,
            match="missing required positional argument 0"):
        func(arg2=1, arg3=4)
    with pytest.raises(TypeError,
            match=r"missing required argument \'arg2\' \(pos 1\)"):
        func(1, arg3=5)


def test_too_many_positional():
    # the second argument is positional but can be passed as keyword.
    with pytest.raises(TypeError,
            match="takes from 2 to 3 positional arguments but 4 were given"):
        func(1, 2, 3, 4)


def test_multiple_values():
    with pytest.raises(TypeError,
            match=r"given by name \('arg2'\) and position \(position 1\)"):
        func(1, 2, arg2=3)


def test_string_fallbacks():
    # We can (currently?) use numpy strings to test the "slow" fallbacks
    # that should normally not be taken due to string interning.
    arg2 = np.str_("arg2")
    missing_arg = np.str_("missing_arg")
    func(1, **{arg2: 3})
    with pytest.raises(TypeError,
            match="got an unexpected keyword argument 'missing_arg'"):
        func(2, **{missing_arg: 3})
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py
ADDED
@@ -0,0 +1,898 @@
"""
Tests for array coercion, mainly through testing `np.array` results directly.
Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""

from itertools import permutations, product

import pytest
from pytest import param

import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_umath import _discover_array_parameters

from numpy.testing import (
    assert_array_equal, assert_warns, IS_PYPY)


def arraylikes():
    """
    Generator for functions converting an array into various array-likes.
    If full is True (default) it includes array-likes not capable of handling
    all dtypes.
    """
    # base array:
    def ndarray(a):
        return a

    yield param(ndarray, id="ndarray")

    # subclass:
    class MyArr(np.ndarray):
        pass

    def subclass(a):
        return a.view(MyArr)

    yield subclass

    class _SequenceLike():
        # Older NumPy versions, sometimes cared whether a protocol array was
        # also _SequenceLike. This shouldn't matter, but keep it for now
        # for __array__ and not the others.
        def __len__(self):
            raise TypeError

        def __getitem__(self):
            raise TypeError

    # Array-interface
    class ArrayDunder(_SequenceLike):
        def __init__(self, a):
            self.a = a

        def __array__(self, dtype=None):
            return self.a

    yield param(ArrayDunder, id="__array__")

    # memory-view
    yield param(memoryview, id="memoryview")

    # Array-interface
    class ArrayInterface:
        def __init__(self, a):
            self.a = a  # need to hold on to keep interface valid
            self.__array_interface__ = a.__array_interface__

    yield param(ArrayInterface, id="__array_interface__")

    # Array-Struct
    class ArrayStruct:
        def __init__(self, a):
            self.a = a  # need to hold on to keep struct valid
            self.__array_struct__ = a.__array_struct__

    yield param(ArrayStruct, id="__array_struct__")


def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    # Hard-coded list of scalar instances.
    # Floats:
    yield param(np.sqrt(np.float16(5)), id="float16")
    yield param(np.sqrt(np.float32(5)), id="float32")
    yield param(np.sqrt(np.float64(5)), id="float64")
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")

    # Complex:
    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
    if extended_precision:
        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")

    # Bool:
    # XFAIL: Bool should be added, but has some bad properties when it
    # comes to strings, see also gh-9875
    # yield param(np.bool_(0), id="bool")

    # Integers:
    yield param(np.int8(2), id="int8")
    yield param(np.int16(2), id="int16")
    yield param(np.int32(2), id="int32")
    yield param(np.int64(2), id="int64")

    yield param(np.uint8(2), id="uint8")
    yield param(np.uint16(2), id="uint16")
    yield param(np.uint32(2), id="uint32")
    yield param(np.uint64(2), id="uint64")

    # Rational:
    if user_dtype:
        yield param(rational(1, 2), id="rational")

    # Cannot create a structured void scalar directly:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")

    if times:
        # Datetimes and timedelta
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")

        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")

    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.str_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")


def is_parametric_dtype(dtype):
    """Returns True if the dtype is a parametric legacy dtype (itemsize
    is 0, or a datetime without units)
    """
    if dtype.itemsize == 0:
        return True
    if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        if dtype.name.endswith("64"):
            # Generic time units
            return True
    return False


class TestStringDiscovery:
    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_basic_stringlength(self, obj):
        length = len(str(obj))
        expected = np.dtype(f"S{length}")

        assert np.array(obj, dtype="S").dtype == expected
        assert np.array([obj], dtype="S").dtype == expected

        # A nested array is also discovered correctly
        arr = np.array(obj, dtype="O")
        assert np.array(arr, dtype="S").dtype == expected
        # Also if we use the dtype class
        assert np.array(arr, dtype=type(expected)).dtype == expected
        # Check that .astype() behaves identical
        assert arr.astype("S").dtype == expected
        # The DType class is accepted by `.astype()`
        assert arr.astype(type(np.dtype("S"))).dtype == expected

    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_nested_arrays_stringlength(self, obj):
        length = len(str(obj))
        expected = np.dtype(f"S{length}")
        arr = np.array(obj, dtype="O")
        assert np.array([arr, arr], dtype="S").dtype == expected

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_unpack_first_level(self, arraylike):
        # We unpack exactly one level of array likes
        obj = np.array([None])
        obj[0] = np.array(1.2)
        # the length of the included item, not of the float dtype
        length = len(str(obj[0]))
        expected = np.dtype(f"S{length}")

        obj = arraylike(obj)
        # casting to string usually calls str(obj)
        arr = np.array([obj], dtype="S")
        assert arr.shape == (1, 1)
        assert arr.dtype == expected


class TestScalarDiscovery:
    def test_void_special_case(self):
        # Void dtypes with structures discover tuples as elements
        arr = np.array((1, 2, 3), dtype="i,i,i")
        assert arr.shape == ()
        arr = np.array([(1, 2, 3)], dtype="i,i,i")
        assert arr.shape == (1,)

    def test_char_special_case(self):
        arr = np.array("string", dtype="c")
        assert arr.shape == (6,)
        assert arr.dtype.char == "c"
        arr = np.array(["string"], dtype="c")
        assert arr.shape == (1, 6)
        assert arr.dtype.char == "c"

    def test_char_special_case_deep(self):
        # Check that the character special case errors correctly if the
        # array is too deep:
        nested = ["string"]  # 2 dimensions (due to string being sequence)
        for i in range(np.MAXDIMS - 2):
            nested = [nested]

        arr = np.array(nested, dtype='c')
+
assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
|
221 |
+
with pytest.raises(ValueError):
|
222 |
+
np.array([nested], dtype="c")
|
223 |
+
|
224 |
+
def test_unknown_object(self):
|
225 |
+
arr = np.array(object())
|
226 |
+
assert arr.shape == ()
|
227 |
+
assert arr.dtype == np.dtype("O")
|
228 |
+
|
229 |
+
@pytest.mark.parametrize("scalar", scalar_instances())
|
230 |
+
def test_scalar(self, scalar):
|
231 |
+
arr = np.array(scalar)
|
232 |
+
assert arr.shape == ()
|
233 |
+
assert arr.dtype == scalar.dtype
|
234 |
+
|
235 |
+
arr = np.array([[scalar, scalar]])
|
236 |
+
assert arr.shape == (1, 2)
|
237 |
+
assert arr.dtype == scalar.dtype
|
238 |
+
|
239 |
+
# Additionally to string this test also runs into a corner case
|
240 |
+
# with datetime promotion (the difference is the promotion order).
|
241 |
+
@pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
|
242 |
+
def test_scalar_promotion(self):
|
243 |
+
for sc1, sc2 in product(scalar_instances(), scalar_instances()):
|
244 |
+
sc1, sc2 = sc1.values[0], sc2.values[0]
|
245 |
+
# test all combinations:
|
246 |
+
try:
|
247 |
+
arr = np.array([sc1, sc2])
|
248 |
+
except (TypeError, ValueError):
|
249 |
+
# The promotion between two times can fail
|
250 |
+
# XFAIL (ValueError): Some object casts are currently undefined
|
251 |
+
continue
|
252 |
+
assert arr.shape == (2,)
|
253 |
+
try:
|
254 |
+
dt1, dt2 = sc1.dtype, sc2.dtype
|
255 |
+
expected_dtype = np.promote_types(dt1, dt2)
|
256 |
+
assert arr.dtype == expected_dtype
|
257 |
+
except TypeError as e:
|
258 |
+
# Will currently always go to object dtype
|
259 |
+
assert arr.dtype == np.dtype("O")
|
260 |
+
|
261 |
+
@pytest.mark.parametrize("scalar", scalar_instances())
|
262 |
+
def test_scalar_coercion(self, scalar):
|
263 |
+
# This tests various scalar coercion paths, mainly for the numerical
|
264 |
+
# types. It includes some paths not directly related to `np.array`.
|
265 |
+
if isinstance(scalar, np.inexact):
|
266 |
+
# Ensure we have a full-precision number if available
|
267 |
+
scalar = type(scalar)((scalar * 2)**0.5)
|
268 |
+
|
269 |
+
if type(scalar) is rational:
|
270 |
+
# Rational generally fails due to a missing cast. In the future
|
271 |
+
# object casts should automatically be defined based on `setitem`.
|
272 |
+
pytest.xfail("Rational to object cast is undefined currently.")
|
273 |
+
|
274 |
+
# Use casting from object:
|
275 |
+
arr = np.array(scalar, dtype=object).astype(scalar.dtype)
|
276 |
+
|
277 |
+
# Test various ways to create an array containing this scalar:
|
278 |
+
arr1 = np.array(scalar).reshape(1)
|
279 |
+
arr2 = np.array([scalar])
|
280 |
+
arr3 = np.empty(1, dtype=scalar.dtype)
|
281 |
+
arr3[0] = scalar
|
282 |
+
arr4 = np.empty(1, dtype=scalar.dtype)
|
283 |
+
arr4[:] = [scalar]
|
284 |
+
# All of these methods should yield the same results
|
285 |
+
assert_array_equal(arr, arr1)
|
286 |
+
assert_array_equal(arr, arr2)
|
287 |
+
assert_array_equal(arr, arr3)
|
288 |
+
assert_array_equal(arr, arr4)
|
289 |
+
|
290 |
+
@pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
|
291 |
+
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
|
292 |
+
@pytest.mark.parametrize("cast_to", scalar_instances())
|
293 |
+
def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
|
294 |
+
"""
|
295 |
+
Test that in most cases:
|
296 |
+
* `np.array(scalar, dtype=dtype)`
|
297 |
+
* `np.empty((), dtype=dtype)[()] = scalar`
|
298 |
+
* `np.array(scalar).astype(dtype)`
|
299 |
+
should behave the same. The only exceptions are parametric dtypes
|
300 |
+
(mainly datetime/timedelta without unit) and void without fields.
|
301 |
+
"""
|
302 |
+
dtype = cast_to.dtype # use to parametrize only the target dtype
|
303 |
+
|
304 |
+
for scalar in scalar_instances(times=False):
|
305 |
+
scalar = scalar.values[0]
|
306 |
+
|
307 |
+
if dtype.type == np.void:
|
308 |
+
if scalar.dtype.fields is not None and dtype.fields is None:
|
309 |
+
# Here, coercion to "V6" works, but the cast fails.
|
310 |
+
# Since the types are identical, SETITEM takes care of
|
311 |
+
# this, but has different rules than the cast.
|
312 |
+
with pytest.raises(TypeError):
|
313 |
+
np.array(scalar).astype(dtype)
|
314 |
+
np.array(scalar, dtype=dtype)
|
315 |
+
np.array([scalar], dtype=dtype)
|
316 |
+
continue
|
317 |
+
|
318 |
+
# The main test, we first try to use casting and if it succeeds
|
319 |
+
# continue below testing that things are the same, otherwise
|
320 |
+
# test that the alternative paths at least also fail.
|
321 |
+
try:
|
322 |
+
cast = np.array(scalar).astype(dtype)
|
323 |
+
except (TypeError, ValueError, RuntimeError):
|
324 |
+
# coercion should also raise (error type may change)
|
325 |
+
with pytest.raises(Exception):
|
326 |
+
np.array(scalar, dtype=dtype)
|
327 |
+
|
328 |
+
if (isinstance(scalar, rational) and
|
329 |
+
np.issubdtype(dtype, np.signedinteger)):
|
330 |
+
return
|
331 |
+
|
332 |
+
with pytest.raises(Exception):
|
333 |
+
np.array([scalar], dtype=dtype)
|
334 |
+
# assignment should also raise
|
335 |
+
res = np.zeros((), dtype=dtype)
|
336 |
+
with pytest.raises(Exception):
|
337 |
+
res[()] = scalar
|
338 |
+
|
339 |
+
return
|
340 |
+
|
341 |
+
# Non error path:
|
342 |
+
arr = np.array(scalar, dtype=dtype)
|
343 |
+
assert_array_equal(arr, cast)
|
344 |
+
# assignment behaves the same
|
345 |
+
ass = np.zeros((), dtype=dtype)
|
346 |
+
ass[()] = scalar
|
347 |
+
assert_array_equal(ass, cast)
|
348 |
+
|
349 |
+
@pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
|
350 |
+
def test_pyscalar_subclasses(self, pyscalar):
|
351 |
+
"""NumPy arrays are read/write which means that anything but invariant
|
352 |
+
behaviour is on thin ice. However, we currently are happy to discover
|
353 |
+
subclasses of Python float, int, complex the same as the base classes.
|
354 |
+
This should potentially be deprecated.
|
355 |
+
"""
|
356 |
+
class MyScalar(type(pyscalar)):
|
357 |
+
pass
|
358 |
+
|
359 |
+
res = np.array(MyScalar(pyscalar))
|
360 |
+
expected = np.array(pyscalar)
|
361 |
+
assert_array_equal(res, expected)
|
362 |
+
|
363 |
+
@pytest.mark.parametrize("dtype_char", np.typecodes["All"])
|
364 |
+
def test_default_dtype_instance(self, dtype_char):
|
365 |
+
if dtype_char in "SU":
|
366 |
+
dtype = np.dtype(dtype_char + "1")
|
367 |
+
elif dtype_char == "V":
|
368 |
+
# Legacy behaviour was to use V8. The reason was float64 being the
|
369 |
+
# default dtype and that having 8 bytes.
|
370 |
+
dtype = np.dtype("V8")
|
371 |
+
else:
|
372 |
+
dtype = np.dtype(dtype_char)
|
373 |
+
|
374 |
+
discovered_dtype, _ = _discover_array_parameters([], type(dtype))
|
375 |
+
|
376 |
+
assert discovered_dtype == dtype
|
377 |
+
assert discovered_dtype.itemsize == dtype.itemsize
|
378 |
+
|
379 |
+
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
|
380 |
+
@pytest.mark.parametrize(["scalar", "error"],
|
381 |
+
[(np.float64(np.nan), ValueError),
|
382 |
+
(np.array(-1).astype(np.ulonglong)[()], OverflowError)])
|
383 |
+
def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
|
384 |
+
"""
|
385 |
+
Signed integers are currently different in that they do not cast other
|
386 |
+
NumPy scalar, but instead use scalar.__int__(). The hardcoded
|
387 |
+
exception to this rule is `np.array(scalar, dtype=integer)`.
|
388 |
+
"""
|
389 |
+
dtype = np.dtype(dtype)
|
390 |
+
|
391 |
+
# This is a special case using casting logic. It warns for the NaN
|
392 |
+
# but allows the cast (giving undefined behaviour).
|
393 |
+
with np.errstate(invalid="ignore"):
|
394 |
+
coerced = np.array(scalar, dtype=dtype)
|
395 |
+
cast = np.array(scalar).astype(dtype)
|
396 |
+
assert_array_equal(coerced, cast)
|
397 |
+
|
398 |
+
# However these fail:
|
399 |
+
with pytest.raises(error):
|
400 |
+
np.array([scalar], dtype=dtype)
|
401 |
+
with pytest.raises(error):
|
402 |
+
cast[()] = scalar
|
403 |
+
|
404 |
+
|
405 |
+
class TestTimeScalars:
|
406 |
+
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
|
407 |
+
@pytest.mark.parametrize("scalar",
|
408 |
+
[param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
|
409 |
+
param(np.timedelta64(123, "s"), id="timedelta64[s]"),
|
410 |
+
param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
|
411 |
+
param(np.datetime64(1, "D"), id="datetime64[D]")],)
|
412 |
+
def test_coercion_basic(self, dtype, scalar):
|
413 |
+
# Note the `[scalar]` is there because np.array(scalar) uses stricter
|
414 |
+
# `scalar.__int__()` rules for backward compatibility right now.
|
415 |
+
arr = np.array(scalar, dtype=dtype)
|
416 |
+
cast = np.array(scalar).astype(dtype)
|
417 |
+
assert_array_equal(arr, cast)
|
418 |
+
|
419 |
+
ass = np.ones((), dtype=dtype)
|
420 |
+
if issubclass(dtype, np.integer):
|
421 |
+
with pytest.raises(TypeError):
|
422 |
+
# raises, as would np.array([scalar], dtype=dtype), this is
|
423 |
+
# conversion from times, but behaviour of integers.
|
424 |
+
ass[()] = scalar
|
425 |
+
else:
|
426 |
+
ass[()] = scalar
|
427 |
+
assert_array_equal(ass, cast)
|
428 |
+
|
429 |
+
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
|
430 |
+
@pytest.mark.parametrize("scalar",
|
431 |
+
[param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
|
432 |
+
param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
|
433 |
+
def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
|
434 |
+
# Only "ns" and "generic" timedeltas can be converted to numbers
|
435 |
+
# so these are slightly special.
|
436 |
+
arr = np.array(scalar, dtype=dtype)
|
437 |
+
cast = np.array(scalar).astype(dtype)
|
438 |
+
ass = np.ones((), dtype=dtype)
|
439 |
+
ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype)
|
440 |
+
|
441 |
+
assert_array_equal(arr, cast)
|
442 |
+
assert_array_equal(cast, cast)
|
443 |
+
|
444 |
+
@pytest.mark.parametrize("dtype", ["S6", "U6"])
|
445 |
+
@pytest.mark.parametrize(["val", "unit"],
|
446 |
+
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
|
447 |
+
def test_coercion_assignment_datetime(self, val, unit, dtype):
|
448 |
+
# String from datetime64 assignment is currently special cased to
|
449 |
+
# never use casting. This is because casting will error in this
|
450 |
+
# case, and traditionally in most cases the behaviour is maintained
|
451 |
+
# like this. (`np.array(scalar, dtype="U6")` would have failed before)
|
452 |
+
# TODO: This discrepancy _should_ be resolved, either by relaxing the
|
453 |
+
# cast, or by deprecating the first part.
|
454 |
+
scalar = np.datetime64(val, unit)
|
455 |
+
dtype = np.dtype(dtype)
|
456 |
+
cut_string = dtype.type(str(scalar)[:6])
|
457 |
+
|
458 |
+
arr = np.array(scalar, dtype=dtype)
|
459 |
+
assert arr[()] == cut_string
|
460 |
+
ass = np.ones((), dtype=dtype)
|
461 |
+
ass[()] = scalar
|
462 |
+
assert ass[()] == cut_string
|
463 |
+
|
464 |
+
with pytest.raises(RuntimeError):
|
465 |
+
# However, unlike the above assignment using `str(scalar)[:6]`
|
466 |
+
# due to being handled by the string DType and not be casting
|
467 |
+
# the explicit cast fails:
|
468 |
+
np.array(scalar).astype(dtype)
|
469 |
+
|
470 |
+
|
471 |
+
@pytest.mark.parametrize(["val", "unit"],
|
472 |
+
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
|
473 |
+
def test_coercion_assignment_timedelta(self, val, unit):
|
474 |
+
scalar = np.timedelta64(val, unit)
|
475 |
+
|
476 |
+
# Unlike datetime64, timedelta allows the unsafe cast:
|
477 |
+
np.array(scalar, dtype="S6")
|
478 |
+
cast = np.array(scalar).astype("S6")
|
479 |
+
ass = np.ones((), dtype="S6")
|
480 |
+
ass[()] = scalar
|
481 |
+
expected = scalar.astype("S")[:6]
|
482 |
+
assert cast[()] == expected
|
483 |
+
assert ass[()] == expected
|
484 |
+
|
485 |
+
class TestNested:
|
486 |
+
def test_nested_simple(self):
|
487 |
+
initial = [1.2]
|
488 |
+
nested = initial
|
489 |
+
for i in range(np.MAXDIMS - 1):
|
490 |
+
nested = [nested]
|
491 |
+
|
492 |
+
arr = np.array(nested, dtype="float64")
|
493 |
+
assert arr.shape == (1,) * np.MAXDIMS
|
494 |
+
with pytest.raises(ValueError):
|
495 |
+
np.array([nested], dtype="float64")
|
496 |
+
|
497 |
+
with pytest.raises(ValueError, match=".*would exceed the maximum"):
|
498 |
+
np.array([nested]) # user must ask for `object` explicitly
|
499 |
+
|
500 |
+
arr = np.array([nested], dtype=object)
|
501 |
+
assert arr.dtype == np.dtype("O")
|
502 |
+
assert arr.shape == (1,) * np.MAXDIMS
|
503 |
+
assert arr.item() is initial
|
504 |
+
|
505 |
+
def test_pathological_self_containing(self):
|
506 |
+
# Test that this also works for two nested sequences
|
507 |
+
l = []
|
508 |
+
l.append(l)
|
509 |
+
arr = np.array([l, l, l], dtype=object)
|
510 |
+
assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
|
511 |
+
|
512 |
+
# Also check a ragged case:
|
513 |
+
arr = np.array([l, [None], l], dtype=object)
|
514 |
+
assert arr.shape == (3, 1)
|
515 |
+
|
516 |
+
@pytest.mark.parametrize("arraylike", arraylikes())
|
517 |
+
def test_nested_arraylikes(self, arraylike):
|
518 |
+
# We try storing an array like into an array, but the array-like
|
519 |
+
# will have too many dimensions. This means the shape discovery
|
520 |
+
# decides that the array-like must be treated as an object (a special
|
521 |
+
# case of ragged discovery). The result will be an array with one
|
522 |
+
# dimension less than the maximum dimensions, and the array being
|
523 |
+
# assigned to it (which does work for object or if `float(arraylike)`
|
524 |
+
# works).
|
525 |
+
initial = arraylike(np.ones((1, 1)))
|
526 |
+
|
527 |
+
nested = initial
|
528 |
+
for i in range(np.MAXDIMS - 1):
|
529 |
+
nested = [nested]
|
530 |
+
|
531 |
+
with pytest.raises(ValueError, match=".*would exceed the maximum"):
|
532 |
+
# It will refuse to assign the array into
|
533 |
+
np.array(nested, dtype="float64")
|
534 |
+
|
535 |
+
# If this is object, we end up assigning a (1, 1) array into (1,)
|
536 |
+
# (due to running out of dimensions), this is currently supported but
|
537 |
+
# a special case which is not ideal.
|
538 |
+
arr = np.array(nested, dtype=object)
|
539 |
+
assert arr.shape == (1,) * np.MAXDIMS
|
540 |
+
assert arr.item() == np.array(initial).item()
|
541 |
+
|
542 |
+
@pytest.mark.parametrize("arraylike", arraylikes())
|
543 |
+
def test_uneven_depth_ragged(self, arraylike):
|
544 |
+
arr = np.arange(4).reshape((2, 2))
|
545 |
+
arr = arraylike(arr)
|
546 |
+
|
547 |
+
# Array is ragged in the second dimension already:
|
548 |
+
out = np.array([arr, [arr]], dtype=object)
|
549 |
+
assert out.shape == (2,)
|
550 |
+
assert out[0] is arr
|
551 |
+
assert type(out[1]) is list
|
552 |
+
|
553 |
+
# Array is ragged in the third dimension:
|
554 |
+
with pytest.raises(ValueError):
|
555 |
+
# This is a broadcast error during assignment, because
|
556 |
+
# the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
|
557 |
+
np.array([arr, [arr, arr]], dtype=object)
|
558 |
+
|
559 |
+
def test_empty_sequence(self):
|
560 |
+
arr = np.array([[], [1], [[1]]], dtype=object)
|
561 |
+
assert arr.shape == (3,)
|
562 |
+
|
563 |
+
# The empty sequence stops further dimension discovery, so the
|
564 |
+
# result shape will be (0,) which leads to an error during:
|
565 |
+
with pytest.raises(ValueError):
|
566 |
+
np.array([[], np.empty((0, 1))], dtype=object)
|
567 |
+
|
568 |
+
def test_array_of_different_depths(self):
|
569 |
+
# When multiple arrays (or array-likes) are included in a
|
570 |
+
# sequences and have different depth, we currently discover
|
571 |
+
# as many dimensions as they share. (see also gh-17224)
|
572 |
+
arr = np.zeros((3, 2))
|
573 |
+
mismatch_first_dim = np.zeros((1, 2))
|
574 |
+
mismatch_second_dim = np.zeros((3, 3))
|
575 |
+
|
576 |
+
dtype, shape = _discover_array_parameters(
|
577 |
+
[arr, mismatch_second_dim], dtype=np.dtype("O"))
|
578 |
+
assert shape == (2, 3)
|
579 |
+
|
580 |
+
dtype, shape = _discover_array_parameters(
|
581 |
+
[arr, mismatch_first_dim], dtype=np.dtype("O"))
|
582 |
+
assert shape == (2,)
|
583 |
+
# The second case is currently supported because the arrays
|
584 |
+
# can be stored as objects:
|
585 |
+
res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
|
586 |
+
assert res[0] is arr
|
587 |
+
assert res[1] is mismatch_first_dim
|
588 |
+
|
589 |
+
|
590 |
+
class TestBadSequences:
|
591 |
+
# These are tests for bad objects passed into `np.array`, in general
|
592 |
+
# these have undefined behaviour. In the old code they partially worked
|
593 |
+
# when now they will fail. We could (and maybe should) create a copy
|
594 |
+
# of all sequences to be safe against bad-actors.
|
595 |
+
|
596 |
+
def test_growing_list(self):
|
597 |
+
# List to coerce, `mylist` will append to it during coercion
|
598 |
+
obj = []
|
599 |
+
class mylist(list):
|
600 |
+
def __len__(self):
|
601 |
+
obj.append([1, 2])
|
602 |
+
return super().__len__()
|
603 |
+
|
604 |
+
obj.append(mylist([1, 2]))
|
605 |
+
|
606 |
+
with pytest.raises(RuntimeError):
|
607 |
+
np.array(obj)
|
608 |
+
|
609 |
+
# Note: We do not test a shrinking list. These do very evil things
|
610 |
+
# and the only way to fix them would be to copy all sequences.
|
611 |
+
# (which may be a real option in the future).
|
612 |
+
|
613 |
+
def test_mutated_list(self):
|
614 |
+
# List to coerce, `mylist` will mutate the first element
|
615 |
+
obj = []
|
616 |
+
class mylist(list):
|
617 |
+
def __len__(self):
|
618 |
+
obj[0] = [2, 3] # replace with a different list.
|
619 |
+
return super().__len__()
|
620 |
+
|
621 |
+
obj.append([2, 3])
|
622 |
+
obj.append(mylist([1, 2]))
|
623 |
+
# Does not crash:
|
624 |
+
np.array(obj)
|
625 |
+
|
626 |
+
def test_replace_0d_array(self):
|
627 |
+
# List to coerce, `mylist` will mutate the first element
|
628 |
+
obj = []
|
629 |
+
class baditem:
|
630 |
+
def __len__(self):
|
631 |
+
obj[0][0] = 2 # replace with a different list.
|
632 |
+
raise ValueError("not actually a sequence!")
|
633 |
+
|
634 |
+
def __getitem__(self):
|
635 |
+
pass
|
636 |
+
|
637 |
+
# Runs into a corner case in the new code, the `array(2)` is cached
|
638 |
+
# so replacing it invalidates the cache.
|
639 |
+
obj.append([np.array(2), baditem()])
|
640 |
+
with pytest.raises(RuntimeError):
|
641 |
+
np.array(obj)
|
642 |
+
|
643 |
+
|
644 |
+
class TestArrayLikes:
|
645 |
+
@pytest.mark.parametrize("arraylike", arraylikes())
|
646 |
+
def test_0d_object_special_case(self, arraylike):
|
647 |
+
arr = np.array(0.)
|
648 |
+
obj = arraylike(arr)
|
649 |
+
# A single array-like is always converted:
|
650 |
+
res = np.array(obj, dtype=object)
|
651 |
+
assert_array_equal(arr, res)
|
652 |
+
|
653 |
+
# But a single 0-D nested array-like never:
|
654 |
+
res = np.array([obj], dtype=object)
|
655 |
+
assert res[0] is obj
|
656 |
+
|
657 |
+
@pytest.mark.parametrize("arraylike", arraylikes())
|
658 |
+
@pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)])
|
659 |
+
def test_object_assignment_special_case(self, arraylike, arr):
|
660 |
+
obj = arraylike(arr)
|
661 |
+
empty = np.arange(1, dtype=object)
|
662 |
+
empty[:] = [obj]
|
663 |
+
assert empty[0] is obj
|
664 |
+
|
665 |
+
def test_0d_generic_special_case(self):
|
666 |
+
class ArraySubclass(np.ndarray):
|
667 |
+
def __float__(self):
|
668 |
+
raise TypeError("e.g. quantities raise on this")
|
669 |
+
|
670 |
+
arr = np.array(0.)
|
671 |
+
obj = arr.view(ArraySubclass)
|
672 |
+
res = np.array(obj)
|
673 |
+
# The subclass is simply cast:
|
674 |
+
assert_array_equal(arr, res)
|
675 |
+
|
676 |
+
# If the 0-D array-like is included, __float__ is currently
|
677 |
+
# guaranteed to be used. We may want to change that, quantities
|
678 |
+
# and masked arrays half make use of this.
|
679 |
+
with pytest.raises(TypeError):
|
680 |
+
np.array([obj])
|
681 |
+
|
682 |
+
# The same holds for memoryview:
|
683 |
+
obj = memoryview(arr)
|
684 |
+
res = np.array(obj)
|
685 |
+
assert_array_equal(arr, res)
|
686 |
+
with pytest.raises(ValueError):
|
687 |
+
# The error type does not matter much here.
|
688 |
+
np.array([obj])
|
689 |
+
|
690 |
+
def test_arraylike_classes(self):
|
691 |
+
# The classes of array-likes should generally be acceptable to be
|
692 |
+
# stored inside a numpy (object) array. This tests all of the
|
693 |
+
# special attributes (since all are checked during coercion).
|
694 |
+
arr = np.array(np.int64)
|
695 |
+
assert arr[()] is np.int64
|
696 |
+
arr = np.array([np.int64])
|
697 |
+
assert arr[0] is np.int64
|
698 |
+
|
699 |
+
# This also works for properties/unbound methods:
|
700 |
+
class ArrayLike:
|
701 |
+
@property
|
702 |
+
def __array_interface__(self):
|
703 |
+
pass
|
704 |
+
|
705 |
+
@property
|
706 |
+
def __array_struct__(self):
|
707 |
+
pass
|
708 |
+
|
709 |
+
def __array__(self):
|
710 |
+
pass
|
711 |
+
|
712 |
+
arr = np.array(ArrayLike)
|
713 |
+
assert arr[()] is ArrayLike
|
714 |
+
arr = np.array([ArrayLike])
|
715 |
+
assert arr[0] is ArrayLike
|
716 |
+
|
717 |
+
@pytest.mark.skipif(
|
718 |
+
np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
|
719 |
+
def test_too_large_array_error_paths(self):
|
720 |
+
"""Test the error paths, including for memory leaks"""
|
721 |
+
arr = np.array(0, dtype="uint8")
|
722 |
+
# Guarantees that a contiguous copy won't work:
|
723 |
+
arr = np.broadcast_to(arr, 2**62)
|
724 |
+
|
725 |
+
for i in range(5):
|
726 |
+
# repeat, to ensure caching cannot have an effect:
|
727 |
+
with pytest.raises(MemoryError):
|
728 |
+
np.array(arr)
|
729 |
+
with pytest.raises(MemoryError):
|
730 |
+
np.array([arr])
|
731 |
+
|
732 |
+
@pytest.mark.parametrize("attribute",
|
733 |
+
["__array_interface__", "__array__", "__array_struct__"])
|
734 |
+
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
|
735 |
+
def test_bad_array_like_attributes(self, attribute, error):
|
736 |
+
# RecursionError and MemoryError are considered fatal. All errors
|
737 |
+
# (except AttributeError) should probably be raised in the future,
|
738 |
+
# but shapely made use of it, so it will require a deprecation.
|
739 |
+
|
740 |
+
class BadInterface:
|
741 |
+
def __getattr__(self, attr):
|
742 |
+
if attr == attribute:
|
743 |
+
raise error
|
744 |
+
super().__getattr__(attr)
|
745 |
+
|
746 |
+
with pytest.raises(error):
|
747 |
+
np.array(BadInterface())
|
748 |
+
|
749 |
+
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
|
750 |
+
def test_bad_array_like_bad_length(self, error):
|
751 |
+
# RecursionError and MemoryError are considered "critical" in
|
752 |
+
# sequences. We could expand this more generally though. (NumPy 1.20)
|
753 |
+
class BadSequence:
|
754 |
+
def __len__(self):
|
755 |
+
raise error
|
756 |
+
def __getitem__(self):
|
757 |
+
# must have getitem to be a Sequence
|
758 |
+
return 1
|
759 |
+
|
760 |
+
with pytest.raises(error):
|
761 |
+
np.array(BadSequence())
|
762 |
+
|
763 |
+
|
764 |
+
class TestAsArray:
|
765 |
+
"""Test expected behaviors of ``asarray``."""
|
766 |
+
|
767 |
+
def test_dtype_identity(self):
|
768 |
+
"""Confirm the intended behavior for *dtype* kwarg.
|
769 |
+
|
770 |
+
The result of ``asarray()`` should have the dtype provided through the
|
771 |
+
keyword argument, when used. This forces unique array handles to be
|
772 |
+
produced for unique np.dtype objects, but (for equivalent dtypes), the
|
773 |
+
underlying data (the base object) is shared with the original array
|
774 |
+
object.
|
775 |
+
|
776 |
+
Ref https://github.com/numpy/numpy/issues/1468
|
777 |
+
"""
|
778 |
+
int_array = np.array([1, 2, 3], dtype='i')
|
779 |
+
assert np.asarray(int_array) is int_array
|
780 |
+
|
781 |
+
# The character code resolves to the singleton dtype object provided
|
782 |
+
# by the numpy package.
|
783 |
+
assert np.asarray(int_array, dtype='i') is int_array
|
784 |
+
|
785 |
+
# Derive a dtype from n.dtype('i'), but add a metadata object to force
|
786 |
+
# the dtype to be distinct.
|
787 |
+
unequal_type = np.dtype('i', metadata={'spam': True})
|
788 |
+
annotated_int_array = np.asarray(int_array, dtype=unequal_type)
|
789 |
+
assert annotated_int_array is not int_array
|
790 |
+
assert annotated_int_array.base is int_array
|
791 |
+
# Create an equivalent descriptor with a new and distinct dtype
|
792 |
+
# instance.
|
793 |
+
equivalent_requirement = np.dtype('i', metadata={'spam': True})
|
794 |
+
annotated_int_array_alt = np.asarray(annotated_int_array,
|
795 |
+
dtype=equivalent_requirement)
|
796 |
+
assert unequal_type == equivalent_requirement
|
797 |
+
assert unequal_type is not equivalent_requirement
|
798 |
+
assert annotated_int_array_alt is not annotated_int_array
|
799 |
+
assert annotated_int_array_alt.dtype is equivalent_requirement
|
800 |
+
|
801 |
+
# Check the same logic for a pair of C types whose equivalence may vary
|
802 |
+
# between computing environments.
|
803 |
+
# Find an equivalent pair.
|
804 |
+
integer_type_codes = ('i', 'l', 'q')
|
805 |
+
integer_dtypes = [np.dtype(code) for code in integer_type_codes]
|
806 |
+
typeA = None
|
807 |
+
typeB = None
|
808 |
+
for typeA, typeB in permutations(integer_dtypes, r=2):
|
809 |
+
if typeA == typeB:
|
810 |
+
assert typeA is not typeB
|
811 |
+
break
|
812 |
+
assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
|
813 |
+
|
814 |
+
# These ``asarray()`` calls may produce a new view or a copy,
|
815 |
+
# but never the same object.
|
816 |
+
long_int_array = np.asarray(int_array, dtype='l')
|
817 |
+
long_long_int_array = np.asarray(int_array, dtype='q')
|
818 |
+
assert long_int_array is not int_array
|
819 |
+
assert long_long_int_array is not int_array
|
820 |
+
assert np.asarray(long_int_array, dtype='q') is not long_int_array
|
821 |
+
array_a = np.asarray(int_array, dtype=typeA)
|
822 |
+
assert typeA == typeB
|
823 |
+
assert typeA is not typeB
|
824 |
+
assert array_a.dtype is typeA
|
825 |
+
assert array_a is not np.asarray(array_a, dtype=typeB)
|
826 |
+
assert np.asarray(array_a, dtype=typeB).dtype is typeB
|
827 |
+
assert array_a is np.asarray(array_a, dtype=typeB).base
|
828 |
+
|
829 |
+
|
830 |
+
class TestSpecialAttributeLookupFailure:
|
831 |
+
# An exception was raised while fetching the attribute
|
832 |
+
|
833 |
+
class WeirdArrayLike:
|
834 |
+
@property
|
835 |
+
def __array__(self):
|
836 |
+
raise RuntimeError("oops!")
|
837 |
+
|
838 |
+
class WeirdArrayInterface:
|
839 |
+
@property
|
840 |
+
def __array_interface__(self):
|
841 |
+
raise RuntimeError("oops!")
|
842 |
+
|
843 |
+
def test_deprecated(self):
|
844 |
+
with pytest.raises(RuntimeError):
|
845 |
+
np.array(self.WeirdArrayLike())
|
846 |
+
with pytest.raises(RuntimeError):
|
847 |
+
np.array(self.WeirdArrayInterface())
|
848 |
+
|
849 |
+
|
850 |
+
def test_subarray_from_array_construction():
|
851 |
+
# Arrays are more complex, since they "broadcast" on success:
|
852 |
+
arr = np.array([1, 2])
|
853 |
+
|
854 |
+
res = arr.astype("(2)i,")
|
855 |
+
assert_array_equal(res, [[1, 1], [2, 2]])
|
856 |
+
|
857 |
+
res = np.array(arr, dtype="(2)i,")
|
858 |
+
|
859 |
+
assert_array_equal(res, [[1, 1], [2, 2]])
|
860 |
+
|
861 |
+
res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
|
862 |
+
assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]])
|
863 |
+
|
864 |
+
# Also try a multi-dimensional example:
|
865 |
+
arr = np.arange(5 * 2).reshape(5, 2)
|
866 |
+
expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2))
|
867 |
+
|
868 |
+
res = arr.astype("(2,2)f")
|
869 |
+
assert_array_equal(res, expected)
|
870 |
+
|
871 |
+
res = np.array(arr, dtype="(2,2)f")
|
872 |
+
assert_array_equal(res, expected)
|
873 |
+
|
874 |
+
|
875 |
+
def test_empty_string():
|
876 |
+
# Empty strings are unfortunately often converted to S1 and we need to
|
877 |
+
# make sure we are filling the S1 and not the (possibly) detected S0
|
878 |
+
# result. This should likely just return S0 and if not maybe the decision
|
879 |
+
# to return S1 should be moved.
|
880 |
+
res = np.array([""] * 10, dtype="S")
|
881 |
+
assert_array_equal(res, np.array("\0", "S1"))
|
882 |
+
assert res.dtype == "S1"
|
883 |
+
|
884 |
+
arr = np.array([""] * 10, dtype=object)
|
885 |
+
|
886 |
+
res = arr.astype("S")
|
887 |
+
assert_array_equal(res, b"")
|
888 |
+
assert res.dtype == "S1"
|
889 |
+
|
890 |
+
res = np.array(arr, dtype="S")
|
891 |
+
assert_array_equal(res, b"")
|
892 |
+
# TODO: This is arguably weird/wrong, but seems old:
|
893 |
+
assert res.dtype == f"S{np.dtype('O').itemsize}"
|
894 |
+
|
895 |
+
res = np.array([[""] * 10, arr], dtype="S")
|
896 |
+
assert_array_equal(res, b"")
|
897 |
+
assert res.shape == (2, 10)
|
898 |
+
assert res.dtype == "S1"
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_array_interface.py
ADDED
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import pytest
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import extbuild
|
5 |
+
|
6 |
+
|
7 |
+
@pytest.fixture
|
8 |
+
def get_module(tmp_path):
|
9 |
+
""" Some codes to generate data and manage temporary buffers use when
|
10 |
+
sharing with numpy via the array interface protocol.
|
11 |
+
"""
|
12 |
+
|
13 |
+
if not sys.platform.startswith('linux'):
|
14 |
+
pytest.skip('link fails on cygwin')
|
15 |
+
|
16 |
+
prologue = '''
|
17 |
+
#include <Python.h>
|
18 |
+
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
19 |
+
#include <numpy/arrayobject.h>
|
20 |
+
#include <stdio.h>
|
21 |
+
#include <math.h>
|
22 |
+
|
23 |
+
NPY_NO_EXPORT
|
24 |
+
void delete_array_struct(PyObject *cap) {
|
25 |
+
|
26 |
+
/* get the array interface structure */
|
27 |
+
PyArrayInterface *inter = (PyArrayInterface*)
|
28 |
+
PyCapsule_GetPointer(cap, NULL);
|
29 |
+
|
30 |
+
/* get the buffer by which data was shared */
|
31 |
+
double *ptr = (double*)PyCapsule_GetContext(cap);
|
32 |
+
|
33 |
+
/* for the purposes of the regression test set the elements
|
34 |
+
to nan */
|
35 |
+
for (npy_intp i = 0; i < inter->shape[0]; ++i)
|
36 |
+
ptr[i] = nan("");
|
37 |
+
|
38 |
+
/* free the shared buffer */
|
39 |
+
free(ptr);
|
40 |
+
|
41 |
+
/* free the array interface structure */
|
42 |
+
free(inter->shape);
|
43 |
+
free(inter);
|
44 |
+
|
45 |
+
fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
|
46 |
+
" ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
|
47 |
+
}
|
48 |
+
'''
|
49 |
+
|
50 |
+
functions = [
|
51 |
+
("new_array_struct", "METH_VARARGS", """
|
52 |
+
|
53 |
+
long long n_elem = 0;
|
54 |
+
double value = 0.0;
|
55 |
+
|
56 |
+
if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
|
57 |
+
Py_RETURN_NONE;
|
58 |
+
}
|
59 |
+
|
60 |
+
/* allocate and initialize the data to share with numpy */
|
61 |
+
long long n_bytes = n_elem*sizeof(double);
|
62 |
+
double *data = (double*)malloc(n_bytes);
|
63 |
+
|
64 |
+
if (!data) {
|
65 |
+
PyErr_Format(PyExc_MemoryError,
|
66 |
+
"Failed to malloc %lld bytes", n_bytes);
|
67 |
+
|
68 |
+
Py_RETURN_NONE;
|
69 |
+
}
|
70 |
+
|
71 |
+
for (long long i = 0; i < n_elem; ++i) {
|
72 |
+
data[i] = value;
|
73 |
+
}
|
74 |
+
|
75 |
+
/* calculate the shape and stride */
|
76 |
+
int nd = 1;
|
77 |
+
|
78 |
+
npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
|
79 |
+
npy_intp *shape = ss;
|
80 |
+
npy_intp *stride = ss + nd;
|
81 |
+
|
82 |
+
shape[0] = n_elem;
|
83 |
+
stride[0] = sizeof(double);
|
84 |
+
|
85 |
+
/* construct the array interface */
|
86 |
+
PyArrayInterface *inter = (PyArrayInterface*)
|
87 |
+
malloc(sizeof(PyArrayInterface));
|
88 |
+
|
89 |
+
memset(inter, 0, sizeof(PyArrayInterface));
|
90 |
+
|
91 |
+
inter->two = 2;
|
92 |
+
inter->nd = nd;
|
93 |
+
inter->typekind = 'f';
|
94 |
+
inter->itemsize = sizeof(double);
|
95 |
+
inter->shape = shape;
|
96 |
+
inter->strides = stride;
|
97 |
+
inter->data = data;
|
98 |
+
inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
|
99 |
+
NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
|
100 |
+
|
101 |
+
/* package into a capsule */
|
102 |
+
PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
|
103 |
+
|
104 |
+
/* save the pointer to the data */
|
105 |
+
PyCapsule_SetContext(cap, data);
|
106 |
+
|
107 |
+
fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
|
108 |
+
" ptr = %ld\\n", (long)cap, (long)inter, (long)data);
|
109 |
+
|
110 |
+
return cap;
|
111 |
+
""")
|
112 |
+
]
|
113 |
+
|
114 |
+
more_init = "import_array();"
|
115 |
+
|
116 |
+
try:
|
117 |
+
import array_interface_testing
|
118 |
+
return array_interface_testing
|
119 |
+
except ImportError:
|
120 |
+
pass
|
121 |
+
|
122 |
+
# if it does not exist, build and load it
|
123 |
+
return extbuild.build_and_import_extension('array_interface_testing',
|
124 |
+
functions,
|
125 |
+
prologue=prologue,
|
126 |
+
include_dirs=[np.get_include()],
|
127 |
+
build_dir=tmp_path,
|
128 |
+
more_init=more_init)
|
129 |
+
|
130 |
+
|
131 |
+
# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on
|
132 |
+
# Python 3.12 and up.
|
133 |
+
@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
|
134 |
+
@pytest.mark.slow
|
135 |
+
def test_cstruct(get_module):
|
136 |
+
|
137 |
+
class data_source:
|
138 |
+
"""
|
139 |
+
This class is for testing the timing of the PyCapsule destructor
|
140 |
+
invoked when numpy release its reference to the shared data as part of
|
141 |
+
the numpy array interface protocol. If the PyCapsule destructor is
|
142 |
+
called early the shared data is freed and invalid memory accesses will
|
143 |
+
occur.
|
144 |
+
"""
|
145 |
+
|
146 |
+
def __init__(self, size, value):
|
147 |
+
self.size = size
|
148 |
+
self.value = value
|
149 |
+
|
150 |
+
@property
|
151 |
+
def __array_struct__(self):
|
152 |
+
return get_module.new_array_struct(self.size, self.value)
|
153 |
+
|
154 |
+
# write to the same stream as the C code
|
155 |
+
stderr = sys.__stderr__
|
156 |
+
|
157 |
+
# used to validate the shared data.
|
158 |
+
expected_value = -3.1415
|
159 |
+
multiplier = -10000.0
|
160 |
+
|
161 |
+
# create some data to share with numpy via the array interface
|
162 |
+
# assign the data an expected value.
|
163 |
+
stderr.write(' ---- create an object to share data ---- \n')
|
164 |
+
buf = data_source(256, expected_value)
|
165 |
+
stderr.write(' ---- OK!\n\n')
|
166 |
+
|
167 |
+
# share the data
|
168 |
+
stderr.write(' ---- share data via the array interface protocol ---- \n')
|
169 |
+
arr = np.array(buf, copy=False)
|
170 |
+
stderr.write('arr.__array_interface___ = %s\n' % (
|
171 |
+
str(arr.__array_interface__)))
|
172 |
+
stderr.write('arr.base = %s\n' % (str(arr.base)))
|
173 |
+
stderr.write(' ---- OK!\n\n')
|
174 |
+
|
175 |
+
# release the source of the shared data. this will not release the data
|
176 |
+
# that was shared with numpy, that is done in the PyCapsule destructor.
|
177 |
+
stderr.write(' ---- destroy the object that shared data ---- \n')
|
178 |
+
buf = None
|
179 |
+
stderr.write(' ---- OK!\n\n')
|
180 |
+
|
181 |
+
# check that we got the expected data. If the PyCapsule destructor we
|
182 |
+
# defined was prematurely called then this test will fail because our
|
183 |
+
# destructor sets the elements of the array to NaN before free'ing the
|
184 |
+
# buffer. Reading the values here may also cause a SEGV
|
185 |
+
assert np.allclose(arr, expected_value)
|
186 |
+
|
187 |
+
# read the data. If the PyCapsule destructor we defined was prematurely
|
188 |
+
# called then reading the values here may cause a SEGV and will be reported
|
189 |
+
# as invalid reads by valgrind
|
190 |
+
stderr.write(' ---- read shared data ---- \n')
|
191 |
+
stderr.write('arr = %s\n' % (str(arr)))
|
192 |
+
stderr.write(' ---- OK!\n\n')
|
193 |
+
|
194 |
+
# write to the shared buffer. If the shared data was prematurely deleted
|
195 |
+
# this will may cause a SEGV and valgrind will report invalid writes
|
196 |
+
stderr.write(' ---- modify shared data ---- \n')
|
197 |
+
arr *= multiplier
|
198 |
+
expected_value *= multiplier
|
199 |
+
stderr.write('arr.__array_interface___ = %s\n' % (
|
200 |
+
str(arr.__array_interface__)))
|
201 |
+
stderr.write('arr.base = %s\n' % (str(arr.base)))
|
202 |
+
stderr.write(' ---- OK!\n\n')
|
203 |
+
|
204 |
+
# read the data. If the shared data was prematurely deleted this
|
205 |
+
# will may cause a SEGV and valgrind will report invalid reads
|
206 |
+
stderr.write(' ---- read modified shared data ---- \n')
|
207 |
+
stderr.write('arr = %s\n' % (str(arr)))
|
208 |
+
stderr.write(' ---- OK!\n\n')
|
209 |
+
|
210 |
+
# check that we got the expected data. If the PyCapsule destructor we
|
211 |
+
# defined was prematurely called then this test will fail because our
|
212 |
+
# destructor sets the elements of the array to NaN before free'ing the
|
213 |
+
# buffer. Reading the values here may also cause a SEGV
|
214 |
+
assert np.allclose(arr, expected_value)
|
215 |
+
|
216 |
+
# free the shared data, the PyCapsule destructor should run here
|
217 |
+
stderr.write(' ---- free shared data ---- \n')
|
218 |
+
arr = None
|
219 |
+
stderr.write(' ---- OK!\n\n')
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_arraymethod.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file tests the generic aspects of ArrayMethod. At the time of writing
|
3 |
+
this is private API, but when added, public API may be added here.
|
4 |
+
"""
|
5 |
+
|
6 |
+
from __future__ import annotations
|
7 |
+
|
8 |
+
import sys
|
9 |
+
import types
|
10 |
+
from typing import Any
|
11 |
+
|
12 |
+
import pytest
|
13 |
+
|
14 |
+
import numpy as np
|
15 |
+
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
|
16 |
+
|
17 |
+
|
18 |
+
class TestResolveDescriptors:
|
19 |
+
# Test mainly error paths of the resolve_descriptors function,
|
20 |
+
# note that the `casting_unittests` tests exercise this non-error paths.
|
21 |
+
|
22 |
+
# Casting implementations are the main/only current user:
|
23 |
+
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
|
24 |
+
|
25 |
+
@pytest.mark.parametrize("args", [
|
26 |
+
(True,), # Not a tuple.
|
27 |
+
((None,)), # Too few elements
|
28 |
+
((None, None, None),), # Too many
|
29 |
+
((None, None),), # Input dtype is None, which is invalid.
|
30 |
+
((np.dtype("d"), True),), # Output dtype is not a dtype
|
31 |
+
((np.dtype("f"), None),), # Input dtype does not match method
|
32 |
+
])
|
33 |
+
def test_invalid_arguments(self, args):
|
34 |
+
with pytest.raises(TypeError):
|
35 |
+
self.method._resolve_descriptors(*args)
|
36 |
+
|
37 |
+
|
38 |
+
class TestSimpleStridedCall:
|
39 |
+
# Test mainly error paths of the resolve_descriptors function,
|
40 |
+
# note that the `casting_unittests` tests exercise this non-error paths.
|
41 |
+
|
42 |
+
# Casting implementations are the main/only current user:
|
43 |
+
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
|
44 |
+
|
45 |
+
@pytest.mark.parametrize(["args", "error"], [
|
46 |
+
((True,), TypeError), # Not a tuple
|
47 |
+
(((None,),), TypeError), # Too few elements
|
48 |
+
((None, None), TypeError), # Inputs are not arrays.
|
49 |
+
(((None, None, None),), TypeError), # Too many
|
50 |
+
(((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
|
51 |
+
(((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
|
52 |
+
TypeError), # Does not support byte-swapping
|
53 |
+
(((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
|
54 |
+
ValueError), # not 1-D
|
55 |
+
(((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
|
56 |
+
ValueError), # different length
|
57 |
+
(((np.frombuffer(b"\0x00"*3*2, dtype="d"),
|
58 |
+
np.frombuffer(b"\0x00"*3, dtype="f")),),
|
59 |
+
ValueError), # output not writeable
|
60 |
+
])
|
61 |
+
def test_invalid_arguments(self, args, error):
|
62 |
+
# This is private API, which may be modified freely
|
63 |
+
with pytest.raises(error):
|
64 |
+
self.method._simple_strided_call(*args)
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.mark.parametrize(
|
68 |
+
"cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
|
69 |
+
)
|
70 |
+
class TestClassGetItem:
|
71 |
+
def test_class_getitem(self, cls: type[np.ndarray]) -> None:
|
72 |
+
"""Test `ndarray.__class_getitem__`."""
|
73 |
+
alias = cls[Any, Any]
|
74 |
+
assert isinstance(alias, types.GenericAlias)
|
75 |
+
assert alias.__origin__ is cls
|
76 |
+
|
77 |
+
@pytest.mark.parametrize("arg_len", range(4))
|
78 |
+
def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
|
79 |
+
arg_tup = (Any,) * arg_len
|
80 |
+
if arg_len in (1, 2):
|
81 |
+
assert cls[arg_tup]
|
82 |
+
else:
|
83 |
+
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
|
84 |
+
with pytest.raises(TypeError, match=match):
|
85 |
+
cls[arg_tup]
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_casting_unittests.py
ADDED
@@ -0,0 +1,819 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
The tests exercise the casting machinery in a more low-level manner.
|
3 |
+
The reason is mostly to test a new implementation of the casting machinery.
|
4 |
+
|
5 |
+
Unlike most tests in NumPy, these are closer to unit-tests rather
|
6 |
+
than integration tests.
|
7 |
+
"""
|
8 |
+
|
9 |
+
import pytest
|
10 |
+
import textwrap
|
11 |
+
import enum
|
12 |
+
import random
|
13 |
+
import ctypes
|
14 |
+
|
15 |
+
import numpy as np
|
16 |
+
from numpy.lib.stride_tricks import as_strided
|
17 |
+
|
18 |
+
from numpy.testing import assert_array_equal
|
19 |
+
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
|
20 |
+
|
21 |
+
|
22 |
+
# Simple skips object, parametric and long double (unsupported by struct)
|
23 |
+
simple_dtypes = "?bhilqBHILQefdFD"
|
24 |
+
if np.dtype("l").itemsize != np.dtype("q").itemsize:
|
25 |
+
# Remove l and L, the table was generated with 64bit linux in mind.
|
26 |
+
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
|
27 |
+
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
|
28 |
+
|
29 |
+
|
30 |
+
def simple_dtype_instances():
|
31 |
+
for dtype_class in simple_dtypes:
|
32 |
+
dt = dtype_class()
|
33 |
+
yield pytest.param(dt, id=str(dt))
|
34 |
+
if dt.byteorder != "|":
|
35 |
+
dt = dt.newbyteorder()
|
36 |
+
yield pytest.param(dt, id=str(dt))
|
37 |
+
|
38 |
+
|
39 |
+
def get_expected_stringlength(dtype):
|
40 |
+
"""Returns the string length when casting the basic dtypes to strings.
|
41 |
+
"""
|
42 |
+
if dtype == np.bool_:
|
43 |
+
return 5
|
44 |
+
if dtype.kind in "iu":
|
45 |
+
if dtype.itemsize == 1:
|
46 |
+
length = 3
|
47 |
+
elif dtype.itemsize == 2:
|
48 |
+
length = 5
|
49 |
+
elif dtype.itemsize == 4:
|
50 |
+
length = 10
|
51 |
+
elif dtype.itemsize == 8:
|
52 |
+
length = 20
|
53 |
+
else:
|
54 |
+
raise AssertionError(f"did not find expected length for {dtype}")
|
55 |
+
|
56 |
+
if dtype.kind == "i":
|
57 |
+
length += 1 # adds one character for the sign
|
58 |
+
|
59 |
+
return length
|
60 |
+
|
61 |
+
# Note: Can't do dtype comparison for longdouble on windows
|
62 |
+
if dtype.char == "g":
|
63 |
+
return 48
|
64 |
+
elif dtype.char == "G":
|
65 |
+
return 48 * 2
|
66 |
+
elif dtype.kind == "f":
|
67 |
+
return 32 # also for half apparently.
|
68 |
+
elif dtype.kind == "c":
|
69 |
+
return 32 * 2
|
70 |
+
|
71 |
+
raise AssertionError(f"did not find expected length for {dtype}")
|
72 |
+
|
73 |
+
|
74 |
+
class Casting(enum.IntEnum):
|
75 |
+
no = 0
|
76 |
+
equiv = 1
|
77 |
+
safe = 2
|
78 |
+
same_kind = 3
|
79 |
+
unsafe = 4
|
80 |
+
|
81 |
+
|
82 |
+
def _get_cancast_table():
|
83 |
+
table = textwrap.dedent("""
|
84 |
+
X ? b h i l q B H I L Q e f d g F D G S U V O M m
|
85 |
+
? # = = = = = = = = = = = = = = = = = = = = = . =
|
86 |
+
b . # = = = = . . . . . = = = = = = = = = = = . =
|
87 |
+
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
|
88 |
+
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
|
89 |
+
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
|
90 |
+
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
|
91 |
+
B . ~ = = = = # = = = = = = = = = = = = = = = . =
|
92 |
+
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
|
93 |
+
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
|
94 |
+
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
|
95 |
+
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
|
96 |
+
e . . . . . . . . . . . # = = = = = = = = = = . .
|
97 |
+
f . . . . . . . . . . . ~ # = = = = = = = = = . .
|
98 |
+
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
|
99 |
+
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
|
100 |
+
F . . . . . . . . . . . . . . . # = = = = = = . .
|
101 |
+
D . . . . . . . . . . . . . . . ~ # = = = = = . .
|
102 |
+
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
|
103 |
+
S . . . . . . . . . . . . . . . . . . # = = = . .
|
104 |
+
U . . . . . . . . . . . . . . . . . . . # = = . .
|
105 |
+
V . . . . . . . . . . . . . . . . . . . . # = . .
|
106 |
+
O . . . . . . . . . . . . . . . . . . . . = # . .
|
107 |
+
M . . . . . . . . . . . . . . . . . . . . = = # .
|
108 |
+
m . . . . . . . . . . . . . . . . . . . . = = . #
|
109 |
+
""").strip().split("\n")
|
110 |
+
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
|
111 |
+
|
112 |
+
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
|
113 |
+
"=": Casting.safe, "#": Casting.equiv,
|
114 |
+
" ": -1}
|
115 |
+
|
116 |
+
cancast = {}
|
117 |
+
for from_dt, row in zip(dtypes, table[1:]):
|
118 |
+
cancast[from_dt] = {}
|
119 |
+
for to_dt, c in zip(dtypes, row[2::2]):
|
120 |
+
cancast[from_dt][to_dt] = convert_cast[c]
|
121 |
+
|
122 |
+
return cancast
|
123 |
+
|
124 |
+
CAST_TABLE = _get_cancast_table()
|
125 |
+
|
126 |
+
|
127 |
+
class TestChanges:
|
128 |
+
"""
|
129 |
+
These test cases exercise some behaviour changes
|
130 |
+
"""
|
131 |
+
@pytest.mark.parametrize("string", ["S", "U"])
|
132 |
+
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
|
133 |
+
def test_float_to_string(self, floating, string):
|
134 |
+
assert np.can_cast(floating, string)
|
135 |
+
# 100 is long enough to hold any formatted floating
|
136 |
+
assert np.can_cast(floating, f"{string}100")
|
137 |
+
|
138 |
+
def test_to_void(self):
|
139 |
+
# But in general, we do consider these safe:
|
140 |
+
assert np.can_cast("d", "V")
|
141 |
+
assert np.can_cast("S20", "V")
|
142 |
+
|
143 |
+
# Do not consider it a safe cast if the void is too smaller:
|
144 |
+
assert not np.can_cast("d", "V1")
|
145 |
+
assert not np.can_cast("S20", "V1")
|
146 |
+
assert not np.can_cast("U1", "V1")
|
147 |
+
# Structured to unstructured is just like any other:
|
148 |
+
assert np.can_cast("d,i", "V", casting="same_kind")
|
149 |
+
# Unstructured void to unstructured is actually no cast at all:
|
150 |
+
assert np.can_cast("V3", "V", casting="no")
|
151 |
+
assert np.can_cast("V0", "V", casting="no")
|
152 |
+
|
153 |
+
|
154 |
+
class TestCasting:
|
155 |
+
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
|
156 |
+
|
157 |
+
def get_data(self, dtype1, dtype2):
|
158 |
+
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
|
159 |
+
length = self.size // dtype1.itemsize
|
160 |
+
else:
|
161 |
+
length = self.size // dtype2.itemsize
|
162 |
+
|
163 |
+
# Assume that the base array is well enough aligned for all inputs.
|
164 |
+
arr1 = np.empty(length, dtype=dtype1)
|
165 |
+
assert arr1.flags.c_contiguous
|
166 |
+
assert arr1.flags.aligned
|
167 |
+
|
168 |
+
values = [random.randrange(-128, 128) for _ in range(length)]
|
169 |
+
|
170 |
+
for i, value in enumerate(values):
|
171 |
+
# Use item assignment to ensure this is not using casting:
|
172 |
+
if value < 0 and dtype1.kind == "u":
|
173 |
+
# Manually rollover unsigned integers (-1 -> int.max)
|
174 |
+
value = value + np.iinfo(dtype1).max + 1
|
175 |
+
arr1[i] = value
|
176 |
+
|
177 |
+
if dtype2 is None:
|
178 |
+
if dtype1.char == "?":
|
179 |
+
values = [bool(v) for v in values]
|
180 |
+
return arr1, values
|
181 |
+
|
182 |
+
if dtype2.char == "?":
|
183 |
+
values = [bool(v) for v in values]
|
184 |
+
|
185 |
+
arr2 = np.empty(length, dtype=dtype2)
|
186 |
+
assert arr2.flags.c_contiguous
|
187 |
+
assert arr2.flags.aligned
|
188 |
+
|
189 |
+
for i, value in enumerate(values):
|
190 |
+
# Use item assignment to ensure this is not using casting:
|
191 |
+
if value < 0 and dtype2.kind == "u":
|
192 |
+
# Manually rollover unsigned integers (-1 -> int.max)
|
193 |
+
value = value + np.iinfo(dtype2).max + 1
|
194 |
+
arr2[i] = value
|
195 |
+
|
196 |
+
return arr1, arr2, values
|
197 |
+
|
198 |
+
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
|
199 |
+
"""
|
200 |
+
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
|
201 |
+
matching array for arr2 (although not a copy).
|
202 |
+
"""
|
203 |
+
if contig:
|
204 |
+
stride1 = arr1.dtype.itemsize
|
205 |
+
stride2 = arr2.dtype.itemsize
|
206 |
+
elif aligned:
|
207 |
+
stride1 = 2 * arr1.dtype.itemsize
|
208 |
+
stride2 = 2 * arr2.dtype.itemsize
|
209 |
+
else:
|
210 |
+
stride1 = arr1.dtype.itemsize + 1
|
211 |
+
stride2 = arr2.dtype.itemsize + 1
|
212 |
+
|
213 |
+
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
|
214 |
+
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
|
215 |
+
from_bytes = np.zeros(max_size1, dtype=np.uint8)
|
216 |
+
to_bytes = np.zeros(max_size2, dtype=np.uint8)
|
217 |
+
|
218 |
+
# Sanity check that the above is large enough:
|
219 |
+
assert stride1 * len(arr1) <= from_bytes.nbytes
|
220 |
+
assert stride2 * len(arr2) <= to_bytes.nbytes
|
221 |
+
|
222 |
+
if aligned:
|
223 |
+
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
|
224 |
+
arr1.shape, (stride1,))
|
225 |
+
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
|
226 |
+
arr2.shape, (stride2,))
|
227 |
+
else:
|
228 |
+
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
|
229 |
+
arr1.shape, (stride1,))
|
230 |
+
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
|
231 |
+
arr2.shape, (stride2,))
|
232 |
+
|
233 |
+
new1[...] = arr1
|
234 |
+
|
235 |
+
if not contig:
|
236 |
+
# Ensure we did not overwrite bytes that should not be written:
|
237 |
+
offset = arr1.dtype.itemsize if aligned else 0
|
238 |
+
buf = from_bytes[offset::stride1].tobytes()
|
239 |
+
assert buf.count(b"\0") == len(buf)
|
240 |
+
|
241 |
+
if contig:
|
242 |
+
assert new1.flags.c_contiguous
|
243 |
+
assert new2.flags.c_contiguous
|
244 |
+
else:
|
245 |
+
assert not new1.flags.c_contiguous
|
246 |
+
assert not new2.flags.c_contiguous
|
247 |
+
|
248 |
+
if aligned:
|
249 |
+
assert new1.flags.aligned
|
250 |
+
assert new2.flags.aligned
|
251 |
+
else:
|
252 |
+
assert not new1.flags.aligned or new1.dtype.alignment == 1
|
253 |
+
assert not new2.flags.aligned or new2.dtype.alignment == 1
|
254 |
+
|
255 |
+
return new1, new2
|
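get_data_variation builds its unaligned / non-contiguous views by striding over a raw uint8 buffer; a minimal standalone sketch of the same trick (buffer sizes here are illustrative, and the alignment assertion assumes float64 actually requires 8-byte alignment on the platform):

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    buf = np.zeros(8 * 20 + 1, dtype=np.uint8)
    # Skipping one byte breaks the natural alignment of float64.
    unaligned = buf[1:].view(np.float64)
    assert not unaligned.flags.aligned or unaligned.dtype.alignment == 1
    # A doubled stride gives an aligned but non-contiguous 10-element view.
    strided = as_strided(buf[:-1].view(np.float64), (10,), (16,))
    assert not strided.flags.c_contiguous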
256 |
+
|
257 |
+
@pytest.mark.parametrize("from_Dt", simple_dtypes)
|
258 |
+
def test_simple_cancast(self, from_Dt):
|
259 |
+
for to_Dt in simple_dtypes:
|
260 |
+
cast = get_castingimpl(from_Dt, to_Dt)
|
261 |
+
|
262 |
+
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
|
263 |
+
default = cast._resolve_descriptors((from_dt, None))[1][1]
|
264 |
+
assert default == to_Dt()
|
265 |
+
del default
|
266 |
+
|
267 |
+
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
|
268 |
+
casting, (from_res, to_res), view_off = (
|
269 |
+
cast._resolve_descriptors((from_dt, to_dt)))
|
270 |
+
assert type(from_res) == from_Dt
|
271 |
+
assert type(to_res) == to_Dt
|
272 |
+
if view_off is not None:
|
273 |
+
# If a view is acceptable, this is "no" casting
|
274 |
+
# and byte order must be matching.
|
275 |
+
assert casting == Casting.no
|
276 |
+
# The above table lists this as "equivalent"
|
277 |
+
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
|
278 |
+
# Note that to_res may not be the same as from_dt
|
279 |
+
assert from_res.isnative == to_res.isnative
|
280 |
+
else:
|
281 |
+
if from_Dt == to_Dt:
|
282 |
+
# Note that to_res may not be the same as from_dt
|
283 |
+
assert from_res.isnative != to_res.isnative
|
284 |
+
assert casting == CAST_TABLE[from_Dt][to_Dt]
|
285 |
+
|
286 |
+
if from_Dt is to_Dt:
|
287 |
+
assert from_dt is from_res
|
288 |
+
assert to_dt is to_res
|
289 |
+
|
290 |
+
|
291 |
+
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
|
292 |
+
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
|
293 |
+
def test_simple_direct_casts(self, from_dt):
|
294 |
+
"""
|
295 |
+
This test checks numeric direct casts for dtypes supported also by the
|
296 |
+
struct module (plus complex). It tries to test a wide range of
|
297 |
+
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
|
298 |
+
Longdouble and CLongdouble are tested, but only using double precision.
|
299 |
+
|
300 |
+
If this test creates issues, it should possibly just be simplified
|
301 |
+
or even removed (checking whether unaligned/non-contiguous casts give
|
302 |
+
the same results is useful, though).
|
303 |
+
"""
|
304 |
+
for to_dt in simple_dtype_instances():
|
305 |
+
to_dt = to_dt.values[0]
|
306 |
+
cast = get_castingimpl(type(from_dt), type(to_dt))
|
307 |
+
|
308 |
+
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
|
309 |
+
(from_dt, to_dt))
|
310 |
+
|
311 |
+
if from_res is not from_dt or to_res is not to_dt:
|
312 |
+
# Do not test this case, it is handled in multiple steps,
|
313 |
+
# each of which is tested individually.
|
314 |
+
return
|
315 |
+
|
316 |
+
safe = casting <= Casting.safe
|
317 |
+
del from_res, to_res, casting
|
318 |
+
|
319 |
+
arr1, arr2, values = self.get_data(from_dt, to_dt)
|
320 |
+
|
321 |
+
cast._simple_strided_call((arr1, arr2))
|
322 |
+
|
323 |
+
# Check via python list
|
324 |
+
assert arr2.tolist() == values
|
325 |
+
|
326 |
+
# Check that the same results are achieved for strided loops
|
327 |
+
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
|
328 |
+
cast._simple_strided_call((arr1_o, arr2_o))
|
329 |
+
|
330 |
+
assert_array_equal(arr2_o, arr2)
|
331 |
+
assert arr2_o.tobytes() == arr2.tobytes()
|
332 |
+
|
333 |
+
# Check if alignment makes a difference, but only if supported
|
334 |
+
# and only if the alignment can be wrong
|
335 |
+
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
|
336 |
+
not cast._supports_unaligned):
|
337 |
+
return
|
338 |
+
|
339 |
+
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
|
340 |
+
cast._simple_strided_call((arr1_o, arr2_o))
|
341 |
+
|
342 |
+
assert_array_equal(arr2_o, arr2)
|
343 |
+
assert arr2_o.tobytes() == arr2.tobytes()
|
344 |
+
|
345 |
+
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
|
346 |
+
cast._simple_strided_call((arr1_o, arr2_o))
|
347 |
+
|
348 |
+
assert_array_equal(arr2_o, arr2)
|
349 |
+
assert arr2_o.tobytes() == arr2.tobytes()
|
350 |
+
|
351 |
+
del arr1_o, arr2_o, cast
|
352 |
+
|
353 |
+
@pytest.mark.parametrize("from_Dt", simple_dtypes)
|
354 |
+
def test_numeric_to_times(self, from_Dt):
|
355 |
+
# We currently only implement contiguous loops, so only need to
|
356 |
+
# test those.
|
357 |
+
from_dt = from_Dt()
|
358 |
+
|
359 |
+
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
|
360 |
+
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
|
361 |
+
for time_dt in time_dtypes:
|
362 |
+
cast = get_castingimpl(type(from_dt), type(time_dt))
|
363 |
+
|
364 |
+
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
|
365 |
+
(from_dt, time_dt))
|
366 |
+
|
367 |
+
assert from_res is from_dt
|
368 |
+
assert to_res is time_dt
|
369 |
+
del from_res, to_res
|
370 |
+
|
371 |
+
assert casting & CAST_TABLE[from_Dt][type(time_dt)]
|
372 |
+
assert view_off is None
|
373 |
+
|
374 |
+
int64_dt = np.dtype(np.int64)
|
375 |
+
arr1, arr2, values = self.get_data(from_dt, int64_dt)
|
376 |
+
arr2 = arr2.view(time_dt)
|
377 |
+
arr2[...] = np.datetime64("NaT")
|
378 |
+
|
379 |
+
if time_dt == np.dtype("M8"):
|
380 |
+
# This is a bit of a strange path, and could probably be removed
|
381 |
+
arr1[-1] = 0 # ensure at least one value is not NaT
|
382 |
+
|
383 |
+
# The cast currently succeeds, but the values are invalid:
|
384 |
+
cast._simple_strided_call((arr1, arr2))
|
385 |
+
with pytest.raises(ValueError):
|
386 |
+
str(arr2[-1]) # e.g. conversion to string fails
|
387 |
+
return
|
388 |
+
|
389 |
+
cast._simple_strided_call((arr1, arr2))
|
390 |
+
|
391 |
+
assert [int(v) for v in arr2.tolist()] == values
|
392 |
+
|
393 |
+
# Check that the same results are achieved for strided loops
|
394 |
+
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
|
395 |
+
cast._simple_strided_call((arr1_o, arr2_o))
|
396 |
+
|
397 |
+
assert_array_equal(arr2_o, arr2)
|
398 |
+
assert arr2_o.tobytes() == arr2.tobytes()
|
399 |
+
|
400 |
+
@pytest.mark.parametrize(
|
401 |
+
["from_dt", "to_dt", "expected_casting", "expected_view_off",
|
402 |
+
"nom", "denom"],
|
403 |
+
[("M8[ns]", None, Casting.no, 0, 1, 1),
|
404 |
+
(str(np.dtype("M8[ns]").newbyteorder()), None,
|
405 |
+
Casting.equiv, None, 1, 1),
|
406 |
+
("M8", "M8[ms]", Casting.safe, 0, 1, 1),
|
407 |
+
# should be invalid cast:
|
408 |
+
("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
|
409 |
+
("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
|
410 |
+
("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
|
411 |
+
("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
|
412 |
+
("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
|
413 |
+
("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
|
414 |
+
# give full values based on NumPy 1.19.x
|
415 |
+
[-2**63, 0, -1, 1314, -1315, 564442610]),
|
416 |
+
("m8[ns]", None, Casting.no, 0, 1, 1),
|
417 |
+
(str(np.dtype("m8[ns]").newbyteorder()), None,
|
418 |
+
Casting.equiv, None, 1, 1),
|
419 |
+
("m8", "m8[ms]", Casting.safe, 0, 1, 1),
|
420 |
+
# should be invalid cast:
|
421 |
+
("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
|
422 |
+
("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
|
423 |
+
("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
|
424 |
+
("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
|
425 |
+
("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
|
426 |
+
("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
|
427 |
+
# give full values based on NumPy 1.19.x
|
428 |
+
[-2**63, 0, 0, 1314, -1315, 564442610])])
|
429 |
+
def test_time_to_time(self, from_dt, to_dt,
|
430 |
+
expected_casting, expected_view_off,
|
431 |
+
nom, denom):
|
432 |
+
from_dt = np.dtype(from_dt)
|
433 |
+
if to_dt is not None:
|
434 |
+
to_dt = np.dtype(to_dt)
|
435 |
+
|
436 |
+
# Test a few values for casting (results generated with NumPy 1.19)
|
437 |
+
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
|
438 |
+
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
|
439 |
+
assert values.dtype.byteorder == from_dt.byteorder
|
440 |
+
assert np.isnat(values.view(from_dt)[0])
|
441 |
+
|
442 |
+
DType = type(from_dt)
|
443 |
+
cast = get_castingimpl(DType, DType)
|
444 |
+
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
|
445 |
+
(from_dt, to_dt))
|
446 |
+
assert from_res is from_dt
|
447 |
+
assert to_res is to_dt or to_dt is None
|
448 |
+
assert casting == expected_casting
|
449 |
+
assert view_off == expected_view_off
|
450 |
+
|
451 |
+
if nom is not None:
|
452 |
+
expected_out = (values * nom // denom).view(to_res)
|
453 |
+
expected_out[0] = "NaT"
|
454 |
+
else:
|
455 |
+
expected_out = np.empty_like(values)
|
456 |
+
expected_out[...] = denom
|
457 |
+
expected_out = expected_out.view(to_dt)
|
458 |
+
|
459 |
+
orig_arr = values.view(from_dt)
|
460 |
+
orig_out = np.empty_like(expected_out)
|
461 |
+
|
462 |
+
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
|
463 |
+
# Casting from non-generic to generic units is an error and should
|
464 |
+
# probably be reported as an invalid cast earlier.
|
465 |
+
with pytest.raises(ValueError):
|
466 |
+
cast._simple_strided_call((orig_arr, orig_out))
|
467 |
+
return
|
468 |
+
|
469 |
+
for aligned in [True, True]:
|
470 |
+
for contig in [True, True]:
|
471 |
+
arr, out = self.get_data_variation(
|
472 |
+
orig_arr, orig_out, aligned, contig)
|
473 |
+
out[...] = 0
|
474 |
+
cast._simple_strided_call((arr, out))
|
475 |
+
assert_array_equal(out.view("int64"), expected_out.view("int64"))
|
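The unit-conversion expectations parametrized above can also be observed through np.can_cast; a brief hedged check matching the table entries for ms <-> ns:

    import numpy as np
    assert np.can_cast("M8[ms]", "M8[ns]", casting="safe")       # finer unit
    assert not np.can_cast("M8[ns]", "M8[ms]", casting="safe")   # coarser unit
    assert np.can_cast("M8[ns]", "M8[ms]", casting="same_kind")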
476 |
+
|
477 |
+
def string_with_modified_length(self, dtype, change_length):
|
478 |
+
fact = 1 if dtype.char == "S" else 4
|
479 |
+
length = dtype.itemsize // fact + change_length
|
480 |
+
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
|
481 |
+
|
482 |
+
@pytest.mark.parametrize("other_DT", simple_dtypes)
|
483 |
+
@pytest.mark.parametrize("string_char", ["S", "U"])
|
484 |
+
def test_string_cancast(self, other_DT, string_char):
|
485 |
+
fact = 1 if string_char == "S" else 4
|
486 |
+
|
487 |
+
string_DT = type(np.dtype(string_char))
|
488 |
+
cast = get_castingimpl(other_DT, string_DT)
|
489 |
+
|
490 |
+
other_dt = other_DT()
|
491 |
+
expected_length = get_expected_stringlength(other_dt)
|
492 |
+
string_dt = np.dtype(f"{string_char}{expected_length}")
|
493 |
+
|
494 |
+
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
|
495 |
+
(other_dt, None))
|
496 |
+
assert res_dt.itemsize == expected_length * fact
|
497 |
+
assert safety == Casting.safe # we consider casts to string "safe"
|
498 |
+
assert view_off is None
|
499 |
+
assert isinstance(res_dt, string_DT)
|
500 |
+
|
501 |
+
# These casts currently implement changing the string length, so
|
502 |
+
# check the cast-safety for too long/fixed string lengths:
|
503 |
+
for change_length in [-1, 0, 1]:
|
504 |
+
if change_length >= 0:
|
505 |
+
expected_safety = Casting.safe
|
506 |
+
else:
|
507 |
+
expected_safety = Casting.same_kind
|
508 |
+
|
509 |
+
to_dt = self.string_with_modified_length(string_dt, change_length)
|
510 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
511 |
+
(other_dt, to_dt))
|
512 |
+
assert res_dt is to_dt
|
513 |
+
assert safety == expected_safety
|
514 |
+
assert view_off is None
|
515 |
+
|
516 |
+
# The opposite direction is always considered unsafe:
|
517 |
+
cast = get_castingimpl(string_DT, other_DT)
|
518 |
+
|
519 |
+
safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
|
520 |
+
assert safety == Casting.unsafe
|
521 |
+
assert view_off is None
|
522 |
+
|
523 |
+
cast = get_castingimpl(string_DT, other_DT)
|
524 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
525 |
+
(string_dt, None))
|
526 |
+
assert safety == Casting.unsafe
|
527 |
+
assert view_off is None
|
528 |
+
assert other_dt is res_dt # returns the singleton for simple dtypes
|
529 |
+
|
530 |
+
@pytest.mark.parametrize("string_char", ["S", "U"])
|
531 |
+
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
|
532 |
+
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
|
533 |
+
"""
|
534 |
+
Tests casts from and to string by checking the roundtripping property.
|
535 |
+
|
536 |
+
The test also covers some string to string casts (but not all).
|
537 |
+
|
538 |
+
If this test creates issues, it should possibly just be simplified
|
539 |
+
or even removed (checking whether unaligned/non-contiguous casts give
|
540 |
+
the same results is useful, though).
|
541 |
+
"""
|
542 |
+
string_DT = type(np.dtype(string_char))
|
543 |
+
|
544 |
+
cast = get_castingimpl(type(other_dt), string_DT)
|
545 |
+
cast_back = get_castingimpl(string_DT, type(other_dt))
|
546 |
+
_, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
|
547 |
+
(other_dt, None))
|
548 |
+
|
549 |
+
if res_other_dt is not other_dt:
|
550 |
+
# do not support non-native byteorder, skip test in that case
|
551 |
+
assert other_dt.byteorder != res_other_dt.byteorder
|
552 |
+
return
|
553 |
+
|
554 |
+
orig_arr, values = self.get_data(other_dt, None)
|
555 |
+
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
|
556 |
+
string_dt_short = self.string_with_modified_length(string_dt, -1)
|
557 |
+
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
|
558 |
+
string_dt_long = self.string_with_modified_length(string_dt, 1)
|
559 |
+
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
|
560 |
+
|
561 |
+
assert not cast._supports_unaligned # if support is added, should test
|
562 |
+
assert not cast_back._supports_unaligned
|
563 |
+
|
564 |
+
for contig in [True, False]:
|
565 |
+
other_arr, str_arr = self.get_data_variation(
|
566 |
+
orig_arr, str_arr, True, contig)
|
567 |
+
_, str_arr_short = self.get_data_variation(
|
568 |
+
orig_arr, str_arr_short.copy(), True, contig)
|
569 |
+
_, str_arr_long = self.get_data_variation(
|
570 |
+
orig_arr, str_arr_long, True, contig)
|
571 |
+
|
572 |
+
cast._simple_strided_call((other_arr, str_arr))
|
573 |
+
|
574 |
+
cast._simple_strided_call((other_arr, str_arr_short))
|
575 |
+
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
|
576 |
+
|
577 |
+
cast._simple_strided_call((other_arr, str_arr_long))
|
578 |
+
assert_array_equal(str_arr, str_arr_long)
|
579 |
+
|
580 |
+
if other_dt.kind == "b":
|
581 |
+
# Booleans do not roundtrip
|
582 |
+
continue
|
583 |
+
|
584 |
+
other_arr[...] = 0
|
585 |
+
cast_back._simple_strided_call((str_arr, other_arr))
|
586 |
+
assert_array_equal(orig_arr, other_arr)
|
587 |
+
|
588 |
+
other_arr[...] = 0
|
589 |
+
cast_back._simple_strided_call((str_arr_long, other_arr))
|
590 |
+
assert_array_equal(orig_arr, other_arr)
|
591 |
+
|
592 |
+
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
|
593 |
+
@pytest.mark.parametrize("string_char", ["S", "U"])
|
594 |
+
def test_string_to_string_cancast(self, other_dt, string_char):
|
595 |
+
other_dt = np.dtype(other_dt)
|
596 |
+
|
597 |
+
fact = 1 if string_char == "S" else 4
|
598 |
+
div = 1 if other_dt.char == "S" else 4
|
599 |
+
|
600 |
+
string_DT = type(np.dtype(string_char))
|
601 |
+
cast = get_castingimpl(type(other_dt), string_DT)
|
602 |
+
|
603 |
+
expected_length = other_dt.itemsize // div
|
604 |
+
string_dt = np.dtype(f"{string_char}{expected_length}")
|
605 |
+
|
606 |
+
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
|
607 |
+
(other_dt, None))
|
608 |
+
assert res_dt.itemsize == expected_length * fact
|
609 |
+
assert isinstance(res_dt, string_DT)
|
610 |
+
|
611 |
+
expected_view_off = None
|
612 |
+
if other_dt.char == string_char:
|
613 |
+
if other_dt.isnative:
|
614 |
+
expected_safety = Casting.no
|
615 |
+
expected_view_off = 0
|
616 |
+
else:
|
617 |
+
expected_safety = Casting.equiv
|
618 |
+
elif string_char == "U":
|
619 |
+
expected_safety = Casting.safe
|
620 |
+
else:
|
621 |
+
expected_safety = Casting.unsafe
|
622 |
+
|
623 |
+
assert view_off == expected_view_off
|
624 |
+
assert expected_safety == safety
|
625 |
+
|
626 |
+
for change_length in [-1, 0, 1]:
|
627 |
+
to_dt = self.string_with_modified_length(string_dt, change_length)
|
628 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
629 |
+
(other_dt, to_dt))
|
630 |
+
|
631 |
+
assert res_dt is to_dt
|
632 |
+
if change_length <= 0:
|
633 |
+
assert view_off == expected_view_off
|
634 |
+
else:
|
635 |
+
assert view_off is None
|
636 |
+
if expected_safety == Casting.unsafe:
|
637 |
+
assert safety == expected_safety
|
638 |
+
elif change_length < 0:
|
639 |
+
assert safety == Casting.same_kind
|
640 |
+
elif change_length == 0:
|
641 |
+
assert safety == expected_safety
|
642 |
+
elif change_length > 0:
|
643 |
+
assert safety == Casting.safe
|
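The bytes/unicode asymmetry encoded above (bytes -> unicode is safe, unicode -> bytes is unsafe) is also visible through the public API; a short hedged check:

    import numpy as np
    assert np.can_cast("S8", "U8")        # bytes -> unicode of the same length
    assert not np.can_cast("U8", "S8")    # unicode -> bytes is unsafe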
644 |
+
|
645 |
+
@pytest.mark.parametrize("order1", [">", "<"])
|
646 |
+
@pytest.mark.parametrize("order2", [">", "<"])
|
647 |
+
def test_unicode_byteswapped_cast(self, order1, order2):
|
648 |
+
# Very specific tests (not using the castingimpl directly)
|
649 |
+
# that test unicode byteswaps, including for unaligned array data.
|
650 |
+
dtype1 = np.dtype(f"{order1}U30")
|
651 |
+
dtype2 = np.dtype(f"{order2}U30")
|
652 |
+
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
|
653 |
+
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
|
654 |
+
if dtype1.alignment != 1:
|
655 |
+
# alignment should always be >1, but skip the check if not
|
656 |
+
assert not data1.flags.aligned
|
657 |
+
assert not data2.flags.aligned
|
658 |
+
|
659 |
+
element = "this is a ünicode string‽"
|
660 |
+
data1[()] = element
|
661 |
+
# Test both `data1` and `data1.copy()` (which should be aligned)
|
662 |
+
for data in [data1, data1.copy()]:
|
663 |
+
data2[...] = data
|
664 |
+
assert data2[()] == element
|
665 |
+
assert data2.copy()[()] == element
|
666 |
+
|
667 |
+
def test_void_to_string_special_case(self):
|
668 |
+
# Cover a small special case in void to string casting that could
|
669 |
+
# probably just as well be turned into an error (compare
|
670 |
+
# `test_object_to_parametric_internal_error` below).
|
671 |
+
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
|
672 |
+
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
|
673 |
+
|
674 |
+
def test_object_to_parametric_internal_error(self):
|
675 |
+
# We reject casting from object to a parametric type, without
|
676 |
+
# figuring out the correct instance first.
|
677 |
+
object_dtype = type(np.dtype(object))
|
678 |
+
other_dtype = type(np.dtype(str))
|
679 |
+
cast = get_castingimpl(object_dtype, other_dtype)
|
680 |
+
with pytest.raises(TypeError,
|
681 |
+
match="casting from object to the parametric DType"):
|
682 |
+
cast._resolve_descriptors((np.dtype("O"), None))
|
683 |
+
|
684 |
+
@pytest.mark.parametrize("dtype", simple_dtype_instances())
|
685 |
+
def test_object_and_simple_resolution(self, dtype):
|
686 |
+
# Simple test to exercise the cast when no instance is specified
|
687 |
+
object_dtype = type(np.dtype(object))
|
688 |
+
cast = get_castingimpl(object_dtype, type(dtype))
|
689 |
+
|
690 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
691 |
+
(np.dtype("O"), dtype))
|
692 |
+
assert safety == Casting.unsafe
|
693 |
+
assert view_off is None
|
694 |
+
assert res_dt is dtype
|
695 |
+
|
696 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
697 |
+
(np.dtype("O"), None))
|
698 |
+
assert safety == Casting.unsafe
|
699 |
+
assert view_off is None
|
700 |
+
assert res_dt == dtype.newbyteorder("=")
|
701 |
+
|
702 |
+
@pytest.mark.parametrize("dtype", simple_dtype_instances())
|
703 |
+
def test_simple_to_object_resolution(self, dtype):
|
704 |
+
# Simple test to exercise the cast when no instance is specified
|
705 |
+
object_dtype = type(np.dtype(object))
|
706 |
+
cast = get_castingimpl(type(dtype), object_dtype)
|
707 |
+
|
708 |
+
safety, (_, res_dt), view_off = cast._resolve_descriptors(
|
709 |
+
(dtype, None))
|
710 |
+
assert safety == Casting.safe
|
711 |
+
assert view_off is None
|
712 |
+
assert res_dt is np.dtype("O")
|
713 |
+
|
714 |
+
@pytest.mark.parametrize("casting", ["no", "unsafe"])
|
715 |
+
def test_void_and_structured_with_subarray(self, casting):
|
716 |
+
# test case corresponding to gh-19325
|
717 |
+
dtype = np.dtype([("foo", "<f4", (3, 2))])
|
718 |
+
expected = casting == "unsafe"
|
719 |
+
assert np.can_cast("V4", dtype, casting=casting) == expected
|
720 |
+
assert np.can_cast(dtype, "V4", casting=casting) == expected
|
721 |
+
|
722 |
+
@pytest.mark.parametrize(["to_dt", "expected_off"],
|
723 |
+
[ # Same as `from_dt` but with both fields shifted:
|
724 |
+
(np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
|
725 |
+
"offsets": [0, 4]}), 2),
|
726 |
+
# Additional change of the names
|
727 |
+
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
|
728 |
+
"offsets": [0, 4]}), 2),
|
729 |
+
# Incompatible field offset change
|
730 |
+
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
|
731 |
+
"offsets": [0, 6]}), None)])
|
732 |
+
def test_structured_field_offsets(self, to_dt, expected_off):
|
733 |
+
# This checks the cast-safety and view offset for swapped and "shifted"
|
734 |
+
# fields which are viewable
|
735 |
+
from_dt = np.dtype({"names": ["a", "b"],
|
736 |
+
"formats": ["i4", "f4"],
|
737 |
+
"offsets": [2, 6]})
|
738 |
+
cast = get_castingimpl(type(from_dt), type(to_dt))
|
739 |
+
safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
|
740 |
+
if from_dt.names == to_dt.names:
|
741 |
+
assert safety == Casting.equiv
|
742 |
+
else:
|
743 |
+
assert safety == Casting.safe
|
744 |
+
# Shifting the original data pointer by -2 will align both by
|
745 |
+
# effectively adding 2 bytes of spacing before `from_dt`.
|
746 |
+
assert view_off == expected_off
|
747 |
+
|
748 |
+
@pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
|
749 |
+
# Subarray cases:
|
750 |
+
("i", "(1,1)i", 0),
|
751 |
+
("(1,1)i", "i", 0),
|
752 |
+
("(2,1)i", "(2,1)i", 0),
|
753 |
+
# field cases (field to field is tested explicitly also):
|
754 |
+
# Not considered viewable, because a negative offset would allow
|
755 |
+
# many structured dtypes to indirectly access invalid memory.
|
756 |
+
("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
|
757 |
+
(dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
|
758 |
+
# Currently considered not viewable, due to multiple fields
|
759 |
+
# even though they overlap (maybe we should not allow that?)
|
760 |
+
("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
|
761 |
+
None),
|
762 |
+
# different number of fields can't work, should probably just fail
|
763 |
+
# so it never reports "viewable":
|
764 |
+
("i,i", "i,i,i", None),
|
765 |
+
# Unstructured void cases:
|
766 |
+
("i4", "V3", 0), # void smaller or equal
|
767 |
+
("i4", "V4", 0), # void smaller or equal
|
768 |
+
("i4", "V10", None), # void is larger (no view)
|
769 |
+
("O", "V4", None), # currently reject objects for view here.
|
770 |
+
("O", "V8", None), # currently reject objects for view here.
|
771 |
+
("V4", "V3", 0),
|
772 |
+
("V4", "V4", 0),
|
773 |
+
("V3", "V4", None),
|
774 |
+
# Note that currently void-to-other cast goes via byte-strings
|
775 |
+
# and is not a "view" based cast like the opposite direction:
|
776 |
+
("V4", "i4", None),
|
777 |
+
# completely invalid/impossible cast:
|
778 |
+
("i,i", "i,i,i", None),
|
779 |
+
])
|
780 |
+
def test_structured_view_offsets_parametric(
|
781 |
+
self, from_dt, to_dt, expected_off):
|
782 |
+
# TODO: While this test is fairly thorough, right now, it does not
|
783 |
+
# really test some paths that may have nonzero offsets (they don't
|
784 |
+
# really exist).
|
785 |
+
from_dt = np.dtype(from_dt)
|
786 |
+
to_dt = np.dtype(to_dt)
|
787 |
+
cast = get_castingimpl(type(from_dt), type(to_dt))
|
788 |
+
_, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
|
789 |
+
assert view_off == expected_off
|
790 |
+
|
791 |
+
@pytest.mark.parametrize("dtype", np.typecodes["All"])
|
792 |
+
def test_object_casts_NULL_None_equivalence(self, dtype):
|
793 |
+
# None to <other> casts may succeed or fail, but a NULL'ed array must
|
794 |
+
# behave the same as one filled with None's.
|
795 |
+
arr_normal = np.array([None] * 5)
|
796 |
+
arr_NULLs = np.empty_like(arr_normal)
|
797 |
+
ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes)
|
798 |
+
# If the check fails (maybe it should) the test would lose its purpose:
|
799 |
+
assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
|
800 |
+
|
801 |
+
try:
|
802 |
+
expected = arr_normal.astype(dtype)
|
803 |
+
except TypeError:
|
804 |
+
with pytest.raises(TypeError):
|
805 |
+
arr_NULLs.astype(dtype)
|
806 |
+
else:
|
807 |
+
assert_array_equal(expected, arr_NULLs.astype(dtype))
|
808 |
+
|
809 |
+
@pytest.mark.parametrize("dtype",
|
810 |
+
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
|
811 |
+
def test_nonstandard_bool_to_other(self, dtype):
|
812 |
+
# simple test for casting bool_ to numeric types, which should not
|
813 |
+
# expose the detail that NumPy bools can sometimes take values other
|
814 |
+
# than 0 and 1. See also gh-19514.
|
815 |
+
nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
|
816 |
+
res = nonstandard_bools.astype(dtype)
|
817 |
+
expected = [0, 1, 1]
|
818 |
+
assert_array_equal(res, expected)
|
819 |
+
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cpu_features.py
ADDED
@@ -0,0 +1,404 @@
1 |
+
import sys, platform, re, pytest
|
2 |
+
from numpy.core._multiarray_umath import (
|
3 |
+
__cpu_features__,
|
4 |
+
__cpu_baseline__,
|
5 |
+
__cpu_dispatch__,
|
6 |
+
)
|
7 |
+
import numpy as np
|
8 |
+
import subprocess
|
9 |
+
import pathlib
|
10 |
+
import os
|
11 |
+
import re
|
12 |
+
|
13 |
+
def assert_features_equal(actual, desired, fname):
|
14 |
+
__tracebackhide__ = True # Hide traceback for py.test
|
15 |
+
actual, desired = str(actual), str(desired)
|
16 |
+
if actual == desired:
|
17 |
+
return
|
18 |
+
detected = str(__cpu_features__).replace("'", "")
|
19 |
+
try:
|
20 |
+
with open("/proc/cpuinfo") as fd:
|
21 |
+
cpuinfo = fd.read(2048)
|
22 |
+
except Exception as err:
|
23 |
+
cpuinfo = str(err)
|
24 |
+
|
25 |
+
try:
|
26 |
+
import subprocess
|
27 |
+
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
|
28 |
+
auxv = auxv.decode()
|
29 |
+
except Exception as err:
|
30 |
+
auxv = str(err)
|
31 |
+
|
32 |
+
import textwrap
|
33 |
+
error_report = textwrap.indent(
|
34 |
+
"""
|
35 |
+
###########################################
|
36 |
+
### Extra debugging information
|
37 |
+
###########################################
|
38 |
+
-------------------------------------------
|
39 |
+
--- NumPy Detections
|
40 |
+
-------------------------------------------
|
41 |
+
%s
|
42 |
+
-------------------------------------------
|
43 |
+
--- SYS / CPUINFO
|
44 |
+
-------------------------------------------
|
45 |
+
%s....
|
46 |
+
-------------------------------------------
|
47 |
+
--- SYS / AUXV
|
48 |
+
-------------------------------------------
|
49 |
+
%s
|
50 |
+
""" % (detected, cpuinfo, auxv), prefix='\r')
|
51 |
+
|
52 |
+
raise AssertionError((
|
53 |
+
"Failure Detection\n"
|
54 |
+
" NAME: '%s'\n"
|
55 |
+
" ACTUAL: %s\n"
|
56 |
+
" DESIRED: %s\n"
|
57 |
+
"%s"
|
58 |
+
) % (fname, actual, desired, error_report))
|
59 |
+
|
60 |
+
def _text_to_list(txt):
|
61 |
+
out = txt.strip("][\n").replace("'", "").split(', ')
|
62 |
+
return None if out[0] == "" else out
|
63 |
+
|
64 |
+
class AbstractTest:
|
65 |
+
features = []
|
66 |
+
features_groups = {}
|
67 |
+
features_map = {}
|
68 |
+
features_flags = set()
|
69 |
+
|
70 |
+
def load_flags(self):
|
71 |
+
# a hook
|
72 |
+
pass
|
73 |
+
def test_features(self):
|
74 |
+
self.load_flags()
|
75 |
+
for gname, features in self.features_groups.items():
|
76 |
+
test_features = [self.cpu_have(f) for f in features]
|
77 |
+
assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
|
78 |
+
|
79 |
+
for feature_name in self.features:
|
80 |
+
cpu_have = self.cpu_have(feature_name)
|
81 |
+
npy_have = __cpu_features__.get(feature_name)
|
82 |
+
assert_features_equal(npy_have, cpu_have, feature_name)
|
83 |
+
|
84 |
+
def cpu_have(self, feature_name):
|
85 |
+
map_names = self.features_map.get(feature_name, feature_name)
|
86 |
+
if isinstance(map_names, str):
|
87 |
+
return map_names in self.features_flags
|
88 |
+
for f in map_names:
|
89 |
+
if f in self.features_flags:
|
90 |
+
return True
|
91 |
+
return False
|
92 |
+
|
93 |
+
def load_flags_cpuinfo(self, magic_key):
|
94 |
+
self.features_flags = self.get_cpuinfo_item(magic_key)
|
95 |
+
|
96 |
+
def get_cpuinfo_item(self, magic_key):
|
97 |
+
values = set()
|
98 |
+
with open('/proc/cpuinfo') as fd:
|
99 |
+
for line in fd:
|
100 |
+
if not line.startswith(magic_key):
|
101 |
+
continue
|
102 |
+
flags_value = [s.strip() for s in line.split(':', 1)]
|
103 |
+
if len(flags_value) == 2:
|
104 |
+
values = values.union(flags_value[1].upper().split())
|
105 |
+
return values
|
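get_cpuinfo_item collects the upper-cased tokens after the first colon of matching /proc/cpuinfo lines; a standalone hedged sketch of that parsing step (the cpuinfo line here is illustrative):

    line = "flags\t\t: fpu vme sse sse2 avx2"
    key, _, value = line.partition(":")
    flags = set(value.upper().split())
    assert "AVX2" in flags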
106 |
+
|
107 |
+
def load_flags_auxv(self):
|
108 |
+
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
|
109 |
+
for at in auxv.split(b'\n'):
|
110 |
+
if not at.startswith(b"AT_HWCAP"):
|
111 |
+
continue
|
112 |
+
hwcap_value = [s.strip() for s in at.split(b':', 1)]
|
113 |
+
if len(hwcap_value) == 2:
|
114 |
+
self.features_flags = self.features_flags.union(
|
115 |
+
hwcap_value[1].upper().decode().split()
|
116 |
+
)
|
117 |
+
|
118 |
+
@pytest.mark.skipif(
|
119 |
+
sys.platform == 'emscripten',
|
120 |
+
reason= (
|
121 |
+
"The subprocess module is not available on WASM platforms and"
|
122 |
+
" therefore this test class cannot be properly executed."
|
123 |
+
),
|
124 |
+
)
|
125 |
+
class TestEnvPrivation:
|
126 |
+
cwd = pathlib.Path(__file__).parent.resolve()
|
127 |
+
env = os.environ.copy()
|
128 |
+
_enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
|
129 |
+
_disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
|
130 |
+
SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
|
131 |
+
unavailable_feats = [
|
132 |
+
feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
|
133 |
+
]
|
134 |
+
UNAVAILABLE_FEAT = (
|
135 |
+
None if len(unavailable_feats) == 0
|
136 |
+
else unavailable_feats[0]
|
137 |
+
)
|
138 |
+
BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
|
139 |
+
SCRIPT = """
|
140 |
+
def main():
|
141 |
+
from numpy.core._multiarray_umath import __cpu_features__, __cpu_dispatch__
|
142 |
+
|
143 |
+
detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
|
144 |
+
print(detected)
|
145 |
+
|
146 |
+
if __name__ == "__main__":
|
147 |
+
main()
|
148 |
+
"""
|
149 |
+
|
150 |
+
@pytest.fixture(autouse=True)
|
151 |
+
def setup_class(self, tmp_path_factory):
|
152 |
+
file = tmp_path_factory.mktemp("runtime_test_script")
|
153 |
+
file /= "_runtime_detect.py"
|
154 |
+
file.write_text(self.SCRIPT)
|
155 |
+
self.file = file
|
156 |
+
return
|
157 |
+
|
158 |
+
def _run(self):
|
159 |
+
return subprocess.run(
|
160 |
+
[sys.executable, self.file],
|
161 |
+
env=self.env,
|
162 |
+
**self.SUBPROCESS_ARGS,
|
163 |
+
)
|
164 |
+
|
165 |
+
# Helper function mimicking pytest.raises for subprocess calls
|
166 |
+
def _expect_error(
|
167 |
+
self,
|
168 |
+
msg,
|
169 |
+
err_type,
|
170 |
+
no_error_msg="Failed to generate error"
|
171 |
+
):
|
172 |
+
try:
|
173 |
+
self._run()
|
174 |
+
except subprocess.CalledProcessError as e:
|
175 |
+
assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
|
176 |
+
assert re.search(msg, e.stderr), assertion_message
|
177 |
+
|
178 |
+
assertion_message = (
|
179 |
+
f"Expected error of type: {err_type}; see full "
|
180 |
+
f"error:\n{e.stderr}"
|
181 |
+
)
|
182 |
+
assert re.search(err_type, e.stderr), assertion_message
|
183 |
+
else:
|
184 |
+
assert False, no_error_msg
|
185 |
+
|
186 |
+
def setup_method(self):
|
187 |
+
"""Ensure that the environment is reset"""
|
188 |
+
self.env = os.environ.copy()
|
189 |
+
return
|
190 |
+
|
191 |
+
def test_runtime_feature_selection(self):
|
192 |
+
"""
|
193 |
+
Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
|
194 |
+
features exactly specified are dispatched.
|
195 |
+
"""
|
196 |
+
|
197 |
+
# Capture runtime-enabled features
|
198 |
+
out = self._run()
|
199 |
+
non_baseline_features = _text_to_list(out.stdout)
|
200 |
+
|
201 |
+
if non_baseline_features is None:
|
202 |
+
pytest.skip(
|
203 |
+
"No dispatchable features outside of baseline detected."
|
204 |
+
)
|
205 |
+
feature = non_baseline_features[0]
|
206 |
+
|
207 |
+
# Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
|
208 |
+
# specified
|
209 |
+
self.env['NPY_ENABLE_CPU_FEATURES'] = feature
|
210 |
+
out = self._run()
|
211 |
+
enabled_features = _text_to_list(out.stdout)
|
212 |
+
|
213 |
+
# Ensure that only one feature is enabled, and it is exactly the one
|
214 |
+
# specified by `NPY_ENABLE_CPU_FEATURES`
|
215 |
+
assert set(enabled_features) == {feature}
|
216 |
+
|
217 |
+
if len(non_baseline_features) < 2:
|
218 |
+
pytest.skip("Only one non-baseline feature detected.")
|
219 |
+
# Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
|
220 |
+
# specified
|
221 |
+
self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
|
222 |
+
out = self._run()
|
223 |
+
enabled_features = _text_to_list(out.stdout)
|
224 |
+
|
225 |
+
# Ensure that both features are enabled, and they are exactly the ones
|
226 |
+
# specified by `NPY_ENABLE_CPU_FEATURES`
|
227 |
+
assert set(enabled_features) == set(non_baseline_features)
|
228 |
+
return
|
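The same spawn-a-fresh-interpreter pattern can be reproduced outside the test; a minimal hedged sketch, where "AVX2" is only an illustrative feature name (pick one your machine actually supports, otherwise the import in the child process will fail):

    import os, subprocess, sys

    env = os.environ.copy()
    env["NPY_ENABLE_CPU_FEATURES"] = "AVX2"
    code = ("from numpy.core._multiarray_umath import __cpu_features__, __cpu_dispatch__;"
            "print([f for f in __cpu_dispatch__ if __cpu_features__[f]])")
    print(subprocess.run([sys.executable, "-c", code],
                         env=env, capture_output=True, text=True).stdout)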
229 |
+
|
230 |
+
@pytest.mark.parametrize("enabled, disabled",
|
231 |
+
[
|
232 |
+
("feature", "feature"),
|
233 |
+
("feature", "same"),
|
234 |
+
])
|
235 |
+
def test_both_enable_disable_set(self, enabled, disabled):
|
236 |
+
"""
|
237 |
+
Ensure that when both environment variables are set, an
|
238 |
+
ImportError is thrown
|
239 |
+
"""
|
240 |
+
self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
|
241 |
+
self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
|
242 |
+
msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
|
243 |
+
err_type = "ImportError"
|
244 |
+
self._expect_error(msg, err_type)
|
245 |
+
|
246 |
+
@pytest.mark.skipif(
|
247 |
+
not __cpu_dispatch__,
|
248 |
+
reason=(
|
249 |
+
"NPY_*_CPU_FEATURES only parsed if "
|
250 |
+
"`__cpu_dispatch__` is non-empty"
|
251 |
+
)
|
252 |
+
)
|
253 |
+
@pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
|
254 |
+
def test_variable_too_long(self, action):
|
255 |
+
"""
|
256 |
+
Test that an error is thrown if the environment variables are too long
|
257 |
+
to be processed. Current limit is 1024, but this may change later.
|
258 |
+
"""
|
259 |
+
MAX_VAR_LENGTH = 1024
|
260 |
+
# Actual length is MAX_VAR_LENGTH + 1 due to null-termination
|
261 |
+
self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
|
262 |
+
msg = (
|
263 |
+
f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
|
264 |
+
f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
|
265 |
+
)
|
266 |
+
err_type = "RuntimeError"
|
267 |
+
self._expect_error(msg, err_type)
|
268 |
+
|
269 |
+
@pytest.mark.skipif(
|
270 |
+
not __cpu_dispatch__,
|
271 |
+
reason=(
|
272 |
+
"NPY_*_CPU_FEATURES only parsed if "
|
273 |
+
"`__cpu_dispatch__` is non-empty"
|
274 |
+
)
|
275 |
+
)
|
276 |
+
def test_impossible_feature_disable(self):
|
277 |
+
"""
|
278 |
+
Test that a RuntimeError is thrown if an impossible feature-disabling
|
279 |
+
request is made. This includes disabling a baseline feature.
|
280 |
+
"""
|
281 |
+
|
282 |
+
if self.BASELINE_FEAT is None:
|
283 |
+
pytest.skip("There are no unavailable features to test with")
|
284 |
+
bad_feature = self.BASELINE_FEAT
|
285 |
+
self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
|
286 |
+
msg = (
|
287 |
+
f"You cannot disable CPU feature '{bad_feature}', since it is "
|
288 |
+
"part of the baseline optimizations"
|
289 |
+
)
|
290 |
+
err_type = "RuntimeError"
|
291 |
+
self._expect_error(msg, err_type)
|
292 |
+
|
293 |
+
def test_impossible_feature_enable(self):
|
294 |
+
"""
|
295 |
+
Test that a RuntimeError is thrown if an impossible feature-enabling
|
296 |
+
request is made. This includes enabling a feature not supported by the
|
297 |
+
machine.
|
298 |
+
"""
|
299 |
+
|
300 |
+
if self.UNAVAILABLE_FEAT is None:
|
301 |
+
pytest.skip("There are no unavailable features to test with")
|
302 |
+
bad_feature = self.UNAVAILABLE_FEAT
|
303 |
+
self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
|
304 |
+
msg = (
|
305 |
+
f"You cannot enable CPU features \\({bad_feature}\\), since "
|
306 |
+
"they are not supported by your machine."
|
307 |
+
)
|
308 |
+
err_type = "RuntimeError"
|
309 |
+
self._expect_error(msg, err_type)
|
310 |
+
|
311 |
+
# Ensure that only the bad feature gets reported
|
312 |
+
feats = f"{bad_feature}, {self.BASELINE_FEAT}"
|
313 |
+
self.env['NPY_ENABLE_CPU_FEATURES'] = feats
|
314 |
+
msg = (
|
315 |
+
f"You cannot enable CPU features \\({bad_feature}\\), since they "
|
316 |
+
"are not supported by your machine."
|
317 |
+
)
|
318 |
+
self._expect_error(msg, err_type)
|
319 |
+
|
320 |
+
is_linux = sys.platform.startswith('linux')
|
321 |
+
is_cygwin = sys.platform.startswith('cygwin')
|
322 |
+
machine = platform.machine()
|
323 |
+
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
|
324 |
+
@pytest.mark.skipif(
|
325 |
+
not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
|
326 |
+
)
|
327 |
+
class Test_X86_Features(AbstractTest):
|
328 |
+
features = [
|
329 |
+
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
|
330 |
+
"AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
|
331 |
+
"AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
|
332 |
+
"AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
|
333 |
+
"AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
|
334 |
+
]
|
335 |
+
features_groups = dict(
|
336 |
+
AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
|
337 |
+
AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
|
338 |
+
"AVX5124VNNIW", "AVX512VPOPCNTDQ"],
|
339 |
+
AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
|
340 |
+
AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
|
341 |
+
AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
|
342 |
+
"AVX512VBMI"],
|
343 |
+
AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
|
344 |
+
"AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
|
345 |
+
AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
|
346 |
+
"AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
|
347 |
+
"AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
|
348 |
+
"AVX512FP16"],
|
349 |
+
)
|
350 |
+
features_map = dict(
|
351 |
+
SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
|
352 |
+
AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
|
353 |
+
AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
|
354 |
+
AVX512FP16="AVX512_FP16",
|
355 |
+
)
|
356 |
+
def load_flags(self):
|
357 |
+
self.load_flags_cpuinfo("flags")
|
358 |
+
|
359 |
+
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
|
360 |
+
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
|
361 |
+
class Test_POWER_Features(AbstractTest):
|
362 |
+
features = ["VSX", "VSX2", "VSX3", "VSX4"]
|
363 |
+
features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
|
364 |
+
|
365 |
+
def load_flags(self):
|
366 |
+
self.load_flags_auxv()
|
367 |
+
|
368 |
+
|
369 |
+
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
|
370 |
+
@pytest.mark.skipif(not is_linux or not is_zarch,
|
371 |
+
reason="Only for Linux and IBM Z")
|
372 |
+
class Test_ZARCH_Features(AbstractTest):
|
373 |
+
features = ["VX", "VXE", "VXE2"]
|
374 |
+
|
375 |
+
def load_flags(self):
|
376 |
+
self.load_flags_auxv()
|
377 |
+
|
378 |
+
|
379 |
+
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
|
380 |
+
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
|
381 |
+
class Test_ARM_Features(AbstractTest):
|
382 |
+
features = [
|
383 |
+
"NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
|
384 |
+
]
|
385 |
+
features_groups = dict(
|
386 |
+
NEON_FP16 = ["NEON", "HALF"],
|
387 |
+
NEON_VFPV4 = ["NEON", "VFPV4"],
|
388 |
+
)
|
389 |
+
def load_flags(self):
|
390 |
+
self.load_flags_cpuinfo("Features")
|
391 |
+
arch = self.get_cpuinfo_item("CPU architecture")
|
392 |
+
# in case a virtual filesystem of an aarch64 kernel is mounted
|
393 |
+
is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
|
394 |
+
if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
|
395 |
+
self.features_map = dict(
|
396 |
+
NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
|
397 |
+
)
|
398 |
+
else:
|
399 |
+
self.features_map = dict(
|
400 |
+
# The ELF auxiliary vector and /proc/cpuinfo on a Linux kernel (armv8 aarch32)
|
401 |
+
# don't provide information about ASIMD, so we assume that ASIMD is supported
|
402 |
+
# if the kernel reports any one of the following ARMv8 features.
|
403 |
+
ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
|
404 |
+
)
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py
ADDED
@@ -0,0 +1,253 @@
1 |
+
import pytest
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_array_equal
|
5 |
+
from numpy.core._multiarray_umath import (
|
6 |
+
_discover_array_parameters as discover_array_params, _get_sfloat_dtype)
|
7 |
+
|
8 |
+
|
9 |
+
SF = _get_sfloat_dtype()
|
10 |
+
|
11 |
+
|
12 |
+
class TestSFloat:
|
13 |
+
def _get_array(self, scaling, aligned=True):
|
14 |
+
if not aligned:
|
15 |
+
a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
|
16 |
+
a = a.view(np.float64)
|
17 |
+
a[:] = [1., 2., 3.]
|
18 |
+
else:
|
19 |
+
a = np.array([1., 2., 3.])
|
20 |
+
|
21 |
+
a *= 1./scaling # the casting code also uses the reciprocal.
|
22 |
+
return a.view(SF(scaling))
|
23 |
+
|
24 |
+
def test_sfloat_rescaled(self):
|
25 |
+
sf = SF(1.)
|
26 |
+
sf2 = sf.scaled_by(2.)
|
27 |
+
assert sf2.get_scaling() == 2.
|
28 |
+
sf6 = sf2.scaled_by(3.)
|
29 |
+
assert sf6.get_scaling() == 6.
|
30 |
+
|
31 |
+
def test_class_discovery(self):
|
32 |
+
# This does not test much, since we always discover the scaling as 1.
|
33 |
+
# But most of NumPy (when writing) does not understand DType classes
|
34 |
+
dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
|
35 |
+
assert dt == SF(1.)
|
36 |
+
|
37 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
38 |
+
def test_scaled_float_from_floats(self, scaling):
|
39 |
+
a = np.array([1., 2., 3.], dtype=SF(scaling))
|
40 |
+
|
41 |
+
assert a.dtype.get_scaling() == scaling
|
42 |
+
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
|
43 |
+
|
44 |
+
def test_repr(self):
|
45 |
+
# Check the repr, mainly to cover the code paths:
|
46 |
+
assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
|
47 |
+
|
48 |
+
def test_dtype_name(self):
|
49 |
+
assert SF(1.).name == "_ScaledFloatTestDType64"
|
50 |
+
|
51 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
52 |
+
def test_sfloat_from_float(self, scaling):
|
53 |
+
a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
|
54 |
+
|
55 |
+
assert a.dtype.get_scaling() == scaling
|
56 |
+
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
|
57 |
+
|
58 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
59 |
+
@pytest.mark.parametrize("scaling", [1., -1., 2.])
|
60 |
+
def test_sfloat_getitem(self, aligned, scaling):
|
61 |
+
a = self._get_array(1., aligned)
|
62 |
+
assert a.tolist() == [1., 2., 3.]
|
63 |
+
|
64 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
65 |
+
def test_sfloat_casts(self, aligned):
|
66 |
+
a = self._get_array(1., aligned)
|
67 |
+
|
68 |
+
assert np.can_cast(a, SF(-1.), casting="equiv")
|
69 |
+
assert not np.can_cast(a, SF(-1.), casting="no")
|
70 |
+
na = a.astype(SF(-1.))
|
71 |
+
assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
|
72 |
+
|
73 |
+
assert np.can_cast(a, SF(2.), casting="same_kind")
|
74 |
+
assert not np.can_cast(a, SF(2.), casting="safe")
|
75 |
+
a2 = a.astype(SF(2.))
|
76 |
+
assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
|
77 |
+
|
78 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
79 |
+
def test_sfloat_cast_internal_errors(self, aligned):
|
80 |
+
a = self._get_array(2e300, aligned)
|
81 |
+
|
82 |
+
with pytest.raises(TypeError,
|
83 |
+
match="error raised inside the core-loop: non-finite factor!"):
|
84 |
+
a.astype(SF(2e-300))
|
85 |
+
|
86 |
+
def test_sfloat_promotion(self):
|
87 |
+
assert np.result_type(SF(2.), SF(3.)) == SF(3.)
|
88 |
+
assert np.result_type(SF(3.), SF(2.)) == SF(3.)
|
89 |
+
# Float64 -> SF(1.) and then promotes normally, so both of these work:
|
90 |
+
assert np.result_type(SF(3.), np.float64) == SF(3.)
|
91 |
+
assert np.result_type(np.float64, SF(0.5)) == SF(1.)
|
92 |
+
|
93 |
+
# Test an undefined promotion:
|
94 |
+
with pytest.raises(TypeError):
|
95 |
+
np.result_type(SF(1.), np.int64)
|
96 |
+
|
97 |
+
def test_basic_multiply(self):
|
98 |
+
a = self._get_array(2.)
|
99 |
+
b = self._get_array(4.)
|
100 |
+
|
101 |
+
res = a * b
|
102 |
+
# multiplies dtype scaling and content separately:
|
103 |
+
assert res.dtype.get_scaling() == 8.
|
104 |
+
expected_view = a.view(np.float64) * b.view(np.float64)
|
105 |
+
assert_array_equal(res.view(np.float64), expected_view)
|
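Worked through with the helper above: _get_array(2.) stores the payload [0.5, 1.0, 1.5] (logical values [1, 2, 3] divided by the scaling) and _get_array(4.) stores [0.25, 0.5, 0.75]; multiplying payloads and scalings separately gives a scaling of 8. and logical values [1., 4., 9.], the expected elementwise product. A hedged numeric check of that bookkeeping:

    payload_a, payload_b = [0.5, 1.0, 1.5], [0.25, 0.5, 0.75]
    logical = [pa * pb * 8. for pa, pb in zip(payload_a, payload_b)]
    assert logical == [1.0, 4.0, 9.0]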
106 |
+
|
107 |
+
def test_possible_and_impossible_reduce(self):
|
108 |
+
# For reductions to work, the first and last operand must have the
|
109 |
+
# same dtype. For this parametric DType that is not necessarily true.
|
110 |
+
a = self._get_array(2.)
|
111 |
+
# Addition reduction works (as of writing it requires passing `initial`
|
112 |
+
# because setting a scaled-float from the default `0` fails).
|
113 |
+
res = np.add.reduce(a, initial=0.)
|
114 |
+
assert res == a.astype(np.float64).sum()
|
115 |
+
|
116 |
+
# But each multiplication changes the factor, so a reduction is not
|
117 |
+
# possible (the relaxed version of the old refusal to handle any
|
118 |
+
# flexible dtype).
|
119 |
+
with pytest.raises(TypeError,
|
120 |
+
match="the resolved dtypes are not compatible"):
|
121 |
+
np.multiply.reduce(a)
|
122 |
+
|
123 |
+
def test_basic_ufunc_at(self):
|
124 |
+
float_a = np.array([1., 2., 3.])
|
125 |
+
b = self._get_array(2.)
|
126 |
+
|
127 |
+
float_b = b.view(np.float64).copy()
|
128 |
+
np.multiply.at(float_b, [1, 1, 1], float_a)
|
129 |
+
np.multiply.at(b, [1, 1, 1], float_a)
|
130 |
+
|
131 |
+
assert_array_equal(b.view(np.float64), float_b)
|
132 |
+
|
133 |
+
def test_basic_multiply_promotion(self):
|
134 |
+
float_a = np.array([1., 2., 3.])
|
135 |
+
b = self._get_array(2.)
|
136 |
+
|
137 |
+
res1 = float_a * b
|
138 |
+
res2 = b * float_a
|
139 |
+
|
140 |
+
# one factor is one, so we get the factor of b:
|
141 |
+
assert res1.dtype == res2.dtype == b.dtype
|
142 |
+
expected_view = float_a * b.view(np.float64)
|
143 |
+
assert_array_equal(res1.view(np.float64), expected_view)
|
144 |
+
assert_array_equal(res2.view(np.float64), expected_view)
|
145 |
+
|
146 |
+
# Check that promotion works when `out` is used:
|
147 |
+
np.multiply(b, float_a, out=res2)
|
148 |
+
with pytest.raises(TypeError):
|
149 |
+
# The promoter accepts this (maybe it should not), but the SFloat
|
150 |
+
# result cannot be cast to integer:
|
151 |
+
np.multiply(b, float_a, out=np.arange(3))
|
152 |
+
|
153 |
+
def test_basic_addition(self):
|
154 |
+
a = self._get_array(2.)
|
155 |
+
b = self._get_array(4.)
|
156 |
+
|
157 |
+
res = a + b
|
158 |
+
# addition uses the type promotion rules for the result:
|
159 |
+
assert res.dtype == np.result_type(a.dtype, b.dtype)
|
160 |
+
expected_view = (a.astype(res.dtype).view(np.float64) +
|
161 |
+
b.astype(res.dtype).view(np.float64))
|
162 |
+
assert_array_equal(res.view(np.float64), expected_view)
|
163 |
+
|
164 |
+
def test_addition_cast_safety(self):
|
165 |
+
"""The addition method is special for the scaled float, because it
|
166 |
+
includes the "cast" between different factors, thus cast-safety
|
167 |
+
is influenced by the implementation.
|
168 |
+
"""
|
169 |
+
a = self._get_array(2.)
|
170 |
+
b = self._get_array(-2.)
|
171 |
+
c = self._get_array(3.)
|
172 |
+
|
173 |
+
# sign change is "equiv":
|
174 |
+
np.add(a, b, casting="equiv")
|
175 |
+
with pytest.raises(TypeError):
|
176 |
+
np.add(a, b, casting="no")
|
177 |
+
|
178 |
+
# Different factor is "same_kind" (default) so check that "safe" fails
|
179 |
+
with pytest.raises(TypeError):
|
180 |
+
np.add(a, c, casting="safe")
|
181 |
+
|
182 |
+
# Check that casting the output fails also (done by the ufunc here)
|
183 |
+
with pytest.raises(TypeError):
|
184 |
+
np.add(a, a, out=c, casting="safe")
|
185 |
+
|
186 |
+
@pytest.mark.parametrize("ufunc",
|
187 |
+
[np.logical_and, np.logical_or, np.logical_xor])
|
188 |
+
def test_logical_ufuncs_casts_to_bool(self, ufunc):
|
189 |
+
a = self._get_array(2.)
|
190 |
+
a[0] = 0. # make sure first element is considered False.
|
191 |
+
|
192 |
+
float_equiv = a.astype(float)
|
193 |
+
expected = ufunc(float_equiv, float_equiv)
|
194 |
+
res = ufunc(a, a)
|
195 |
+
assert_array_equal(res, expected)
|
196 |
+
|
197 |
+
# also check that the same works for reductions:
|
198 |
+
expected = ufunc.reduce(float_equiv)
|
199 |
+
res = ufunc.reduce(a)
|
200 |
+
assert_array_equal(res, expected)
|
201 |
+
|
202 |
+
# The output casting does not match the bool, bool -> bool loop:
|
203 |
+
with pytest.raises(TypeError):
|
204 |
+
ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
|
205 |
+
|
206 |
+
def test_wrapped_and_wrapped_reductions(self):
|
207 |
+
a = self._get_array(2.)
|
208 |
+
float_equiv = a.astype(float)
|
209 |
+
|
210 |
+
expected = np.hypot(float_equiv, float_equiv)
|
211 |
+
res = np.hypot(a, a)
|
212 |
+
assert res.dtype == a.dtype
|
213 |
+
res_float = res.view(np.float64) * 2
|
214 |
+
assert_array_equal(res_float, expected)
|
215 |
+
|
216 |
+
# Also check reduction (keepdims, due to incorrect getitem)
|
217 |
+
res = np.hypot.reduce(a, keepdims=True)
|
218 |
+
assert res.dtype == a.dtype
|
219 |
+
expected = np.hypot.reduce(float_equiv, keepdims=True)
|
220 |
+
assert res.view(np.float64) * 2 == expected
|
221 |
+
|
222 |
+
def test_astype_class(self):
|
223 |
+
# Very simple test that we accept `.astype()` also on the class.
|
224 |
+
# ScaledFloat always returns the default descriptor, but it does
|
225 |
+
# check the relevant code paths.
|
226 |
+
arr = np.array([1., 2., 3.], dtype=object)
|
227 |
+
|
228 |
+
res = arr.astype(SF) # passing the class itself
|
229 |
+
expected = arr.astype(SF(1.)) # above will have discovered 1. scaling
|
230 |
+
assert_array_equal(res.view(np.float64), expected.view(np.float64))
|
231 |
+
|
232 |
+
def test_creation_class(self):
|
233 |
+
arr1 = np.array([1., 2., 3.], dtype=SF)
|
234 |
+
assert arr1.dtype == SF(1.)
|
235 |
+
arr2 = np.array([1., 2., 3.], dtype=SF(1.))
|
236 |
+
assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
|
237 |
+
|
238 |
+
|
239 |
+
def test_type_pickle():
|
240 |
+
# can't actually unpickle, but we can pickle (if in namespace)
|
241 |
+
import pickle
|
242 |
+
|
243 |
+
np._ScaledFloatTestDType = SF
|
244 |
+
|
245 |
+
s = pickle.dumps(SF)
|
246 |
+
res = pickle.loads(s)
|
247 |
+
assert res is SF
|
248 |
+
|
249 |
+
del np._ScaledFloatTestDType
|
250 |
+
|
251 |
+
|
252 |
+
def test_is_numeric():
|
253 |
+
assert SF._is_numeric
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_datetime.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_defchararray.py
ADDED
@@ -0,0 +1,686 @@
import pytest

import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_raises,
    assert_raises_regex
    )

kw_unicode_true = {'unicode': True}  # make 2to3 work properly
kw_unicode_false = {'unicode': False}

class TestBasic:
    def test_from_object_array(self):
        A = np.array([['abc', 2],
                      ['long   ', '0123456789']], dtype='O')
        B = np.char.array(A)
        assert_equal(B.dtype.itemsize, 10)
        assert_array_equal(B, [[b'abc', b'2'],
                               [b'long', b'0123456789']])

    def test_from_object_array_unicode(self):
        A = np.array([['abc', 'Sigma \u03a3'],
                      ['long   ', '0123456789']], dtype='O')
        assert_raises(ValueError, np.char.array, (A,))
        B = np.char.array(A, **kw_unicode_true)
        assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
        assert_array_equal(B, [['abc', 'Sigma \u03a3'],
                               ['long', '0123456789']])

    def test_from_string_array(self):
        A = np.array([[b'abc', b'foo'],
                      [b'long   ', b'0123456789']])
        assert_equal(A.dtype.type, np.bytes_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        B[0, 0] = 'changed'
        assert_(B[0, 0] != A[0, 0])
        C = np.char.asarray(A)
        assert_array_equal(C, A)
        assert_equal(C.dtype, A.dtype)
        C[0, 0] = 'changed again'
        assert_(C[0, 0] != B[0, 0])
        assert_(C[0, 0] == A[0, 0])

    def test_from_unicode_array(self):
        A = np.array([['abc', 'Sigma \u03a3'],
                      ['long   ', '0123456789']])
        assert_equal(A.dtype.type, np.str_)
        B = np.char.array(A)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)
        B = np.char.array(A, **kw_unicode_true)
        assert_array_equal(B, A)
        assert_equal(B.dtype, A.dtype)
        assert_equal(B.shape, A.shape)

        def fail():
            np.char.array(A, **kw_unicode_false)

        assert_raises(UnicodeEncodeError, fail)

    def test_unicode_upconvert(self):
        A = np.char.array(['abc'])
        B = np.char.array(['\u03a3'])
        assert_(issubclass((A + B).dtype.type, np.str_))

    def test_from_string(self):
        A = np.char.array(b'abc')
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 3)
        assert_(issubclass(A.dtype.type, np.bytes_))

    def test_from_unicode(self):
        A = np.char.array('\u03a3')
        assert_equal(len(A), 1)
        assert_equal(len(A[0]), 1)
        assert_equal(A.itemsize, 4)
        assert_(issubclass(A.dtype.type, np.str_))

class TestVecString:
    def test_non_existent_method(self):

        def fail():
            _vec_string('a', np.bytes_, 'bogus')

        assert_raises(AttributeError, fail)

    def test_non_string_array(self):

        def fail():
            _vec_string(1, np.bytes_, 'strip')

        assert_raises(TypeError, fail)

    def test_invalid_args_tuple(self):

        def fail():
            _vec_string(['a'], np.bytes_, 'strip', 1)

        assert_raises(TypeError, fail)

    def test_invalid_type_descr(self):

        def fail():
            _vec_string(['a'], 'BOGUS', 'strip')

        assert_raises(TypeError, fail)

    def test_invalid_function_args(self):

        def fail():
            _vec_string(['a'], np.bytes_, 'strip', (1,))

        assert_raises(TypeError, fail)

    def test_invalid_result_type(self):

        def fail():
            _vec_string(['a'], np.int_, 'strip')

        assert_raises(TypeError, fail)

    def test_broadcast_error(self):

        def fail():
            _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))

        assert_raises(ValueError, fail)


class TestWhitespace:
    def setup_method(self):
        self.A = np.array([['abc ', '123  '],
                           ['789 ', 'xyz ']]).view(np.chararray)
        self.B = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)

    def test1(self):
        assert_(np.all(self.A == self.B))
        assert_(np.all(self.A >= self.B))
        assert_(np.all(self.A <= self.B))
        assert_(not np.any(self.A > self.B))
        assert_(not np.any(self.A < self.B))
        assert_(not np.any(self.A != self.B))

class TestChar:
    def setup_method(self):
        self.A = np.array('abc1', dtype='c').view(np.chararray)

    def test_it(self):
        assert_equal(self.A.shape, (4,))
        assert_equal(self.A.upper()[:2].tobytes(), b'AB')

class TestComparisons:
    def setup_method(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '123  '],
                           ['051', 'tuv']]).view(np.chararray)

    def test_not_equal(self):
        assert_array_equal((self.A != self.B), [[True, False], [True, True]])

    def test_equal(self):
        assert_array_equal((self.A == self.B), [[False, True], [False, False]])

    def test_greater_equal(self):
        assert_array_equal((self.A >= self.B), [[False, True], [True, True]])

    def test_less_equal(self):
        assert_array_equal((self.A <= self.B), [[True, True], [False, False]])

    def test_greater(self):
        assert_array_equal((self.A > self.B), [[False, False], [True, True]])

    def test_less(self):
        assert_array_equal((self.A < self.B), [[True, False], [False, False]])

    def test_type(self):
        out1 = np.char.equal(self.A, self.B)
        out2 = np.char.equal('a', 'a')
        assert_(isinstance(out1, np.ndarray))
        assert_(isinstance(out2, np.ndarray))

class TestComparisonsMixed1(TestComparisons):
    """Ticket #1276"""

    def setup_method(self):
        TestComparisons.setup_method(self)
        self.B = np.array([['efg', '123  '],
                           ['051', 'tuv']], np.str_).view(np.chararray)

class TestComparisonsMixed2(TestComparisons):
    """Ticket #1276"""

    def setup_method(self):
        TestComparisons.setup_method(self)
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']], np.str_).view(np.chararray)

class TestInformation:
    def setup_method(self):
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
        self.B = np.array([[' \u03a3 ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)

    def test_len(self):
        assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
        assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
        assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])

    def test_count(self):
        assert_(issubclass(self.A.count('').dtype.type, np.integer))
        assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
        assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
        # Python doesn't seem to like counting NULL characters
        # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
        assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
        # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])

    def test_endswith(self):
        assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
        assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            self.A.endswith('3', 'fdjk')

        assert_raises(TypeError, fail)

    def test_find(self):
        assert_(issubclass(self.A.find('a').dtype.type, np.integer))
        assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
        assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])

    def test_index(self):

        def fail():
            self.A.index('a')

        assert_raises(ValueError, fail)
        assert_(np.char.index('abcba', 'b') == 1)
        assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))

    def test_isalnum(self):
        assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
        assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])

    def test_isalpha(self):
        assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
        assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])

    def test_isdigit(self):
        assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
        assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])

    def test_islower(self):
        assert_(issubclass(self.A.islower().dtype.type, np.bool_))
        assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])

    def test_isspace(self):
        assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
        assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])

    def test_istitle(self):
        assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
        assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])

    def test_isupper(self):
        assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
        assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])

    def test_rfind(self):
        assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
        assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
        assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])

    def test_rindex(self):

        def fail():
            self.A.rindex('a')

        assert_raises(ValueError, fail)
        assert_(np.char.rindex('abcba', 'b') == 3)
        assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))

    def test_startswith(self):
        assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
        assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            self.A.startswith('3', 'fdjk')

        assert_raises(TypeError, fail)


class TestMethods:
    def setup_method(self):
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']],
                          dtype='S').view(np.chararray)
        self.B = np.array([[' \u03a3 ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)

    def test_capitalize(self):
        tgt = [[b' abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))
        assert_array_equal(self.A.capitalize(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(self.B.capitalize().dtype.type, np.str_))
        assert_array_equal(self.B.capitalize(), tgt)

    def test_center(self):
        assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))
        C = self.A.center([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.center(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_(np.all(C.endswith(b'#')))

        C = np.char.center(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'   FOO    ', b'        FOO         '],
               [b'      FOO      ', b'  FOO   ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_decode(self):
        A = np.char.array([b'\\u03a3'])
        assert_(A.decode('unicode-escape')[0] == '\u03a3')

    def test_encode(self):
        B = self.B.encode('unicode_escape')
        assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))

    def test_expandtabs(self):
        T = self.A.expandtabs()
        assert_(T[2, 0] == b'123      345 \0')

    def test_join(self):
        # NOTE: list(b'123') == [49, 50, 51]
        #       so that b','.join(b'123') results to an error on Py3
        A0 = self.A.decode('ascii')

        A = np.char.join([',', '#'], A0)
        assert_(issubclass(A.dtype.type, np.str_))
        tgt = np.array([[' ,a,b,c, ', ''],
                        ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
                        ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
        assert_array_equal(np.char.join([',', '#'], A0), tgt)

    def test_ljust(self):
        assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))

        C = self.A.ljust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.ljust(20, b'#')
        assert_array_equal(C.startswith(b'#'), [
                [False, True], [False, False], [False, False]])
        assert_(np.all(C.endswith(b'#')))

        C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'FOO       ', b'FOO                 '],
               [b'FOO            ', b'FOO     ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_lower(self):
        tgt = [[b' abc ', b''],
               [b'12345', b'mixedcase'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.lower().dtype.type, np.bytes_))
        assert_array_equal(self.A.lower(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mixedcase'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(self.B.lower().dtype.type, np.str_))
        assert_array_equal(self.B.lower(), tgt)

    def test_lstrip(self):
        tgt = [[b'abc ', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))
        assert_array_equal(self.A.lstrip(), tgt)

        tgt = [[b' abc', b''],
               [b'2345', b'ixedCase'],
               [b'23 \t 345 \x00', b'UPPER']]
        assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)

        tgt = [['\u03a3 ', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(self.B.lstrip().dtype.type, np.str_))
        assert_array_equal(self.B.lstrip(), tgt)

    def test_partition(self):
        P = self.A.partition([b'3', b'M'])
        tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_replace(self):
        R = self.A.replace([b'3', b'a'],
                           [b'##########', b'@'])
        tgt = [[b' abc ', b''],
               [b'12##########45', b'MixedC@se'],
               [b'12########## \t ##########45 \x00', b'UPPER']]
        assert_(issubclass(R.dtype.type, np.bytes_))
        assert_array_equal(R, tgt)

    def test_rjust(self):
        assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))

        C = self.A.rjust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.rjust(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_array_equal(C.endswith(b'#'),
                           [[False, True], [False, False], [False, False]])

        C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'       FOO', b'                 FOO'],
               [b'            FOO', b'     FOO']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_rpartition(self):
        P = self.A.rpartition([b'3', b'M'])
        tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_rsplit(self):
        A = self.A.rsplit(b'3')
        tgt = [[[b' abc '], [b'']],
               [[b'12', b'45'], [b'MixedCase']],
               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_rstrip(self):
        assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))

        tgt = [[b' abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_array_equal(self.A.rstrip(), tgt)

        tgt = [[b' abc ', b''],
               [b'1234', b'MixedCase'],
               [b'123 \t 345 \x00', b'UPP']
               ]
        assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)

        tgt = [[' \u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.rstrip().dtype.type, np.str_))
        assert_array_equal(self.B.rstrip(), tgt)

    def test_strip(self):
        tgt = [[b'abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_(issubclass(self.A.strip().dtype.type, np.bytes_))
        assert_array_equal(self.A.strip(), tgt)

        tgt = [[b' abc ', b''],
               [b'234', b'ixedCas'],
               [b'23 \t 345 \x00', b'UPP']]
        assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)

        tgt = [['\u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.strip().dtype.type, np.str_))
        assert_array_equal(self.B.strip(), tgt)

    def test_split(self):
        A = self.A.split(b'3')
        tgt = [
               [[b' abc '], [b'']],
               [[b'12', b'45'], [b'MixedCase']],
               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_splitlines(self):
        A = np.char.array(['abc\nfds\nwer']).splitlines()
        assert_(issubclass(A.dtype.type, np.object_))
        assert_(A.shape == (1,))
        assert_(len(A[0]) == 3)

    def test_swapcase(self):
        tgt = [[b' ABC ', b''],
               [b'12345', b'mIXEDcASE'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))
        assert_array_equal(self.A.swapcase(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mIXEDcASE'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(self.B.swapcase().dtype.type, np.str_))
        assert_array_equal(self.B.swapcase(), tgt)

    def test_title(self):
        tgt = [[b' Abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.title().dtype.type, np.bytes_))
        assert_array_equal(self.A.title(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(self.B.title().dtype.type, np.str_))
        assert_array_equal(self.B.title(), tgt)

    def test_upper(self):
        tgt = [[b' ABC ', b''],
               [b'12345', b'MIXEDCASE'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.upper().dtype.type, np.bytes_))
        assert_array_equal(self.A.upper(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'MIXEDCASE'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(self.B.upper().dtype.type, np.str_))
        assert_array_equal(self.B.upper(), tgt)

    def test_isnumeric(self):

        def fail():
            self.A.isnumeric()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
        assert_array_equal(self.B.isnumeric(), [
                [False, False], [True, False], [False, False]])

    def test_isdecimal(self):

        def fail():
            self.A.isdecimal()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
        assert_array_equal(self.B.isdecimal(), [
                [False, False], [True, False], [False, False]])


class TestOperations:
    def setup_method(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '456'],
                           ['051', 'tuv']]).view(np.chararray)

    def test_add(self):
        AB = np.array([['abcefg', '123456'],
                       ['789051', 'xyztuv']]).view(np.chararray)
        assert_array_equal(AB, (self.A + self.B))
        assert_(len((self.A + self.B)[0][0]) == 6)

    def test_radd(self):
        QA = np.array([['qabc', 'q123'],
                       ['q789', 'qxyz']]).view(np.chararray)
        assert_array_equal(QA, ('q' + self.A))

    def test_mul(self):
        A = self.A
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)

            assert_array_equal(Ar, (self.A * r))

        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                A*ob

    def test_rmul(self):
        A = self.A
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
            assert_array_equal(Ar, (r * self.A))

        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                ob * A

    def test_mod(self):
        """Ticket #856"""
        F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
        C = np.array([[3, 7], [19, 1]])
        FC = np.array([['3', '7.000000'],
                       ['19', '1']]).view(np.chararray)
        assert_array_equal(FC, F % C)

        A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
        A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
        assert_array_equal(A1, (A % 1))

        A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
        assert_array_equal(A2, (A % [[1, 2], [3, 4]]))

    def test_rmod(self):
        assert_(("%s" % self.A) == str(self.A))
        assert_(("%r" % self.A) == repr(self.A))

        for ob in [42, object()]:
            with assert_raises_regex(
                    TypeError, "unsupported operand type.* and 'chararray'"):
                ob % self.A

    def test_slice(self):
        """Regression test for https://github.com/numpy/numpy/issues/5982"""

        arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
                       dtype='S4').view(np.chararray)
        sl1 = arr[:]
        assert_array_equal(sl1, arr)
        assert_(sl1.base is arr)
        assert_(sl1.base.base is arr.base)

        sl2 = arr[:, :]
        assert_array_equal(sl2, arr)
        assert_(sl2.base is arr)
        assert_(sl2.base.base is arr.base)

        assert_(arr[0, 0] == b'abc')


def test_empty_indexing():
    """Regression test for ticket 1948."""
    # Check that indexing a chararray with an empty list/array returns an
    # empty chararray instead of a chararray with a single empty string in it.
    s = np.chararray((4,))
    assert_(s[[]].size == 0)


@pytest.mark.parametrize(["dt1", "dt2"],
        [("S", "U"), ("U", "S"), ("S", "O"), ("U", "O"),
         ("S", "d"), ("S", "V")])
def test_add_types(dt1, dt2):
    arr1 = np.array([1234234], dtype=dt1)
    # If the following fails, e.g. use a number and test "V" explicitly
    arr2 = np.array([b"423"], dtype=dt2)
    with pytest.raises(TypeError,
                       match=f".*same dtype kind.*{arr1.dtype}.*{arr2.dtype}"):
        np.char.add(arr1, arr2)
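Not part of the uploaded file, an illustrative aside: a small usage sketch of the chararray behaviour that TestWhitespace and the mixed-comparison classes above pin down, namely that element-wise comparisons ignore trailing whitespace. Assumes only standard NumPy:

```python
import numpy as np

# Arrays that differ only in trailing spaces compare equal element-wise
# when viewed as chararrays.
a = np.array([['abc ', '123  '], ['789 ', 'xyz ']]).view(np.chararray)
b = np.array([['abc', '123'], ['789', 'xyz']]).view(np.chararray)

assert (a == b).all()
assert not (a != b).any()

# np.char.equal also accepts plain strings and returns an ndarray result.
assert np.char.equal('a', 'a')
```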
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_deprecations.py
ADDED
@@ -0,0 +1,817 @@
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.

"""
import datetime
import operator
import warnings
import pytest
import tempfile
import re
import sys

import numpy as np
from numpy.testing import (
    assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
    KnownFailureException, break_cycles,
    )

from numpy.core._multiarray_tests import fromstring_null_term_c_api

try:
    import pytz
    _has_pytz = True
except ImportError:
    _has_pytz = False


class _DeprecationTestCase:
    # Just as warning: warnings uses re.match, so the start of this message
    # must match.
    message = ''
    warning_cls = DeprecationWarning

    def setup_method(self):
        self.warn_ctx = warnings.catch_warnings(record=True)
        self.log = self.warn_ctx.__enter__()

        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
        # can give very confusing results because of
        # https://bugs.python.org/issue4180 and it is probably simplest to
        # try to keep the tests cleanly giving only the right warning type.
        # (While checking them set to "error" those are ignored anyway)
        # We still have them show up, because otherwise they would be raised
        warnings.filterwarnings("always", category=self.warning_cls)
        warnings.filterwarnings("always", message=self.message,
                                category=self.warning_cls)

    def teardown_method(self):
        self.warn_ctx.__exit__()

    def assert_deprecated(self, function, num=1, ignore_others=False,
                          function_fails=False,
                          exceptions=np._NoValue,
                          args=(), kwargs={}):
        """Test if DeprecationWarnings are given and raised.

        This first checks if the function when called gives `num`
        DeprecationWarnings, after that it tries to raise these
        DeprecationWarnings and compares them with `exceptions`.
        The exceptions can be different for cases where this code path
        is simply not anticipated and the exception is replaced.

        Parameters
        ----------
        function : callable
            The function to test
        num : int
            Number of DeprecationWarnings to expect. This should normally be 1.
        ignore_others : bool
            Whether warnings of the wrong type should be ignored (note that
            the message is not checked)
        function_fails : bool
            If the function would normally fail, setting this will check for
            warnings inside a try/except block.
        exceptions : Exception or tuple of Exceptions
            Exception to expect when turning the warnings into an error.
            The default checks for DeprecationWarnings. If exceptions is
            empty the function is expected to run successfully.
        args : tuple
            Arguments for `function`
        kwargs : dict
            Keyword arguments for `function`
        """
        __tracebackhide__ = True  # Hide traceback for py.test

        # reset the log
        self.log[:] = []

        if exceptions is np._NoValue:
            exceptions = (self.warning_cls,)

        try:
            function(*args, **kwargs)
        except (Exception if function_fails else tuple()):
            pass

        # just in case, clear the registry
        num_found = 0
        for warning in self.log:
            if warning.category is self.warning_cls:
                num_found += 1
            elif not ignore_others:
                raise AssertionError(
                        "expected %s but got: %s" %
                        (self.warning_cls.__name__, warning.category))
        if num is not None and num_found != num:
            msg = "%i warnings found but %i expected." % (len(self.log), num)
            lst = [str(w) for w in self.log]
            raise AssertionError("\n".join([msg] + lst))

        with warnings.catch_warnings():
            warnings.filterwarnings("error", message=self.message,
                                    category=self.warning_cls)
            try:
                function(*args, **kwargs)
                if exceptions != tuple():
                    raise AssertionError(
                            "No error raised during function call")
            except exceptions:
                if exceptions == tuple():
                    raise AssertionError(
                            "Error raised during function call")

    def assert_not_deprecated(self, function, args=(), kwargs={}):
        """Test that warnings are not raised.

        This is just a shorthand for:

        self.assert_deprecated(function, num=0, ignore_others=True,
                        exceptions=tuple(), args=args, kwargs=kwargs)
        """
        self.assert_deprecated(function, num=0, ignore_others=True,
                        exceptions=tuple(), args=args, kwargs=kwargs)


class _VisibleDeprecationTestCase(_DeprecationTestCase):
    warning_cls = np.VisibleDeprecationWarning


class TestDatetime64Timezone(_DeprecationTestCase):
    """Parsing of datetime64 with timezones deprecated in 1.11.0, because
    datetime64 is now timezone naive rather than UTC only.

    It will be quite a while before we can remove this, because, at the very
    least, a lot of existing code uses the 'Z' modifier to avoid conversion
    from local time to UTC, even if otherwise it handles time in a timezone
    naive fashion.
    """
    def test_string(self):
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))

    @pytest.mark.skipif(not _has_pytz,
                        reason="The pytz module is not available.")
    def test_datetime(self):
        tz = pytz.timezone('US/Eastern')
        dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
        self.assert_deprecated(np.datetime64, args=(dt,))


class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
    """Assigning the 'data' attribute of an ndarray is unsafe as pointed
    out in gh-7093. Eventually, such assignment should NOT be allowed, but
    in the interests of maintaining backwards compatibility, only a Deprecation-
    Warning will be raised instead for the time being to give developers time to
    refactor relevant code.
    """

    def test_data_attr_assignment(self):
        a = np.arange(10)
        b = np.linspace(0, 1, 10)

        self.message = ("Assigning the 'data' attribute is an "
                        "inherently unsafe operation and will "
                        "be removed in the future.")
        self.assert_deprecated(a.__setattr__, args=('data', b.data))


class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
    """
    If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
    represent the number in base 2 (positive) or 2's complement (negative) form,
    the function used to silently ignore the parameter and return a representation
    using the minimal number of bits needed for the form in question. Such behavior
    is now considered unsafe from a user perspective and will raise an error in the future.
    """

    def test_insufficient_width_positive(self):
        args = (10,)
        kwargs = {'width': 2}

        self.message = ("Insufficient bit width provided. This behavior "
                        "will raise an error in the future.")
        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)

    def test_insufficient_width_negative(self):
        args = (-5,)
        kwargs = {'width': 2}

        self.message = ("Insufficient bit width provided. This behavior "
                        "will raise an error in the future.")
        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)


class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
    # Deprecated 2021-01-05, NumPy 1.21
    message = r".*`.dtype` attribute"

    def test_deprecation_dtype_attribute_is_dtype(self):
        class dt:
            dtype = "f8"

        class vdt(np.void):
            dtype = "f,f"

        self.assert_deprecated(lambda: np.dtype(dt))
        self.assert_deprecated(lambda: np.dtype(dt()))
        self.assert_deprecated(lambda: np.dtype(vdt))
        self.assert_deprecated(lambda: np.dtype(vdt(1)))


class TestTestDeprecated:
    def test_assert_deprecated(self):
        test_case_instance = _DeprecationTestCase()
        test_case_instance.setup_method()
        assert_raises(AssertionError,
                      test_case_instance.assert_deprecated,
                      lambda: None)

        def foo():
            warnings.warn("foo", category=DeprecationWarning, stacklevel=2)

        test_case_instance.assert_deprecated(foo)
        test_case_instance.teardown_method()


class TestNonNumericConjugate(_DeprecationTestCase):
    """
    Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
    which conflicts with the error behavior of np.conjugate.
    """
    def test_conjugate(self):
        for a in np.array(5), np.array(5j):
            self.assert_not_deprecated(a.conjugate)
        for a in (np.array('s'), np.array('2016', 'M'),
                np.array((1, 2), [('a', int), ('b', int)])):
            self.assert_deprecated(a.conjugate)


class TestNPY_CHAR(_DeprecationTestCase):
    # 2017-05-03, 1.13.0
    def test_npy_char_deprecation(self):
        from numpy.core._multiarray_tests import npy_char_deprecation
        self.assert_deprecated(npy_char_deprecation)
        assert_(npy_char_deprecation() == 'S1')


class TestPyArray_AS1D(_DeprecationTestCase):
    def test_npy_pyarrayas1d_deprecation(self):
        from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)


class TestPyArray_AS2D(_DeprecationTestCase):
    def test_npy_pyarrayas2d_deprecation(self):
        from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)


class TestDatetimeEvent(_DeprecationTestCase):
    # 2017-08-11, 1.14.0
    def test_3_tuple(self):
        for cls in (np.datetime64, np.timedelta64):
            # two valid uses - (unit, num) and (unit, num, den, None)
            self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
            self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))

            # trying to use the event argument, removed in 1.7.0, is deprecated
            # it used to be a uint8
            self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))


class TestTruthTestingEmptyArrays(_DeprecationTestCase):
    # 2017-09-25, 1.14.0
    message = '.*truth value of an empty array is ambiguous.*'

    def test_1d(self):
        self.assert_deprecated(bool, args=(np.array([]),))

    def test_2d(self):
        self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
        self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
        self.assert_deprecated(bool, args=(np.zeros((0, 0)),))


class TestBincount(_DeprecationTestCase):
    # 2017-06-01, 1.14.0
    def test_bincount_minlength(self):
        self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))



class TestGeneratorSum(_DeprecationTestCase):
    # 2018-02-25, 1.15.0
    def test_generator_sum(self):
        self.assert_deprecated(np.sum, args=((i for i in range(5)),))


class TestFromstring(_DeprecationTestCase):
    # 2017-10-19, 1.14
    def test_fromstring(self):
        self.assert_deprecated(np.fromstring, args=('\x00'*80,))


class TestFromStringAndFileInvalidData(_DeprecationTestCase):
    # 2019-06-08, 1.17.0
    # Tests should be moved to real tests when deprecation is done.
    message = "string or file could not be read to its end"

    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
    def test_deprecate_unparsable_data_file(self, invalid_str):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)

        with tempfile.TemporaryFile(mode="w") as f:
            x.tofile(f, sep=',', format='%.2f')
            f.write(invalid_str)

            f.seek(0)
            self.assert_deprecated(lambda: np.fromfile(f, sep=","))
            f.seek(0)
            self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
            # Should not raise:
            with warnings.catch_warnings():
                warnings.simplefilter("error", DeprecationWarning)
                f.seek(0)
                res = np.fromfile(f, sep=",", count=4)
                assert_array_equal(res, x)

    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
    def test_deprecate_unparsable_string(self, invalid_str):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        x_str = "1.51,2,3.51,4{}".format(invalid_str)

        self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
        self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))

        # The C-level API can use not fixed size, but 0 terminated strings,
        # so test that as well:
        bytestr = x_str.encode("ascii")
        self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))

        with assert_warns(DeprecationWarning):
            # this is slightly strange, in that fromstring leaves data
            # potentially uninitialized (would be good to error when all is
            # read, but count is larger then actual data maybe).
            res = np.fromstring(x_str, sep=",", count=5)
            assert_array_equal(res[:-1], x)

        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)

            # Should not raise:
            res = np.fromstring(x_str, sep=",", count=4)
            assert_array_equal(res, x)


class Test_GetSet_NumericOps(_DeprecationTestCase):
    # 2018-09-20, 1.16.0
    def test_get_numeric_ops(self):
        from numpy.core._multiarray_tests import getset_numericops
        self.assert_deprecated(getset_numericops, num=2)

        # empty kwargs prevents any state actually changing which would break
        # other tests.
        self.assert_deprecated(np.set_numeric_ops, kwargs={})
        assert_raises(ValueError, np.set_numeric_ops, add='abc')


class TestShape1Fields(_DeprecationTestCase):
    warning_cls = FutureWarning

    # 2019-05-20, 1.17.0
    def test_shape_1_fields(self):
        self.assert_deprecated(np.dtype, args=([('a', int, 1)],))


class TestNonZero(_DeprecationTestCase):
    # 2019-05-26, 1.17.0
    def test_zerod(self):
        self.assert_deprecated(lambda: np.nonzero(np.array(0)))
        self.assert_deprecated(lambda: np.nonzero(np.array(1)))


class TestToString(_DeprecationTestCase):
    # 2020-03-06 1.19.0
    message = re.escape("tostring() is deprecated. Use tobytes() instead.")

    def test_tostring(self):
        arr = np.array(list(b"test\xFF"), dtype=np.uint8)
        self.assert_deprecated(arr.tostring)

    def test_tostring_matches_tobytes(self):
        arr = np.array(list(b"test\xFF"), dtype=np.uint8)
        b = arr.tobytes()
        with assert_warns(DeprecationWarning):
            s = arr.tostring()
        assert s == b


class TestDTypeCoercion(_DeprecationTestCase):
    # 2020-02-06 1.19.0
    message = "Converting .* to a dtype .*is deprecated"
    deprecated_types = [
        # The builtin scalar super types:
        np.generic, np.flexible, np.number,
        np.inexact, np.floating, np.complexfloating,
        np.integer, np.unsignedinteger, np.signedinteger,
        # character is a deprecated S1 special case:
        np.character,
    ]

    def test_dtype_coercion(self):
        for scalar_type in self.deprecated_types:
            self.assert_deprecated(np.dtype, args=(scalar_type,))

    def test_array_construction(self):
        for scalar_type in self.deprecated_types:
            self.assert_deprecated(np.array, args=([], scalar_type,))

    def test_not_deprecated(self):
        # All specific types are not deprecated:
        for group in np.sctypes.values():
            for scalar_type in group:
                self.assert_not_deprecated(np.dtype, args=(scalar_type,))

        for scalar_type in [type, dict, list, tuple]:
            # Typical python types are coerced to object currently:
            self.assert_not_deprecated(np.dtype, args=(scalar_type,))


class BuiltInRoundComplexDType(_DeprecationTestCase):
    # 2020-03-31 1.19.0
    deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
    not_deprecated_types = [
        np.int8, np.int16, np.int32, np.int64,
        np.uint8, np.uint16, np.uint32, np.uint64,
        np.float16, np.float32, np.float64,
    ]

    def test_deprecated(self):
        for scalar_type in self.deprecated_types:
            scalar = scalar_type(0)
            self.assert_deprecated(round, args=(scalar,))
            self.assert_deprecated(round, args=(scalar, 0))
            self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})

    def test_not_deprecated(self):
        for scalar_type in self.not_deprecated_types:
            scalar = scalar_type(0)
            self.assert_not_deprecated(round, args=(scalar,))
            self.assert_not_deprecated(round, args=(scalar, 0))
            self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})


class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
    # 2020-05-27, NumPy 1.20.0
    message = "Out of bound index found. This was previously ignored.*"

    @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
    def test_empty_subspace(self, index):
        # Test for both a single and two/multiple advanced indices. These
        # This will raise an IndexError in the future.
        arr = np.ones((2, 2, 0))
        self.assert_deprecated(arr.__getitem__, args=(index,))
        self.assert_deprecated(arr.__setitem__, args=(index, 0.))

        # for this array, the subspace is only empty after applying the slice
        arr2 = np.ones((2, 2, 1))
        index2 = (slice(0, 0),) + index
        self.assert_deprecated(arr2.__getitem__, args=(index2,))
        self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))

    def test_empty_index_broadcast_not_deprecated(self):
        arr = np.ones((2, 2, 2))

        index = ([[3], [2]], [])  # broadcast to an empty result.
        self.assert_not_deprecated(arr.__getitem__, args=(index,))
        self.assert_not_deprecated(arr.__setitem__,
                                   args=(index, np.empty((2, 0, 2))))


class TestNonExactMatchDeprecation(_DeprecationTestCase):
    # 2020-04-22
    def test_non_exact_match(self):
        arr = np.array([[3, 6, 6], [4, 5, 1]])
        # misspelt mode check
        self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
        # using completely different word with first character as R
        self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))


class TestMatrixInOuter(_DeprecationTestCase):
    # 2020-05-13 NumPy 1.20.0
    message = (r"add.outer\(\) was passed a numpy matrix as "
               r"(first|second) argument.")

    def test_deprecated(self):
        arr = np.array([1, 2, 3])
        m = np.array([1, 2, 3]).view(np.matrix)
        self.assert_deprecated(np.add.outer, args=(m, m), num=2)
        self.assert_deprecated(np.add.outer, args=(arr, m))
        self.assert_deprecated(np.add.outer, args=(m, arr))
        self.assert_not_deprecated(np.add.outer, args=(arr, arr))


class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
    # NumPy 1.20, 2020-09-03
    message = "concatenate with `axis=None` will use same-kind casting"

    def test_deprecated(self):
        self.assert_deprecated(np.concatenate,
                               args=(([0.], [1.]),),
                               kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))

    def test_not_deprecated(self):
        self.assert_not_deprecated(np.concatenate,
                                   args=(([0.], [1.]),),
                                   kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
                                           'casting': "unsafe"})

        with assert_raises(TypeError):
            # Tests should notice if the deprecation warning is given first...
            np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
                           casting="same_kind")


class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
    # Deprecated 2020-11-24, NumPy 1.20
    """
    Technically, it should be impossible to create numpy object scalars,
    but there was an unpickle path that would in theory allow it. That
    path is invalid and must lead to the warning.
    """
    message = "Unpickling a scalar with object dtype is deprecated."

    def test_deprecated(self):
        ctor = np.core.multiarray.scalar
        self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))


class TestSingleElementSignature(_DeprecationTestCase):
    # Deprecated 2021-04-01, NumPy 1.21
    message = r"The use of a length 1"

    def test_deprecated(self):
        self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
        self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))


class TestCtypesGetter(_DeprecationTestCase):
    # Deprecated 2021-05-18, Numpy 1.21.0
    warning_cls = DeprecationWarning
    ctypes = np.array([1]).ctypes

    @pytest.mark.parametrize(
        "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
    )
    def test_deprecated(self, name: str) -> None:
        func = getattr(self.ctypes, name)
        self.assert_deprecated(lambda: func())

    @pytest.mark.parametrize(
        "name", ["data", "shape", "strides", "_as_parameter_"]
    )
    def test_not_deprecated(self, name: str) -> None:
        self.assert_not_deprecated(lambda: getattr(self.ctypes, name))


PARTITION_DICT = {
    "partition method": np.arange(10).partition,
    "argpartition method": np.arange(10).argpartition,
    "partition function": lambda kth: np.partition(np.arange(10), kth),
    "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
}


@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
class TestPartitionBoolIndex(_DeprecationTestCase):
    # Deprecated 2021-09-29, NumPy 1.22
    warning_cls = DeprecationWarning
    message = "Passing booleans as partition index is deprecated"

    def test_deprecated(self, func):
        self.assert_deprecated(lambda: func(True))
        self.assert_deprecated(lambda: func([False, True]))

    def test_not_deprecated(self, func):
        self.assert_not_deprecated(lambda: func(1))
        self.assert_not_deprecated(lambda: func([0, 1]))


class TestMachAr(_DeprecationTestCase):
    # Deprecated 2022-11-22, NumPy 1.25
    warning_cls = DeprecationWarning

    def test_deprecated_module(self):
        self.assert_deprecated(lambda: getattr(np.core, "MachAr"))


class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
    # Deprecated 2021-11-08, NumPy 1.22
    @pytest.mark.parametrize("func",
        [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
    def test_deprecated(self, func):
        self.assert_deprecated(
            lambda: func([0., 1.], 0., interpolation="linear"))
        self.assert_deprecated(
            lambda: func([0., 1.], 0., interpolation="nearest"))

    @pytest.mark.parametrize("func",
            [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
    def test_both_passed(self, func):
        with warnings.catch_warnings():
            # catch the DeprecationWarning so that it does not raise:
            warnings.simplefilter("always", DeprecationWarning)
            with pytest.raises(TypeError):
                func([0., 1.], 0., interpolation="nearest", method="nearest")


class TestMemEventHook(_DeprecationTestCase):
    # Deprecated 2021-11-18, NumPy 1.23
    def test_mem_seteventhook(self):
        # The actual tests are within the C code in
        # multiarray/_multiarray_tests.c.src
        import numpy.core._multiarray_tests as ma_tests
        with pytest.warns(DeprecationWarning,
                          match='PyDataMem_SetEventHook is deprecated'):
            ma_tests.test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array
        # needs to be larger then limit of small memory cacher in ctors.c
        a = np.zeros(1000)
        del a
        break_cycles()
        with pytest.warns(DeprecationWarning,
                          match='PyDataMem_SetEventHook is deprecated'):
            ma_tests.test_pydatamem_seteventhook_end()


class TestArrayFinalizeNone(_DeprecationTestCase):
    message = "Setting __array_finalize__ = None"

    def test_use_none_is_deprecated(self):
        # Deprecated way that ndarray itself showed nothing needs finalizing.
        class NoFinalize(np.ndarray):
            __array_finalize__ = None

        self.assert_deprecated(lambda: np.array(1).view(NoFinalize))

class TestAxisNotMAXDIMS(_DeprecationTestCase):
|
664 |
+
# Deprecated 2022-01-08, NumPy 1.23
|
665 |
+
message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
|
666 |
+
|
667 |
+
def test_deprecated(self):
|
668 |
+
a = np.zeros((1,)*32)
|
669 |
+
self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
|
670 |
+
|
671 |
+
|
672 |
+
class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
|
673 |
+
# Deprecated 2022-07-03, NumPy 1.23
|
674 |
+
# This test can be removed without replacement after the deprecation.
|
675 |
+
# The tests:
|
676 |
+
# * numpy/lib/tests/test_loadtxt.py::test_integer_signs
|
677 |
+
# * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
|
678 |
+
# Have a warning filter that needs to be removed.
|
679 |
+
message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
|
680 |
+
|
681 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
|
682 |
+
def test_deprecated_warning(self, dtype):
|
683 |
+
with pytest.warns(DeprecationWarning, match=self.message):
|
684 |
+
np.loadtxt(["10.5"], dtype=dtype)
|
685 |
+
|
686 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
|
687 |
+
def test_deprecated_raised(self, dtype):
|
688 |
+
# The DeprecationWarning is chained when raised, so test manually:
|
689 |
+
with warnings.catch_warnings():
|
690 |
+
warnings.simplefilter("error", DeprecationWarning)
|
691 |
+
try:
|
692 |
+
np.loadtxt(["10.5"], dtype=dtype)
|
693 |
+
except ValueError as e:
|
694 |
+
assert isinstance(e.__cause__, DeprecationWarning)
|
695 |
+
|
696 |
+
|
697 |
+
class TestScalarConversion(_DeprecationTestCase):
|
698 |
+
# 2023-01-02, 1.25.0
|
699 |
+
def test_float_conversion(self):
|
700 |
+
self.assert_deprecated(float, args=(np.array([3.14]),))
|
701 |
+
|
702 |
+
def test_behaviour(self):
|
703 |
+
b = np.array([[3.14]])
|
704 |
+
c = np.zeros(5)
|
705 |
+
with pytest.warns(DeprecationWarning):
|
706 |
+
c[0] = b
|
707 |
+
|
708 |
+
|
709 |
+
class TestPyIntConversion(_DeprecationTestCase):
|
710 |
+
message = r".*stop allowing conversion of out-of-bound.*"
|
711 |
+
|
712 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
|
713 |
+
def test_deprecated_scalar(self, dtype):
|
714 |
+
dtype = np.dtype(dtype)
|
715 |
+
info = np.iinfo(dtype)
|
716 |
+
|
717 |
+
# Cover the most common creation paths (all end up in the
|
718 |
+
# same place):
|
719 |
+
def scalar(value, dtype):
|
720 |
+
dtype.type(value)
|
721 |
+
|
722 |
+
def assign(value, dtype):
|
723 |
+
arr = np.array([0, 0, 0], dtype=dtype)
|
724 |
+
arr[2] = value
|
725 |
+
|
726 |
+
def create(value, dtype):
|
727 |
+
np.array([value], dtype=dtype)
|
728 |
+
|
729 |
+
for creation_func in [scalar, assign, create]:
|
730 |
+
try:
|
731 |
+
self.assert_deprecated(
|
732 |
+
lambda: creation_func(info.min - 1, dtype))
|
733 |
+
except OverflowError:
|
734 |
+
pass # OverflowErrors always happened also before and are OK.
|
735 |
+
|
736 |
+
try:
|
737 |
+
self.assert_deprecated(
|
738 |
+
lambda: creation_func(info.max + 1, dtype))
|
739 |
+
except OverflowError:
|
740 |
+
pass # OverflowErrors always happened also before and are OK.
|
741 |
+
|
742 |
+
|
743 |
+
class TestDeprecatedGlobals(_DeprecationTestCase):
|
744 |
+
# Deprecated 2022-11-17, NumPy 1.24
|
745 |
+
def test_type_aliases(self):
|
746 |
+
# from builtins
|
747 |
+
self.assert_deprecated(lambda: np.bool8)
|
748 |
+
self.assert_deprecated(lambda: np.int0)
|
749 |
+
self.assert_deprecated(lambda: np.uint0)
|
750 |
+
self.assert_deprecated(lambda: np.bytes0)
|
751 |
+
self.assert_deprecated(lambda: np.str0)
|
752 |
+
self.assert_deprecated(lambda: np.object0)
|
753 |
+
|
754 |
+
|
755 |
+
@pytest.mark.parametrize("name",
|
756 |
+
["bool", "long", "ulong", "str", "bytes", "object"])
|
757 |
+
def test_future_scalar_attributes(name):
|
758 |
+
# FutureWarning added 2022-11-17, NumPy 1.24,
|
759 |
+
assert name not in dir(np) # we may want to not add them
|
760 |
+
with pytest.warns(FutureWarning,
|
761 |
+
match=f"In the future .*{name}"):
|
762 |
+
assert not hasattr(np, name)
|
763 |
+
|
764 |
+
# Unfortunately, they are currently still valid via `np.dtype()`
|
765 |
+
np.dtype(name)
|
766 |
+
name in np.sctypeDict
|
767 |
+
|
768 |
+
|
769 |
+
# Ignore the above future attribute warning for this test.
|
770 |
+
@pytest.mark.filterwarnings("ignore:In the future:FutureWarning")
|
771 |
+
class TestRemovedGlobals:
|
772 |
+
# Removed 2023-01-12, NumPy 1.24.0
|
773 |
+
# Not a deprecation, but the large error was added to aid those who missed
|
774 |
+
# the previous deprecation, and should be removed similarly to one
|
775 |
+
# (or faster).
|
776 |
+
@pytest.mark.parametrize("name",
|
777 |
+
["object", "bool", "float", "complex", "str", "int"])
|
778 |
+
def test_attributeerror_includes_info(self, name):
|
779 |
+
msg = f".*\n`np.{name}` was a deprecated alias for the builtin"
|
780 |
+
with pytest.raises(AttributeError, match=msg):
|
781 |
+
getattr(np, name)
|
782 |
+
|
783 |
+
|
784 |
+
class TestDeprecatedFinfo(_DeprecationTestCase):
|
785 |
+
# Deprecated in NumPy 1.25, 2023-01-16
|
786 |
+
def test_deprecated_none(self):
|
787 |
+
self.assert_deprecated(np.finfo, args=(None,))
|
788 |
+
|
789 |
+
class TestFromnumeric(_DeprecationTestCase):
|
790 |
+
# 2023-02-28, 1.25.0
|
791 |
+
def test_round_(self):
|
792 |
+
self.assert_deprecated(lambda: np.round_(np.array([1.5, 2.5, 3.5])))
|
793 |
+
|
794 |
+
# 2023-03-02, 1.25.0
|
795 |
+
def test_cumproduct(self):
|
796 |
+
self.assert_deprecated(lambda: np.cumproduct(np.array([1, 2, 3])))
|
797 |
+
|
798 |
+
# 2023-03-02, 1.25.0
|
799 |
+
def test_product(self):
|
800 |
+
self.assert_deprecated(lambda: np.product(np.array([1, 2, 3])))
|
801 |
+
|
802 |
+
# 2023-03-02, 1.25.0
|
803 |
+
def test_sometrue(self):
|
804 |
+
self.assert_deprecated(lambda: np.sometrue(np.array([True, False])))
|
805 |
+
|
806 |
+
# 2023-03-02, 1.25.0
|
807 |
+
def test_alltrue(self):
|
808 |
+
self.assert_deprecated(lambda: np.alltrue(np.array([True, False])))
|
809 |
+
|
810 |
+
|
811 |
+
class TestMathAlias(_DeprecationTestCase):
|
812 |
+
# Deprecated in Numpy 1.25, 2023-04-06
|
813 |
+
def test_deprecated_np_math(self):
|
814 |
+
self.assert_deprecated(lambda: np.math)
|
815 |
+
|
816 |
+
def test_deprecated_np_lib_math(self):
|
817 |
+
self.assert_deprecated(lambda: np.lib.math)
|
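Aside (not part of the diff): the classes above all route through the `assert_deprecated`/`assert_not_deprecated` helpers of `_DeprecationTestCase`. As a rough illustrative sketch using only plain pytest, the same kind of check can be written directly; `np.product` is one of the aliases the suite above marks as deprecated in NumPy 1.25:

import numpy as np
import pytest

def check_product_alias_is_deprecated():
    # The deprecated alias still works, but it must emit a DeprecationWarning.
    with pytest.warns(DeprecationWarning):
        assert np.product(np.array([1, 2, 3])) == 6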
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_dlpack.py
ADDED
@@ -0,0 +1,124 @@
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal, IS_PYPY
+
+
+class TestDLPack:
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+    def test_dunder_dlpack_refcount(self):
+        x = np.arange(5)
+        y = x.__dlpack__()
+        assert sys.getrefcount(x) == 3
+        del y
+        assert sys.getrefcount(x) == 2
+
+    def test_dunder_dlpack_stream(self):
+        x = np.arange(5)
+        x.__dlpack__(stream=None)
+
+        with pytest.raises(RuntimeError):
+            x.__dlpack__(stream=1)
+
+    def test_strides_not_multiple_of_itemsize(self):
+        dt = np.dtype([('int', np.int32), ('char', np.int8)])
+        y = np.zeros((5,), dtype=dt)
+        z = y['int']
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(z)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+    def test_from_dlpack_refcount(self):
+        x = np.arange(5)
+        y = np.from_dlpack(x)
+        assert sys.getrefcount(x) == 3
+        del y
+        assert sys.getrefcount(x) == 2
+
+    @pytest.mark.parametrize("dtype", [
+        np.bool_,
+        np.int8, np.int16, np.int32, np.int64,
+        np.uint8, np.uint16, np.uint32, np.uint64,
+        np.float16, np.float32, np.float64,
+        np.complex64, np.complex128
+    ])
+    def test_dtype_passthrough(self, dtype):
+        x = np.arange(5).astype(dtype)
+        y = np.from_dlpack(x)
+
+        assert y.dtype == x.dtype
+        assert_array_equal(x, y)
+
+    def test_invalid_dtype(self):
+        x = np.asarray(np.datetime64('2021-05-27'))
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(x)
+
+    def test_invalid_byte_swapping(self):
+        dt = np.dtype('=i8').newbyteorder()
+        x = np.arange(5, dtype=dt)
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(x)
+
+    def test_non_contiguous(self):
+        x = np.arange(25).reshape((5, 5))
+
+        y1 = x[0]
+        assert_array_equal(y1, np.from_dlpack(y1))
+
+        y2 = x[:, 0]
+        assert_array_equal(y2, np.from_dlpack(y2))
+
+        y3 = x[1, :]
+        assert_array_equal(y3, np.from_dlpack(y3))
+
+        y4 = x[1]
+        assert_array_equal(y4, np.from_dlpack(y4))
+
+        y5 = np.diagonal(x).copy()
+        assert_array_equal(y5, np.from_dlpack(y5))
+
+    @pytest.mark.parametrize("ndim", range(33))
+    def test_higher_dims(self, ndim):
+        shape = (1,) * ndim
+        x = np.zeros(shape, dtype=np.float64)
+
+        assert shape == np.from_dlpack(x).shape
+
+    def test_dlpack_device(self):
+        x = np.arange(5)
+        assert x.__dlpack_device__() == (1, 0)
+        y = np.from_dlpack(x)
+        assert y.__dlpack_device__() == (1, 0)
+        z = y[::2]
+        assert z.__dlpack_device__() == (1, 0)
+
+    def dlpack_deleter_exception(self):
+        x = np.arange(5)
+        _ = x.__dlpack__()
+        raise RuntimeError
+
+    def test_dlpack_destructor_exception(self):
+        with pytest.raises(RuntimeError):
+            self.dlpack_deleter_exception()
+
+    def test_readonly(self):
+        x = np.arange(5)
+        x.flags.writeable = False
+        with pytest.raises(BufferError):
+            x.__dlpack__()
+
+    def test_ndim0(self):
+        x = np.array(1.0)
+        y = np.from_dlpack(x)
+        assert_array_equal(x, y)
+
+    def test_size1dims_arrays(self):
+        x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
+                       buffer=np.ones(1000, dtype=np.uint8), order='F')
+        y = np.from_dlpack(x)
+        assert_array_equal(x, y)
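Aside (not part of the diff): a minimal sketch of the zero-copy exchange these tests exercise, assuming a CPU array and a NumPy version that ships `np.from_dlpack` (1.22+):

import numpy as np

x = np.arange(5)
y = np.from_dlpack(x)                    # import x through the DLPack protocol
assert np.shares_memory(x, y)            # no copy: both arrays refer to the same buffer
assert y.__dlpack_device__() == (1, 0)   # device tuple: (kDLCPU, device id 0)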
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_dtype.py
ADDED
@@ -0,0 +1,1906 @@
1 |
+
import sys
|
2 |
+
import operator
|
3 |
+
import pytest
|
4 |
+
import ctypes
|
5 |
+
import gc
|
6 |
+
import types
|
7 |
+
from typing import Any
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
import numpy.dtypes
|
11 |
+
from numpy.core._rational_tests import rational
|
12 |
+
from numpy.core._multiarray_tests import create_custom_field_dtype
|
13 |
+
from numpy.testing import (
|
14 |
+
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
|
15 |
+
IS_PYSTON, _OLD_PROMOTION)
|
16 |
+
from numpy.compat import pickle
|
17 |
+
from itertools import permutations
|
18 |
+
import random
|
19 |
+
|
20 |
+
import hypothesis
|
21 |
+
from hypothesis.extra import numpy as hynp
|
22 |
+
|
23 |
+
|
24 |
+
|
25 |
+
def assert_dtype_equal(a, b):
|
26 |
+
assert_equal(a, b)
|
27 |
+
assert_equal(hash(a), hash(b),
|
28 |
+
"two equivalent types do not hash to the same value !")
|
29 |
+
|
30 |
+
def assert_dtype_not_equal(a, b):
|
31 |
+
assert_(a != b)
|
32 |
+
assert_(hash(a) != hash(b),
|
33 |
+
"two different types hash to the same value !")
|
34 |
+
|
35 |
+
class TestBuiltin:
|
36 |
+
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
|
37 |
+
np.compat.unicode])
|
38 |
+
def test_run(self, t):
|
39 |
+
"""Only test hash runs at all."""
|
40 |
+
dt = np.dtype(t)
|
41 |
+
hash(dt)
|
42 |
+
|
43 |
+
@pytest.mark.parametrize('t', [int, float])
|
44 |
+
def test_dtype(self, t):
|
45 |
+
# Make sure equivalent byte order char hash the same (e.g. < and = on
|
46 |
+
# little endian)
|
47 |
+
dt = np.dtype(t)
|
48 |
+
dt2 = dt.newbyteorder("<")
|
49 |
+
dt3 = dt.newbyteorder(">")
|
50 |
+
if dt == dt2:
|
51 |
+
assert_(dt.byteorder != dt2.byteorder, "bogus test")
|
52 |
+
assert_dtype_equal(dt, dt2)
|
53 |
+
else:
|
54 |
+
assert_(dt.byteorder != dt3.byteorder, "bogus test")
|
55 |
+
assert_dtype_equal(dt, dt3)
|
56 |
+
|
57 |
+
def test_equivalent_dtype_hashing(self):
|
58 |
+
# Make sure equivalent dtypes with different type num hash equal
|
59 |
+
uintp = np.dtype(np.uintp)
|
60 |
+
if uintp.itemsize == 4:
|
61 |
+
left = uintp
|
62 |
+
right = np.dtype(np.uint32)
|
63 |
+
else:
|
64 |
+
left = uintp
|
65 |
+
right = np.dtype(np.ulonglong)
|
66 |
+
assert_(left == right)
|
67 |
+
assert_(hash(left) == hash(right))
|
68 |
+
|
69 |
+
def test_invalid_types(self):
|
70 |
+
# Make sure invalid type strings raise an error
|
71 |
+
|
72 |
+
assert_raises(TypeError, np.dtype, 'O3')
|
73 |
+
assert_raises(TypeError, np.dtype, 'O5')
|
74 |
+
assert_raises(TypeError, np.dtype, 'O7')
|
75 |
+
assert_raises(TypeError, np.dtype, 'b3')
|
76 |
+
assert_raises(TypeError, np.dtype, 'h4')
|
77 |
+
assert_raises(TypeError, np.dtype, 'I5')
|
78 |
+
assert_raises(TypeError, np.dtype, 'e3')
|
79 |
+
assert_raises(TypeError, np.dtype, 'f5')
|
80 |
+
|
81 |
+
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
|
82 |
+
assert_raises(TypeError, np.dtype, 'g12')
|
83 |
+
elif np.dtype('g').itemsize == 12:
|
84 |
+
assert_raises(TypeError, np.dtype, 'g16')
|
85 |
+
|
86 |
+
if np.dtype('l').itemsize == 8:
|
87 |
+
assert_raises(TypeError, np.dtype, 'l4')
|
88 |
+
assert_raises(TypeError, np.dtype, 'L4')
|
89 |
+
else:
|
90 |
+
assert_raises(TypeError, np.dtype, 'l8')
|
91 |
+
assert_raises(TypeError, np.dtype, 'L8')
|
92 |
+
|
93 |
+
if np.dtype('q').itemsize == 8:
|
94 |
+
assert_raises(TypeError, np.dtype, 'q4')
|
95 |
+
assert_raises(TypeError, np.dtype, 'Q4')
|
96 |
+
else:
|
97 |
+
assert_raises(TypeError, np.dtype, 'q8')
|
98 |
+
assert_raises(TypeError, np.dtype, 'Q8')
|
99 |
+
|
100 |
+
def test_richcompare_invalid_dtype_equality(self):
|
101 |
+
# Make sure objects that cannot be converted to valid
|
102 |
+
# dtypes results in False/True when compared to valid dtypes.
|
103 |
+
# Here 7 cannot be converted to dtype. No exceptions should be raised
|
104 |
+
|
105 |
+
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
|
106 |
+
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
|
107 |
+
|
108 |
+
@pytest.mark.parametrize(
|
109 |
+
'operation',
|
110 |
+
[operator.le, operator.lt, operator.ge, operator.gt])
|
111 |
+
def test_richcompare_invalid_dtype_comparison(self, operation):
|
112 |
+
# Make sure TypeError is raised for comparison operators
|
113 |
+
# for invalid dtypes. Here 7 is an invalid dtype.
|
114 |
+
|
115 |
+
with pytest.raises(TypeError):
|
116 |
+
operation(np.dtype(np.int32), 7)
|
117 |
+
|
118 |
+
@pytest.mark.parametrize("dtype",
|
119 |
+
['Bool', 'Bytes0', 'Complex32', 'Complex64',
|
120 |
+
'Datetime64', 'Float16', 'Float32', 'Float64',
|
121 |
+
'Int8', 'Int16', 'Int32', 'Int64',
|
122 |
+
'Object0', 'Str0', 'Timedelta64',
|
123 |
+
'UInt8', 'UInt16', 'Uint32', 'UInt32',
|
124 |
+
'Uint64', 'UInt64', 'Void0',
|
125 |
+
"Float128", "Complex128"])
|
126 |
+
def test_numeric_style_types_are_invalid(self, dtype):
|
127 |
+
with assert_raises(TypeError):
|
128 |
+
np.dtype(dtype)
|
129 |
+
|
130 |
+
def test_remaining_dtypes_with_bad_bytesize(self):
|
131 |
+
# The np.<name> aliases were deprecated, these probably should be too
|
132 |
+
assert np.dtype("int0") is np.dtype("intp")
|
133 |
+
assert np.dtype("uint0") is np.dtype("uintp")
|
134 |
+
assert np.dtype("bool8") is np.dtype("bool")
|
135 |
+
assert np.dtype("bytes0") is np.dtype("bytes")
|
136 |
+
assert np.dtype("str0") is np.dtype("str")
|
137 |
+
assert np.dtype("object0") is np.dtype("object")
|
138 |
+
|
139 |
+
@pytest.mark.parametrize(
|
140 |
+
'value',
|
141 |
+
['m8', 'M8', 'datetime64', 'timedelta64',
|
142 |
+
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
|
143 |
+
'>f', '<f', '=f', '|f',
|
144 |
+
])
|
145 |
+
def test_dtype_bytes_str_equivalence(self, value):
|
146 |
+
bytes_value = value.encode('ascii')
|
147 |
+
from_bytes = np.dtype(bytes_value)
|
148 |
+
from_str = np.dtype(value)
|
149 |
+
assert_dtype_equal(from_bytes, from_str)
|
150 |
+
|
151 |
+
def test_dtype_from_bytes(self):
|
152 |
+
# Empty bytes object
|
153 |
+
assert_raises(TypeError, np.dtype, b'')
|
154 |
+
# Byte order indicator, but no type
|
155 |
+
assert_raises(TypeError, np.dtype, b'|')
|
156 |
+
|
157 |
+
# Single character with ordinal < NPY_NTYPES returns
|
158 |
+
# type by index into _builtin_descrs
|
159 |
+
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
|
160 |
+
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
|
161 |
+
|
162 |
+
# Single character where value is a valid type code
|
163 |
+
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
|
164 |
+
|
165 |
+
# Bytes with non-ascii values raise errors
|
166 |
+
assert_raises(TypeError, np.dtype, b'\xff')
|
167 |
+
assert_raises(TypeError, np.dtype, b's\xff')
|
168 |
+
|
169 |
+
def test_bad_param(self):
|
170 |
+
# Can't give a size that's too small
|
171 |
+
assert_raises(ValueError, np.dtype,
|
172 |
+
{'names':['f0', 'f1'],
|
173 |
+
'formats':['i4', 'i1'],
|
174 |
+
'offsets':[0, 4],
|
175 |
+
'itemsize':4})
|
176 |
+
# If alignment is enabled, the alignment (4) must divide the itemsize
|
177 |
+
assert_raises(ValueError, np.dtype,
|
178 |
+
{'names':['f0', 'f1'],
|
179 |
+
'formats':['i4', 'i1'],
|
180 |
+
'offsets':[0, 4],
|
181 |
+
'itemsize':9}, align=True)
|
182 |
+
# If alignment is enabled, the individual fields must be aligned
|
183 |
+
assert_raises(ValueError, np.dtype,
|
184 |
+
{'names':['f0', 'f1'],
|
185 |
+
'formats':['i1', 'f4'],
|
186 |
+
'offsets':[0, 2]}, align=True)
|
187 |
+
|
188 |
+
def test_field_order_equality(self):
|
189 |
+
x = np.dtype({'names': ['A', 'B'],
|
190 |
+
'formats': ['i4', 'f4'],
|
191 |
+
'offsets': [0, 4]})
|
192 |
+
y = np.dtype({'names': ['B', 'A'],
|
193 |
+
'formats': ['i4', 'f4'],
|
194 |
+
'offsets': [4, 0]})
|
195 |
+
assert_equal(x == y, False)
|
196 |
+
# This is an safe cast (not equiv) due to the different names:
|
197 |
+
assert np.can_cast(x, y, casting="safe")
|
198 |
+
|
199 |
+
@pytest.mark.parametrize(
|
200 |
+
["type_char", "char_size", "scalar_type"],
|
201 |
+
[["U", 4, np.str_],
|
202 |
+
["S", 1, np.bytes_]])
|
203 |
+
def test_create_string_dtypes_directly(
|
204 |
+
self, type_char, char_size, scalar_type):
|
205 |
+
dtype_class = type(np.dtype(type_char))
|
206 |
+
|
207 |
+
dtype = dtype_class(8)
|
208 |
+
assert dtype.type is scalar_type
|
209 |
+
assert dtype.itemsize == 8*char_size
|
210 |
+
|
211 |
+
def test_create_invalid_string_errors(self):
|
212 |
+
one_too_big = np.iinfo(np.intc).max + 1
|
213 |
+
with pytest.raises(TypeError):
|
214 |
+
type(np.dtype("U"))(one_too_big // 4)
|
215 |
+
|
216 |
+
with pytest.raises(TypeError):
|
217 |
+
# Code coverage for very large numbers:
|
218 |
+
type(np.dtype("U"))(np.iinfo(np.intp).max // 4 + 1)
|
219 |
+
|
220 |
+
if one_too_big < sys.maxsize:
|
221 |
+
with pytest.raises(TypeError):
|
222 |
+
type(np.dtype("S"))(one_too_big)
|
223 |
+
|
224 |
+
with pytest.raises(ValueError):
|
225 |
+
type(np.dtype("U"))(-1)
|
226 |
+
|
227 |
+
|
228 |
+
class TestRecord:
|
229 |
+
def test_equivalent_record(self):
|
230 |
+
"""Test whether equivalent record dtypes hash the same."""
|
231 |
+
a = np.dtype([('yo', int)])
|
232 |
+
b = np.dtype([('yo', int)])
|
233 |
+
assert_dtype_equal(a, b)
|
234 |
+
|
235 |
+
def test_different_names(self):
|
236 |
+
# In theory, they may hash the same (collision) ?
|
237 |
+
a = np.dtype([('yo', int)])
|
238 |
+
b = np.dtype([('ye', int)])
|
239 |
+
assert_dtype_not_equal(a, b)
|
240 |
+
|
241 |
+
def test_different_titles(self):
|
242 |
+
# In theory, they may hash the same (collision) ?
|
243 |
+
a = np.dtype({'names': ['r', 'b'],
|
244 |
+
'formats': ['u1', 'u1'],
|
245 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
246 |
+
b = np.dtype({'names': ['r', 'b'],
|
247 |
+
'formats': ['u1', 'u1'],
|
248 |
+
'titles': ['RRed pixel', 'Blue pixel']})
|
249 |
+
assert_dtype_not_equal(a, b)
|
250 |
+
|
251 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
252 |
+
def test_refcount_dictionary_setting(self):
|
253 |
+
names = ["name1"]
|
254 |
+
formats = ["f8"]
|
255 |
+
titles = ["t1"]
|
256 |
+
offsets = [0]
|
257 |
+
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
|
258 |
+
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
|
259 |
+
np.dtype(d)
|
260 |
+
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
|
261 |
+
assert refcounts == refcounts_new
|
262 |
+
|
263 |
+
def test_mutate(self):
|
264 |
+
# Mutating a dtype should reset the cached hash value.
|
265 |
+
# NOTE: Mutating should be deprecated, but new API added to replace it.
|
266 |
+
a = np.dtype([('yo', int)])
|
267 |
+
b = np.dtype([('yo', int)])
|
268 |
+
c = np.dtype([('ye', int)])
|
269 |
+
assert_dtype_equal(a, b)
|
270 |
+
assert_dtype_not_equal(a, c)
|
271 |
+
a.names = ['ye']
|
272 |
+
assert_dtype_equal(a, c)
|
273 |
+
assert_dtype_not_equal(a, b)
|
274 |
+
state = b.__reduce__()[2]
|
275 |
+
a.__setstate__(state)
|
276 |
+
assert_dtype_equal(a, b)
|
277 |
+
assert_dtype_not_equal(a, c)
|
278 |
+
|
279 |
+
def test_mutate_error(self):
|
280 |
+
# NOTE: Mutating should be deprecated, but new API added to replace it.
|
281 |
+
a = np.dtype("i,i")
|
282 |
+
|
283 |
+
with pytest.raises(ValueError, match="must replace all names at once"):
|
284 |
+
a.names = ["f0"]
|
285 |
+
|
286 |
+
with pytest.raises(ValueError, match=".*and not string"):
|
287 |
+
a.names = ["f0", b"not a unicode name"]
|
288 |
+
|
289 |
+
def test_not_lists(self):
|
290 |
+
"""Test if an appropriate exception is raised when passing bad values to
|
291 |
+
the dtype constructor.
|
292 |
+
"""
|
293 |
+
assert_raises(TypeError, np.dtype,
|
294 |
+
dict(names={'A', 'B'}, formats=['f8', 'i4']))
|
295 |
+
assert_raises(TypeError, np.dtype,
|
296 |
+
dict(names=['A', 'B'], formats={'f8', 'i4'}))
|
297 |
+
|
298 |
+
def test_aligned_size(self):
|
299 |
+
# Check that structured dtypes get padded to an aligned size
|
300 |
+
dt = np.dtype('i4, i1', align=True)
|
301 |
+
assert_equal(dt.itemsize, 8)
|
302 |
+
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
|
303 |
+
assert_equal(dt.itemsize, 8)
|
304 |
+
dt = np.dtype({'names':['f0', 'f1'],
|
305 |
+
'formats':['i4', 'u1'],
|
306 |
+
'offsets':[0, 4]}, align=True)
|
307 |
+
assert_equal(dt.itemsize, 8)
|
308 |
+
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
|
309 |
+
assert_equal(dt.itemsize, 8)
|
310 |
+
# Nesting should preserve that alignment
|
311 |
+
dt1 = np.dtype([('f0', 'i4'),
|
312 |
+
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
|
313 |
+
('f2', 'i1')], align=True)
|
314 |
+
assert_equal(dt1.itemsize, 20)
|
315 |
+
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
|
316 |
+
'formats':['i4',
|
317 |
+
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
|
318 |
+
'i1'],
|
319 |
+
'offsets':[0, 4, 16]}, align=True)
|
320 |
+
assert_equal(dt2.itemsize, 20)
|
321 |
+
dt3 = np.dtype({'f0': ('i4', 0),
|
322 |
+
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
|
323 |
+
'f2': ('i1', 16)}, align=True)
|
324 |
+
assert_equal(dt3.itemsize, 20)
|
325 |
+
assert_equal(dt1, dt2)
|
326 |
+
assert_equal(dt2, dt3)
|
327 |
+
# Nesting should preserve packing
|
328 |
+
dt1 = np.dtype([('f0', 'i4'),
|
329 |
+
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
|
330 |
+
('f2', 'i1')], align=False)
|
331 |
+
assert_equal(dt1.itemsize, 11)
|
332 |
+
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
|
333 |
+
'formats':['i4',
|
334 |
+
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
|
335 |
+
'i1'],
|
336 |
+
'offsets':[0, 4, 10]}, align=False)
|
337 |
+
assert_equal(dt2.itemsize, 11)
|
338 |
+
dt3 = np.dtype({'f0': ('i4', 0),
|
339 |
+
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
|
340 |
+
'f2': ('i1', 10)}, align=False)
|
341 |
+
assert_equal(dt3.itemsize, 11)
|
342 |
+
assert_equal(dt1, dt2)
|
343 |
+
assert_equal(dt2, dt3)
|
344 |
+
# Array of subtype should preserve alignment
|
345 |
+
dt1 = np.dtype([('a', '|i1'),
|
346 |
+
('b', [('f0', '<i2'),
|
347 |
+
('f1', '<f4')], 2)], align=True)
|
348 |
+
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
|
349 |
+
('b', [('f0', '<i2'), ('', '|V2'),
|
350 |
+
('f1', '<f4')], (2,))])
|
351 |
+
|
352 |
+
def test_union_struct(self):
|
353 |
+
# Should be able to create union dtypes
|
354 |
+
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
|
355 |
+
'offsets':[0, 0, 2]}, align=True)
|
356 |
+
assert_equal(dt.itemsize, 4)
|
357 |
+
a = np.array([3], dtype='<u4').view(dt)
|
358 |
+
a['f1'] = 10
|
359 |
+
a['f2'] = 36
|
360 |
+
assert_equal(a['f0'], 10 + 36*256*256)
|
361 |
+
# Should be able to specify fields out of order
|
362 |
+
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
|
363 |
+
'offsets':[4, 0, 2]}, align=True)
|
364 |
+
assert_equal(dt.itemsize, 8)
|
365 |
+
# field name should not matter: assignment is by position
|
366 |
+
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
|
367 |
+
'formats':['<u4', '<u2', '<u2'],
|
368 |
+
'offsets':[4, 0, 2]}, align=True)
|
369 |
+
vals = [(0, 1, 2), (3, 2**15-1, 4)]
|
370 |
+
vals2 = [(0, 1, 2), (3, 2**15-1, 4)]
|
371 |
+
a = np.array(vals, dt)
|
372 |
+
b = np.array(vals2, dt2)
|
373 |
+
assert_equal(a.astype(dt2), b)
|
374 |
+
assert_equal(b.astype(dt), a)
|
375 |
+
assert_equal(a.view(dt2), b)
|
376 |
+
assert_equal(b.view(dt), a)
|
377 |
+
# Should not be able to overlap objects with other types
|
378 |
+
assert_raises(TypeError, np.dtype,
|
379 |
+
{'names':['f0', 'f1'],
|
380 |
+
'formats':['O', 'i1'],
|
381 |
+
'offsets':[0, 2]})
|
382 |
+
assert_raises(TypeError, np.dtype,
|
383 |
+
{'names':['f0', 'f1'],
|
384 |
+
'formats':['i4', 'O'],
|
385 |
+
'offsets':[0, 3]})
|
386 |
+
assert_raises(TypeError, np.dtype,
|
387 |
+
{'names':['f0', 'f1'],
|
388 |
+
'formats':[[('a', 'O')], 'i1'],
|
389 |
+
'offsets':[0, 2]})
|
390 |
+
assert_raises(TypeError, np.dtype,
|
391 |
+
{'names':['f0', 'f1'],
|
392 |
+
'formats':['i4', [('a', 'O')]],
|
393 |
+
'offsets':[0, 3]})
|
394 |
+
# Out of order should still be ok, however
|
395 |
+
dt = np.dtype({'names':['f0', 'f1'],
|
396 |
+
'formats':['i1', 'O'],
|
397 |
+
'offsets':[np.dtype('intp').itemsize, 0]})
|
398 |
+
|
399 |
+
@pytest.mark.parametrize(["obj", "dtype", "expected"],
|
400 |
+
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
|
401 |
+
(3, "(3)f4,", [3, 3, 3]),
|
402 |
+
(np.float64(2), "(2)f4,", [2, 2]),
|
403 |
+
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
|
404 |
+
(["1", "2"], "(2)i,", None)])
|
405 |
+
def test_subarray_list(self, obj, dtype, expected):
|
406 |
+
dtype = np.dtype(dtype)
|
407 |
+
res = np.array(obj, dtype=dtype)
|
408 |
+
|
409 |
+
if expected is None:
|
410 |
+
# iterate the 1-d list to fill the array
|
411 |
+
expected = np.empty(len(obj), dtype=dtype)
|
412 |
+
for i in range(len(expected)):
|
413 |
+
expected[i] = obj[i]
|
414 |
+
|
415 |
+
assert_array_equal(res, expected)
|
416 |
+
|
417 |
+
def test_comma_datetime(self):
|
418 |
+
dt = np.dtype('M8[D],datetime64[Y],i8')
|
419 |
+
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
|
420 |
+
('f1', 'datetime64[Y]'),
|
421 |
+
('f2', 'i8')]))
|
422 |
+
|
423 |
+
def test_from_dictproxy(self):
|
424 |
+
# Tests for PR #5920
|
425 |
+
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
|
426 |
+
assert_dtype_equal(dt, np.dtype(dt.fields))
|
427 |
+
dt2 = np.dtype((np.void, dt.fields))
|
428 |
+
assert_equal(dt2.fields, dt.fields)
|
429 |
+
|
430 |
+
def test_from_dict_with_zero_width_field(self):
|
431 |
+
# Regression test for #6430 / #2196
|
432 |
+
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
|
433 |
+
dt2 = np.dtype({'names': ['val1', 'val2'],
|
434 |
+
'formats': [(np.float32, (0,)), int]})
|
435 |
+
|
436 |
+
assert_dtype_equal(dt, dt2)
|
437 |
+
assert_equal(dt.fields['val1'][0].itemsize, 0)
|
438 |
+
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
|
439 |
+
|
440 |
+
def test_bool_commastring(self):
|
441 |
+
d = np.dtype('?,?,?') # raises?
|
442 |
+
assert_equal(len(d.names), 3)
|
443 |
+
for n in d.names:
|
444 |
+
assert_equal(d.fields[n][0], np.dtype('?'))
|
445 |
+
|
446 |
+
def test_nonint_offsets(self):
|
447 |
+
# gh-8059
|
448 |
+
def make_dtype(off):
|
449 |
+
return np.dtype({'names': ['A'], 'formats': ['i4'],
|
450 |
+
'offsets': [off]})
|
451 |
+
|
452 |
+
assert_raises(TypeError, make_dtype, 'ASD')
|
453 |
+
assert_raises(OverflowError, make_dtype, 2**70)
|
454 |
+
assert_raises(TypeError, make_dtype, 2.3)
|
455 |
+
assert_raises(ValueError, make_dtype, -10)
|
456 |
+
|
457 |
+
# no errors here:
|
458 |
+
dt = make_dtype(np.uint32(0))
|
459 |
+
np.zeros(1, dtype=dt)[0].item()
|
460 |
+
|
461 |
+
def test_fields_by_index(self):
|
462 |
+
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
|
463 |
+
assert_dtype_equal(dt[0], np.dtype(np.int8))
|
464 |
+
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
|
465 |
+
assert_dtype_equal(dt[-1], dt[1])
|
466 |
+
assert_dtype_equal(dt[-2], dt[0])
|
467 |
+
assert_raises(IndexError, lambda: dt[-3])
|
468 |
+
|
469 |
+
assert_raises(TypeError, operator.getitem, dt, 3.0)
|
470 |
+
|
471 |
+
assert_equal(dt[1], dt[np.int8(1)])
|
472 |
+
|
473 |
+
@pytest.mark.parametrize('align_flag',[False, True])
|
474 |
+
def test_multifield_index(self, align_flag):
|
475 |
+
# indexing with a list produces subfields
|
476 |
+
# the align flag should be preserved
|
477 |
+
dt = np.dtype([
|
478 |
+
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
|
479 |
+
], align=align_flag)
|
480 |
+
|
481 |
+
dt_sub = dt[['B', 'col1']]
|
482 |
+
assert_equal(
|
483 |
+
dt_sub,
|
484 |
+
np.dtype({
|
485 |
+
'names': ['B', 'col1'],
|
486 |
+
'formats': ['<f8', '<U20'],
|
487 |
+
'offsets': [88, 0],
|
488 |
+
'titles': [None, 'title'],
|
489 |
+
'itemsize': 96
|
490 |
+
})
|
491 |
+
)
|
492 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
493 |
+
|
494 |
+
dt_sub = dt[['B']]
|
495 |
+
assert_equal(
|
496 |
+
dt_sub,
|
497 |
+
np.dtype({
|
498 |
+
'names': ['B'],
|
499 |
+
'formats': ['<f8'],
|
500 |
+
'offsets': [88],
|
501 |
+
'itemsize': 96
|
502 |
+
})
|
503 |
+
)
|
504 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
505 |
+
|
506 |
+
dt_sub = dt[[]]
|
507 |
+
assert_equal(
|
508 |
+
dt_sub,
|
509 |
+
np.dtype({
|
510 |
+
'names': [],
|
511 |
+
'formats': [],
|
512 |
+
'offsets': [],
|
513 |
+
'itemsize': 96
|
514 |
+
})
|
515 |
+
)
|
516 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
517 |
+
|
518 |
+
assert_raises(TypeError, operator.getitem, dt, ())
|
519 |
+
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
|
520 |
+
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
|
521 |
+
assert_raises(KeyError, operator.getitem, dt, ['fake'])
|
522 |
+
assert_raises(KeyError, operator.getitem, dt, ['title'])
|
523 |
+
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
|
524 |
+
|
525 |
+
def test_partial_dict(self):
|
526 |
+
# 'names' is missing
|
527 |
+
assert_raises(ValueError, np.dtype,
|
528 |
+
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
|
529 |
+
|
530 |
+
def test_fieldless_views(self):
|
531 |
+
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
|
532 |
+
'itemsize':8})
|
533 |
+
assert_raises(ValueError, a.view, np.dtype([]))
|
534 |
+
|
535 |
+
d = np.dtype((np.dtype([]), 10))
|
536 |
+
assert_equal(d.shape, (10,))
|
537 |
+
assert_equal(d.itemsize, 0)
|
538 |
+
assert_equal(d.base, np.dtype([]))
|
539 |
+
|
540 |
+
arr = np.fromiter((() for i in range(10)), [])
|
541 |
+
assert_equal(arr.dtype, np.dtype([]))
|
542 |
+
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
|
543 |
+
assert_equal(np.frombuffer(b'', dtype=[], count=2),
|
544 |
+
np.empty(2, dtype=[]))
|
545 |
+
|
546 |
+
assert_raises(ValueError, np.dtype, ([], 'f8'))
|
547 |
+
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
|
548 |
+
|
549 |
+
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
|
550 |
+
np.ones(2, dtype=bool))
|
551 |
+
|
552 |
+
assert_equal(np.zeros((1, 2), dtype=[]) == a,
|
553 |
+
np.ones((1, 2), dtype=bool))
|
554 |
+
|
555 |
+
def test_nonstructured_with_object(self):
|
556 |
+
# See gh-23277, the dtype here thinks it contain objects, if the
|
557 |
+
# assert about that fails, the test becomes meaningless (which is OK)
|
558 |
+
arr = np.recarray((0,), dtype="O")
|
559 |
+
assert arr.dtype.names is None # no fields
|
560 |
+
assert arr.dtype.hasobject # but claims to contain objects
|
561 |
+
del arr # the deletion failed previously.
|
562 |
+
|
563 |
+
|
564 |
+
class TestSubarray:
|
565 |
+
def test_single_subarray(self):
|
566 |
+
a = np.dtype((int, (2)))
|
567 |
+
b = np.dtype((int, (2,)))
|
568 |
+
assert_dtype_equal(a, b)
|
569 |
+
|
570 |
+
assert_equal(type(a.subdtype[1]), tuple)
|
571 |
+
assert_equal(type(b.subdtype[1]), tuple)
|
572 |
+
|
573 |
+
def test_equivalent_record(self):
|
574 |
+
"""Test whether equivalent subarray dtypes hash the same."""
|
575 |
+
a = np.dtype((int, (2, 3)))
|
576 |
+
b = np.dtype((int, (2, 3)))
|
577 |
+
assert_dtype_equal(a, b)
|
578 |
+
|
579 |
+
def test_nonequivalent_record(self):
|
580 |
+
"""Test whether different subarray dtypes hash differently."""
|
581 |
+
a = np.dtype((int, (2, 3)))
|
582 |
+
b = np.dtype((int, (3, 2)))
|
583 |
+
assert_dtype_not_equal(a, b)
|
584 |
+
|
585 |
+
a = np.dtype((int, (2, 3)))
|
586 |
+
b = np.dtype((int, (2, 2)))
|
587 |
+
assert_dtype_not_equal(a, b)
|
588 |
+
|
589 |
+
a = np.dtype((int, (1, 2, 3)))
|
590 |
+
b = np.dtype((int, (1, 2)))
|
591 |
+
assert_dtype_not_equal(a, b)
|
592 |
+
|
593 |
+
def test_shape_equal(self):
|
594 |
+
"""Test some data types that are equal"""
|
595 |
+
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
|
596 |
+
# FutureWarning during deprecation period; after it is passed this
|
597 |
+
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
|
598 |
+
with pytest.warns(FutureWarning):
|
599 |
+
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
|
600 |
+
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
|
601 |
+
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
|
602 |
+
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
|
603 |
+
assert_dtype_equal(np.dtype(d), np.dtype(d))
|
604 |
+
|
605 |
+
def test_shape_simple(self):
|
606 |
+
"""Test some simple cases that shouldn't be equal"""
|
607 |
+
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
|
608 |
+
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
|
609 |
+
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
|
610 |
+
|
611 |
+
def test_shape_monster(self):
|
612 |
+
"""Test some more complicated cases that shouldn't be equal"""
|
613 |
+
assert_dtype_not_equal(
|
614 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
615 |
+
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
|
616 |
+
assert_dtype_not_equal(
|
617 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
618 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
|
619 |
+
assert_dtype_not_equal(
|
620 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
621 |
+
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
|
622 |
+
assert_dtype_not_equal(
|
623 |
+
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
624 |
+
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
|
625 |
+
|
626 |
+
def test_shape_sequence(self):
|
627 |
+
# Any sequence of integers should work as shape, but the result
|
628 |
+
# should be a tuple (immutable) of base type integers.
|
629 |
+
a = np.array([1, 2, 3], dtype=np.int16)
|
630 |
+
l = [1, 2, 3]
|
631 |
+
# Array gets converted
|
632 |
+
dt = np.dtype([('a', 'f4', a)])
|
633 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
634 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
635 |
+
# List gets converted
|
636 |
+
dt = np.dtype([('a', 'f4', l)])
|
637 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
638 |
+
#
|
639 |
+
|
640 |
+
class IntLike:
|
641 |
+
def __index__(self):
|
642 |
+
return 3
|
643 |
+
|
644 |
+
def __int__(self):
|
645 |
+
# (a PyNumber_Check fails without __int__)
|
646 |
+
return 3
|
647 |
+
|
648 |
+
dt = np.dtype([('a', 'f4', IntLike())])
|
649 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
650 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
651 |
+
dt = np.dtype([('a', 'f4', (IntLike(),))])
|
652 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
653 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
654 |
+
|
655 |
+
def test_shape_matches_ndim(self):
|
656 |
+
dt = np.dtype([('a', 'f4', ())])
|
657 |
+
assert_equal(dt['a'].shape, ())
|
658 |
+
assert_equal(dt['a'].ndim, 0)
|
659 |
+
|
660 |
+
dt = np.dtype([('a', 'f4')])
|
661 |
+
assert_equal(dt['a'].shape, ())
|
662 |
+
assert_equal(dt['a'].ndim, 0)
|
663 |
+
|
664 |
+
dt = np.dtype([('a', 'f4', 4)])
|
665 |
+
assert_equal(dt['a'].shape, (4,))
|
666 |
+
assert_equal(dt['a'].ndim, 1)
|
667 |
+
|
668 |
+
dt = np.dtype([('a', 'f4', (1, 2, 3))])
|
669 |
+
assert_equal(dt['a'].shape, (1, 2, 3))
|
670 |
+
assert_equal(dt['a'].ndim, 3)
|
671 |
+
|
672 |
+
def test_shape_invalid(self):
|
673 |
+
# Check that the shape is valid.
|
674 |
+
max_int = np.iinfo(np.intc).max
|
675 |
+
max_intp = np.iinfo(np.intp).max
|
676 |
+
# Too large values (the datatype is part of this)
|
677 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
|
678 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
|
679 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
|
680 |
+
# Takes a different code path (fails earlier:
|
681 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
|
682 |
+
# Negative values
|
683 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
|
684 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
|
685 |
+
|
686 |
+
def test_alignment(self):
|
687 |
+
#Check that subarrays are aligned
|
688 |
+
t1 = np.dtype('(1,)i4', align=True)
|
689 |
+
t2 = np.dtype('2i4', align=True)
|
690 |
+
assert_equal(t1.alignment, t2.alignment)
|
691 |
+
|
692 |
+
def test_aligned_empty(self):
|
693 |
+
# Mainly regression test for gh-19696: construction failed completely
|
694 |
+
dt = np.dtype([], align=True)
|
695 |
+
assert dt == np.dtype([])
|
696 |
+
dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
|
697 |
+
assert dt == np.dtype([])
|
698 |
+
|
699 |
+
def test_subarray_base_item(self):
|
700 |
+
arr = np.ones(3, dtype=[("f", "i", 3)])
|
701 |
+
# Extracting the field "absorbs" the subarray into a view:
|
702 |
+
assert arr["f"].base is arr
|
703 |
+
# Extract the structured item, and then check the tuple component:
|
704 |
+
item = arr.item(0)
|
705 |
+
assert type(item) is tuple and len(item) == 1
|
706 |
+
assert item[0].base is arr
|
707 |
+
|
708 |
+
def test_subarray_cast_copies(self):
|
709 |
+
# Older versions of NumPy did NOT copy, but they got the ownership
|
710 |
+
# wrong (not actually knowing the correct base!). Versions since 1.21
|
711 |
+
# (I think) crashed fairly reliable. This defines the correct behavior
|
712 |
+
# as a copy. Keeping the ownership would be possible (but harder)
|
713 |
+
arr = np.ones(3, dtype=[("f", "i", 3)])
|
714 |
+
cast = arr.astype(object)
|
715 |
+
for fields in cast:
|
716 |
+
assert type(fields) == tuple and len(fields) == 1
|
717 |
+
subarr = fields[0]
|
718 |
+
assert subarr.base is None
|
719 |
+
assert subarr.flags.owndata
|
720 |
+
|
721 |
+
|
722 |
+
def iter_struct_object_dtypes():
|
723 |
+
"""
|
724 |
+
Iterates over a few complex dtypes and object pattern which
|
725 |
+
fill the array with a given object (defaults to a singleton).
|
726 |
+
|
727 |
+
Yields
|
728 |
+
------
|
729 |
+
dtype : dtype
|
730 |
+
pattern : tuple
|
731 |
+
Structured tuple for use with `np.array`.
|
732 |
+
count : int
|
733 |
+
Number of objects stored in the dtype.
|
734 |
+
singleton : object
|
735 |
+
A singleton object. The returned pattern is constructed so that
|
736 |
+
all objects inside the datatype are set to the singleton.
|
737 |
+
"""
|
738 |
+
obj = object()
|
739 |
+
|
740 |
+
dt = np.dtype([('b', 'O', (2, 3))])
|
741 |
+
p = ([[obj] * 3] * 2,)
|
742 |
+
yield pytest.param(dt, p, 6, obj, id="<subarray>")
|
743 |
+
|
744 |
+
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
|
745 |
+
p = (0, [[obj] * 3] * 2)
|
746 |
+
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
|
747 |
+
|
748 |
+
dt = np.dtype([('a', 'i4'),
|
749 |
+
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
|
750 |
+
p = (0, [[(obj, 0)] * 3] * 2)
|
751 |
+
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
|
752 |
+
|
753 |
+
dt = np.dtype([('a', 'i4'),
|
754 |
+
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
|
755 |
+
p = (0, [[(obj, obj)] * 3] * 2)
|
756 |
+
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
|
757 |
+
|
758 |
+
|
759 |
+
@pytest.mark.skipif(
|
760 |
+
sys.version_info >= (3, 12),
|
761 |
+
reason="Python 3.12 has immortal refcounts, this test will no longer "
|
762 |
+
"work. See gh-23986"
|
763 |
+
)
|
764 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
765 |
+
class TestStructuredObjectRefcounting:
|
766 |
+
"""These tests cover various uses of complicated structured types which
|
767 |
+
include objects and thus require reference counting.
|
768 |
+
"""
|
769 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
770 |
+
iter_struct_object_dtypes())
|
771 |
+
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
|
772 |
+
pytest.param(np.empty, None,
|
773 |
+
# None is probably used for too many things
|
774 |
+
marks=pytest.mark.skip("unreliable due to python's behaviour")),
|
775 |
+
(np.ones, 1),
|
776 |
+
(np.zeros, 0)])
|
777 |
+
def test_structured_object_create_delete(self, dt, pat, count, singleton,
|
778 |
+
creation_func, creation_obj):
|
779 |
+
"""Structured object reference counting in creation and deletion"""
|
780 |
+
# The test assumes that 0, 1, and None are singletons.
|
781 |
+
gc.collect()
|
782 |
+
before = sys.getrefcount(creation_obj)
|
783 |
+
arr = creation_func(3, dt)
|
784 |
+
|
785 |
+
now = sys.getrefcount(creation_obj)
|
786 |
+
assert now - before == count * 3
|
787 |
+
del arr
|
788 |
+
now = sys.getrefcount(creation_obj)
|
789 |
+
assert now == before
|
790 |
+
|
791 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
792 |
+
iter_struct_object_dtypes())
|
793 |
+
def test_structured_object_item_setting(self, dt, pat, count, singleton):
|
794 |
+
"""Structured object reference counting for simple item setting"""
|
795 |
+
one = 1
|
796 |
+
|
797 |
+
gc.collect()
|
798 |
+
before = sys.getrefcount(singleton)
|
799 |
+
arr = np.array([pat] * 3, dt)
|
800 |
+
assert sys.getrefcount(singleton) - before == count * 3
|
801 |
+
# Fill with `1` and check that it was replaced correctly:
|
802 |
+
before2 = sys.getrefcount(one)
|
803 |
+
arr[...] = one
|
804 |
+
after2 = sys.getrefcount(one)
|
805 |
+
assert after2 - before2 == count * 3
|
806 |
+
del arr
|
807 |
+
gc.collect()
|
808 |
+
assert sys.getrefcount(one) == before2
|
809 |
+
assert sys.getrefcount(singleton) == before
|
810 |
+
|
811 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
812 |
+
iter_struct_object_dtypes())
|
813 |
+
@pytest.mark.parametrize(
|
814 |
+
['shape', 'index', 'items_changed'],
|
815 |
+
[((3,), ([0, 2],), 2),
|
816 |
+
((3, 2), ([0, 2], slice(None)), 4),
|
817 |
+
((3, 2), ([0, 2], [1]), 2),
|
818 |
+
((3,), ([True, False, True]), 2)])
|
819 |
+
def test_structured_object_indexing(self, shape, index, items_changed,
|
820 |
+
dt, pat, count, singleton):
|
821 |
+
"""Structured object reference counting for advanced indexing."""
|
822 |
+
# Use two small negative values (should be singletons, but less likely
|
823 |
+
# to run into race-conditions). This failed in some threaded envs
|
824 |
+
# When using 0 and 1. If it fails again, should remove all explicit
|
825 |
+
# checks, and rely on `pytest-leaks` reference count checker only.
|
826 |
+
val0 = -4
|
827 |
+
val1 = -5
|
828 |
+
|
829 |
+
arr = np.full(shape, val0, dt)
|
830 |
+
|
831 |
+
gc.collect()
|
832 |
+
before_val0 = sys.getrefcount(val0)
|
833 |
+
before_val1 = sys.getrefcount(val1)
|
834 |
+
# Test item getting:
|
835 |
+
part = arr[index]
|
836 |
+
after_val0 = sys.getrefcount(val0)
|
837 |
+
assert after_val0 - before_val0 == count * items_changed
|
838 |
+
del part
|
839 |
+
# Test item setting:
|
840 |
+
arr[index] = val1
|
841 |
+
gc.collect()
|
842 |
+
after_val0 = sys.getrefcount(val0)
|
843 |
+
after_val1 = sys.getrefcount(val1)
|
844 |
+
assert before_val0 - after_val0 == count * items_changed
|
845 |
+
assert after_val1 - before_val1 == count * items_changed
|
846 |
+
|
847 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
848 |
+
iter_struct_object_dtypes())
|
849 |
+
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
|
850 |
+
"""Structured object reference counting for specialized functions.
|
851 |
+
The older functions such as take and repeat use different code paths
|
852 |
+
then item setting (when writing this).
|
853 |
+
"""
|
854 |
+
indices = [0, 1]
|
855 |
+
|
856 |
+
arr = np.array([pat] * 3, dt)
|
857 |
+
gc.collect()
|
858 |
+
before = sys.getrefcount(singleton)
|
859 |
+
res = arr.take(indices)
|
860 |
+
after = sys.getrefcount(singleton)
|
861 |
+
assert after - before == count * 2
|
862 |
+
new = res.repeat(10)
|
863 |
+
gc.collect()
|
864 |
+
after_repeat = sys.getrefcount(singleton)
|
865 |
+
assert after_repeat - after == count * 2 * 10
|
866 |
+
|
867 |
+
|
868 |
+
class TestStructuredDtypeSparseFields:
    """Tests subarray fields which contain sparse dtypes so that
    not all memory is used by the dtype work. Such dtype's should
    leave the underlying memory unchanged.
    """
    dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
                             'offsets':[0, 4]}, (2, 3))])
    sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
                                    'offsets':[4]}, (2, 3))])

    def test_sparse_field_assignment(self):
        arr = np.zeros(3, self.dtype)
        sparse_arr = arr.view(self.sparse_dtype)

        sparse_arr[...] = np.finfo(np.float32).max
        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))

    def test_sparse_field_assignment_fancy(self):
        # Fancy assignment goes to the copyswap function for complex types:
        arr = np.zeros(3, self.dtype)
        sparse_arr = arr.view(self.sparse_dtype)

        sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))


class TestMonsterType:
    """Test deeply nested subtypes."""

    def test1(self):
        simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                            'titles': ['Red pixel', 'Blue pixel']})
        a = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        b = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        assert_dtype_equal(a, b)

        c = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        d = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        assert_dtype_equal(c, d)

    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_list_recursion(self):
        l = list()
        l.append(('f', l))
        with pytest.raises(RecursionError):
            np.dtype(l)

    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_tuple_recursion(self):
        d = np.int32
        for i in range(100000):
            d = (d, (1,))
        with pytest.raises(RecursionError):
            np.dtype(d)

    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_dict_recursion(self):
        d = dict(names=['self'], formats=[None], offsets=[0])
        d['formats'][0] = d
        with pytest.raises(RecursionError):
            np.dtype(d)


class TestMetadata:
    def test_no_metadata(self):
        d = np.dtype(int)
        assert_(d.metadata is None)

    def test_metadata_takes_dict(self):
        d = np.dtype(int, metadata={'datum': 1})
        assert_(d.metadata == {'datum': 1})

    def test_metadata_rejects_nondict(self):
        assert_raises(TypeError, np.dtype, int, metadata='datum')
        assert_raises(TypeError, np.dtype, int, metadata=1)
        assert_raises(TypeError, np.dtype, int, metadata=None)

    def test_nested_metadata(self):
        d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
        assert_(d['a'].metadata == {'datum': 1})

    def test_base_metadata_copied(self):
        d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
        assert_(d.metadata == {'datum': 1})

class TestString:
|
960 |
+
def test_complex_dtype_str(self):
|
961 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
962 |
+
('rtile', '>f4', (64, 36))], (3,)),
|
963 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
964 |
+
('bright', '>f4', (8, 36))])])
|
965 |
+
assert_equal(str(dt),
|
966 |
+
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
|
967 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
968 |
+
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
|
969 |
+
"('bright', '>f4', (8, 36))])]")
|
970 |
+
|
971 |
+
# If the sticky aligned flag is set to True, it makes the
|
972 |
+
# str() function use a dict representation with an 'aligned' flag
|
973 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
974 |
+
('rtile', '>f4', (64, 36))],
|
975 |
+
(3,)),
|
976 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
977 |
+
('bright', '>f4', (8, 36))])],
|
978 |
+
align=True)
|
979 |
+
assert_equal(str(dt),
|
980 |
+
"{'names': ['top', 'bottom'],"
|
981 |
+
" 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
|
982 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
983 |
+
"[('bleft', ('>f4', (8, 64)), (1,)), "
|
984 |
+
"('bright', '>f4', (8, 36))]],"
|
985 |
+
" 'offsets': [0, 76800],"
|
986 |
+
" 'itemsize': 80000,"
|
987 |
+
" 'aligned': True}")
|
988 |
+
with np.printoptions(legacy='1.21'):
|
989 |
+
assert_equal(str(dt),
|
990 |
+
"{'names':['top','bottom'], "
|
991 |
+
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
|
992 |
+
"('rtile', '>f4', (64, 36))], (3,)),"
|
993 |
+
"[('bleft', ('>f4', (8, 64)), (1,)), "
|
994 |
+
"('bright', '>f4', (8, 36))]], "
|
995 |
+
"'offsets':[0,76800], "
|
996 |
+
"'itemsize':80000, "
|
997 |
+
"'aligned':True}")
|
998 |
+
assert_equal(np.dtype(eval(str(dt))), dt)
|
999 |
+
|
1000 |
+
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
|
1001 |
+
'offsets': [0, 1, 2],
|
1002 |
+
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
|
1003 |
+
assert_equal(str(dt),
|
1004 |
+
"[(('Red pixel', 'r'), 'u1'), "
|
1005 |
+
"(('Green pixel', 'g'), 'u1'), "
|
1006 |
+
"(('Blue pixel', 'b'), 'u1')]")
|
1007 |
+
|
1008 |
+
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
|
1009 |
+
'formats': ['<u4', 'u1', 'u1', 'u1'],
|
1010 |
+
'offsets': [0, 0, 1, 2],
|
1011 |
+
'titles': ['Color', 'Red pixel',
|
1012 |
+
'Green pixel', 'Blue pixel']})
|
1013 |
+
assert_equal(str(dt),
|
1014 |
+
"{'names': ['rgba', 'r', 'g', 'b'],"
|
1015 |
+
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
|
1016 |
+
" 'offsets': [0, 0, 1, 2],"
|
1017 |
+
" 'titles': ['Color', 'Red pixel', "
|
1018 |
+
"'Green pixel', 'Blue pixel'],"
|
1019 |
+
" 'itemsize': 4}")
|
1020 |
+
|
1021 |
+
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
|
1022 |
+
'offsets': [0, 2],
|
1023 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
1024 |
+
assert_equal(str(dt),
|
1025 |
+
"{'names': ['r', 'b'],"
|
1026 |
+
" 'formats': ['u1', 'u1'],"
|
1027 |
+
" 'offsets': [0, 2],"
|
1028 |
+
" 'titles': ['Red pixel', 'Blue pixel'],"
|
1029 |
+
" 'itemsize': 3}")
|
1030 |
+
|
1031 |
+
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
|
1032 |
+
assert_equal(str(dt),
|
1033 |
+
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
|
1034 |
+
|
1035 |
+
def test_repr_structured(self):
|
1036 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
1037 |
+
('rtile', '>f4', (64, 36))], (3,)),
|
1038 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
1039 |
+
('bright', '>f4', (8, 36))])])
|
1040 |
+
assert_equal(repr(dt),
|
1041 |
+
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
|
1042 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
1043 |
+
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
|
1044 |
+
"('bright', '>f4', (8, 36))])])")
|
1045 |
+
|
1046 |
+
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
|
1047 |
+
'offsets': [0, 1, 2],
|
1048 |
+
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
|
1049 |
+
align=True)
|
1050 |
+
assert_equal(repr(dt),
|
1051 |
+
"dtype([(('Red pixel', 'r'), 'u1'), "
|
1052 |
+
"(('Green pixel', 'g'), 'u1'), "
|
1053 |
+
"(('Blue pixel', 'b'), 'u1')], align=True)")
|
1054 |
+
|
1055 |
+
def test_repr_structured_not_packed(self):
|
1056 |
+
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
|
1057 |
+
'formats': ['<u4', 'u1', 'u1', 'u1'],
|
1058 |
+
'offsets': [0, 0, 1, 2],
|
1059 |
+
'titles': ['Color', 'Red pixel',
|
1060 |
+
'Green pixel', 'Blue pixel']}, align=True)
|
1061 |
+
assert_equal(repr(dt),
|
1062 |
+
"dtype({'names': ['rgba', 'r', 'g', 'b'],"
|
1063 |
+
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
|
1064 |
+
" 'offsets': [0, 0, 1, 2],"
|
1065 |
+
" 'titles': ['Color', 'Red pixel', "
|
1066 |
+
"'Green pixel', 'Blue pixel'],"
|
1067 |
+
" 'itemsize': 4}, align=True)")
|
1068 |
+
|
1069 |
+
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
|
1070 |
+
'offsets': [0, 2],
|
1071 |
+
'titles': ['Red pixel', 'Blue pixel'],
|
1072 |
+
'itemsize': 4})
|
1073 |
+
assert_equal(repr(dt),
|
1074 |
+
"dtype({'names': ['r', 'b'], "
|
1075 |
+
"'formats': ['u1', 'u1'], "
|
1076 |
+
"'offsets': [0, 2], "
|
1077 |
+
"'titles': ['Red pixel', 'Blue pixel'], "
|
1078 |
+
"'itemsize': 4})")
|
1079 |
+
|
1080 |
+
def test_repr_structured_datetime(self):
|
1081 |
+
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
|
1082 |
+
assert_equal(repr(dt),
|
1083 |
+
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
|
1084 |
+
|
1085 |
+
def test_repr_str_subarray(self):
|
1086 |
+
dt = np.dtype(('<i2', (1,)))
|
1087 |
+
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
|
1088 |
+
assert_equal(str(dt), "('<i2', (1,))")
|
1089 |
+
|
1090 |
+
def test_base_dtype_with_object_type(self):
|
1091 |
+
# Issue gh-2798, should not error.
|
1092 |
+
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
|
1093 |
+
|
1094 |
+
def test_empty_string_to_object(self):
|
1095 |
+
# Pull request #4722
|
1096 |
+
np.array(["", ""]).astype(object)
|
1097 |
+
|
1098 |
+
def test_void_subclass_unsized(self):
|
1099 |
+
dt = np.dtype(np.record)
|
1100 |
+
assert_equal(repr(dt), "dtype('V')")
|
1101 |
+
assert_equal(str(dt), '|V0')
|
1102 |
+
assert_equal(dt.name, 'record')
|
1103 |
+
|
1104 |
+
def test_void_subclass_sized(self):
|
1105 |
+
dt = np.dtype((np.record, 2))
|
1106 |
+
assert_equal(repr(dt), "dtype('V2')")
|
1107 |
+
assert_equal(str(dt), '|V2')
|
1108 |
+
assert_equal(dt.name, 'record16')
|
1109 |
+
|
1110 |
+
def test_void_subclass_fields(self):
|
1111 |
+
dt = np.dtype((np.record, [('a', '<u2')]))
|
1112 |
+
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
|
1113 |
+
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
|
1114 |
+
assert_equal(dt.name, 'record16')
|
1115 |
+
|
1116 |
+
|
1117 |
+
class TestDtypeAttributeDeletion:

    def test_dtype_non_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        attr = ["subdtype", "descr", "str", "name", "base", "shape",
                "isbuiltin", "isnative", "isalignedstruct", "fields",
                "metadata", "hasobject"]

        for s in attr:
            assert_raises(AttributeError, delattr, dt, s)

    def test_dtype_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        attr = ["names"]
        for s in attr:
            assert_raises(AttributeError, delattr, dt, s)


class TestDtypeAttributes:
    def test_descr_has_trailing_void(self):
        # see gh-6359
        dtype = np.dtype({
            'names': ['A', 'B'],
            'formats': ['f4', 'f4'],
            'offsets': [0, 8],
            'itemsize': 16})
        new_dtype = np.dtype(dtype.descr)
        assert_equal(new_dtype.itemsize, 16)

    def test_name_dtype_subclass(self):
        # Ticket #4357
        class user_def_subcls(np.void):
            pass
        assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')

    def test_zero_stride(self):
        arr = np.ones(1, dtype="i8")
        arr = np.broadcast_to(arr, 10)
        assert arr.strides == (0,)
        with pytest.raises(ValueError):
            arr.dtype = "i1"

class TestDTypeMakeCanonical:
|
1160 |
+
def check_canonical(self, dtype, canonical):
|
1161 |
+
"""
|
1162 |
+
Check most properties relevant to "canonical" versions of a dtype,
|
1163 |
+
which is mainly native byte order for datatypes supporting this.
|
1164 |
+
|
1165 |
+
The main work is checking structured dtypes with fields, where we
|
1166 |
+
reproduce most the actual logic used in the C-code.
|
1167 |
+
"""
|
1168 |
+
assert type(dtype) is type(canonical)
|
1169 |
+
|
1170 |
+
# a canonical DType should always have equivalent casting (both ways)
|
1171 |
+
assert np.can_cast(dtype, canonical, casting="equiv")
|
1172 |
+
assert np.can_cast(canonical, dtype, casting="equiv")
|
1173 |
+
# a canonical dtype (and its fields) is always native (checks fields):
|
1174 |
+
assert canonical.isnative
|
1175 |
+
|
1176 |
+
# Check that canonical of canonical is the same (no casting):
|
1177 |
+
assert np.result_type(canonical) == canonical
|
1178 |
+
|
1179 |
+
if not dtype.names:
|
1180 |
+
# The flags currently never change for unstructured dtypes
|
1181 |
+
assert dtype.flags == canonical.flags
|
1182 |
+
return
|
1183 |
+
|
1184 |
+
# Must have all the needs API flag set:
|
1185 |
+
assert dtype.flags & 0b10000
|
1186 |
+
|
1187 |
+
# Check that the fields are identical (including titles):
|
1188 |
+
assert dtype.fields.keys() == canonical.fields.keys()
|
1189 |
+
|
1190 |
+
def aligned_offset(offset, alignment):
|
1191 |
+
# round up offset:
|
1192 |
+
return - (-offset // alignment) * alignment
|
1193 |
+
|
1194 |
+
totalsize = 0
|
1195 |
+
max_alignment = 1
|
1196 |
+
for name in dtype.names:
|
1197 |
+
# each field is also canonical:
|
1198 |
+
new_field_descr = canonical.fields[name][0]
|
1199 |
+
self.check_canonical(dtype.fields[name][0], new_field_descr)
|
1200 |
+
|
1201 |
+
# Must have the "inherited" object related flags:
|
1202 |
+
expected = 0b11011 & new_field_descr.flags
|
1203 |
+
assert (canonical.flags & expected) == expected
|
1204 |
+
|
1205 |
+
if canonical.isalignedstruct:
|
1206 |
+
totalsize = aligned_offset(totalsize, new_field_descr.alignment)
|
1207 |
+
max_alignment = max(new_field_descr.alignment, max_alignment)
|
1208 |
+
|
1209 |
+
assert canonical.fields[name][1] == totalsize
|
1210 |
+
# if a title exists, they must match (otherwise empty tuple):
|
1211 |
+
assert dtype.fields[name][2:] == canonical.fields[name][2:]
|
1212 |
+
|
1213 |
+
totalsize += new_field_descr.itemsize
|
1214 |
+
|
1215 |
+
if canonical.isalignedstruct:
|
1216 |
+
totalsize = aligned_offset(totalsize, max_alignment)
|
1217 |
+
assert canonical.itemsize == totalsize
|
1218 |
+
assert canonical.alignment == max_alignment
|
1219 |
+
|
1220 |
+
def test_simple(self):
|
1221 |
+
dt = np.dtype(">i4")
|
1222 |
+
assert np.result_type(dt).isnative
|
1223 |
+
assert np.result_type(dt).num == dt.num
|
1224 |
+
|
1225 |
+
# dtype with empty space:
|
1226 |
+
struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
|
1227 |
+
canonical = np.result_type(struct_dt)
|
1228 |
+
assert canonical.itemsize == 4+8
|
1229 |
+
assert canonical.isnative
|
1230 |
+
|
1231 |
+
# aligned struct dtype with empty space:
|
1232 |
+
struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
|
1233 |
+
canonical = np.result_type(struct_dt)
|
1234 |
+
assert canonical.isalignedstruct
|
1235 |
+
assert canonical.itemsize == np.dtype("i8").alignment + 8
|
1236 |
+
assert canonical.isnative
|
1237 |
+
|
1238 |
+
def test_object_flag_not_inherited(self):
|
1239 |
+
# The following dtype still indicates "object", because its included
|
1240 |
+
# in the unaccessible space (maybe this could change at some point):
|
1241 |
+
arr = np.ones(3, "i,O,i")[["f0", "f2"]]
|
1242 |
+
assert arr.dtype.hasobject
|
1243 |
+
canonical_dt = np.result_type(arr.dtype)
|
1244 |
+
assert not canonical_dt.hasobject
|
1245 |
+
|
1246 |
+
@pytest.mark.slow
|
1247 |
+
@hypothesis.given(dtype=hynp.nested_dtypes())
|
1248 |
+
def test_make_canonical_hypothesis(self, dtype):
|
1249 |
+
canonical = np.result_type(dtype)
|
1250 |
+
self.check_canonical(dtype, canonical)
|
1251 |
+
# result_type with two arguments should always give identical results:
|
1252 |
+
two_arg_result = np.result_type(dtype, dtype)
|
1253 |
+
assert np.can_cast(two_arg_result, canonical, casting="no")
|
1254 |
+
|
1255 |
+
@pytest.mark.slow
|
1256 |
+
@hypothesis.given(
|
1257 |
+
dtype=hypothesis.extra.numpy.array_dtypes(
|
1258 |
+
subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
|
1259 |
+
min_size=5, max_size=10, allow_subarrays=True))
|
1260 |
+
def test_structured(self, dtype):
|
1261 |
+
# Pick 4 of the fields at random. This will leave empty space in the
|
1262 |
+
# dtype (since we do not canonicalize it here).
|
1263 |
+
field_subset = random.sample(dtype.names, k=4)
|
1264 |
+
dtype_with_empty_space = dtype[field_subset]
|
1265 |
+
assert dtype_with_empty_space.itemsize == dtype.itemsize
|
1266 |
+
canonicalized = np.result_type(dtype_with_empty_space)
|
1267 |
+
self.check_canonical(dtype_with_empty_space, canonicalized)
|
1268 |
+
# promotion with two arguments should always give identical results:
|
1269 |
+
two_arg_result = np.promote_types(
|
1270 |
+
dtype_with_empty_space, dtype_with_empty_space)
|
1271 |
+
assert np.can_cast(two_arg_result, canonicalized, casting="no")
|
1272 |
+
|
1273 |
+
# Ensure that we also check aligned struct (check the opposite, in
|
1274 |
+
# case hypothesis grows support for `align`. Then repeat the test:
|
1275 |
+
dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
|
1276 |
+
dtype_with_empty_space = dtype_aligned[field_subset]
|
1277 |
+
assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
|
1278 |
+
canonicalized = np.result_type(dtype_with_empty_space)
|
1279 |
+
self.check_canonical(dtype_with_empty_space, canonicalized)
|
1280 |
+
# promotion with two arguments should always give identical results:
|
1281 |
+
two_arg_result = np.promote_types(
|
1282 |
+
dtype_with_empty_space, dtype_with_empty_space)
|
1283 |
+
assert np.can_cast(two_arg_result, canonicalized, casting="no")
|
1284 |
+
|
1285 |
+
|
1286 |
+
class TestPickling:
|
1287 |
+
|
1288 |
+
def check_pickling(self, dtype):
|
1289 |
+
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
|
1290 |
+
buf = pickle.dumps(dtype, proto)
|
1291 |
+
# The dtype pickling itself pickles `np.dtype` if it is pickled
|
1292 |
+
# as a singleton `dtype` should be stored in the buffer:
|
1293 |
+
assert b"_DType_reconstruct" not in buf
|
1294 |
+
assert b"dtype" in buf
|
1295 |
+
pickled = pickle.loads(buf)
|
1296 |
+
assert_equal(pickled, dtype)
|
1297 |
+
assert_equal(pickled.descr, dtype.descr)
|
1298 |
+
if dtype.metadata is not None:
|
1299 |
+
assert_equal(pickled.metadata, dtype.metadata)
|
1300 |
+
# Check the reconstructed dtype is functional
|
1301 |
+
x = np.zeros(3, dtype=dtype)
|
1302 |
+
y = np.zeros(3, dtype=pickled)
|
1303 |
+
assert_equal(x, y)
|
1304 |
+
assert_equal(x[0], y[0])
|
1305 |
+
|
1306 |
+
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
|
1307 |
+
np.compat.unicode, bool])
|
1308 |
+
def test_builtin(self, t):
|
1309 |
+
self.check_pickling(np.dtype(t))
|
1310 |
+
|
1311 |
+
def test_structured(self):
|
1312 |
+
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
|
1313 |
+
self.check_pickling(dt)
|
1314 |
+
|
1315 |
+
def test_structured_aligned(self):
|
1316 |
+
dt = np.dtype('i4, i1', align=True)
|
1317 |
+
self.check_pickling(dt)
|
1318 |
+
|
1319 |
+
def test_structured_unaligned(self):
|
1320 |
+
dt = np.dtype('i4, i1', align=False)
|
1321 |
+
self.check_pickling(dt)
|
1322 |
+
|
1323 |
+
def test_structured_padded(self):
|
1324 |
+
dt = np.dtype({
|
1325 |
+
'names': ['A', 'B'],
|
1326 |
+
'formats': ['f4', 'f4'],
|
1327 |
+
'offsets': [0, 8],
|
1328 |
+
'itemsize': 16})
|
1329 |
+
self.check_pickling(dt)
|
1330 |
+
|
1331 |
+
def test_structured_titles(self):
|
1332 |
+
dt = np.dtype({'names': ['r', 'b'],
|
1333 |
+
'formats': ['u1', 'u1'],
|
1334 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
1335 |
+
self.check_pickling(dt)
|
1336 |
+
|
1337 |
+
@pytest.mark.parametrize('base', ['m8', 'M8'])
|
1338 |
+
@pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
|
1339 |
+
'ms', 'us', 'ns', 'ps', 'fs', 'as'])
|
1340 |
+
def test_datetime(self, base, unit):
|
1341 |
+
dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
|
1342 |
+
self.check_pickling(dt)
|
1343 |
+
if unit:
|
1344 |
+
dt = np.dtype('%s[7%s]' % (base, unit))
|
1345 |
+
self.check_pickling(dt)
|
1346 |
+
|
1347 |
+
def test_metadata(self):
|
1348 |
+
dt = np.dtype(int, metadata={'datum': 1})
|
1349 |
+
self.check_pickling(dt)
|
1350 |
+
|
1351 |
+
@pytest.mark.parametrize("DType",
|
1352 |
+
[type(np.dtype(t)) for t in np.typecodes['All']] +
|
1353 |
+
[np.dtype(rational), np.dtype])
|
1354 |
+
def test_pickle_types(self, DType):
|
1355 |
+
# Check that DTypes (the classes/types) roundtrip when pickling
|
1356 |
+
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
|
1357 |
+
roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
|
1358 |
+
assert roundtrip_DType is DType
|
1359 |
+
|
1360 |
+
|
1361 |
+
class TestPromotion:
|
1362 |
+
"""Test cases related to more complex DType promotions. Further promotion
|
1363 |
+
tests are defined in `test_numeric.py`
|
1364 |
+
"""
|
1365 |
+
@np._no_nep50_warning()
|
1366 |
+
@pytest.mark.parametrize(["other", "expected", "expected_weak"],
|
1367 |
+
[(2**16-1, np.complex64, None),
|
1368 |
+
(2**32-1, np.complex128, np.complex64),
|
1369 |
+
(np.float16(2), np.complex64, None),
|
1370 |
+
(np.float32(2), np.complex64, None),
|
1371 |
+
(np.longdouble(2), np.complex64, np.clongdouble),
|
1372 |
+
# Base of the double value to sidestep any rounding issues:
|
1373 |
+
(np.longdouble(np.nextafter(1.7e308, 0.)),
|
1374 |
+
np.complex128, np.clongdouble),
|
1375 |
+
# Additionally use "nextafter" so the cast can't round down:
|
1376 |
+
(np.longdouble(np.nextafter(1.7e308, np.inf)),
|
1377 |
+
np.clongdouble, None),
|
1378 |
+
# repeat for complex scalars:
|
1379 |
+
(np.complex64(2), np.complex64, None),
|
1380 |
+
(np.clongdouble(2), np.complex64, np.clongdouble),
|
1381 |
+
# Base of the double value to sidestep any rounding issues:
|
1382 |
+
(np.clongdouble(np.nextafter(1.7e308, 0.) * 1j),
|
1383 |
+
np.complex128, np.clongdouble),
|
1384 |
+
# Additionally use "nextafter" so the cast can't round down:
|
1385 |
+
(np.clongdouble(np.nextafter(1.7e308, np.inf)),
|
1386 |
+
np.clongdouble, None),
|
1387 |
+
])
|
1388 |
+
def test_complex_other_value_based(self,
|
1389 |
+
weak_promotion, other, expected, expected_weak):
|
1390 |
+
if weak_promotion and expected_weak is not None:
|
1391 |
+
expected = expected_weak
|
1392 |
+
|
1393 |
+
# This would change if we modify the value based promotion
|
1394 |
+
min_complex = np.dtype(np.complex64)
|
1395 |
+
|
1396 |
+
res = np.result_type(other, min_complex)
|
1397 |
+
assert res == expected
|
1398 |
+
# Check the same for a simple ufunc call that uses the same logic:
|
1399 |
+
res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
|
1400 |
+
assert res == expected
|
1401 |
+
|
1402 |
+
@pytest.mark.parametrize(["other", "expected"],
|
1403 |
+
[(np.bool_, np.complex128),
|
1404 |
+
(np.int64, np.complex128),
|
1405 |
+
(np.float16, np.complex64),
|
1406 |
+
(np.float32, np.complex64),
|
1407 |
+
(np.float64, np.complex128),
|
1408 |
+
(np.longdouble, np.clongdouble),
|
1409 |
+
(np.complex64, np.complex64),
|
1410 |
+
(np.complex128, np.complex128),
|
1411 |
+
(np.clongdouble, np.clongdouble),
|
1412 |
+
])
|
1413 |
+
def test_complex_scalar_value_based(self, other, expected):
|
1414 |
+
# This would change if we modify the value based promotion
|
1415 |
+
complex_scalar = 1j
|
1416 |
+
|
1417 |
+
res = np.result_type(other, complex_scalar)
|
1418 |
+
assert res == expected
|
1419 |
+
# Check the same for a simple ufunc call that uses the same logic:
|
1420 |
+
res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
|
1421 |
+
assert res == expected
|
1422 |
+
|
1423 |
+
def test_complex_pyscalar_promote_rational(self):
|
1424 |
+
with pytest.raises(TypeError,
|
1425 |
+
match=r".* no common DType exists for the given inputs"):
|
1426 |
+
np.result_type(1j, rational)
|
1427 |
+
|
1428 |
+
with pytest.raises(TypeError,
|
1429 |
+
match=r".* no common DType exists for the given inputs"):
|
1430 |
+
np.result_type(1j, rational(1, 2))
|
1431 |
+
|
1432 |
+
@pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
|
1433 |
+
def test_python_integer_promotion(self, val):
|
1434 |
+
# If we only path scalars (mainly python ones!), the result must take
|
1435 |
+
# into account that the integer may be considered int32, int64, uint64,
|
1436 |
+
# or object depending on the input value. So test those paths!
|
1437 |
+
expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype)
|
1438 |
+
assert np.result_type(val, 0) == expected_dtype
|
1439 |
+
# For completeness sake, also check with a NumPy scalar as second arg:
|
1440 |
+
assert np.result_type(val, np.int8(0)) == expected_dtype
|
1441 |
+
|
1442 |
+
@pytest.mark.parametrize(["other", "expected"],
|
1443 |
+
[(1, rational), (1., np.float64)])
|
1444 |
+
@np._no_nep50_warning()
|
1445 |
+
def test_float_int_pyscalar_promote_rational(
|
1446 |
+
self, weak_promotion, other, expected):
|
1447 |
+
# Note that rationals are a bit akward as they promote with float64
|
1448 |
+
# or default ints, but not float16 or uint8/int8 (which looks
|
1449 |
+
# inconsistent here). The new promotion fixes this (partially?)
|
1450 |
+
if not weak_promotion and type(other) == float:
|
1451 |
+
# The float version, checks float16 in the legacy path, which fails
|
1452 |
+
# the integer version seems to check int8 (also), so it can
|
1453 |
+
# pass.
|
1454 |
+
with pytest.raises(TypeError,
|
1455 |
+
match=r".* do not have a common DType"):
|
1456 |
+
np.result_type(other, rational)
|
1457 |
+
else:
|
1458 |
+
assert np.result_type(other, rational) == expected
|
1459 |
+
|
1460 |
+
assert np.result_type(other, rational(1, 2)) == expected
|
1461 |
+
|
1462 |
+
@pytest.mark.parametrize(["dtypes", "expected"], [
|
1463 |
+
# These promotions are not associative/commutative:
|
1464 |
+
([np.uint16, np.int16, np.float16], np.float32),
|
1465 |
+
([np.uint16, np.int8, np.float16], np.float32),
|
1466 |
+
([np.uint8, np.int16, np.float16], np.float32),
|
1467 |
+
# The following promotions are not ambiguous, but cover code
|
1468 |
+
# paths of abstract promotion (no particular logic being tested)
|
1469 |
+
([1, 1, np.float64], np.float64),
|
1470 |
+
([1, 1., np.complex128], np.complex128),
|
1471 |
+
([1, 1j, np.float64], np.complex128),
|
1472 |
+
([1., 1., np.int64], np.float64),
|
1473 |
+
([1., 1j, np.float64], np.complex128),
|
1474 |
+
([1j, 1j, np.float64], np.complex128),
|
1475 |
+
([1, True, np.bool_], np.int_),
|
1476 |
+
])
|
1477 |
+
def test_permutations_do_not_influence_result(self, dtypes, expected):
|
1478 |
+
# Tests that most permutations do not influence the result. In the
|
1479 |
+
# above some uint and int combintations promote to a larger integer
|
1480 |
+
# type, which would then promote to a larger than necessary float.
|
1481 |
+
for perm in permutations(dtypes):
|
1482 |
+
assert np.result_type(*perm) == expected
|
1483 |
+
|
1484 |
+
|
1485 |
+
def test_rational_dtype():
    # test for bug gh-5719
    a = np.array([1111], dtype=rational).astype
    assert_raises(OverflowError, a, 'int8')

    # test that dtype detection finds user-defined types
    x = rational(1)
    assert_equal(np.array([x,x]).dtype, np.dtype(rational))


def test_dtypes_are_true():
    # test for gh-6294
    assert bool(np.dtype('f8'))
    assert bool(np.dtype('i8'))
    assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))


def test_invalid_dtype_string():
    # test for gh-10440
    assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
    assert_raises(TypeError, np.dtype, 'Fl\xfcgel')


def test_keyword_argument():
    # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
    assert np.dtype(dtype=np.float64) == np.dtype(np.float64)


def test_ulong_dtype():
    # test for gh-21063
    assert np.dtype("ulong") == np.dtype(np.uint)


class TestFromDTypeAttribute:
|
1519 |
+
def test_simple(self):
|
1520 |
+
class dt:
|
1521 |
+
dtype = np.dtype("f8")
|
1522 |
+
|
1523 |
+
assert np.dtype(dt) == np.float64
|
1524 |
+
assert np.dtype(dt()) == np.float64
|
1525 |
+
|
1526 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
1527 |
+
def test_recursion(self):
|
1528 |
+
class dt:
|
1529 |
+
pass
|
1530 |
+
|
1531 |
+
dt.dtype = dt
|
1532 |
+
with pytest.raises(RecursionError):
|
1533 |
+
np.dtype(dt)
|
1534 |
+
|
1535 |
+
dt_instance = dt()
|
1536 |
+
dt_instance.dtype = dt
|
1537 |
+
with pytest.raises(RecursionError):
|
1538 |
+
np.dtype(dt_instance)
|
1539 |
+
|
1540 |
+
def test_void_subtype(self):
|
1541 |
+
class dt(np.void):
|
1542 |
+
# This code path is fully untested before, so it is unclear
|
1543 |
+
# what this should be useful for. Note that if np.void is used
|
1544 |
+
# numpy will think we are deallocating a base type [1.17, 2019-02].
|
1545 |
+
dtype = np.dtype("f,f")
|
1546 |
+
|
1547 |
+
np.dtype(dt)
|
1548 |
+
np.dtype(dt(1))
|
1549 |
+
|
1550 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
1551 |
+
def test_void_subtype_recursion(self):
|
1552 |
+
class vdt(np.void):
|
1553 |
+
pass
|
1554 |
+
|
1555 |
+
vdt.dtype = vdt
|
1556 |
+
|
1557 |
+
with pytest.raises(RecursionError):
|
1558 |
+
np.dtype(vdt)
|
1559 |
+
|
1560 |
+
with pytest.raises(RecursionError):
|
1561 |
+
np.dtype(vdt(1))
|
1562 |
+
|
1563 |
+
|
1564 |
+
class TestDTypeClasses:
|
1565 |
+
@pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
|
1566 |
+
def test_basic_dtypes_subclass_properties(self, dtype):
|
1567 |
+
# Note: Except for the isinstance and type checks, these attributes
|
1568 |
+
# are considered currently private and may change.
|
1569 |
+
dtype = np.dtype(dtype)
|
1570 |
+
assert isinstance(dtype, np.dtype)
|
1571 |
+
assert type(dtype) is not np.dtype
|
1572 |
+
if dtype.type.__name__ != "rational":
|
1573 |
+
dt_name = type(dtype).__name__.lower().removesuffix("dtype")
|
1574 |
+
if dt_name == "uint" or dt_name == "int":
|
1575 |
+
# The scalar names has a `c` attached because "int" is Python
|
1576 |
+
# int and that is long...
|
1577 |
+
dt_name += "c"
|
1578 |
+
sc_name = dtype.type.__name__
|
1579 |
+
assert dt_name == sc_name.strip("_")
|
1580 |
+
assert type(dtype).__module__ == "numpy.dtypes"
|
1581 |
+
|
1582 |
+
assert getattr(numpy.dtypes, type(dtype).__name__) is type(dtype)
|
1583 |
+
else:
|
1584 |
+
assert type(dtype).__name__ == "dtype[rational]"
|
1585 |
+
assert type(dtype).__module__ == "numpy"
|
1586 |
+
|
1587 |
+
assert not type(dtype)._abstract
|
1588 |
+
|
1589 |
+
# the flexible dtypes and datetime/timedelta have additional parameters
|
1590 |
+
# which are more than just storage information, these would need to be
|
1591 |
+
# given when creating a dtype:
|
1592 |
+
parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
|
1593 |
+
if dtype.type not in parametric:
|
1594 |
+
assert not type(dtype)._parametric
|
1595 |
+
assert type(dtype)() is dtype
|
1596 |
+
else:
|
1597 |
+
assert type(dtype)._parametric
|
1598 |
+
with assert_raises(TypeError):
|
1599 |
+
type(dtype)()
|
1600 |
+
|
1601 |
+
def test_dtype_superclass(self):
|
1602 |
+
assert type(np.dtype) is not type
|
1603 |
+
assert isinstance(np.dtype, type)
|
1604 |
+
|
1605 |
+
assert type(np.dtype).__name__ == "_DTypeMeta"
|
1606 |
+
assert type(np.dtype).__module__ == "numpy"
|
1607 |
+
assert np.dtype._abstract
|
1608 |
+
|
1609 |
+
def test_is_numeric(self):
|
1610 |
+
all_codes = set(np.typecodes['All'])
|
1611 |
+
numeric_codes = set(np.typecodes['AllInteger'] +
|
1612 |
+
np.typecodes['AllFloat'] + '?')
|
1613 |
+
non_numeric_codes = all_codes - numeric_codes
|
1614 |
+
|
1615 |
+
for code in numeric_codes:
|
1616 |
+
assert type(np.dtype(code))._is_numeric
|
1617 |
+
|
1618 |
+
for code in non_numeric_codes:
|
1619 |
+
assert not type(np.dtype(code))._is_numeric
|
1620 |
+
|
1621 |
+
@pytest.mark.parametrize("int_", ["UInt", "Int"])
|
1622 |
+
@pytest.mark.parametrize("size", [8, 16, 32, 64])
|
1623 |
+
def test_integer_alias_names(self, int_, size):
|
1624 |
+
DType = getattr(numpy.dtypes, f"{int_}{size}DType")
|
1625 |
+
sctype = getattr(numpy, f"{int_.lower()}{size}")
|
1626 |
+
assert DType.type is sctype
|
1627 |
+
assert DType.__name__.lower().removesuffix("dtype") == sctype.__name__
|
1628 |
+
|
1629 |
+
@pytest.mark.parametrize("name",
|
1630 |
+
["Half", "Float", "Double", "CFloat", "CDouble"])
|
1631 |
+
def test_float_alias_names(self, name):
|
1632 |
+
with pytest.raises(AttributeError):
|
1633 |
+
getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType
|
1634 |
+
|
1635 |
+
|
1636 |
+
class TestFromCTypes:
|
1637 |
+
|
1638 |
+
@staticmethod
|
1639 |
+
def check(ctype, dtype):
|
1640 |
+
dtype = np.dtype(dtype)
|
1641 |
+
assert_equal(np.dtype(ctype), dtype)
|
1642 |
+
assert_equal(np.dtype(ctype()), dtype)
|
1643 |
+
|
1644 |
+
def test_array(self):
|
1645 |
+
c8 = ctypes.c_uint8
|
1646 |
+
self.check( 3 * c8, (np.uint8, (3,)))
|
1647 |
+
self.check( 1 * c8, (np.uint8, (1,)))
|
1648 |
+
self.check( 0 * c8, (np.uint8, (0,)))
|
1649 |
+
self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
|
1650 |
+
self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
|
1651 |
+
|
1652 |
+
def test_padded_structure(self):
|
1653 |
+
class PaddedStruct(ctypes.Structure):
|
1654 |
+
_fields_ = [
|
1655 |
+
('a', ctypes.c_uint8),
|
1656 |
+
('b', ctypes.c_uint16)
|
1657 |
+
]
|
1658 |
+
expected = np.dtype([
|
1659 |
+
('a', np.uint8),
|
1660 |
+
('b', np.uint16)
|
1661 |
+
], align=True)
|
1662 |
+
self.check(PaddedStruct, expected)
|
1663 |
+
|
1664 |
+
def test_bit_fields(self):
|
1665 |
+
class BitfieldStruct(ctypes.Structure):
|
1666 |
+
_fields_ = [
|
1667 |
+
('a', ctypes.c_uint8, 7),
|
1668 |
+
('b', ctypes.c_uint8, 1)
|
1669 |
+
]
|
1670 |
+
assert_raises(TypeError, np.dtype, BitfieldStruct)
|
1671 |
+
assert_raises(TypeError, np.dtype, BitfieldStruct())
|
1672 |
+
|
1673 |
+
def test_pointer(self):
|
1674 |
+
p_uint8 = ctypes.POINTER(ctypes.c_uint8)
|
1675 |
+
assert_raises(TypeError, np.dtype, p_uint8)
|
1676 |
+
|
1677 |
+
def test_void_pointer(self):
|
1678 |
+
self.check(ctypes.c_void_p, np.uintp)
|
1679 |
+
|
1680 |
+
def test_union(self):
|
1681 |
+
class Union(ctypes.Union):
|
1682 |
+
_fields_ = [
|
1683 |
+
('a', ctypes.c_uint8),
|
1684 |
+
('b', ctypes.c_uint16),
|
1685 |
+
]
|
1686 |
+
expected = np.dtype(dict(
|
1687 |
+
names=['a', 'b'],
|
1688 |
+
formats=[np.uint8, np.uint16],
|
1689 |
+
offsets=[0, 0],
|
1690 |
+
itemsize=2
|
1691 |
+
))
|
1692 |
+
self.check(Union, expected)
|
1693 |
+
|
1694 |
+
def test_union_with_struct_packed(self):
|
1695 |
+
class Struct(ctypes.Structure):
|
1696 |
+
_pack_ = 1
|
1697 |
+
_fields_ = [
|
1698 |
+
('one', ctypes.c_uint8),
|
1699 |
+
('two', ctypes.c_uint32)
|
1700 |
+
]
|
1701 |
+
|
1702 |
+
class Union(ctypes.Union):
|
1703 |
+
_fields_ = [
|
1704 |
+
('a', ctypes.c_uint8),
|
1705 |
+
('b', ctypes.c_uint16),
|
1706 |
+
('c', ctypes.c_uint32),
|
1707 |
+
('d', Struct),
|
1708 |
+
]
|
1709 |
+
expected = np.dtype(dict(
|
1710 |
+
names=['a', 'b', 'c', 'd'],
|
1711 |
+
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
|
1712 |
+
offsets=[0, 0, 0, 0],
|
1713 |
+
itemsize=ctypes.sizeof(Union)
|
1714 |
+
))
|
1715 |
+
self.check(Union, expected)
|
1716 |
+
|
1717 |
+
def test_union_packed(self):
|
1718 |
+
class Struct(ctypes.Structure):
|
1719 |
+
_fields_ = [
|
1720 |
+
('one', ctypes.c_uint8),
|
1721 |
+
('two', ctypes.c_uint32)
|
1722 |
+
]
|
1723 |
+
_pack_ = 1
|
1724 |
+
class Union(ctypes.Union):
|
1725 |
+
_pack_ = 1
|
1726 |
+
_fields_ = [
|
1727 |
+
('a', ctypes.c_uint8),
|
1728 |
+
('b', ctypes.c_uint16),
|
1729 |
+
('c', ctypes.c_uint32),
|
1730 |
+
('d', Struct),
|
1731 |
+
]
|
1732 |
+
expected = np.dtype(dict(
|
1733 |
+
names=['a', 'b', 'c', 'd'],
|
1734 |
+
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
|
1735 |
+
offsets=[0, 0, 0, 0],
|
1736 |
+
itemsize=ctypes.sizeof(Union)
|
1737 |
+
))
|
1738 |
+
self.check(Union, expected)
|
1739 |
+
|
1740 |
+
def test_packed_structure(self):
|
1741 |
+
class PackedStructure(ctypes.Structure):
|
1742 |
+
_pack_ = 1
|
1743 |
+
_fields_ = [
|
1744 |
+
('a', ctypes.c_uint8),
|
1745 |
+
('b', ctypes.c_uint16)
|
1746 |
+
]
|
1747 |
+
expected = np.dtype([
|
1748 |
+
('a', np.uint8),
|
1749 |
+
('b', np.uint16)
|
1750 |
+
])
|
1751 |
+
self.check(PackedStructure, expected)
|
1752 |
+
|
1753 |
+
def test_large_packed_structure(self):
|
1754 |
+
class PackedStructure(ctypes.Structure):
|
1755 |
+
_pack_ = 2
|
1756 |
+
_fields_ = [
|
1757 |
+
('a', ctypes.c_uint8),
|
1758 |
+
('b', ctypes.c_uint16),
|
1759 |
+
('c', ctypes.c_uint8),
|
1760 |
+
('d', ctypes.c_uint16),
|
1761 |
+
('e', ctypes.c_uint32),
|
1762 |
+
('f', ctypes.c_uint32),
|
1763 |
+
('g', ctypes.c_uint8)
|
1764 |
+
]
|
1765 |
+
expected = np.dtype(dict(
|
1766 |
+
formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
|
1767 |
+
offsets=[0, 2, 4, 6, 8, 12, 16],
|
1768 |
+
names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
|
1769 |
+
itemsize=18))
|
1770 |
+
self.check(PackedStructure, expected)
|
1771 |
+
|
1772 |
+
def test_big_endian_structure_packed(self):
|
1773 |
+
class BigEndStruct(ctypes.BigEndianStructure):
|
1774 |
+
_fields_ = [
|
1775 |
+
('one', ctypes.c_uint8),
|
1776 |
+
('two', ctypes.c_uint32)
|
1777 |
+
]
|
1778 |
+
_pack_ = 1
|
1779 |
+
expected = np.dtype([('one', 'u1'), ('two', '>u4')])
|
1780 |
+
self.check(BigEndStruct, expected)
|
1781 |
+
|
1782 |
+
def test_little_endian_structure_packed(self):
|
1783 |
+
class LittleEndStruct(ctypes.LittleEndianStructure):
|
1784 |
+
_fields_ = [
|
1785 |
+
('one', ctypes.c_uint8),
|
1786 |
+
('two', ctypes.c_uint32)
|
1787 |
+
]
|
1788 |
+
_pack_ = 1
|
1789 |
+
expected = np.dtype([('one', 'u1'), ('two', '<u4')])
|
1790 |
+
self.check(LittleEndStruct, expected)
|
1791 |
+
|
1792 |
+
def test_little_endian_structure(self):
|
1793 |
+
class PaddedStruct(ctypes.LittleEndianStructure):
|
1794 |
+
_fields_ = [
|
1795 |
+
('a', ctypes.c_uint8),
|
1796 |
+
('b', ctypes.c_uint16)
|
1797 |
+
]
|
1798 |
+
expected = np.dtype([
|
1799 |
+
('a', '<B'),
|
1800 |
+
('b', '<H')
|
1801 |
+
], align=True)
|
1802 |
+
self.check(PaddedStruct, expected)
|
1803 |
+
|
1804 |
+
def test_big_endian_structure(self):
|
1805 |
+
class PaddedStruct(ctypes.BigEndianStructure):
|
1806 |
+
_fields_ = [
|
1807 |
+
('a', ctypes.c_uint8),
|
1808 |
+
('b', ctypes.c_uint16)
|
1809 |
+
]
|
1810 |
+
expected = np.dtype([
|
1811 |
+
('a', '>B'),
|
1812 |
+
('b', '>H')
|
1813 |
+
], align=True)
|
1814 |
+
self.check(PaddedStruct, expected)
|
1815 |
+
|
1816 |
+
def test_simple_endian_types(self):
|
1817 |
+
self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
|
1818 |
+
self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
|
1819 |
+
self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
|
1820 |
+
self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
|
1821 |
+
|
1822 |
+
all_types = set(np.typecodes['All'])
|
1823 |
+
all_pairs = permutations(all_types, 2)
|
1824 |
+
|
1825 |
+
@pytest.mark.parametrize("pair", all_pairs)
|
1826 |
+
def test_pairs(self, pair):
|
1827 |
+
"""
|
1828 |
+
Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
|
1829 |
+
Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
|
1830 |
+
"""
|
1831 |
+
# gh-5645: check that np.dtype('i,L') can be used
|
1832 |
+
pair_type = np.dtype('{},{}'.format(*pair))
|
1833 |
+
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
|
1834 |
+
assert_equal(pair_type, expected)
|
1835 |
+
|
1836 |
+
|
1837 |
+
class TestUserDType:
|
1838 |
+
@pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
|
1839 |
+
def test_custom_structured_dtype(self):
|
1840 |
+
class mytype:
|
1841 |
+
pass
|
1842 |
+
|
1843 |
+
blueprint = np.dtype([("field", object)])
|
1844 |
+
dt = create_custom_field_dtype(blueprint, mytype, 0)
|
1845 |
+
assert dt.type == mytype
|
1846 |
+
# We cannot (currently) *create* this dtype with `np.dtype` because
|
1847 |
+
# mytype does not inherit from `np.generic`. This seems like an
|
1848 |
+
# unnecessary restriction, but one that has been around forever:
|
1849 |
+
assert np.dtype(mytype) == np.dtype("O")
|
1850 |
+
|
1851 |
+
def test_custom_structured_dtype_errors(self):
|
1852 |
+
class mytype:
|
1853 |
+
pass
|
1854 |
+
|
1855 |
+
blueprint = np.dtype([("field", object)])
|
1856 |
+
|
1857 |
+
with pytest.raises(ValueError):
|
1858 |
+
# Tests what happens if fields are unset during creation
|
1859 |
+
# which is currently rejected due to the containing object
|
1860 |
+
# (see PyArray_RegisterDataType).
|
1861 |
+
create_custom_field_dtype(blueprint, mytype, 1)
|
1862 |
+
|
1863 |
+
with pytest.raises(RuntimeError):
|
1864 |
+
# Tests that a dtype must have its type field set up to np.dtype
|
1865 |
+
# or in this case a builtin instance.
|
1866 |
+
create_custom_field_dtype(blueprint, mytype, 2)
|
1867 |
+
|
1868 |
+
|
1869 |
+
class TestClassGetItem:
    def test_dtype(self) -> None:
        alias = np.dtype[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is np.dtype

    @pytest.mark.parametrize("code", np.typecodes["All"])
    def test_dtype_subclass(self, code: str) -> None:
        cls = type(np.dtype(code))
        alias = cls[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is cls

    @pytest.mark.parametrize("arg_len", range(4))
    def test_subscript_tuple(self, arg_len: int) -> None:
        arg_tup = (Any,) * arg_len
        if arg_len == 1:
            assert np.dtype[arg_tup]
        else:
            with pytest.raises(TypeError):
                np.dtype[arg_tup]

    def test_subscript_scalar(self) -> None:
        assert np.dtype[Any]


def test_result_type_integers_and_unitless_timedelta64():
    # Regression test for gh-20077. The following call of `result_type`
    # would cause a seg. fault.
    td = np.timedelta64(4)
    result = np.result_type(0, td)
    assert_dtype_equal(result, td.dtype)


def test_creating_dtype_with_dtype_class_errors():
    # Regression test for #25031, calling `np.dtype` with itself segfaulted.
    with pytest.raises(TypeError, match="Cannot convert np.dtype into a"):
        np.array(np.ones(10), dtype=np.dtype)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_einsum.py
ADDED
@@ -0,0 +1,1248 @@
import itertools
import sys
import platform

import pytest

import numpy as np
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_almost_equal,
    assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
    )

try:
    COMPILERS = np.show_config(mode="dicts")["Compilers"]
    USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl"
except TypeError:
    USING_CLANG_CL = False

# Setup for optimize einsum
chars = 'abcdefghij'
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
global_size_dict = dict(zip(chars, sizes))


25 |
+
class TestEinsum:
|
26 |
+
def test_einsum_errors(self):
|
27 |
+
for do_opt in [True, False]:
|
28 |
+
# Need enough arguments
|
29 |
+
assert_raises(ValueError, np.einsum, optimize=do_opt)
|
30 |
+
assert_raises(ValueError, np.einsum, "", optimize=do_opt)
|
31 |
+
|
32 |
+
# subscripts must be a string
|
33 |
+
assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
|
34 |
+
|
35 |
+
# out parameter must be an array
|
36 |
+
assert_raises(TypeError, np.einsum, "", 0, out='test',
|
37 |
+
optimize=do_opt)
|
38 |
+
|
39 |
+
# order parameter must be a valid order
|
40 |
+
assert_raises(ValueError, np.einsum, "", 0, order='W',
|
41 |
+
optimize=do_opt)
|
42 |
+
|
43 |
+
# casting parameter must be a valid casting
|
44 |
+
assert_raises(ValueError, np.einsum, "", 0, casting='blah',
|
45 |
+
optimize=do_opt)
|
46 |
+
|
47 |
+
# dtype parameter must be a valid dtype
|
48 |
+
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
|
49 |
+
optimize=do_opt)
|
50 |
+
|
51 |
+
# other keyword arguments are rejected
|
52 |
+
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
|
53 |
+
optimize=do_opt)
|
54 |
+
|
55 |
+
# issue 4528 revealed a segfault with this call
|
56 |
+
assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
|
57 |
+
|
58 |
+
# number of operands must match count in subscripts string
|
59 |
+
assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
|
60 |
+
assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
|
61 |
+
optimize=do_opt)
|
62 |
+
assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
|
63 |
+
|
64 |
+
# can't have more subscripts than dimensions in the operand
|
65 |
+
assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
|
66 |
+
assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
|
67 |
+
assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
|
68 |
+
assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
|
69 |
+
assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
|
70 |
+
assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
|
71 |
+
|
72 |
+
# invalid ellipsis
|
73 |
+
assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
|
74 |
+
assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
|
75 |
+
assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
|
76 |
+
assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
|
77 |
+
|
78 |
+
# invalid subscript character
|
79 |
+
assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
|
80 |
+
assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
|
81 |
+
assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
|
82 |
+
|
83 |
+
# output subscripts must appear in input
|
84 |
+
assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
|
85 |
+
|
86 |
+
# output subscripts may only be specified once
|
87 |
+
assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
|
88 |
+
optimize=do_opt)
|
89 |
+
|
90 |
+
# dimensions much match when being collapsed
|
91 |
+
assert_raises(ValueError, np.einsum, "ii",
|
92 |
+
np.arange(6).reshape(2, 3), optimize=do_opt)
|
93 |
+
assert_raises(ValueError, np.einsum, "ii->i",
|
94 |
+
np.arange(6).reshape(2, 3), optimize=do_opt)
|
95 |
+
|
96 |
+
# broadcasting to new dimensions must be enabled explicitly
|
97 |
+
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
|
98 |
+
optimize=do_opt)
|
99 |
+
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
|
100 |
+
out=np.arange(4).reshape(2, 2), optimize=do_opt)
|
101 |
+
with assert_raises_regex(ValueError, "'b'"):
|
102 |
+
# gh-11221 - 'c' erroneously appeared in the error message
|
103 |
+
a = np.ones((3, 3, 4, 5, 6))
|
104 |
+
b = np.ones((3, 4, 5))
|
105 |
+
np.einsum('aabcb,abc', a, b)
|
106 |
+
|
107 |
+
# Check order kwarg, asanyarray allows 1d to pass through
|
108 |
+
assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
|
109 |
+
optimize=do_opt, order='d')
|
110 |
+
|
111 |
+
def test_einsum_object_errors(self):
|
112 |
+
# Exceptions created by object arithmetic should
|
113 |
+
# successfully propagate
|
114 |
+
|
115 |
+
class CustomException(Exception):
|
116 |
+
pass
|
117 |
+
|
118 |
+
class DestructoBox:
|
119 |
+
|
120 |
+
def __init__(self, value, destruct):
|
121 |
+
self._val = value
|
122 |
+
self._destruct = destruct
|
123 |
+
|
124 |
+
def __add__(self, other):
|
125 |
+
tmp = self._val + other._val
|
126 |
+
if tmp >= self._destruct:
|
127 |
+
raise CustomException
|
128 |
+
else:
|
129 |
+
self._val = tmp
|
130 |
+
return self
|
131 |
+
|
132 |
+
def __radd__(self, other):
|
133 |
+
if other == 0:
|
134 |
+
return self
|
135 |
+
else:
|
136 |
+
return self.__add__(other)
|
137 |
+
|
138 |
+
def __mul__(self, other):
|
139 |
+
tmp = self._val * other._val
|
140 |
+
if tmp >= self._destruct:
|
141 |
+
raise CustomException
|
142 |
+
else:
|
143 |
+
self._val = tmp
|
144 |
+
return self
|
145 |
+
|
146 |
+
def __rmul__(self, other):
|
147 |
+
if other == 0:
|
148 |
+
return self
|
149 |
+
else:
|
150 |
+
return self.__mul__(other)
|
151 |
+
|
152 |
+
a = np.array([DestructoBox(i, 5) for i in range(1, 10)],
|
153 |
+
dtype='object').reshape(3, 3)
|
154 |
+
|
155 |
+
# raised from unbuffered_loop_nop1_ndim2
|
156 |
+
assert_raises(CustomException, np.einsum, "ij->i", a)
|
157 |
+
|
158 |
+
# raised from unbuffered_loop_nop1_ndim3
|
159 |
+
b = np.array([DestructoBox(i, 100) for i in range(0, 27)],
|
160 |
+
dtype='object').reshape(3, 3, 3)
|
161 |
+
assert_raises(CustomException, np.einsum, "i...k->...", b)
|
162 |
+
|
163 |
+
# raised from unbuffered_loop_nop2_ndim2
|
164 |
+
b = np.array([DestructoBox(i, 55) for i in range(1, 4)],
|
165 |
+
dtype='object')
|
166 |
+
assert_raises(CustomException, np.einsum, "ij, j", a, b)
|
167 |
+
|
168 |
+
# raised from unbuffered_loop_nop2_ndim3
|
169 |
+
assert_raises(CustomException, np.einsum, "ij, jh", a, a)
|
170 |
+
|
171 |
+
# raised from PyArray_EinsteinSum
|
172 |
+
assert_raises(CustomException, np.einsum, "ij->", a)
|
173 |
+
|
174 |
+
def test_einsum_views(self):
|
175 |
+
# pass-through
|
176 |
+
for do_opt in [True, False]:
|
177 |
+
a = np.arange(6)
|
178 |
+
a.shape = (2, 3)
|
179 |
+
|
180 |
+
b = np.einsum("...", a, optimize=do_opt)
|
181 |
+
assert_(b.base is a)
|
182 |
+
|
183 |
+
b = np.einsum(a, [Ellipsis], optimize=do_opt)
|
184 |
+
assert_(b.base is a)
|
185 |
+
|
186 |
+
b = np.einsum("ij", a, optimize=do_opt)
|
187 |
+
assert_(b.base is a)
|
188 |
+
assert_equal(b, a)
|
189 |
+
|
190 |
+
b = np.einsum(a, [0, 1], optimize=do_opt)
|
191 |
+
assert_(b.base is a)
|
192 |
+
assert_equal(b, a)
|
193 |
+
|
194 |
+
# output is writeable whenever input is writeable
|
195 |
+
b = np.einsum("...", a, optimize=do_opt)
|
196 |
+
assert_(b.flags['WRITEABLE'])
|
197 |
+
a.flags['WRITEABLE'] = False
|
198 |
+
b = np.einsum("...", a, optimize=do_opt)
|
199 |
+
assert_(not b.flags['WRITEABLE'])
|
200 |
+
|
201 |
+
# transpose
|
202 |
+
a = np.arange(6)
|
203 |
+
a.shape = (2, 3)
|
204 |
+
|
205 |
+
b = np.einsum("ji", a, optimize=do_opt)
|
206 |
+
assert_(b.base is a)
|
207 |
+
assert_equal(b, a.T)
|
208 |
+
|
209 |
+
b = np.einsum(a, [1, 0], optimize=do_opt)
|
210 |
+
assert_(b.base is a)
|
211 |
+
assert_equal(b, a.T)
|
212 |
+
|
213 |
+
# diagonal
|
214 |
+
a = np.arange(9)
|
215 |
+
a.shape = (3, 3)
|
216 |
+
|
217 |
+
b = np.einsum("ii->i", a, optimize=do_opt)
|
218 |
+
assert_(b.base is a)
|
219 |
+
assert_equal(b, [a[i, i] for i in range(3)])
|
220 |
+
|
221 |
+
b = np.einsum(a, [0, 0], [0], optimize=do_opt)
|
222 |
+
assert_(b.base is a)
|
223 |
+
assert_equal(b, [a[i, i] for i in range(3)])
|
224 |
+
|
225 |
+
# diagonal with various ways of broadcasting an additional dimension
|
226 |
+
a = np.arange(27)
|
227 |
+
a.shape = (3, 3, 3)
|
228 |
+
|
229 |
+
b = np.einsum("...ii->...i", a, optimize=do_opt)
|
230 |
+
assert_(b.base is a)
|
231 |
+
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
|
232 |
+
|
233 |
+
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
|
234 |
+
assert_(b.base is a)
|
235 |
+
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
|
236 |
+
|
237 |
+
b = np.einsum("ii...->...i", a, optimize=do_opt)
|
238 |
+
assert_(b.base is a)
|
239 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
240 |
+
for x in a.transpose(2, 0, 1)])
|
241 |
+
|
242 |
+
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
|
243 |
+
assert_(b.base is a)
|
244 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
245 |
+
for x in a.transpose(2, 0, 1)])
|
246 |
+
|
247 |
+
b = np.einsum("...ii->i...", a, optimize=do_opt)
|
248 |
+
assert_(b.base is a)
|
249 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
250 |
+
|
251 |
+
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
|
252 |
+
assert_(b.base is a)
|
253 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
254 |
+
|
255 |
+
b = np.einsum("jii->ij", a, optimize=do_opt)
|
256 |
+
assert_(b.base is a)
|
257 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
258 |
+
|
259 |
+
b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
|
260 |
+
assert_(b.base is a)
|
261 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
262 |
+
|
263 |
+
b = np.einsum("ii...->i...", a, optimize=do_opt)
|
264 |
+
assert_(b.base is a)
|
265 |
+
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
|
266 |
+
|
267 |
+
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
|
268 |
+
assert_(b.base is a)
|
269 |
+
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
|
270 |
+
|
271 |
+
b = np.einsum("i...i->i...", a, optimize=do_opt)
|
272 |
+
assert_(b.base is a)
|
273 |
+
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
|
274 |
+
|
275 |
+
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
|
276 |
+
assert_(b.base is a)
|
277 |
+
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
|
278 |
+
|
279 |
+
b = np.einsum("i...i->...i", a, optimize=do_opt)
|
280 |
+
assert_(b.base is a)
|
281 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
282 |
+
for x in a.transpose(1, 0, 2)])
|
283 |
+
|
284 |
+
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
|
285 |
+
assert_(b.base is a)
|
286 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
287 |
+
for x in a.transpose(1, 0, 2)])
|
288 |
+
|
289 |
+
# triple diagonal
|
290 |
+
a = np.arange(27)
|
291 |
+
a.shape = (3, 3, 3)
|
292 |
+
|
293 |
+
b = np.einsum("iii->i", a, optimize=do_opt)
|
294 |
+
assert_(b.base is a)
|
295 |
+
assert_equal(b, [a[i, i, i] for i in range(3)])
|
296 |
+
|
297 |
+
b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
|
298 |
+
assert_(b.base is a)
|
299 |
+
assert_equal(b, [a[i, i, i] for i in range(3)])
|
300 |
+
|
301 |
+
# swap axes
|
302 |
+
a = np.arange(24)
|
303 |
+
a.shape = (2, 3, 4)
|
304 |
+
|
305 |
+
b = np.einsum("ijk->jik", a, optimize=do_opt)
|
306 |
+
assert_(b.base is a)
|
307 |
+
assert_equal(b, a.swapaxes(0, 1))
|
308 |
+
|
309 |
+
b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
|
310 |
+
assert_(b.base is a)
|
311 |
+
assert_equal(b, a.swapaxes(0, 1))
|
312 |
+
|
313 |
+
@np._no_nep50_warning()
|
314 |
+
def check_einsum_sums(self, dtype, do_opt=False):
|
315 |
+
dtype = np.dtype(dtype)
|
316 |
+
# Check various sums. Does many sizes to exercise unrolled loops.
|
317 |
+
|
318 |
+
# sum(a, axis=-1)
|
319 |
+
for n in range(1, 17):
|
320 |
+
a = np.arange(n, dtype=dtype)
|
321 |
+
b = np.sum(a, axis=-1)
|
322 |
+
if hasattr(b, 'astype'):
|
323 |
+
b = b.astype(dtype)
|
324 |
+
assert_equal(np.einsum("i->", a, optimize=do_opt), b)
|
325 |
+
assert_equal(np.einsum(a, [0], [], optimize=do_opt), b)
|
326 |
+
|
327 |
+
for n in range(1, 17):
|
328 |
+
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
|
329 |
+
b = np.sum(a, axis=-1)
|
330 |
+
if hasattr(b, 'astype'):
|
331 |
+
b = b.astype(dtype)
|
332 |
+
assert_equal(np.einsum("...i->...", a, optimize=do_opt), b)
|
333 |
+
assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b)
|
334 |
+
|
335 |
+
# sum(a, axis=0)
|
336 |
+
for n in range(1, 17):
|
337 |
+
a = np.arange(2*n, dtype=dtype).reshape(2, n)
|
338 |
+
b = np.sum(a, axis=0)
|
339 |
+
if hasattr(b, 'astype'):
|
340 |
+
b = b.astype(dtype)
|
341 |
+
assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
|
342 |
+
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
|
343 |
+
|
344 |
+
for n in range(1, 17):
|
345 |
+
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
|
346 |
+
b = np.sum(a, axis=0)
|
347 |
+
if hasattr(b, 'astype'):
|
348 |
+
b = b.astype(dtype)
|
349 |
+
assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
|
350 |
+
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
|
351 |
+
|
352 |
+
# trace(a)
|
353 |
+
for n in range(1, 17):
|
354 |
+
a = np.arange(n*n, dtype=dtype).reshape(n, n)
|
355 |
+
b = np.trace(a)
|
356 |
+
if hasattr(b, 'astype'):
|
357 |
+
b = b.astype(dtype)
|
358 |
+
assert_equal(np.einsum("ii", a, optimize=do_opt), b)
|
359 |
+
assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b)
|
360 |
+
|
361 |
+
# gh-15961: should accept numpy int64 type in subscript list
|
362 |
+
np_array = np.asarray([0, 0])
|
363 |
+
assert_equal(np.einsum(a, np_array, optimize=do_opt), b)
|
364 |
+
assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b)
|
365 |
+
|
366 |
+
# multiply(a, b)
|
367 |
+
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
|
368 |
+
for n in range(1, 17):
|
369 |
+
a = np.arange(3 * n, dtype=dtype).reshape(3, n)
|
370 |
+
b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
|
371 |
+
assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
|
372 |
+
np.multiply(a, b))
|
373 |
+
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
|
374 |
+
np.multiply(a, b))
|
375 |
+
|
376 |
+
# inner(a,b)
|
377 |
+
for n in range(1, 17):
|
378 |
+
a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
|
379 |
+
b = np.arange(n, dtype=dtype)
|
380 |
+
assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
|
381 |
+
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
|
382 |
+
np.inner(a, b))
|
383 |
+
|
384 |
+
for n in range(1, 11):
|
385 |
+
a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
|
386 |
+
b = np.arange(n, dtype=dtype)
|
387 |
+
assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
|
388 |
+
np.inner(a.T, b.T).T)
|
389 |
+
assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
|
390 |
+
np.inner(a.T, b.T).T)
|
391 |
+
|
392 |
+
# outer(a,b)
|
393 |
+
for n in range(1, 17):
|
394 |
+
a = np.arange(3, dtype=dtype)+1
|
395 |
+
b = np.arange(n, dtype=dtype)+1
|
396 |
+
assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
|
397 |
+
np.outer(a, b))
|
398 |
+
assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
|
399 |
+
np.outer(a, b))
|
400 |
+
|
401 |
+
# Suppress the complex warnings for the 'as f8' tests
|
402 |
+
with suppress_warnings() as sup:
|
403 |
+
sup.filter(np.ComplexWarning)
|
404 |
+
|
405 |
+
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
|
406 |
+
for n in range(1, 17):
|
407 |
+
a = np.arange(4*n, dtype=dtype).reshape(4, n)
|
408 |
+
b = np.arange(n, dtype=dtype)
|
409 |
+
assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
|
410 |
+
np.dot(a, b))
|
411 |
+
assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
|
412 |
+
np.dot(a, b))
|
413 |
+
|
414 |
+
c = np.arange(4, dtype=dtype)
|
415 |
+
np.einsum("ij,j", a, b, out=c,
|
416 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
417 |
+
assert_equal(c,
|
418 |
+
np.dot(a.astype('f8'),
|
419 |
+
b.astype('f8')).astype(dtype))
|
420 |
+
c[...] = 0
|
421 |
+
np.einsum(a, [0, 1], b, [1], out=c,
|
422 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
423 |
+
assert_equal(c,
|
424 |
+
np.dot(a.astype('f8'),
|
425 |
+
b.astype('f8')).astype(dtype))
|
426 |
+
|
427 |
+
for n in range(1, 17):
|
428 |
+
a = np.arange(4*n, dtype=dtype).reshape(4, n)
|
429 |
+
b = np.arange(n, dtype=dtype)
|
430 |
+
assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
|
431 |
+
np.dot(b.T, a.T))
|
432 |
+
assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
|
433 |
+
np.dot(b.T, a.T))
|
434 |
+
|
435 |
+
c = np.arange(4, dtype=dtype)
|
436 |
+
np.einsum("ji,j", a.T, b.T, out=c,
|
437 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
438 |
+
assert_equal(c,
|
439 |
+
np.dot(b.T.astype('f8'),
|
440 |
+
a.T.astype('f8')).astype(dtype))
|
441 |
+
c[...] = 0
|
442 |
+
np.einsum(a.T, [1, 0], b.T, [1], out=c,
|
443 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
444 |
+
assert_equal(c,
|
445 |
+
np.dot(b.T.astype('f8'),
|
446 |
+
a.T.astype('f8')).astype(dtype))
|
447 |
+
|
448 |
+
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
|
449 |
+
for n in range(1, 17):
|
450 |
+
if n < 8 or dtype != 'f2':
|
451 |
+
a = np.arange(4*n, dtype=dtype).reshape(4, n)
|
452 |
+
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
|
453 |
+
assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
|
454 |
+
np.dot(a, b))
|
455 |
+
assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
|
456 |
+
np.dot(a, b))
|
457 |
+
|
458 |
+
for n in range(1, 17):
|
459 |
+
a = np.arange(4*n, dtype=dtype).reshape(4, n)
|
460 |
+
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
|
461 |
+
c = np.arange(24, dtype=dtype).reshape(4, 6)
|
462 |
+
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
|
463 |
+
optimize=do_opt)
|
464 |
+
assert_equal(c,
|
465 |
+
np.dot(a.astype('f8'),
|
466 |
+
b.astype('f8')).astype(dtype))
|
467 |
+
c[...] = 0
|
468 |
+
np.einsum(a, [0, 1], b, [1, 2], out=c,
|
469 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
470 |
+
assert_equal(c,
|
471 |
+
np.dot(a.astype('f8'),
|
472 |
+
b.astype('f8')).astype(dtype))
|
473 |
+
|
474 |
+
# matrix triple product (note this is not currently an efficient
|
475 |
+
# way to multiply 3 matrices)
|
476 |
+
a = np.arange(12, dtype=dtype).reshape(3, 4)
|
477 |
+
b = np.arange(20, dtype=dtype).reshape(4, 5)
|
478 |
+
c = np.arange(30, dtype=dtype).reshape(5, 6)
|
479 |
+
if dtype != 'f2':
|
480 |
+
assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
|
481 |
+
a.dot(b).dot(c))
|
482 |
+
assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
|
483 |
+
optimize=do_opt), a.dot(b).dot(c))
|
484 |
+
|
485 |
+
d = np.arange(18, dtype=dtype).reshape(3, 6)
|
486 |
+
np.einsum("ij,jk,kl", a, b, c, out=d,
|
487 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
488 |
+
tgt = a.astype('f8').dot(b.astype('f8'))
|
489 |
+
tgt = tgt.dot(c.astype('f8')).astype(dtype)
|
490 |
+
assert_equal(d, tgt)
|
491 |
+
|
492 |
+
d[...] = 0
|
493 |
+
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
|
494 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
495 |
+
tgt = a.astype('f8').dot(b.astype('f8'))
|
496 |
+
tgt = tgt.dot(c.astype('f8')).astype(dtype)
|
497 |
+
assert_equal(d, tgt)
|
498 |
+
|
499 |
+
# tensordot(a, b)
|
500 |
+
if np.dtype(dtype) != np.dtype('f2'):
|
501 |
+
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
|
502 |
+
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
|
503 |
+
assert_equal(np.einsum("ijk, jil -> kl", a, b),
|
504 |
+
np.tensordot(a, b, axes=([1, 0], [0, 1])))
|
505 |
+
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
|
506 |
+
np.tensordot(a, b, axes=([1, 0], [0, 1])))
|
507 |
+
|
508 |
+
c = np.arange(10, dtype=dtype).reshape(5, 2)
|
509 |
+
np.einsum("ijk,jil->kl", a, b, out=c,
|
510 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
511 |
+
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
|
512 |
+
axes=([1, 0], [0, 1])).astype(dtype))
|
513 |
+
c[...] = 0
|
514 |
+
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
|
515 |
+
dtype='f8', casting='unsafe', optimize=do_opt)
|
516 |
+
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
|
517 |
+
axes=([1, 0], [0, 1])).astype(dtype))
|
518 |
+
|
519 |
+
# logical_and(logical_and(a!=0, b!=0), c!=0)
|
520 |
+
neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1
|
521 |
+
a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype)
|
522 |
+
b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype)
|
523 |
+
c = np.array([True, True, False, True, True, False, True, True])
|
524 |
+
|
525 |
+
assert_equal(np.einsum("i,i,i->i", a, b, c,
|
526 |
+
dtype='?', casting='unsafe', optimize=do_opt),
|
527 |
+
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
|
528 |
+
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
|
529 |
+
dtype='?', casting='unsafe'),
|
530 |
+
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
|
531 |
+
|
532 |
+
a = np.arange(9, dtype=dtype)
|
533 |
+
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
|
534 |
+
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
|
535 |
+
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
|
536 |
+
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
|
537 |
+
|
538 |
+
# Various stride0, contiguous, and SSE aligned variants
|
539 |
+
for n in range(1, 25):
|
540 |
+
a = np.arange(n, dtype=dtype)
|
541 |
+
if np.dtype(dtype).itemsize > 1:
|
542 |
+
assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
|
543 |
+
np.multiply(a, a))
|
544 |
+
assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
|
545 |
+
assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
|
546 |
+
assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
|
547 |
+
assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
|
548 |
+
assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
|
549 |
+
|
550 |
+
assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
|
551 |
+
np.multiply(a[1:], a[:-1]))
|
552 |
+
assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
|
553 |
+
np.dot(a[1:], a[:-1]))
|
554 |
+
assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
|
555 |
+
assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
|
556 |
+
assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
|
557 |
+
2*np.sum(a[1:]))
|
558 |
+
assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
|
559 |
+
2*np.sum(a[1:]))
|
560 |
+
|
561 |
+
# An object array, summed as the data type
|
562 |
+
a = np.arange(9, dtype=object)
|
563 |
+
|
564 |
+
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
|
565 |
+
assert_equal(b, np.sum(a))
|
566 |
+
if hasattr(b, "dtype"):
|
567 |
+
# Can be a python object when dtype is object
|
568 |
+
assert_equal(b.dtype, np.dtype(dtype))
|
569 |
+
|
570 |
+
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
|
571 |
+
assert_equal(b, np.sum(a))
|
572 |
+
if hasattr(b, "dtype"):
|
573 |
+
# Can be a python object when dtype is object
|
574 |
+
assert_equal(b.dtype, np.dtype(dtype))
|
575 |
+
|
576 |
+
# A case which was failing (ticket #1885)
|
577 |
+
p = np.arange(2) + 1
|
578 |
+
q = np.arange(4).reshape(2, 2) + 3
|
579 |
+
r = np.arange(4).reshape(2, 2) + 7
|
580 |
+
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
|
581 |
+
|
582 |
+
# singleton dimensions broadcast (gh-10343)
|
583 |
+
p = np.ones((10,2))
|
584 |
+
q = np.ones((1,2))
|
585 |
+
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
|
586 |
+
np.einsum('ij,ij->j', p, q, optimize=False))
|
587 |
+
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
|
588 |
+
[10.] * 2)
|
589 |
+
|
590 |
+
# a blas-compatible contraction broadcasting case which was failing
|
591 |
+
# for optimize=True (ticket #10930)
|
592 |
+
x = np.array([2., 3.])
|
593 |
+
y = np.array([4.])
|
594 |
+
assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
|
595 |
+
assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
|
596 |
+
|
597 |
+
# all-ones array was bypassing bug (ticket #10930)
|
598 |
+
p = np.ones((1, 5)) / 2
|
599 |
+
q = np.ones((5, 5)) / 2
|
600 |
+
for optimize in (True, False):
|
601 |
+
assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
|
602 |
+
optimize=optimize),
|
603 |
+
np.einsum("...ij,...jk->...ik", p, q,
|
604 |
+
optimize=optimize))
|
605 |
+
assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
|
606 |
+
optimize=optimize),
|
607 |
+
np.full((1, 5), 1.25))
|
608 |
+
|
609 |
+
# Cases which were failing (gh-10899)
|
610 |
+
x = np.eye(2, dtype=dtype)
|
611 |
+
y = np.ones(2, dtype=dtype)
|
612 |
+
assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
|
613 |
+
[2.]) # contig_contig_outstride0_two
|
614 |
+
assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
|
615 |
+
[2.]) # stride0_contig_outstride0_two
|
616 |
+
assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
|
617 |
+
[2.]) # contig_stride0_outstride0_two
|
618 |
+
|
619 |
+
def test_einsum_sums_int8(self):
|
620 |
+
if (
|
621 |
+
(sys.platform == 'darwin' and platform.machine() == 'x86_64')
|
622 |
+
or
|
623 |
+
USING_CLANG_CL
|
624 |
+
):
|
625 |
+
pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
|
626 |
+
'with Meson, see gh-23838')
|
627 |
+
self.check_einsum_sums('i1')
|
628 |
+
|
629 |
+
def test_einsum_sums_uint8(self):
|
630 |
+
if (
|
631 |
+
(sys.platform == 'darwin' and platform.machine() == 'x86_64')
|
632 |
+
or
|
633 |
+
USING_CLANG_CL
|
634 |
+
):
|
635 |
+
pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
|
636 |
+
'with Meson, see gh-23838')
|
637 |
+
self.check_einsum_sums('u1')
|
638 |
+
|
639 |
+
def test_einsum_sums_int16(self):
|
640 |
+
self.check_einsum_sums('i2')
|
641 |
+
|
642 |
+
def test_einsum_sums_uint16(self):
|
643 |
+
self.check_einsum_sums('u2')
|
644 |
+
|
645 |
+
def test_einsum_sums_int32(self):
|
646 |
+
self.check_einsum_sums('i4')
|
647 |
+
self.check_einsum_sums('i4', True)
|
648 |
+
|
649 |
+
def test_einsum_sums_uint32(self):
|
650 |
+
self.check_einsum_sums('u4')
|
651 |
+
self.check_einsum_sums('u4', True)
|
652 |
+
|
653 |
+
def test_einsum_sums_int64(self):
|
654 |
+
self.check_einsum_sums('i8')
|
655 |
+
|
656 |
+
def test_einsum_sums_uint64(self):
|
657 |
+
self.check_einsum_sums('u8')
|
658 |
+
|
659 |
+
def test_einsum_sums_float16(self):
|
660 |
+
self.check_einsum_sums('f2')
|
661 |
+
|
662 |
+
def test_einsum_sums_float32(self):
|
663 |
+
self.check_einsum_sums('f4')
|
664 |
+
|
665 |
+
def test_einsum_sums_float64(self):
|
666 |
+
self.check_einsum_sums('f8')
|
667 |
+
self.check_einsum_sums('f8', True)
|
668 |
+
|
669 |
+
def test_einsum_sums_longdouble(self):
|
670 |
+
self.check_einsum_sums(np.longdouble)
|
671 |
+
|
672 |
+
def test_einsum_sums_cfloat64(self):
|
673 |
+
self.check_einsum_sums('c8')
|
674 |
+
self.check_einsum_sums('c8', True)
|
675 |
+
|
676 |
+
def test_einsum_sums_cfloat128(self):
|
677 |
+
self.check_einsum_sums('c16')
|
678 |
+
|
679 |
+
def test_einsum_sums_clongdouble(self):
|
680 |
+
self.check_einsum_sums(np.clongdouble)
|
681 |
+
|
682 |
+
def test_einsum_sums_object(self):
|
683 |
+
self.check_einsum_sums('object')
|
684 |
+
self.check_einsum_sums('object', True)
|
685 |
+
|
686 |
+
def test_einsum_misc(self):
|
687 |
+
# This call used to crash because of a bug in
|
688 |
+
# PyArray_AssignZero
|
689 |
+
a = np.ones((1, 2))
|
690 |
+
b = np.ones((2, 2, 1))
|
691 |
+
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
|
692 |
+
assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
|
693 |
+
|
694 |
+
# Regression test for issue #10369 (test unicode inputs with Python 2)
|
695 |
+
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
|
696 |
+
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
|
697 |
+
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
|
698 |
+
optimize='greedy'), 20)
|
699 |
+
|
700 |
+
# The iterator had an issue with buffering this reduction
|
701 |
+
a = np.ones((5, 12, 4, 2, 3), np.int64)
|
702 |
+
b = np.ones((5, 12, 11), np.int64)
|
703 |
+
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
|
704 |
+
np.einsum('ijklm,ijn->', a, b))
|
705 |
+
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
|
706 |
+
np.einsum('ijklm,ijn->', a, b, optimize=True))
|
707 |
+
|
708 |
+
# Issue #2027, was a problem in the contiguous 3-argument
|
709 |
+
# inner loop implementation
|
710 |
+
a = np.arange(1, 3)
|
711 |
+
b = np.arange(1, 5).reshape(2, 2)
|
712 |
+
c = np.arange(1, 9).reshape(4, 2)
|
713 |
+
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
|
714 |
+
[[[1, 3], [3, 9], [5, 15], [7, 21]],
|
715 |
+
[[8, 16], [16, 32], [24, 48], [32, 64]]])
|
716 |
+
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
|
717 |
+
[[[1, 3], [3, 9], [5, 15], [7, 21]],
|
718 |
+
[[8, 16], [16, 32], [24, 48], [32, 64]]])
|
719 |
+
|
720 |
+
# Ensure explicitly setting out=None does not cause an error
|
721 |
+
# see issue gh-15776 and issue gh-15256
|
722 |
+
assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
|
723 |
+
|
724 |
+
def test_object_loop(self):
|
725 |
+
|
726 |
+
class Mult:
|
727 |
+
def __mul__(self, other):
|
728 |
+
return 42
|
729 |
+
|
730 |
+
objMult = np.array([Mult()])
|
731 |
+
objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object)
|
732 |
+
|
733 |
+
with pytest.raises(TypeError):
|
734 |
+
np.einsum("i,j", [1], objNULL)
|
735 |
+
with pytest.raises(TypeError):
|
736 |
+
np.einsum("i,j", objNULL, [1])
|
737 |
+
assert np.einsum("i,j", objMult, objMult) == 42
|
738 |
+
|
739 |
+
def test_subscript_range(self):
|
740 |
+
# Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used
|
741 |
+
# when creating a subscript from arrays
|
742 |
+
a = np.ones((2, 3))
|
743 |
+
b = np.ones((3, 4))
|
744 |
+
np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
|
745 |
+
np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
|
746 |
+
np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
|
747 |
+
assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
|
748 |
+
assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
|
749 |
+
|
750 |
+
def test_einsum_broadcast(self):
|
751 |
+
# Issue #2455 change in handling ellipsis
|
752 |
+
# remove the 'middle broadcast' error
|
753 |
+
# only use the 'RIGHT' iteration in prepare_op_axes
|
754 |
+
# adds auto broadcast on left where it belongs
|
755 |
+
# broadcast on right has to be explicit
|
756 |
+
# We need to test the optimized parsing as well
|
757 |
+
|
758 |
+
A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
|
759 |
+
B = np.arange(3)
|
760 |
+
ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
|
761 |
+
for opt in [True, False]:
|
762 |
+
assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
|
763 |
+
assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
|
764 |
+
assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
|
765 |
+
|
766 |
+
A = np.arange(12).reshape((4, 3))
|
767 |
+
B = np.arange(6).reshape((3, 2))
|
768 |
+
ref = np.einsum('ik,kj->ij', A, B, optimize=False)
|
769 |
+
for opt in [True, False]:
|
770 |
+
assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
|
771 |
+
assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
|
772 |
+
assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
|
773 |
+
assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
|
774 |
+
|
775 |
+
dims = [2, 3, 4, 5]
|
776 |
+
a = np.arange(np.prod(dims)).reshape(dims)
|
777 |
+
v = np.arange(dims[2])
|
778 |
+
ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
|
779 |
+
for opt in [True, False]:
|
780 |
+
assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
|
781 |
+
assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
|
782 |
+
assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
|
783 |
+
|
784 |
+
J, K, M = 160, 160, 120
|
785 |
+
A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
|
786 |
+
B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
|
787 |
+
ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
|
788 |
+
for opt in [True, False]:
|
789 |
+
assert_equal(np.einsum('...lmn,lmno->...o', A, B,
|
790 |
+
optimize=opt), ref) # used to raise error
|
791 |
+
|
792 |
+
def test_einsum_fixedstridebug(self):
|
793 |
+
# Issue #4485 obscure einsum bug
|
794 |
+
# This case revealed a bug in nditer where it reported a stride
|
795 |
+
# as 'fixed' (0) when it was in fact not fixed during processing
|
796 |
+
# (0 or 4). The reason for the bug was that the check for a fixed
|
797 |
+
# stride was using the information from the 2D inner loop reuse
|
798 |
+
# to restrict the iteration dimensions it had to validate to be
|
799 |
+
# the same, but that 2D inner loop reuse logic is only triggered
|
800 |
+
# during the buffer copying step, and hence it was invalid to
|
801 |
+
# rely on those values. The fix is to check all the dimensions
|
802 |
+
# of the stride in question, which in the test case reveals that
|
803 |
+
# the stride is not fixed.
|
804 |
+
#
|
805 |
+
# NOTE: This test is triggered by the fact that the default buffersize,
|
806 |
+
# used by einsum, is 8192, and 3*2731 = 8193, is larger than that
|
807 |
+
# and results in a mismatch between the buffering and the
|
808 |
+
# striding for operand A.
|
809 |
+
A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
|
810 |
+
B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
|
811 |
+
es = np.einsum('cl, cpx->lpx', A, B)
|
812 |
+
tp = np.tensordot(A, B, axes=(0, 0))
|
813 |
+
assert_equal(es, tp)
|
814 |
+
# The following is the original test case from the bug report,
|
815 |
+
# made repeatable by changing random arrays to aranges.
|
816 |
+
A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
|
817 |
+
B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
|
818 |
+
es = np.einsum('cl, cpxy->lpxy', A, B)
|
819 |
+
tp = np.tensordot(A, B, axes=(0, 0))
|
820 |
+
assert_equal(es, tp)
|
821 |
+
|
822 |
+
def test_einsum_fixed_collapsingbug(self):
|
823 |
+
# Issue #5147.
|
824 |
+
# The bug only occurred when output argument of einssum was used.
|
825 |
+
x = np.random.normal(0, 1, (5, 5, 5, 5))
|
826 |
+
y1 = np.zeros((5, 5))
|
827 |
+
np.einsum('aabb->ab', x, out=y1)
|
828 |
+
idx = np.arange(5)
|
829 |
+
y2 = x[idx[:, None], idx[:, None], idx, idx]
|
830 |
+
assert_equal(y1, y2)
|
831 |
+
|
832 |
+
def test_einsum_failed_on_p9_and_s390x(self):
|
833 |
+
# Issues gh-14692 and gh-12689
|
834 |
+
# Bug with signed vs unsigned char errored on power9 and s390x Linux
|
835 |
+
tensor = np.random.random_sample((10, 10, 10, 10))
|
836 |
+
x = np.einsum('ijij->', tensor)
|
837 |
+
y = tensor.trace(axis1=0, axis2=2).trace()
|
838 |
+
assert_allclose(x, y)
|
839 |
+
|
840 |
+
def test_einsum_all_contig_non_contig_output(self):
|
841 |
+
# Issue gh-5907, tests that the all contiguous special case
|
842 |
+
# actually checks the contiguity of the output
|
843 |
+
x = np.ones((5, 5))
|
844 |
+
out = np.ones(10)[::2]
|
845 |
+
correct_base = np.ones(10)
|
846 |
+
correct_base[::2] = 5
|
847 |
+
# Always worked (inner iteration is done with 0-stride):
|
848 |
+
np.einsum('mi,mi,mi->m', x, x, x, out=out)
|
849 |
+
assert_array_equal(out.base, correct_base)
|
850 |
+
# Example 1:
|
851 |
+
out = np.ones(10)[::2]
|
852 |
+
np.einsum('im,im,im->m', x, x, x, out=out)
|
853 |
+
assert_array_equal(out.base, correct_base)
|
854 |
+
# Example 2, buffering causes x to be contiguous but
|
855 |
+
# special cases do not catch the operation before:
|
856 |
+
out = np.ones((2, 2, 2))[..., 0]
|
857 |
+
correct_base = np.ones((2, 2, 2))
|
858 |
+
correct_base[..., 0] = 2
|
859 |
+
x = np.ones((2, 2), np.float32)
|
860 |
+
np.einsum('ij,jk->ik', x, x, out=out)
|
861 |
+
assert_array_equal(out.base, correct_base)
|
862 |
+
|
863 |
+
@pytest.mark.parametrize("dtype",
|
864 |
+
np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
|
865 |
+
def test_different_paths(self, dtype):
|
866 |
+
# Test originally added to cover broken float16 path: gh-20305
|
867 |
+
# Likely most are covered elsewhere, at least partially.
|
868 |
+
dtype = np.dtype(dtype)
|
869 |
+
# Simple test, designed to exercise most specialized code paths,
|
870 |
+
# note the +0.5 for floats. This makes sure we use a float value
|
871 |
+
# where the results must be exact.
|
872 |
+
arr = (np.arange(7) + 0.5).astype(dtype)
|
873 |
+
scalar = np.array(2, dtype=dtype)
|
874 |
+
|
875 |
+
# contig -> scalar:
|
876 |
+
res = np.einsum('i->', arr)
|
877 |
+
assert res == arr.sum()
|
878 |
+
# contig, contig -> contig:
|
879 |
+
res = np.einsum('i,i->i', arr, arr)
|
880 |
+
assert_array_equal(res, arr * arr)
|
881 |
+
# noncontig, noncontig -> contig:
|
882 |
+
res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
|
883 |
+
assert_array_equal(res, arr * arr)
|
884 |
+
# contig + contig -> scalar
|
885 |
+
assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
|
886 |
+
# contig + scalar -> contig (with out)
|
887 |
+
out = np.ones(7, dtype=dtype)
|
888 |
+
res = np.einsum('i,->i', arr, dtype.type(2), out=out)
|
889 |
+
assert_array_equal(res, arr * dtype.type(2))
|
890 |
+
# scalar + contig -> contig (with out)
|
891 |
+
res = np.einsum(',i->i', scalar, arr)
|
892 |
+
assert_array_equal(res, arr * dtype.type(2))
|
893 |
+
# scalar + contig -> scalar
|
894 |
+
res = np.einsum(',i->', scalar, arr)
|
895 |
+
# Use einsum to compare to not have difference due to sum round-offs:
|
896 |
+
assert res == np.einsum('i->', scalar * arr)
|
897 |
+
# contig + scalar -> scalar
|
898 |
+
res = np.einsum('i,->', arr, scalar)
|
899 |
+
# Use einsum to compare to not have difference due to sum round-offs:
|
900 |
+
assert res == np.einsum('i->', scalar * arr)
|
901 |
+
# contig + contig + contig -> scalar
|
902 |
+
arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
|
903 |
+
res = np.einsum('i,i,i->', arr, arr, arr)
|
904 |
+
assert_array_equal(res, (arr * arr * arr).sum())
|
905 |
+
# four arrays:
|
906 |
+
res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
|
907 |
+
assert_array_equal(res, (arr * arr * arr * arr).sum())
|
908 |
+
|
909 |
+
def test_small_boolean_arrays(self):
|
910 |
+
# See gh-5946.
|
911 |
+
# Use array of True embedded in False.
|
912 |
+
a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
|
913 |
+
a[...] = True
|
914 |
+
out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
|
915 |
+
tgt = np.ones((2, 1, 1), dtype=np.bool_)
|
916 |
+
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
|
917 |
+
assert_equal(res, tgt)
|
918 |
+
|
919 |
+
def test_out_is_res(self):
|
920 |
+
a = np.arange(9).reshape(3, 3)
|
921 |
+
res = np.einsum('...ij,...jk->...ik', a, a, out=a)
|
922 |
+
assert res is a
|
923 |
+
|
924 |
+
def optimize_compare(self, subscripts, operands=None):
|
925 |
+
# Tests all paths of the optimization function against
|
926 |
+
# conventional einsum
|
927 |
+
if operands is None:
|
928 |
+
args = [subscripts]
|
929 |
+
terms = subscripts.split('->')[0].split(',')
|
930 |
+
for term in terms:
|
931 |
+
dims = [global_size_dict[x] for x in term]
|
932 |
+
args.append(np.random.rand(*dims))
|
933 |
+
else:
|
934 |
+
args = [subscripts] + operands
|
935 |
+
|
936 |
+
noopt = np.einsum(*args, optimize=False)
|
937 |
+
opt = np.einsum(*args, optimize='greedy')
|
938 |
+
assert_almost_equal(opt, noopt)
|
939 |
+
opt = np.einsum(*args, optimize='optimal')
|
940 |
+
assert_almost_equal(opt, noopt)
|
941 |
+
|
942 |
+
def test_hadamard_like_products(self):
|
943 |
+
# Hadamard outer products
|
944 |
+
self.optimize_compare('a,ab,abc->abc')
|
945 |
+
self.optimize_compare('a,b,ab->ab')
|
946 |
+
|
947 |
+
def test_index_transformations(self):
|
948 |
+
# Simple index transformation cases
|
949 |
+
self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
|
950 |
+
self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
|
951 |
+
self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
|
952 |
+
|
953 |
+
def test_complex(self):
|
954 |
+
# Long test cases
|
955 |
+
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
|
956 |
+
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
|
957 |
+
self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
|
958 |
+
self.optimize_compare('abhe,hidj,jgba,hiab,gab')
|
959 |
+
self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
|
960 |
+
self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
|
961 |
+
self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
|
962 |
+
self.optimize_compare('bdhe,acad,hiab,agac,hibd')
|
963 |
+
|
964 |
+
def test_collapse(self):
|
965 |
+
# Inner products
|
966 |
+
self.optimize_compare('ab,ab,c->')
|
967 |
+
self.optimize_compare('ab,ab,c->c')
|
968 |
+
self.optimize_compare('ab,ab,cd,cd->')
|
969 |
+
self.optimize_compare('ab,ab,cd,cd->ac')
|
970 |
+
self.optimize_compare('ab,ab,cd,cd->cd')
|
971 |
+
self.optimize_compare('ab,ab,cd,cd,ef,ef->')
|
972 |
+
|
973 |
+
def test_expand(self):
|
974 |
+
# Outer products
|
975 |
+
self.optimize_compare('ab,cd,ef->abcdef')
|
976 |
+
self.optimize_compare('ab,cd,ef->acdf')
|
977 |
+
self.optimize_compare('ab,cd,de->abcde')
|
978 |
+
self.optimize_compare('ab,cd,de->be')
|
979 |
+
self.optimize_compare('ab,bcd,cd->abcd')
|
980 |
+
self.optimize_compare('ab,bcd,cd->abd')
|
981 |
+
|
982 |
+
def test_edge_cases(self):
|
983 |
+
# Difficult edge cases for optimization
|
984 |
+
self.optimize_compare('eb,cb,fb->cef')
|
985 |
+
self.optimize_compare('dd,fb,be,cdb->cef')
|
986 |
+
self.optimize_compare('bca,cdb,dbf,afc->')
|
987 |
+
self.optimize_compare('dcc,fce,ea,dbf->ab')
|
988 |
+
self.optimize_compare('fdf,cdd,ccd,afe->ae')
|
989 |
+
self.optimize_compare('abcd,ad')
|
990 |
+
self.optimize_compare('ed,fcd,ff,bcf->be')
|
991 |
+
self.optimize_compare('baa,dcf,af,cde->be')
|
992 |
+
self.optimize_compare('bd,db,eac->ace')
|
993 |
+
self.optimize_compare('fff,fae,bef,def->abd')
|
994 |
+
self.optimize_compare('efc,dbc,acf,fd->abe')
|
995 |
+
self.optimize_compare('ba,ac,da->bcd')
|
996 |
+
|
997 |
+
def test_inner_product(self):
|
998 |
+
# Inner products
|
999 |
+
self.optimize_compare('ab,ab')
|
1000 |
+
self.optimize_compare('ab,ba')
|
1001 |
+
self.optimize_compare('abc,abc')
|
1002 |
+
self.optimize_compare('abc,bac')
|
1003 |
+
self.optimize_compare('abc,cba')
|
1004 |
+
|
1005 |
+
def test_random_cases(self):
|
1006 |
+
# Randomly built test cases
|
1007 |
+
self.optimize_compare('aab,fa,df,ecc->bde')
|
1008 |
+
self.optimize_compare('ecb,fef,bad,ed->ac')
|
1009 |
+
self.optimize_compare('bcf,bbb,fbf,fc->')
|
1010 |
+
self.optimize_compare('bb,ff,be->e')
|
1011 |
+
self.optimize_compare('bcb,bb,fc,fff->')
|
1012 |
+
self.optimize_compare('fbb,dfd,fc,fc->')
|
1013 |
+
self.optimize_compare('afd,ba,cc,dc->bf')
|
1014 |
+
self.optimize_compare('adb,bc,fa,cfc->d')
|
1015 |
+
self.optimize_compare('bbd,bda,fc,db->acf')
|
1016 |
+
self.optimize_compare('dba,ead,cad->bce')
|
1017 |
+
self.optimize_compare('aef,fbc,dca->bde')
|
1018 |
+
|
1019 |
+
def test_combined_views_mapping(self):
|
1020 |
+
# gh-10792
|
1021 |
+
a = np.arange(9).reshape(1, 1, 3, 1, 3)
|
1022 |
+
b = np.einsum('bbcdc->d', a)
|
1023 |
+
assert_equal(b, [12])
|
1024 |
+
|
1025 |
+
def test_broadcasting_dot_cases(self):
|
1026 |
+
# Ensures broadcasting cases are not mistaken for GEMM
|
1027 |
+
|
1028 |
+
a = np.random.rand(1, 5, 4)
|
1029 |
+
b = np.random.rand(4, 6)
|
1030 |
+
c = np.random.rand(5, 6)
|
1031 |
+
d = np.random.rand(10)
|
1032 |
+
|
1033 |
+
self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
|
1034 |
+
self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
|
1035 |
+
|
1036 |
+
e = np.random.rand(1, 1, 5, 4)
|
1037 |
+
f = np.random.rand(7, 7)
|
1038 |
+
self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
|
1039 |
+
self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
|
1040 |
+
|
1041 |
+
# Edge case found in gh-11308
|
1042 |
+
g = np.arange(64).reshape(2, 4, 8)
|
1043 |
+
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
|
1044 |
+
|
1045 |
+
def test_output_order(self):
|
1046 |
+
# Ensure output order is respected for optimize cases, the below
|
1047 |
+
# conraction should yield a reshaped tensor view
|
1048 |
+
# gh-16415
|
1049 |
+
|
1050 |
+
a = np.ones((2, 3, 5), order='F')
|
1051 |
+
b = np.ones((4, 3), order='F')
|
1052 |
+
|
1053 |
+
for opt in [True, False]:
|
1054 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
|
1055 |
+
assert_(tmp.flags.f_contiguous)
|
1056 |
+
|
1057 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
|
1058 |
+
assert_(tmp.flags.f_contiguous)
|
1059 |
+
|
1060 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
|
1061 |
+
assert_(tmp.flags.c_contiguous)
|
1062 |
+
|
1063 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
|
1064 |
+
assert_(tmp.flags.c_contiguous is False)
|
1065 |
+
assert_(tmp.flags.f_contiguous is False)
|
1066 |
+
|
1067 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
|
1068 |
+
assert_(tmp.flags.c_contiguous is False)
|
1069 |
+
assert_(tmp.flags.f_contiguous is False)
|
1070 |
+
|
1071 |
+
c = np.ones((4, 3), order='C')
|
1072 |
+
for opt in [True, False]:
|
1073 |
+
tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
|
1074 |
+
assert_(tmp.flags.c_contiguous)
|
1075 |
+
|
1076 |
+
d = np.ones((2, 3, 5), order='C')
|
1077 |
+
for opt in [True, False]:
|
1078 |
+
tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
|
1079 |
+
assert_(tmp.flags.c_contiguous)
|
1080 |
+
|
1081 |
+
class TestEinsumPath:
|
1082 |
+
def build_operands(self, string, size_dict=global_size_dict):
|
1083 |
+
|
1084 |
+
# Builds views based off initial operands
|
1085 |
+
operands = [string]
|
1086 |
+
terms = string.split('->')[0].split(',')
|
1087 |
+
for term in terms:
|
1088 |
+
dims = [size_dict[x] for x in term]
|
1089 |
+
operands.append(np.random.rand(*dims))
|
1090 |
+
|
1091 |
+
return operands
|
1092 |
+
|
1093 |
+
def assert_path_equal(self, comp, benchmark):
|
1094 |
+
# Checks if list of tuples are equivalent
|
1095 |
+
ret = (len(comp) == len(benchmark))
|
1096 |
+
assert_(ret)
|
1097 |
+
for pos in range(len(comp) - 1):
|
1098 |
+
ret &= isinstance(comp[pos + 1], tuple)
|
1099 |
+
ret &= (comp[pos + 1] == benchmark[pos + 1])
|
1100 |
+
assert_(ret)
|
1101 |
+
|
1102 |
+
def test_memory_contraints(self):
|
1103 |
+
# Ensure memory constraints are satisfied
|
1104 |
+
|
1105 |
+
outer_test = self.build_operands('a,b,c->abc')
|
1106 |
+
|
1107 |
+
path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
|
1108 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
|
1109 |
+
|
1110 |
+
path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
|
1111 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
|
1112 |
+
|
1113 |
+
long_test = self.build_operands('acdf,jbje,gihb,hfac')
|
1114 |
+
path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
|
1115 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
1116 |
+
|
1117 |
+
path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
|
1118 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
1119 |
+
|
1120 |
+
def test_long_paths(self):
|
1121 |
+
# Long complex cases
|
1122 |
+
|
1123 |
+
# Long test 1
|
1124 |
+
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
|
1125 |
+
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
|
1126 |
+
self.assert_path_equal(path, ['einsum_path',
|
1127 |
+
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
|
1128 |
+
|
1129 |
+
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
|
1130 |
+
self.assert_path_equal(path, ['einsum_path',
|
1131 |
+
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
|
1132 |
+
|
1133 |
+
# Long test 2
|
1134 |
+
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
|
1135 |
+
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
|
1136 |
+
self.assert_path_equal(path, ['einsum_path',
|
1137 |
+
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
|
1138 |
+
|
1139 |
+
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
|
1140 |
+
self.assert_path_equal(path, ['einsum_path',
|
1141 |
+
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
|
1142 |
+
|
1143 |
+
def test_edge_paths(self):
|
1144 |
+
# Difficult edge cases
|
1145 |
+
|
1146 |
+
# Edge test1
|
1147 |
+
edge_test1 = self.build_operands('eb,cb,fb->cef')
|
1148 |
+
path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
|
1149 |
+
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
|
1150 |
+
|
1151 |
+
path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
|
1152 |
+
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
|
1153 |
+
|
1154 |
+
# Edge test2
|
1155 |
+
edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
|
1156 |
+
path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
|
1157 |
+
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
|
1158 |
+
|
1159 |
+
path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
|
1160 |
+
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
|
1161 |
+
|
1162 |
+
# Edge test3
|
1163 |
+
edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
|
1164 |
+
path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
|
1165 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
1166 |
+
|
1167 |
+
path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
|
1168 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
1169 |
+
|
1170 |
+
# Edge test4
|
1171 |
+
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
|
1172 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
|
1173 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
|
1174 |
+
|
1175 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
|
1176 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
1177 |
+
|
1178 |
+
# Edge test5
|
1179 |
+
edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
|
1180 |
+
size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
|
1181 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
|
1182 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
|
1183 |
+
|
1184 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
|
1185 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
|
1186 |
+
|
1187 |
+
def test_path_type_input(self):
|
1188 |
+
# Test explicit path handling
|
1189 |
+
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
|
1190 |
+
|
1191 |
+
path, path_str = np.einsum_path(*path_test, optimize=False)
|
1192 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
1193 |
+
|
1194 |
+
path, path_str = np.einsum_path(*path_test, optimize=True)
|
1195 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
|
1196 |
+
|
1197 |
+
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
|
1198 |
+
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
|
1199 |
+
self.assert_path_equal(path, exp_path)
|
1200 |
+
|
1201 |
+
# Double check einsum works on the input path
|
1202 |
+
noopt = np.einsum(*path_test, optimize=False)
|
1203 |
+
opt = np.einsum(*path_test, optimize=exp_path)
|
1204 |
+
assert_almost_equal(noopt, opt)
|
1205 |
+
|
1206 |
+
def test_path_type_input_internal_trace(self):
|
1207 |
+
#gh-20962
|
1208 |
+
path_test = self.build_operands('cab,cdd->ab')
|
1209 |
+
exp_path = ['einsum_path', (1,), (0, 1)]
|
1210 |
+
|
1211 |
+
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
|
1212 |
+
self.assert_path_equal(path, exp_path)
|
1213 |
+
|
1214 |
+
# Double check einsum works on the input path
|
1215 |
+
noopt = np.einsum(*path_test, optimize=False)
|
1216 |
+
opt = np.einsum(*path_test, optimize=exp_path)
|
1217 |
+
assert_almost_equal(noopt, opt)
|
1218 |
+
|
1219 |
+
def test_path_type_input_invalid(self):
|
1220 |
+
path_test = self.build_operands('ab,bc,cd,de->ae')
|
1221 |
+
exp_path = ['einsum_path', (2, 3), (0, 1)]
|
1222 |
+
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
|
1223 |
+
assert_raises(
|
1224 |
+
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
|
1225 |
+
|
1226 |
+
path_test = self.build_operands('a,a,a->a')
|
1227 |
+
exp_path = ['einsum_path', (1,), (0, 1)]
|
1228 |
+
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
|
1229 |
+
assert_raises(
|
1230 |
+
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
|
1231 |
+
|
1232 |
+
def test_spaces(self):
|
1233 |
+
#gh-10794
|
1234 |
+
arr = np.array([[1]])
|
1235 |
+
for sp in itertools.product(['', ' '], repeat=4):
|
1236 |
+
# no error for any spacing
|
1237 |
+
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
|
1238 |
+
|
1239 |
+
def test_overlap():
|
1240 |
+
a = np.arange(9, dtype=int).reshape(3, 3)
|
1241 |
+
b = np.arange(9, dtype=int).reshape(3, 3)
|
1242 |
+
d = np.dot(a, b)
|
1243 |
+
# sanity check
|
1244 |
+
c = np.einsum('ij,jk->ik', a, b)
|
1245 |
+
assert_equal(c, d)
|
1246 |
+
#gh-10080, out overlaps one of the operands
|
1247 |
+
c = np.einsum('ij,jk->ik', a, b, out=b)
|
1248 |
+
assert_equal(c, d)
|
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_extint128.py
ADDED
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
import contextlib
|
3 |
+
import operator
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import numpy.core._multiarray_tests as mt
|
8 |
+
|
9 |
+
from numpy.testing import assert_raises, assert_equal
|
10 |
+
|
11 |
+
|
12 |
+
INT64_MAX = np.iinfo(np.int64).max
|
13 |
+
INT64_MIN = np.iinfo(np.int64).min
|
14 |
+
INT64_MID = 2**32
|
15 |
+
|
16 |
+
# int128 is not two's complement, the sign bit is separate
|
17 |
+
INT128_MAX = 2**128 - 1
|
18 |
+
INT128_MIN = -INT128_MAX
|
19 |
+
INT128_MID = 2**64
|
20 |
+
|
21 |
+
INT64_VALUES = (
|
22 |
+
[INT64_MIN + j for j in range(20)] +
|
23 |
+
[INT64_MAX - j for j in range(20)] +
|
24 |
+
[INT64_MID + j for j in range(-20, 20)] +
|
25 |
+
[2*INT64_MID + j for j in range(-20, 20)] +
|
26 |
+
[INT64_MID//2 + j for j in range(-20, 20)] +
|
27 |
+
list(range(-70, 70))
|
28 |
+
)
|
29 |
+
|
30 |
+
INT128_VALUES = (
|
31 |
+
[INT128_MIN + j for j in range(20)] +
|
32 |
+
[INT128_MAX - j for j in range(20)] +
|
33 |
+
[INT128_MID + j for j in range(-20, 20)] +
|
34 |
+
[2*INT128_MID + j for j in range(-20, 20)] +
|
35 |
+
[INT128_MID//2 + j for j in range(-20, 20)] +
|
36 |
+
list(range(-70, 70)) +
|
37 |
+
[False] # negative zero
|
38 |
+
)
|
39 |
+
|
40 |
+
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
|
41 |
+
|
42 |
+
|
43 |
+
@contextlib.contextmanager
|
44 |
+
def exc_iter(*args):
|
45 |
+
"""
|
46 |
+
Iterate over Cartesian product of *args, and if an exception is raised,
|
47 |
+
add information of the current iterate.
|
48 |
+
"""
|
49 |
+
|
50 |
+
value = [None]
|
51 |
+
|
52 |
+
def iterate():
|
53 |
+
for v in itertools.product(*args):
|
54 |
+
value[0] = v
|
55 |
+
yield v
|
56 |
+
|
57 |
+
try:
|
58 |
+
yield iterate()
|
59 |
+
except Exception:
|
60 |
+
import traceback
|
61 |
+
msg = "At: %r\n%s" % (repr(value[0]),
|
62 |
+
traceback.format_exc())
|
63 |
+
raise AssertionError(msg)
|
64 |
+
|
65 |
+
|
66 |
+
def test_safe_binop():
|
67 |
+
# Test checked arithmetic routines
|
68 |
+
|
69 |
+
ops = [
|
70 |
+
(operator.add, 1),
|
71 |
+
(operator.sub, 2),
|
72 |
+
(operator.mul, 3)
|
73 |
+
]
|
74 |
+
|
75 |
+
with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
|
76 |
+
for xop, a, b in it:
|
77 |
+
pyop, op = xop
|
78 |
+
c = pyop(a, b)
|
79 |
+
|
80 |
+
if not (INT64_MIN <= c <= INT64_MAX):
|
81 |
+
assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
|
82 |
+
else:
|
83 |
+
d = mt.extint_safe_binop(a, b, op)
|
84 |
+
if c != d:
|
85 |
+
# assert_equal is slow
|
86 |
+
assert_equal(d, c)
|
87 |
+
|
88 |
+
|
89 |
+
def test_to_128():
|
90 |
+
with exc_iter(INT64_VALUES) as it:
|
91 |
+
for a, in it:
|
92 |
+
b = mt.extint_to_128(a)
|
93 |
+
if a != b:
|
94 |
+
assert_equal(b, a)
|
95 |
+
|
96 |
+
|
97 |
+
def test_to_64():
|
98 |
+
with exc_iter(INT128_VALUES) as it:
|
99 |
+
for a, in it:
|
100 |
+
if not (INT64_MIN <= a <= INT64_MAX):
|
101 |
+
assert_raises(OverflowError, mt.extint_to_64, a)
|
102 |
+
else:
|
103 |
+
b = mt.extint_to_64(a)
|
104 |
+
if a != b:
|
105 |
+
assert_equal(b, a)
|
106 |
+
|
107 |
+
|
108 |
+
def test_mul_64_64():
|
109 |
+
with exc_iter(INT64_VALUES, INT64_VALUES) as it:
|
110 |
+
for a, b in it:
|
111 |
+
c = a * b
|
112 |
+
d = mt.extint_mul_64_64(a, b)
|
113 |
+
if c != d:
|
114 |
+
assert_equal(d, c)
|
115 |
+
|
116 |
+
|
117 |
+
def test_add_128():
|
118 |
+
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
|
119 |
+
for a, b in it:
|
120 |
+
c = a + b
|
121 |
+
if not (INT128_MIN <= c <= INT128_MAX):
|
122 |
+
assert_raises(OverflowError, mt.extint_add_128, a, b)
|
123 |
+
else:
|
124 |
+
d = mt.extint_add_128(a, b)
|
125 |
+
if c != d:
|
126 |
+
assert_equal(d, c)
|
127 |
+
|
128 |
+
|
129 |
+
def test_sub_128():
|
130 |
+
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
|
131 |
+
for a, b in it:
|
132 |
+
c = a - b
|
133 |
+
if not (INT128_MIN <= c <= INT128_MAX):
|
134 |
+
assert_raises(OverflowError, mt.extint_sub_128, a, b)
|
135 |
+
else:
|
136 |
+
d = mt.extint_sub_128(a, b)
|
137 |
+
if c != d:
|
138 |
+
assert_equal(d, c)
|
139 |
+
|
140 |
+
|
141 |
+
def test_neg_128():
|
142 |
+
with exc_iter(INT128_VALUES) as it:
|
143 |
+
for a, in it:
|
144 |
+
b = -a
|
145 |
+
c = mt.extint_neg_128(a)
|
146 |
+
if b != c:
|
147 |
+
assert_equal(c, b)
|
148 |
+
|
149 |
+
|
150 |
+
def test_shl_128():
|
151 |
+
with exc_iter(INT128_VALUES) as it:
|
152 |
+
for a, in it:
|
153 |
+
if a < 0:
|
154 |
+
b = -(((-a) << 1) & (2**128-1))
|
155 |
+
else:
|
156 |
+
b = (a << 1) & (2**128-1)
|
157 |
+
c = mt.extint_shl_128(a)
|
158 |
+
if b != c:
|
159 |
+
assert_equal(c, b)
|
160 |
+
|
161 |
+
|
162 |
+
def test_shr_128():
|
163 |
+
with exc_iter(INT128_VALUES) as it:
|
164 |
+
for a, in it:
|
165 |
+
if a < 0:
|
166 |
+
b = -((-a) >> 1)
|
167 |
+
else:
|
168 |
+
b = a >> 1
|
169 |
+
c = mt.extint_shr_128(a)
|
170 |
+
if b != c:
|
171 |
+
assert_equal(c, b)
|
172 |
+
|
173 |
+
|
174 |
+
def test_gt_128():
|
175 |
+
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
|
176 |
+
for a, b in it:
|
177 |
+
c = a > b
|
178 |
+
d = mt.extint_gt_128(a, b)
|
179 |
+
if c != d:
|
180 |
+
assert_equal(d, c)
|
181 |
+
|
182 |
+
|
183 |
+
@pytest.mark.slow
|
184 |
+
def test_divmod_128_64():
|
185 |
+
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
|
186 |
+
for a, b in it:
|
187 |
+
if a >= 0:
|
188 |
+
c, cr = divmod(a, b)
|
189 |
+
else:
|
190 |
+
c, cr = divmod(-a, b)
|
191 |
+
c = -c
|
192 |
+
cr = -cr
|
193 |
+
|
194 |
+
d, dr = mt.extint_divmod_128_64(a, b)
|
195 |
+
|
196 |
+
if c != d or d != dr or b*d + dr != a:
|
197 |
+
assert_equal(d, c)
|
198 |
+
assert_equal(dr, cr)
|
199 |
+
assert_equal(b*d + dr, a)
|
200 |
+
|
201 |
+
|
202 |
+
def test_floordiv_128_64():
|
203 |
+
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
|
204 |
+
for a, b in it:
|
205 |
+
c = a // b
|
206 |
+
d = mt.extint_floordiv_128_64(a, b)
|
207 |
+
|
208 |
+
if c != d:
|
209 |
+
assert_equal(d, c)
|
210 |
+
|
211 |
+
|
212 |
+
def test_ceildiv_128_64():
|
213 |
+
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
|
214 |
+
for a, b in it:
|
215 |
+
c = (a + b - 1) // b
|
216 |
+
d = mt.extint_ceildiv_128_64(a, b)
|
217 |
+
|
218 |
+
if c != d:
|
219 |
+
assert_equal(d, c)
|