Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/numpy/__pycache__/__config__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/__pycache__/conftest.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__init__.py +4 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__init__.pyi +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_multiarray_umath.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/_dtype.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/_dtype_ctypes.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/_internal.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/_multiarray_umath.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/multiarray.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_core/umath.py +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/pyinstaller-smoke.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/test_pyinstaller.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/hook-numpy.py +37 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py +32 -0
- env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/test_pyinstaller.py +35 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/py3k.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/setup.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/py3k.py +145 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/setup.py +10 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/test_compat.py +22 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/_locales.py +74 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_conversion_utils.py +208 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cpu_dispatcher.py +43 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cython.py +135 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_errstate.py +61 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_limited_api.py +44 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_longdouble.py +395 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_print.py +202 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_scalarinherit.py +98 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_shape_base.py +825 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/test_helper.py +167 -0
- env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/test_pocketfft.py +308 -0
env-llmeval/lib/python3.10/site-packages/numpy/__pycache__/__config__.cpython-310.pyc
ADDED
Binary file (3.97 kB)

env-llmeval/lib/python3.10/site-packages/numpy/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (3.88 kB)
env-llmeval/lib/python3.10/site-packages/numpy/_core/__init__.py
ADDED
@@ -0,0 +1,4 @@
+"""
+This private module only contains stubs for interoperability with
+NumPy 2.0 pickled arrays. It may not be used by the end user.
+"""
env-llmeval/lib/python3.10/site-packages/numpy/_core/__init__.pyi
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (317 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc
ADDED
Binary file (300 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc
ADDED
Binary file (314 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc
ADDED
Binary file (306 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/_multiarray_umath.cpython-310.pyc
ADDED
Binary file (322 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc
ADDED
Binary file (308 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc
ADDED
Binary file (298 Bytes)
env-llmeval/lib/python3.10/site-packages/numpy/_core/_dtype.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import _dtype
+
+_globals = globals()
+
+for item in _dtype.__dir__():
+    _globals[item] = getattr(_dtype, item)

env-llmeval/lib/python3.10/site-packages/numpy/_core/_dtype_ctypes.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import _dtype_ctypes
+
+_globals = globals()
+
+for item in _dtype_ctypes.__dir__():
+    _globals[item] = getattr(_dtype_ctypes, item)

env-llmeval/lib/python3.10/site-packages/numpy/_core/_internal.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import _internal
+
+_globals = globals()
+
+for item in _internal.__dir__():
+    _globals[item] = getattr(_internal, item)

env-llmeval/lib/python3.10/site-packages/numpy/_core/_multiarray_umath.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import _multiarray_umath
+
+_globals = globals()
+
+for item in _multiarray_umath.__dir__():
+    _globals[item] = getattr(_multiarray_umath, item)

env-llmeval/lib/python3.10/site-packages/numpy/_core/multiarray.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import multiarray
+
+_globals = globals()
+
+for item in multiarray.__dir__():
+    _globals[item] = getattr(multiarray, item)

env-llmeval/lib/python3.10/site-packages/numpy/_core/umath.py
ADDED
@@ -0,0 +1,6 @@
+from numpy.core import umath
+
+_globals = globals()
+
+for item in umath.__dir__():
+    _globals[item] = getattr(umath, item)
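Note (not part of the diff): the six `numpy/_core/*.py` files above all use the same forwarding pattern described in the `_core/__init__.py` docstring, copying every attribute of the corresponding `numpy.core` module into the stub's namespace so NumPy 2.0 pickles that reference `numpy._core` can load on this 1.x install. A minimal illustrative check, assuming this environment:

# Illustrative check of the stub-forwarding behaviour; not one of the uploaded files.
import numpy.core.umath as real_umath
import numpy._core.umath as stub_umath  # stub module added by this commit

# The for-loop in the stub re-exports every attribute of the real module,
# so lookups through numpy._core resolve to the same objects as numpy.core.
assert stub_umath.add is real_umath.add
assert stub_umath.pi == real_umath.pi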
env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc
ADDED
Binary file (890 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/pyinstaller-smoke.cpython-310.pyc
ADDED
Binary file (1.5 kB)

env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/__pycache__/test_pyinstaller.cpython-310.pyc
ADDED
Binary file (1.08 kB)
env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/hook-numpy.py
ADDED
@@ -0,0 +1,37 @@
+"""This hook should collect all binary files and any hidden modules that numpy
+needs.
+
+Our (some-what inadequate) docs for writing PyInstaller hooks are kept here:
+https://pyinstaller.readthedocs.io/en/stable/hooks.html
+
+"""
+from PyInstaller.compat import is_conda, is_pure_conda
+from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies
+
+# Collect all DLLs inside numpy's installation folder, dump them into built
+# app's root.
+binaries = collect_dynamic_libs("numpy", ".")
+
+# If using Conda without any non-conda virtual environment manager:
+if is_pure_conda:
+    # Assume running the NumPy from Conda-forge and collect it's DLLs from the
+    # communal Conda bin directory. DLLs from NumPy's dependencies must also be
+    # collected to capture MKL, OpenBlas, OpenMP, etc.
+    from PyInstaller.utils.hooks import conda_support
+    datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
+
+# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported
+# from C and `_multiarray_tests` is used in tests (which are not packed).
+hiddenimports = ['numpy.core._dtype_ctypes', 'numpy.core._multiarray_tests']
+
+# Remove testing and building code and packages that are referenced throughout
+# NumPy but are not really dependencies.
+excludedimports = [
+    "scipy",
+    "pytest",
+    "f2py",
+    "setuptools",
+    "numpy.f2py",
+    "distutils",
+    "numpy.distutils",
+]
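Note (not part of the diff): a quick way to sanity-check the hook's `hiddenimports` list outside of a PyInstaller build is simply to try importing the names; an illustrative sketch:

# Illustrative: confirm the hook's hidden imports are importable in this environment.
import importlib

for name in ["numpy.core._dtype_ctypes", "numpy.core._multiarray_tests"]:
    try:
        importlib.import_module(name)
        print(name, "-> importable")
    except ImportError:
        # Per the hook comments, _multiarray_tests is only used by NumPy's own tests.
        print(name, "-> missing")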
env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py
ADDED
@@ -0,0 +1,32 @@
+"""A crude *bit of everything* smoke test to verify PyInstaller compatibility.
+
+PyInstaller typically goes wrong by forgetting to package modules, extension
+modules or shared libraries. This script should aim to touch as many of those
+as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure
+due to an uncollected resource. Missing resources are unlikely to lead to
+arithmetic errors so there's generally no need to verify any calculation's
+output - merely that it made it to the end OK. This script should not
+explicitly import any of numpy's submodules as that gives PyInstaller undue
+hints that those submodules exist and should be collected (accessing implicitly
+loaded submodules is OK).
+
+"""
+import numpy as np
+
+a = np.arange(1., 10.).reshape((3, 3)) % 5
+np.linalg.det(a)
+a @ a
+a @ a.T
+np.linalg.inv(a)
+np.sin(np.exp(a))
+np.linalg.svd(a)
+np.linalg.eigh(a)
+
+np.unique(np.random.randint(0, 10, 100))
+np.sort(np.random.uniform(0, 10, 100))
+
+np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum()
+np.polynomial.Legendre([7, 8, 9]).roots()
+
+print("I made it!")
env-llmeval/lib/python3.10/site-packages/numpy/_pyinstaller/test_pyinstaller.py
ADDED
@@ -0,0 +1,35 @@
+import subprocess
+from pathlib import Path
+
+import pytest
+
+
+# PyInstaller has been very unproactive about replacing 'imp' with 'importlib'.
+@pytest.mark.filterwarnings('ignore::DeprecationWarning')
+# It also leaks io.BytesIO()s.
+@pytest.mark.filterwarnings('ignore::ResourceWarning')
+@pytest.mark.parametrize("mode", ["--onedir", "--onefile"])
+@pytest.mark.slow
+def test_pyinstaller(mode, tmp_path):
+    """Compile and run pyinstaller-smoke.py using PyInstaller."""
+
+    pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run
+
+    source = Path(__file__).with_name("pyinstaller-smoke.py").resolve()
+    args = [
+        # Place all generated files in ``tmp_path``.
+        '--workpath', str(tmp_path / "build"),
+        '--distpath', str(tmp_path / "dist"),
+        '--specpath', str(tmp_path),
+        mode,
+        str(source),
+    ]
+    pyinstaller_cli(args)
+
+    if mode == "--onefile":
+        exe = tmp_path / "dist" / source.stem
+    else:
+        exe = tmp_path / "dist" / source.stem / source.stem
+
+    p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE)
+    assert p.stdout.strip() == b"I made it!"
env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (646 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/py3k.cpython-310.pyc
ADDED
Binary file (4.74 kB)

env-llmeval/lib/python3.10/site-packages/numpy/compat/__pycache__/setup.cpython-310.pyc
ADDED
Binary file (557 Bytes)
env-llmeval/lib/python3.10/site-packages/numpy/compat/py3k.py
ADDED
@@ -0,0 +1,145 @@
+"""
+Python 3.X compatibility tools.
+
+While this file was originally intended for Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+           'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+           'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+import os
+from pathlib import Path
+import io
+try:
+    import pickle5 as pickle
+except ImportError:
+    import pickle
+
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
+
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
+        f.fileno()
+        return True
+    except OSError:
+        return False
+
+def open_latin1(filename, mode='r'):
+    return open(filename, mode=mode, encoding='iso-8859-1')
+
+def sixu(s):
+    return s
+
+strchar = 'U'
+
+def getexception():
+    return sys.exc_info()[1]
+
+def asbytes_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asbytes_nested(y) for y in x]
+    else:
+        return asbytes(x)
+
+def asunicode_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asunicode_nested(y) for y in x]
+    else:
+        return asunicode(x)
+
+def is_pathlib_path(obj):
+    """
+    Check whether obj is a `pathlib.Path` object.
+
+    Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
+    """
+    return isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext:
+    """Context manager that does no additional processing.
+
+    Used as a stand-in for a normal context manager, when a particular
+    block of code is only sometimes used with a normal context manager:
+
+    cm = optional_cm if condition else nullcontext()
+    with cm:
+        # Perform operation, using optional_cm if condition is True
+
+    .. note::
+        Prefer using `contextlib.nullcontext` instead of this context manager.
+    """
+
+    def __init__(self, enter_result=None):
+        self.enter_result = enter_result
+
+    def __enter__(self):
+        return self.enter_result
+
+    def __exit__(self, *excinfo):
+        pass
+
+
+def npy_load_module(name, fn, info=None):
+    """
+    Load a module. Uses ``load_module`` which will be deprecated in python
+    3.12. An alternative that uses ``exec_module`` is in
+    numpy.distutils.misc_util.exec_mod_from_location
+
+    .. versionadded:: 1.11.2
+
+    Parameters
+    ----------
+    name : str
+        Full module name.
+    fn : str
+        Path to module file.
+    info : tuple, optional
+        Only here for backward compatibility with Python 2.*.
+
+    Returns
+    -------
+    mod : module
+
+    """
+    # Explicitly lazy import this to avoid paying the cost
+    # of importing importlib at startup
+    from importlib.machinery import SourceFileLoader
+    return SourceFileLoader(name, fn).load_module()
+
+
+os_fspath = os.fspath
+os_PathLike = os.PathLike
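Note (not part of the diff): for orientation, a short sketch of how downstream code typically uses these compatibility helpers, which are re-exported at the `numpy.compat` level:

# Illustrative usage of the numpy.compat shims defined in py3k.py above.
from pathlib import Path
from numpy.compat import asbytes, asunicode, contextlib_nullcontext, is_pathlib_path

assert asbytes("abc") == b"abc"        # str -> bytes via latin-1
assert asunicode(b"abc") == "abc"      # bytes -> str via latin-1
assert is_pathlib_path(Path("/tmp"))

# contextlib_nullcontext stands in when a real context manager is optional.
with contextlib_nullcontext(42) as value:
    assert value == 42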
env-llmeval/lib/python3.10/site-packages/numpy/compat/setup.py
ADDED
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('compat', parent_package, top_path)
+    config.add_subpackage('tests')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes)

env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-310.pyc
ADDED
Binary file (874 Bytes)
env-llmeval/lib/python3.10/site-packages/numpy/compat/tests/test_compat.py
ADDED
@@ -0,0 +1,22 @@
+from os.path import join
+from io import BufferedReader, BytesIO
+
+from numpy.compat import isfileobj
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+    with tempdir(prefix="numpy_test_compat_") as folder:
+        filename = join(folder, 'a.bin')
+
+        with open(filename, 'wb') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'ab') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'rb') as f:
+            assert_(isfileobj(f))
+
+        assert_(isfileobj(BufferedReader(BytesIO())) is False)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/_locales.py
ADDED
@@ -0,0 +1,74 @@
+"""Provide class for testing in French locale
+
+"""
+import sys
+import locale
+
+import pytest
+
+__ALL__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+    """See if platform has a decimal point as comma locale.
+
+    Find a locale that uses a comma instead of a period as the
+    decimal point.
+
+    Returns
+    -------
+    old_locale: str
+        Locale when the function was called.
+    new_locale: {str, None)
+        First French locale found, None if none found.
+
+    """
+    if sys.platform == 'win32':
+        locales = ['FRENCH']
+    else:
+        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+    old_locale = locale.getlocale(locale.LC_NUMERIC)
+    new_locale = None
+    try:
+        for loc in locales:
+            try:
+                locale.setlocale(locale.LC_NUMERIC, loc)
+                new_locale = loc
+                break
+            except locale.Error:
+                pass
+    finally:
+        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+    return old_locale, new_locale
+
+
+class CommaDecimalPointLocale:
+    """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+    Classes derived from this class have setup and teardown methods that run
+    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+    the decimal point instead of periods ('.'). On exit the locale is restored
+    to the initial locale. It also serves as context manager with the same
+    effect. If no such locale is available, the test is skipped.
+
+    .. versionadded:: 1.15.0
+
+    """
+    (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+    def setup_method(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def teardown_method(self):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+    def __enter__(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def __exit__(self, type, value, traceback):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
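Note (not part of the diff): as the class docstring says, `CommaDecimalPointLocale` is used both as a test base class and as a context manager; `test_longdouble.py` further down uses the base-class form. A hypothetical test using it might look like:

# Hypothetical example built on the helper above; skipped automatically when no
# comma-decimal locale is installed.
import locale
from numpy.core.tests._locales import CommaDecimalPointLocale

class TestWithCommaLocale(CommaDecimalPointLocale):
    def test_localeconv_decimal_point(self):
        # setup_method has switched LC_NUMERIC to a comma-decimal locale.
        assert locale.localeconv()['decimal_point'] == ','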
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_conversion_utils.py
ADDED
@@ -0,0 +1,208 @@
+"""
+Tests for numpy/core/src/multiarray/conversion_utils.c
+"""
+import re
+import sys
+
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+from numpy.testing import assert_warns, IS_PYPY
+
+
+class StringConverterTestCase:
+    allow_bytes = True
+    case_insensitive = True
+    exact_match = False
+    warn = True
+
+    def _check_value_error(self, val):
+        pattern = r'\(got {}\)'.format(re.escape(repr(val)))
+        with pytest.raises(ValueError, match=pattern) as exc:
+            self.conv(val)
+
+    def _check_conv_assert_warn(self, val, expected):
+        if self.warn:
+            with assert_warns(DeprecationWarning) as exc:
+                assert self.conv(val) == expected
+        else:
+            assert self.conv(val) == expected
+
+    def _check(self, val, expected):
+        """Takes valid non-deprecated inputs for converters,
+        runs converters on inputs, checks correctness of outputs,
+        warnings and errors"""
+        assert self.conv(val) == expected
+
+        if self.allow_bytes:
+            assert self.conv(val.encode('ascii')) == expected
+        else:
+            with pytest.raises(TypeError):
+                self.conv(val.encode('ascii'))
+
+        if len(val) != 1:
+            if self.exact_match:
+                self._check_value_error(val[:1])
+                self._check_value_error(val + '\0')
+            else:
+                self._check_conv_assert_warn(val[:1], expected)
+
+        if self.case_insensitive:
+            if val != val.lower():
+                self._check_conv_assert_warn(val.lower(), expected)
+            if val != val.upper():
+                self._check_conv_assert_warn(val.upper(), expected)
+        else:
+            if val != val.lower():
+                self._check_value_error(val.lower())
+            if val != val.upper():
+                self._check_value_error(val.upper())
+
+    def test_wrong_type(self):
+        # common cases which apply to all the below
+        with pytest.raises(TypeError):
+            self.conv({})
+        with pytest.raises(TypeError):
+            self.conv([])
+
+    def test_wrong_value(self):
+        # nonsense strings
+        self._check_value_error('')
+        self._check_value_error('\N{greek small letter pi}')
+
+        if self.allow_bytes:
+            self._check_value_error(b'')
+            # bytes which can't be converted to strings via utf8
+            self._check_value_error(b"\xFF")
+        if self.exact_match:
+            self._check_value_error("there's no way this is supported")
+
+
+class TestByteorderConverter(StringConverterTestCase):
+    """ Tests of PyArray_ByteorderConverter """
+    conv = mt.run_byteorder_converter
+    warn = False
+
+    def test_valid(self):
+        for s in ['big', '>']:
+            self._check(s, 'NPY_BIG')
+        for s in ['little', '<']:
+            self._check(s, 'NPY_LITTLE')
+        for s in ['native', '=']:
+            self._check(s, 'NPY_NATIVE')
+        for s in ['ignore', '|']:
+            self._check(s, 'NPY_IGNORE')
+        for s in ['swap']:
+            self._check(s, 'NPY_SWAP')
+
+
+class TestSortkindConverter(StringConverterTestCase):
+    """ Tests of PyArray_SortkindConverter """
+    conv = mt.run_sortkind_converter
+    warn = False
+
+    def test_valid(self):
+        self._check('quicksort', 'NPY_QUICKSORT')
+        self._check('heapsort', 'NPY_HEAPSORT')
+        self._check('mergesort', 'NPY_STABLESORT')  # alias
+        self._check('stable', 'NPY_STABLESORT')
+
+
+class TestSelectkindConverter(StringConverterTestCase):
+    """ Tests of PyArray_SelectkindConverter """
+    conv = mt.run_selectkind_converter
+    case_insensitive = False
+    exact_match = True
+
+    def test_valid(self):
+        self._check('introselect', 'NPY_INTROSELECT')
+
+
+class TestSearchsideConverter(StringConverterTestCase):
+    """ Tests of PyArray_SearchsideConverter """
+    conv = mt.run_searchside_converter
+    def test_valid(self):
+        self._check('left', 'NPY_SEARCHLEFT')
+        self._check('right', 'NPY_SEARCHRIGHT')
+
+
+class TestOrderConverter(StringConverterTestCase):
+    """ Tests of PyArray_OrderConverter """
+    conv = mt.run_order_converter
+    warn = False
+
+    def test_valid(self):
+        self._check('c', 'NPY_CORDER')
+        self._check('f', 'NPY_FORTRANORDER')
+        self._check('a', 'NPY_ANYORDER')
+        self._check('k', 'NPY_KEEPORDER')
+
+    def test_flatten_invalid_order(self):
+        # invalid after gh-14596
+        with pytest.raises(ValueError):
+            self.conv('Z')
+        for order in [False, True, 0, 8]:
+            with pytest.raises(TypeError):
+                self.conv(order)
+
+
+class TestClipmodeConverter(StringConverterTestCase):
+    """ Tests of PyArray_ClipmodeConverter """
+    conv = mt.run_clipmode_converter
+    def test_valid(self):
+        self._check('clip', 'NPY_CLIP')
+        self._check('wrap', 'NPY_WRAP')
+        self._check('raise', 'NPY_RAISE')
+
+        # integer values allowed here
+        assert self.conv(np.CLIP) == 'NPY_CLIP'
+        assert self.conv(np.WRAP) == 'NPY_WRAP'
+        assert self.conv(np.RAISE) == 'NPY_RAISE'
+
+
+class TestCastingConverter(StringConverterTestCase):
+    """ Tests of PyArray_CastingConverter """
+    conv = mt.run_casting_converter
+    case_insensitive = False
+    exact_match = True
+
+    def test_valid(self):
+        self._check("no", "NPY_NO_CASTING")
+        self._check("equiv", "NPY_EQUIV_CASTING")
+        self._check("safe", "NPY_SAFE_CASTING")
+        self._check("same_kind", "NPY_SAME_KIND_CASTING")
+        self._check("unsafe", "NPY_UNSAFE_CASTING")
+
+
+class TestIntpConverter:
+    """ Tests of PyArray_IntpConverter """
+    conv = mt.run_intp_converter
+
+    def test_basic(self):
+        assert self.conv(1) == (1,)
+        assert self.conv((1, 2)) == (1, 2)
+        assert self.conv([1, 2]) == (1, 2)
+        assert self.conv(()) == ()
+
+    def test_none(self):
+        # once the warning expires, this will raise TypeError
+        with pytest.warns(DeprecationWarning):
+            assert self.conv(None) == ()
+
+    @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+                        reason="PyPy bug in error formatting")
+    def test_float(self):
+        with pytest.raises(TypeError):
+            self.conv(1.0)
+        with pytest.raises(TypeError):
+            self.conv([1, 1.0])
+
+    def test_too_large(self):
+        with pytest.raises(ValueError):
+            self.conv(2**64)
+
+    def test_too_many_dims(self):
+        assert self.conv([1]*32) == (1,)*32
+        with pytest.raises(ValueError):
+            self.conv([1]*33)
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cpu_dispatcher.py
ADDED
@@ -0,0 +1,43 @@
+from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+from numpy.core import _umath_tests
+from numpy.testing import assert_equal
+
+def test_dispatcher():
+    """
+    Testing the utilities of the CPU dispatcher
+    """
+    targets = (
+        "SSE2", "SSE41", "AVX2",
+        "VSX", "VSX2", "VSX3",
+        "NEON", "ASIMD", "ASIMDHP",
+        "VX", "VXE"
+    )
+    highest_sfx = "" # no suffix for the baseline
+    all_sfx = []
+    for feature in reversed(targets):
+        # skip baseline features, by the default `CCompilerOpt` do not generate separated objects
+        # for the baseline, just one object combined all of them via 'baseline' option
+        # within the configuration statements.
+        if feature in __cpu_baseline__:
+            continue
+        # check compiler and running machine support
+        if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
+            continue
+
+        if not highest_sfx:
+            highest_sfx = "_" + feature
+        all_sfx.append("func" + "_" + feature)
+
+    test = _umath_tests.test_dispatch()
+    assert_equal(test["func"], "func" + highest_sfx)
+    assert_equal(test["var"], "var" + highest_sfx)
+
+    if highest_sfx:
+        assert_equal(test["func_xb"], "func" + highest_sfx)
+        assert_equal(test["var_xb"], "var" + highest_sfx)
+    else:
+        assert_equal(test["func_xb"], "nobase")
+        assert_equal(test["var_xb"], "nobase")
+
+    all_sfx.append("func") # add the baseline
+    assert_equal(test["all"], all_sfx)
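Note (not part of the diff): the dispatch metadata this test relies on can also be inspected directly; an illustrative snippet:

# Illustrative: show the compiled baseline and which dispatched targets this CPU enables.
from numpy.core._multiarray_umath import (
    __cpu_baseline__, __cpu_dispatch__, __cpu_features__)

print("compiled baseline:", __cpu_baseline__)
print("runtime-enabled dispatch targets:",
      [f for f in __cpu_dispatch__ if __cpu_features__[f]])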
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_cython.py
ADDED
@@ -0,0 +1,135 @@
+import os
+import shutil
+import subprocess
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+# This import is copied from random.tests.test_extending
+try:
+    import cython
+    from Cython.Compiler.Version import version as cython_version
+except ImportError:
+    cython = None
+else:
+    from numpy._utils import _pep440
+
+    # Cython 0.29.30 is required for Python 3.11 and there are
+    # other fixes in the 0.29 series that are needed even for earlier
+    # Python versions.
+    # Note: keep in sync with the one in pyproject.toml
+    required_version = "0.29.30"
+    if _pep440.parse(cython_version) < _pep440.Version(required_version):
+        # too old or wrong cython, skip the test
+        cython = None
+
+pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
+
+
+@pytest.fixture(scope='module')
+def install_temp(tmpdir_factory):
+    # Based in part on test_cython from random.tests.test_extending
+    if IS_WASM:
+        pytest.skip("No subprocess")
+
+    srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython')
+    build_dir = tmpdir_factory.mktemp("cython_test") / "build"
+    os.makedirs(build_dir, exist_ok=True)
+    try:
+        subprocess.check_call(["meson", "--version"])
+    except FileNotFoundError:
+        pytest.skip("No usable 'meson' found")
+    if sys.platform == "win32":
+        subprocess.check_call(["meson", "setup",
+                               "--buildtype=release",
+                               "--vsenv", str(srcdir)],
+                              cwd=build_dir,
+                              )
+    else:
+        subprocess.check_call(["meson", "setup", str(srcdir)],
+                              cwd=build_dir
+                              )
+    subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir)
+
+    sys.path.append(str(build_dir))
+
+def test_is_timedelta64_object(install_temp):
+    import checks
+
+    assert checks.is_td64(np.timedelta64(1234))
+    assert checks.is_td64(np.timedelta64(1234, "ns"))
+    assert checks.is_td64(np.timedelta64("NaT", "ns"))
+
+    assert not checks.is_td64(1)
+    assert not checks.is_td64(None)
+    assert not checks.is_td64("foo")
+    assert not checks.is_td64(np.datetime64("now", "s"))
+
+
+def test_is_datetime64_object(install_temp):
+    import checks
+
+    assert checks.is_dt64(np.datetime64(1234, "ns"))
+    assert checks.is_dt64(np.datetime64("NaT", "ns"))
+
+    assert not checks.is_dt64(1)
+    assert not checks.is_dt64(None)
+    assert not checks.is_dt64("foo")
+    assert not checks.is_dt64(np.timedelta64(1234))
+
+
+def test_get_datetime64_value(install_temp):
+    import checks
+
+    dt64 = np.datetime64("2016-01-01", "ns")
+
+    result = checks.get_dt64_value(dt64)
+    expected = dt64.view("i8")
+
+    assert result == expected
+
+
+def test_get_timedelta64_value(install_temp):
+    import checks
+
+    td64 = np.timedelta64(12345, "h")
+
+    result = checks.get_td64_value(td64)
+    expected = td64.view("i8")
+
+    assert result == expected
+
+
+def test_get_datetime64_unit(install_temp):
+    import checks
+
+    dt64 = np.datetime64("2016-01-01", "ns")
+    result = checks.get_dt64_unit(dt64)
+    expected = 10
+    assert result == expected
+
+    td64 = np.timedelta64(12345, "h")
+    result = checks.get_dt64_unit(td64)
+    expected = 5
+    assert result == expected
+
+
+def test_abstract_scalars(install_temp):
+    import checks
+
+    assert checks.is_integer(1)
+    assert checks.is_integer(np.int8(1))
+    assert checks.is_integer(np.uint64(1))
+
+def test_conv_intp(install_temp):
+    import checks
+
+    class myint:
+        def __int__(self):
+            return 3
+
+    # These conversion passes via `__int__`, not `__index__`:
+    assert checks.conv_intp(3.) == 3
+    assert checks.conv_intp(myint()) == 3
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_errstate.py
ADDED
@@ -0,0 +1,61 @@
+import pytest
+import sysconfig
+
+import numpy as np
+from numpy.testing import assert_, assert_raises, IS_WASM
+
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
+
+class TestErrstate:
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-413,-15562)')
+    def test_invalid(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(invalid='ignore'):
+                np.sqrt(a)
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                np.sqrt(a)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-15562)')
+    def test_divide(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(divide='ignore'):
+                a // 0
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                a // 0
+            # As should this, see gh-15562
+            with assert_raises(FloatingPointError):
+                a // a
+
+    def test_errcall(self):
+        def foo(*args):
+            print(args)
+
+        olderrcall = np.geterrcall()
+        with np.errstate(call=foo):
+            assert_(np.geterrcall() is foo, 'call is not foo')
+            with np.errstate(call=None):
+                assert_(np.geterrcall() is None, 'call is not None')
+        assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
+
+    def test_errstate_decorator(self):
+        @np.errstate(all='ignore')
+        def foo():
+            a = -np.arange(3)
+            a // 0
+
+        foo()
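Note (not part of the diff): an illustrative, non-test use of the same `np.errstate` machinery exercised above, as both a context manager and a decorator:

# Illustrative sketch; mirrors the patterns in test_errstate.py.
import numpy as np

with np.errstate(divide='ignore', invalid='ignore'):
    r = np.array([1.0, 0.0]) / 0.0   # warnings suppressed inside the block
print(r)                              # [inf nan]

@np.errstate(over='raise')
def will_overflow():
    return np.float32(1e38) * np.float32(10)

try:
    will_overflow()
except FloatingPointError as exc:
    print("caught:", exc)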
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_limited_api.py
ADDED
@@ -0,0 +1,44 @@
+import os
+import shutil
+import subprocess
+import sys
+import sysconfig
+import pytest
+
+from numpy.testing import IS_WASM
+
+
+@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
+@pytest.mark.xfail(
+    sysconfig.get_config_var("Py_DEBUG"),
+    reason=(
+        "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, "
+        "and Py_REF_DEBUG"
+    ),
+)
+def test_limited_api(tmp_path):
+    """Test building a third-party C extension with the limited API."""
+    # Based in part on test_cython from random.tests.test_extending
+
+    here = os.path.dirname(__file__)
+    ext_dir = os.path.join(here, "examples", "limited_api")
+
+    cytest = str(tmp_path / "limited_api")
+
+    shutil.copytree(ext_dir, cytest)
+    # build the examples and "install" them into a temporary directory
+
+    install_log = str(tmp_path / "tmp_install_log.txt")
+    subprocess.check_output(
+        [
+            sys.executable,
+            "setup.py",
+            "build",
+            "install",
+            "--prefix", str(tmp_path / "installdir"),
+            "--single-version-externally-managed",
+            "--record",
+            install_log,
+        ],
+        cwd=cytest,
+    )
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_longdouble.py
ADDED
@@ -0,0 +1,395 @@
+import warnings
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+    temppath, IS_MUSL
+    )
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+LD_INFO = np.finfo(np.longdouble)
+longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
+
+
+_o = 1 + LD_INFO.eps
+string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
+del _o
+
+
+def test_scalar_extraction():
+    """Confirm that extracting a value doesn't convert to python float"""
+    o = 1 + LD_INFO.eps
+    a = np.array([o, o, o])
+    assert_equal(a[1], o)
+
+
+# Conversions string -> long double
+
+# 0.1 not exactly representable in base 2 floating point.
+repr_precision = len(repr(np.longdouble(0.1)))
+# +2 from macro block starting around line 842 in scalartypes.c.src.
+
+
+@pytest.mark.skipif(IS_MUSL,
+                    reason="test flaky on musllinux")
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+                    reason="repr precision not enough to show eps")
+def test_repr_roundtrip():
+    # We will only see eps in repr if within printing precision.
+    o = 1 + LD_INFO.eps
+    assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_repr_roundtrip_bytes():
+    o = 1 + LD_INFO.eps
+    assert_equal(np.longdouble(repr(o).encode("ascii")), o)
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
+def test_array_and_stringlike_roundtrip(strtype):
+    """
+    Test that string representations of long-double roundtrip both
+    for array casting and scalar coercion, see also gh-15608.
+    """
+    o = 1 + LD_INFO.eps
+
+    if strtype in (np.bytes_, bytes):
+        o_str = strtype(repr(o).encode("ascii"))
+    else:
+        o_str = strtype(repr(o))
+
+    # Test that `o` is correctly coerced from the string-like
+    assert o == np.longdouble(o_str)
+
+    # Test that arrays also roundtrip correctly:
+    o_strarr = np.asarray([o] * 3, dtype=strtype)
+    assert (o == o_strarr.astype(np.longdouble)).all()
+
+    # And array coercion and casting to string give the same as scalar repr:
+    assert (o_strarr == o_str).all()
+    assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
+
+
+def test_bogus_string():
+    assert_raises(ValueError, np.longdouble, "spam")
+    assert_raises(ValueError, np.longdouble, "1.0 flub")
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_fromstring():
+    o = 1 + LD_INFO.eps
+    s = (" " + repr(o))*5
+    a = np.array([o]*5)
+    assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
+                 err_msg="reading '%s'" % s)
+
+
+def test_fromstring_complex():
+    for ctype in ["complex", "cdouble", "cfloat"]:
+        # Check spacing between separator
+        assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
+                     np.array([1., 2., 3., 4.]))
+        # Real component not specified
+        assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
+                     np.array([1.j, -2.j, 3.j, 40.j]))
+        # Both components specified
+        assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
+                     np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+        # Spaces at wrong places
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+                         np.array([1j]))
+
+
+def test_fromstring_bogus():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+                     np.array([1., 2., 3.]))
+
+
+def test_fromstring_empty():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("xxxxx", sep="x"),
+                     np.array([]))
+
+
+def test_fromstring_missing():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+                     np.array([1]))
+
+
+class TestFileBased:
+
+    ldbl = 1 + LD_INFO.eps
+    tgt = np.array([ldbl]*5)
+    out = ''.join([repr(t) + '\n' for t in tgt])
+
+    def test_fromfile_bogus(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write("1. 2. 3. flop 4.\n")
+
+            with assert_warns(DeprecationWarning):
+                res = np.fromfile(path, dtype=float, sep=" ")
+            assert_equal(res, np.array([1., 2., 3.]))
+
+    def test_fromfile_complex(self):
+        for ctype in ["complex", "cdouble", "cfloat"]:
+            # Check spacing between separator and only real component specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1, 2 , 3 ,4\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1., 2., 3., 4.]))
+
+            # Real component not specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1j, -2j, 3j, 4e1j\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+            # Both components specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+2 j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+ 2j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1 +2j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+j\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1j+1\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+                assert_equal(res, np.array([1.j]))
+
+
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_fromfile(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.fromfile(path, dtype=np.longdouble, sep="\n")
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_genfromtxt(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.genfromtxt(path, dtype=np.longdouble)
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_loadtxt(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.loadtxt(path, dtype=np.longdouble)
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_tofile_roundtrip(self):
+        with temppath() as path:
+            self.tgt.tofile(path, sep=" ")
+            res = np.fromfile(path, dtype=np.longdouble, sep=" ")
+        assert_equal(res, self.tgt)
+
+
+# Conversions long double -> string
+
+
+def test_repr_exact():
+    o = 1 + LD_INFO.eps
+    assert_(repr(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_format():
+    o = 1 + LD_INFO.eps
+    assert_("{0:.40g}".format(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_percent():
+    o = 1 + LD_INFO.eps
+    assert_("%.40g" % o != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double,
+                    reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_array_repr():
+    o = 1 + LD_INFO.eps
+    a = np.array([o])
+    b = np.array([1], dtype=np.longdouble)
+    if not np.all(a != b):
+        raise ValueError("precision loss creating arrays")
+    assert_(repr(a) != repr(b))
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_repr_roundtrip_foreign(self):
+        o = 1.5
+        assert_equal(o, np.longdouble(repr(o)))
+
+    def test_fromstring_foreign_repr(self):
+        f = 1.234
+        a = np.fromstring(repr(f), dtype=float, sep=" ")
+        assert_equal(a[0], f)
+
+    def test_fromstring_best_effort_float(self):
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+                         np.array([1.]))
+
+    def test_fromstring_best_effort(self):
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+                         np.array([1.]))
+
+    def test_fromstring_foreign(self):
+        s = "1.234"
+        a = np.fromstring(s, dtype=np.longdouble, sep=" ")
+        assert_equal(a[0], np.longdouble(s))
+
+    def test_fromstring_foreign_sep(self):
+        a = np.array([1, 2, 3, 4])
+        b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
+        assert_array_equal(a, b)
+
+    def test_fromstring_foreign_value(self):
+        with assert_warns(DeprecationWarning):
+            b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+            assert_array_equal(b[0], 1)
+
+
+@pytest.mark.parametrize("int_val", [
+    # cases discussed in gh-10723
+    # and gh-9968
+    2 ** 1024, 0])
+def test_longdouble_from_int(int_val):
+    # for issue gh-9968
+    str_val = str(int_val)
+    # we'll expect a RuntimeWarning on platforms
+    # with np.longdouble equivalent to np.double
+    # for large integer input
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', '', RuntimeWarning)
+        # can be inf==inf on some platforms
+        assert np.longdouble(int_val) == np.longdouble(str_val)
+        # we can't directly compare the int and
+        # max longdouble value on all platforms
+        if np.allclose(np.finfo(np.longdouble).max,
+                       np.finfo(np.double).max) and w:
+            assert w[0].category is RuntimeWarning
+
+@pytest.mark.parametrize("bool_val", [
+    True, False])
+def test_longdouble_from_bool(bool_val):
+    assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
+
+
+@pytest.mark.skipif(
+    not (IS_MUSL and platform.machine() == "x86_64"),
+    reason="only need to run on musllinux_x86_64"
+)
+def test_musllinux_x86_64_signature():
+    # this test may fail if you're emulating musllinux_x86_64 on a different
+    # architecture, but should pass natively.
+    known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf']
+    sig = (np.longdouble(-1.0) / np.longdouble(10.0)
+           ).newbyteorder('<').tobytes()[:10]
+    assert sig in known_sigs
+
+
+def test_eps_positive():
+    # np.finfo('g').eps should be positive on all platforms. If this isn't true
|
393 |
+
# then something may have gone wrong with the MachArLike, e.g. if
|
394 |
+
# np.core.getlimits._discovered_machar didn't work properly
|
395 |
+
assert np.finfo(np.longdouble).eps > 0.
|
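
A minimal sketch of the property the longdouble tests above exercise, assuming a platform where np.longdouble carries more precision than np.double (elsewhere the skip markers above apply):

    import numpy as np

    eps = np.finfo(np.longdouble).eps      # expected to be strictly positive
    x = 1 + eps
    # The extra precision should show up when printing: the value should not
    # collapse to plain '1' in repr() or in a wide-format string.
    print(repr(x), "{0:.40g}".format(x))
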
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_print.py
ADDED
@@ -0,0 +1,202 @@
import sys

import pytest

import numpy as np
from numpy.testing import assert_, assert_equal, IS_MUSL
from numpy.core.tests._locales import CommaDecimalPointLocale


from io import StringIO

_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}


@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_types(tp):
    """ Check formatting.

        This is only for the str function, and only for simple types.
        The precision of np.float32 and np.longdouble aren't the same as the
        python float precision.

    """
    for x in [0, 1, -1, 1e20]:
        assert_equal(str(tp(x)), str(float(x)),
                     err_msg='Failed str formatting for type %s' % tp)

    if tp(1e16).itemsize > 4:
        assert_equal(str(tp(1e16)), str(float('1e16')),
                     err_msg='Failed str formatting for type %s' % tp)
    else:
        ref = '1e+16'
        assert_equal(str(tp(1e16)), ref,
                     err_msg='Failed str formatting for type %s' % tp)


@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_nan_inf_float(tp):
    """ Check formatting of nan & inf.

        This is only for the str function, and only for simple types.
        The precision of np.float32 and np.longdouble aren't the same as the
        python float precision.

    """
    for x in [np.inf, -np.inf, np.nan]:
        assert_equal(str(tp(x)), _REF[x],
                     err_msg='Failed str formatting for type %s' % tp)


@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_types(tp):
    """Check formatting of complex types.

        This is only for the str function, and only for simple types.
        The precision of np.float32 and np.longdouble aren't the same as the
        python float precision.

    """
    for x in [0, 1, -1, 1e20]:
        assert_equal(str(tp(x)), str(complex(x)),
                     err_msg='Failed str formatting for type %s' % tp)
        assert_equal(str(tp(x*1j)), str(complex(x*1j)),
                     err_msg='Failed str formatting for type %s' % tp)
        assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
                     err_msg='Failed str formatting for type %s' % tp)

    if tp(1e16).itemsize > 8:
        assert_equal(str(tp(1e16)), str(complex(1e16)),
                     err_msg='Failed str formatting for type %s' % tp)
    else:
        ref = '(1e+16+0j)'
        assert_equal(str(tp(1e16)), ref,
                     err_msg='Failed str formatting for type %s' % tp)


@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_inf_nan(dtype):
    """Check inf/nan formatting of complex types."""
    TESTS = {
        complex(np.inf, 0): "(inf+0j)",
        complex(0, np.inf): "infj",
        complex(-np.inf, 0): "(-inf+0j)",
        complex(0, -np.inf): "-infj",
        complex(np.inf, 1): "(inf+1j)",
        complex(1, np.inf): "(1+infj)",
        complex(-np.inf, 1): "(-inf+1j)",
        complex(1, -np.inf): "(1-infj)",
        complex(np.nan, 0): "(nan+0j)",
        complex(0, np.nan): "nanj",
        complex(-np.nan, 0): "(nan+0j)",
        complex(0, -np.nan): "nanj",
        complex(np.nan, 1): "(nan+1j)",
        complex(1, np.nan): "(1+nanj)",
        complex(-np.nan, 1): "(nan+1j)",
        complex(1, -np.nan): "(1+nanj)",
    }
    for c, s in TESTS.items():
        assert_equal(str(dtype(c)), s)


# print tests
def _test_redirected_print(x, tp, ref=None):
    file = StringIO()
    file_tp = StringIO()
    stdout = sys.stdout
    try:
        sys.stdout = file_tp
        print(tp(x))
        sys.stdout = file
        if ref:
            print(ref)
        else:
            print(x)
    finally:
        sys.stdout = stdout

    assert_equal(file.getvalue(), file_tp.getvalue(),
                 err_msg='print failed for type%s' % tp)


@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_type_print(tp):
    """Check formatting when using print """
    for x in [0, 1, -1, 1e20]:
        _test_redirected_print(float(x), tp)

    for x in [np.inf, -np.inf, np.nan]:
        _test_redirected_print(float(x), tp, _REF[x])

    if tp(1e16).itemsize > 4:
        _test_redirected_print(float(1e16), tp)
    else:
        ref = '1e+16'
        _test_redirected_print(float(1e16), tp, ref)


@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_type_print(tp):
    """Check formatting when using print """
    # We do not create complex with inf/nan directly because the feature is
    # missing in python < 2.6
    for x in [0, 1, -1, 1e20]:
        _test_redirected_print(complex(x), tp)

    if tp(1e16).itemsize > 8:
        _test_redirected_print(complex(1e16), tp)
    else:
        ref = '(1e+16+0j)'
        _test_redirected_print(complex(1e16), tp, ref)

    _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
    _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
    _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')


def test_scalar_format():
    """Test the str.format method with NumPy scalar types"""
    tests = [('{0}', True, np.bool_),
             ('{0}', False, np.bool_),
             ('{0:d}', 130, np.uint8),
             ('{0:d}', 50000, np.uint16),
             ('{0:d}', 3000000000, np.uint32),
             ('{0:d}', 15000000000000000000, np.uint64),
             ('{0:d}', -120, np.int8),
             ('{0:d}', -30000, np.int16),
             ('{0:d}', -2000000000, np.int32),
             ('{0:d}', -7000000000000000000, np.int64),
             ('{0:g}', 1.5, np.float16),
             ('{0:g}', 1.5, np.float32),
             ('{0:g}', 1.5, np.float64),
             ('{0:g}', 1.5, np.longdouble),
             ('{0:g}', 1.5+0.5j, np.complex64),
             ('{0:g}', 1.5+0.5j, np.complex128),
             ('{0:g}', 1.5+0.5j, np.clongdouble)]

    for (fmat, val, valtype) in tests:
        try:
            assert_equal(fmat.format(val), fmat.format(valtype(val)),
                         "failed with val %s, type %s" % (val, valtype))
        except ValueError as e:
            assert_(False,
               "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
               (fmat, repr(val), repr(valtype), str(e)))


#
# Locale tests: scalar types formatting should be independent of the locale
#

class TestCommaDecimalPointLocale(CommaDecimalPointLocale):

    def test_locale_single(self):
        assert_equal(str(np.float32(1.2)), str(float(1.2)))

    def test_locale_double(self):
        assert_equal(str(np.double(1.2)), str(float(1.2)))

    @pytest.mark.skipif(IS_MUSL,
                        reason="test flaky on musllinux")
    def test_locale_longdouble(self):
        assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
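
The core formatting rule checked throughout test_print.py can be summarized in a short illustrative sketch: str() of a double-precision NumPy scalar is expected to match the builtin float's str().

    import numpy as np

    for value in [0.0, 1.0, -1.0, 1e20, np.inf, -np.inf, np.nan]:
        # np.double has the same precision as Python's float, so their
        # string forms should agree exactly.
        assert str(np.double(value)) == str(float(value))
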
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_scalarinherit.py
ADDED
@@ -0,0 +1,98 @@
""" Test printing of scalar types.

"""
import pytest

import numpy as np
from numpy.testing import assert_, assert_raises


class A:
    pass
class B(A, np.float64):
    pass

class C(B):
    pass
class D(C, B):
    pass

class B0(np.float64, A):
    pass
class C0(B0):
    pass

class HasNew:
    def __new__(cls, *args, **kwargs):
        return cls, args, kwargs

class B1(np.float64, HasNew):
    pass


class TestInherit:
    def test_init(self):
        x = B(1.0)
        assert_(str(x) == '1.0')
        y = C(2.0)
        assert_(str(y) == '2.0')
        z = D(3.0)
        assert_(str(z) == '3.0')

    def test_init2(self):
        x = B0(1.0)
        assert_(str(x) == '1.0')
        y = C0(2.0)
        assert_(str(y) == '2.0')

    def test_gh_15395(self):
        # HasNew is the second base, so `np.float64` should have priority
        x = B1(1.0)
        assert_(str(x) == '1.0')

        # previously caused RecursionError!?
        with pytest.raises(TypeError):
            B1(1.0, 2.0)


class TestCharacter:
    def test_char_radd(self):
        # GH issue 9620, reached gentype_add and raise TypeError
        np_s = np.bytes_('abc')
        np_u = np.str_('abc')
        s = b'def'
        u = 'def'
        assert_(np_s.__radd__(np_s) is NotImplemented)
        assert_(np_s.__radd__(np_u) is NotImplemented)
        assert_(np_s.__radd__(s) is NotImplemented)
        assert_(np_s.__radd__(u) is NotImplemented)
        assert_(np_u.__radd__(np_s) is NotImplemented)
        assert_(np_u.__radd__(np_u) is NotImplemented)
        assert_(np_u.__radd__(s) is NotImplemented)
        assert_(np_u.__radd__(u) is NotImplemented)
        assert_(s + np_s == b'defabc')
        assert_(u + np_u == 'defabc')

        class MyStr(str, np.generic):
            # would segfault
            pass

        with assert_raises(TypeError):
            # Previously worked, but gave completely wrong result
            ret = s + MyStr('abc')

        class MyBytes(bytes, np.generic):
            # would segfault
            pass

        ret = s + MyBytes(b'abc')
        assert(type(ret) is type(s))
        assert ret == b"defabc"

    def test_char_repeat(self):
        np_s = np.bytes_('abc')
        np_u = np.str_('abc')
        res_s = b'abc' * 5
        res_u = 'abc' * 5
        assert_(np_s * 5 == res_s)
        assert_(np_u * 5 == res_u)
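
A condensed sketch of the inheritance pattern these tests cover, using a hypothetical subclass name:

    import numpy as np

    class Quantity(np.float64):   # hypothetical name; mirrors class B above
        pass

    q = Quantity(1.5)
    # Subclasses of np.float64 construct and print like the scalar itself.
    assert isinstance(q, np.float64) and str(q) == '1.5'
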
env-llmeval/lib/python3.10/site-packages/numpy/core/tests/test_shape_base.py
ADDED
@@ -0,0 +1,825 @@
import pytest
import numpy as np
from numpy.core import (
    array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
    newaxis, concatenate, stack
    )
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
                                    _block_concatenate, _block_slicing)
from numpy.testing import (
    assert_, assert_raises, assert_array_equal, assert_equal,
    assert_raises_regex, assert_warns, IS_PYPY
    )


class TestAtleast1d:
    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = [atleast_1d(a), atleast_1d(b)]
        desired = [array([1]), array([2])]
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = array([1, 2])
        b = array([2, 3])
        res = [atleast_1d(a), atleast_1d(b)]
        desired = [array([1, 2]), array([2, 3])]
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        res = [atleast_1d(a), atleast_1d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)

    def test_3D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        a = array([a, a])
        b = array([b, b])
        res = [atleast_1d(a), atleast_1d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)

    def test_r1array(self):
        """ Test to make sure equivalent Travis O's r1array function
        """
        assert_(atleast_1d(3).shape == (1,))
        assert_(atleast_1d(3j).shape == (1,))
        assert_(atleast_1d(3.0).shape == (1,))
        assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))


class TestAtleast2d:
    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = [atleast_2d(a), atleast_2d(b)]
        desired = [array([[1]]), array([[2]])]
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = array([1, 2])
        b = array([2, 3])
        res = [atleast_2d(a), atleast_2d(b)]
        desired = [array([[1, 2]]), array([[2, 3]])]
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        res = [atleast_2d(a), atleast_2d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)

    def test_3D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        a = array([a, a])
        b = array([b, b])
        res = [atleast_2d(a), atleast_2d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)

    def test_r2array(self):
        """ Test to make sure equivalent Travis O's r2array function
        """
        assert_(atleast_2d(3).shape == (1, 1))
        assert_(atleast_2d([3j, 1]).shape == (1, 2))
        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))


class TestAtleast3d:
    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [array([[[1]]]), array([[[2]]])]
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = array([1, 2])
        b = array([2, 3])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [array([[[1], [2]]]), array([[[2], [3]]])]
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [a[:,:, newaxis], b[:,:, newaxis]]
        assert_array_equal(res, desired)

    def test_3D_array(self):
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        a = array([a, a])
        b = array([b, b])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)


class TestHstack:
    def test_non_iterable(self):
        assert_raises(TypeError, hstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, hstack, ())

    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = hstack([a, b])
        desired = array([1, 2])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = array([1])
        b = array([2])
        res = hstack([a, b])
        desired = array([1, 2])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = array([[1], [2]])
        b = array([[1], [2]])
        res = hstack([a, b])
        desired = array([[1, 1], [2, 2]])
        assert_array_equal(res, desired)

    def test_generator(self):
        with pytest.raises(TypeError, match="arrays to stack must be"):
            hstack((np.arange(3) for _ in range(2)))
        with pytest.raises(TypeError, match="arrays to stack must be"):
            hstack(map(lambda x: x, np.ones((3, 2))))

    def test_casting_and_dtype(self):
        a = np.array([1, 2, 3])
        b = np.array([2.5, 3.5, 4.5])
        res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
        expected_res = np.array([1, 2, 3, 2, 3, 4])
        assert_array_equal(res, expected_res)

    def test_casting_and_dtype_type_error(self):
        a = np.array([1, 2, 3])
        b = np.array([2.5, 3.5, 4.5])
        with pytest.raises(TypeError):
            hstack((a, b), casting="safe", dtype=np.int64)


class TestVstack:
    def test_non_iterable(self):
        assert_raises(TypeError, vstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, vstack, ())

    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = vstack([a, b])
        desired = array([[1], [2]])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = array([1])
        b = array([2])
        res = vstack([a, b])
        desired = array([[1], [2]])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = array([[1], [2]])
        b = array([[1], [2]])
        res = vstack([a, b])
        desired = array([[1], [2], [1], [2]])
        assert_array_equal(res, desired)

    def test_2D_array2(self):
        a = array([1, 2])
        b = array([1, 2])
        res = vstack([a, b])
        desired = array([[1, 2], [1, 2]])
        assert_array_equal(res, desired)

    def test_generator(self):
        with pytest.raises(TypeError, match="arrays to stack must be"):
            vstack((np.arange(3) for _ in range(2)))

    def test_casting_and_dtype(self):
        a = np.array([1, 2, 3])
        b = np.array([2.5, 3.5, 4.5])
        res = np.vstack((a, b), casting="unsafe", dtype=np.int64)
        expected_res = np.array([[1, 2, 3], [2, 3, 4]])
        assert_array_equal(res, expected_res)

    def test_casting_and_dtype_type_error(self):
        a = np.array([1, 2, 3])
        b = np.array([2.5, 3.5, 4.5])
        with pytest.raises(TypeError):
            vstack((a, b), casting="safe", dtype=np.int64)



class TestConcatenate:
    def test_returns_copy(self):
        a = np.eye(3)
        b = np.concatenate([a])
        b[0, 0] = 2
        assert b[0, 0] != a[0, 0]

    def test_exceptions(self):
        # test axis must be in bounds
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))

        # Scalars cannot be concatenated
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))

        # dimensionality must match
        assert_raises_regex(
            ValueError,
            r"all the input arrays must have same number of dimensions, but "
            r"the array at index 0 has 1 dimension\(s\) and the array at "
            r"index 1 has 2 dimension\(s\)",
            np.concatenate, (np.zeros(1), np.zeros((1, 1))))

        # test shapes must match except for concatenation axis
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises_regex(
                ValueError,
                "all the input array dimensions except for the concatenation axis "
                "must match exactly, but along dimension {}, the array at "
                "index 0 has size 1 and the array at index 1 has size 2"
                .format(i),
                np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            a = np.moveaxis(a, -1, 0)
            b = np.moveaxis(b, -1, 0)
            axis.append(axis.pop(0))

        # No arrays to concatenate raises ValueError
        assert_raises(ValueError, concatenate, ())

    def test_concatenate_axis_None(self):
        a = np.arange(4, dtype=np.float64).reshape((2, 2))
        b = list(range(3))
        c = ['x']
        r = np.concatenate((a, a), axis=None)
        assert_equal(r.dtype, a.dtype)
        assert_equal(r.ndim, 1)
        r = np.concatenate((a, b), axis=None)
        assert_equal(r.size, a.size + len(b))
        assert_equal(r.dtype, a.dtype)
        r = np.concatenate((a, b, c), axis=None, dtype="U")
        d = array(['0.0', '1.0', '2.0', '3.0',
                   '0', '1', '2', 'x'])
        assert_array_equal(r, d)

        out = np.zeros(a.size + len(b))
        r = np.concatenate((a, b), axis=None)
        rout = np.concatenate((a, b), axis=None, out=out)
        assert_(out is rout)
        assert_equal(r, rout)

    def test_large_concatenate_axis_None(self):
        # When no axis is given, concatenate uses flattened versions.
        # This also had a bug with many arrays (see gh-5979).
        x = np.arange(1, 100)
        r = np.concatenate(x, None)
        assert_array_equal(x, r)

        # This should probably be deprecated:
        r = np.concatenate(x, 100)  # axis is >= MAXDIMS
        assert_array_equal(x, r)

    def test_concatenate(self):
        # Test concatenate function
        # One sequence returns unmodified (but as array)
        r4 = list(range(4))
        assert_array_equal(concatenate((r4,)), r4)
        # Any sequence
        assert_array_equal(concatenate((tuple(r4),)), r4)
        assert_array_equal(concatenate((array(r4),)), r4)
        # 1D default concatenation
        r3 = list(range(3))
        assert_array_equal(concatenate((r4, r3)), r4 + r3)
        # Mixed sequence types
        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
        # Explicit axis specification
        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
        # Including negative
        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
        # 2D
        a23 = array([[10, 11, 12], [13, 14, 15]])
        a13 = array([[0, 1, 2]])
        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
        assert_array_equal(concatenate((a23, a13)), res)
        assert_array_equal(concatenate((a23, a13), 0), res)
        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays much match shape
        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
        # 3D
        res = arange(2 * 3 * 7).reshape((2, 3, 7))
        a0 = res[..., :4]
        a1 = res[..., 4:6]
        a2 = res[..., 6:]
        assert_array_equal(concatenate((a0, a1, a2), 2), res)
        assert_array_equal(concatenate((a0, a1, a2), -1), res)
        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)

        out = res.copy()
        rout = concatenate((a0, a1, a2), 2, out=out)
        assert_(out is rout)
        assert_equal(res, rout)

    @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
    def test_operator_concat(self):
        import operator
        a = array([1, 2])
        b = array([3, 4])
        n = [1,2]
        res = array([1, 2, 3, 4])
        assert_raises(TypeError, operator.concat, a, b)
        assert_raises(TypeError, operator.concat, a, n)
        assert_raises(TypeError, operator.concat, n, a)
        assert_raises(TypeError, operator.concat, a, 1)
        assert_raises(TypeError, operator.concat, 1, a)

    def test_bad_out_shape(self):
        a = array([1, 2])
        b = array([3, 4])

        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
        concatenate((a, b), out=np.empty(4))

    @pytest.mark.parametrize("axis", [None, 0])
    @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
    @pytest.mark.parametrize("casting",
            ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
    def test_out_and_dtype(self, axis, out_dtype, casting):
        # Compare usage of `out=out` with `dtype=out.dtype`
        out = np.empty(4, dtype=out_dtype)
        to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))

        if not np.can_cast(to_concat[0], out_dtype, casting=casting):
            with assert_raises(TypeError):
                concatenate(to_concat, out=out, axis=axis, casting=casting)
            with assert_raises(TypeError):
                concatenate(to_concat, dtype=out.dtype,
                            axis=axis, casting=casting)
        else:
            res_out = concatenate(to_concat, out=out,
                                  axis=axis, casting=casting)
            res_dtype = concatenate(to_concat, dtype=out.dtype,
                                    axis=axis, casting=casting)
            assert res_out is out
            assert_array_equal(out, res_dtype)
            assert res_dtype.dtype == out_dtype

        with assert_raises(TypeError):
            concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)

    @pytest.mark.parametrize("axis", [None, 0])
    @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
    @pytest.mark.parametrize("arrs",
            [([0.],), ([0.], [1]), ([0], ["string"], [1.])])
    def test_dtype_with_promotion(self, arrs, string_dt, axis):
        # Note that U0 and S0 should be deprecated eventually and changed to
        # actually give the empty string result (together with `np.array`)
        res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
        # The actual dtype should be identical to a cast (of a double array):
        assert res.dtype == np.array(1.).astype(string_dt).dtype

    @pytest.mark.parametrize("axis", [None, 0])
    def test_string_dtype_does_not_inspect(self, axis):
        with pytest.raises(TypeError):
            np.concatenate(([None], [1]), dtype="S", axis=axis)
        with pytest.raises(TypeError):
            np.concatenate(([None], [1]), dtype="U", axis=axis)

    @pytest.mark.parametrize("axis", [None, 0])
    def test_subarray_error(self, axis):
        with pytest.raises(TypeError, match=".*subarray dtype"):
            np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)


def test_stack():
    # non-iterable input
    assert_raises(TypeError, stack, 1)

    # 0d input
    for input_ in [(1, 2, 3),
                   [np.int32(1), np.int32(2), np.int32(3)],
                   [np.array(1), np.array(2), np.array(3)]]:
        assert_array_equal(stack(input_), [1, 2, 3])
    # 1d input examples
    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    r1 = array([[1, 2, 3], [4, 5, 6]])
    assert_array_equal(np.stack((a, b)), r1)
    assert_array_equal(np.stack((a, b), axis=1), r1.T)
    # all input types
    assert_array_equal(np.stack(list([a, b])), r1)
    assert_array_equal(np.stack(array([a, b])), r1)
    # all shapes for 1d input
    arrays = [np.random.randn(3) for _ in range(10)]
    axes = [0, 1, -1, -2]
    expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
    # all shapes for 2d input
    arrays = [np.random.randn(3, 4) for _ in range(10)]
    axes = [0, 1, 2, -1, -2, -3]
    expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
                       (3, 4, 10), (3, 10, 4), (10, 3, 4)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    # empty arrays
    assert_(stack([[], [], []]).shape == (3, 0))
    assert_(stack([[], [], []], axis=1).shape == (0, 3))
    # out
    out = np.zeros_like(r1)
    np.stack((a, b), out=out)
    assert_array_equal(out, r1)
    # edge cases
    assert_raises_regex(ValueError, 'need at least one array', stack, [])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [1, np.arange(3)])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(2), np.arange(3)])

    # do not accept generators
    with pytest.raises(TypeError, match="arrays to stack must be"):
        stack((x for x in range(3)))

    #casting and dtype test
    a = np.array([1, 2, 3])
    b = np.array([2.5, 3.5, 4.5])
    res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64)
    expected_res = np.array([[1, 2], [2, 3], [3, 4]])
    assert_array_equal(res, expected_res)
    #casting and dtype with TypeError
    with assert_raises(TypeError):
        stack((a, b), dtype=np.int64, axis=1, casting="safe")


@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
@pytest.mark.parametrize("casting",
                         ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_stack_out_and_dtype(axis, out_dtype, casting):
    to_concat = (array([1, 2]), array([3, 4]))
    res = array([[1, 2], [3, 4]])
    out = np.zeros_like(res)

    if not np.can_cast(to_concat[0], out_dtype, casting=casting):
        with assert_raises(TypeError):
            stack(to_concat, dtype=out_dtype,
                  axis=axis, casting=casting)
    else:
        res_out = stack(to_concat, out=out,
                        axis=axis, casting=casting)
        res_dtype = stack(to_concat, dtype=out_dtype,
                          axis=axis, casting=casting)
        assert res_out is out
        assert_array_equal(out, res_dtype)
        assert res_dtype.dtype == out_dtype

    with assert_raises(TypeError):
        stack(to_concat, out=out, dtype=out_dtype, axis=axis)


class TestBlock:
    @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
    def block(self, request):
        # blocking small arrays and large arrays go through different paths.
        # the algorithm is triggered depending on the number of element
        # copies required.
        # We define a test fixture that forces most tests to go through
        # both code paths.
        # Ultimately, this should be removed if a single algorithm is found
        # to be faster for both small and large arrays.
        def _block_force_concatenate(arrays):
            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
            return _block_concatenate(arrays, list_ndim, result_ndim)

        def _block_force_slicing(arrays):
            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
            return _block_slicing(arrays, list_ndim, result_ndim)

        if request.param == 'force_concatenate':
            return _block_force_concatenate
        elif request.param == 'force_slicing':
            return _block_force_slicing
        elif request.param == 'block':
            return block
        else:
            raise ValueError('Unknown blocking request. There is a typo in the tests.')

    def test_returns_copy(self, block):
        a = np.eye(3)
        b = block(a)
        b[0, 0] = 2
        assert b[0, 0] != a[0, 0]

    def test_block_total_size_estimate(self, block):
        _, _, _, total_size = _block_setup([1])
        assert total_size == 1

        _, _, _, total_size = _block_setup([[1]])
        assert total_size == 1

        _, _, _, total_size = _block_setup([[1, 1]])
        assert total_size == 2

        _, _, _, total_size = _block_setup([[1], [1]])
        assert total_size == 2

        _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
        assert total_size == 4

    def test_block_simple_row_wise(self, block):
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        desired = np.array([[1, 1, 2, 2],
                            [1, 1, 2, 2]])
        result = block([a_2d, b_2d])
        assert_equal(desired, result)

    def test_block_simple_column_wise(self, block):
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2],
                             [2, 2]])
        result = block([[a_2d], [b_2d]])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_row_wise(self, block):
        # # # 1-D vectors are treated as row arrays
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([1, 2, 3, 2, 3, 4])
        result = block([a, b])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_multiple_rows(self, block):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3, 2, 3, 4],
                             [1, 2, 3, 2, 3, 4]])
        result = block([[a, b], [a, b]])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_column_wise(self, block):
        # # # 1-D vectors are treated as row arrays
        a_1d = np.array([1, 2, 3])
        b_1d = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3],
                             [2, 3, 4]])
        result = block([[a_1d], [b_1d]])
        assert_equal(expected, result)

    def test_block_mixed_1d_and_2d(self, block):
        a_2d = np.ones((2, 2))
        b_1d = np.array([2, 2])
        result = block([[a_2d], [b_1d]])
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2]])
        assert_equal(expected, result)

    def test_block_complicated(self, block):
        # a bit more complicated
        one_2d = np.array([[1, 1, 1]])
        two_2d = np.array([[2, 2, 2]])
        three_2d = np.array([[3, 3, 3, 3, 3, 3]])
        four_1d = np.array([4, 4, 4, 4, 4, 4])
        five_0d = np.array(5)
        six_1d = np.array([6, 6, 6, 6, 6])
        zero_2d = np.zeros((2, 6))

        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 3, 3, 3],
                             [4, 4, 4, 4, 4, 4],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])

        result = block([[one_2d, two_2d],
                        [three_2d],
                        [four_1d],
                        [five_0d, six_1d],
                        [zero_2d]])
        assert_equal(result, expected)

    def test_nested(self, block):
        one = np.array([1, 1, 1])
        two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
        three = np.array([3, 3, 3])
        four = np.array([4, 4, 4])
        five = np.array(5)
        six = np.array([6, 6, 6, 6, 6])
        zero = np.zeros((2, 6))

        result = block([
            [
                block([
                   [one],
                   [three],
                   [four]
                ]),
                two
            ],
            [five, six],
            [zero]
        ])
        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 2, 2, 2],
                             [4, 4, 4, 2, 2, 2],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])

        assert_equal(result, expected)

    def test_3d(self, block):
        a000 = np.ones((2, 2, 2), int) * 1

        a100 = np.ones((3, 2, 2), int) * 2
        a010 = np.ones((2, 3, 2), int) * 3
        a001 = np.ones((2, 2, 3), int) * 4

        a011 = np.ones((2, 3, 3), int) * 5
        a101 = np.ones((3, 2, 3), int) * 6
        a110 = np.ones((3, 3, 2), int) * 7

        a111 = np.ones((3, 3, 3), int) * 8

        result = block([
            [
                [a000, a001],
                [a010, a011],
            ],
            [
                [a100, a101],
                [a110, a111],
            ]
        ])
        expected = array([[[1, 1, 4, 4, 4],
                           [1, 1, 4, 4, 4],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5]],

                          [[1, 1, 4, 4, 4],
                           [1, 1, 4, 4, 4],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5],
                           [3, 3, 5, 5, 5]],

                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]],

                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]],

                          [[2, 2, 6, 6, 6],
                           [2, 2, 6, 6, 6],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8],
                           [7, 7, 8, 8, 8]]])

        assert_array_equal(result, expected)

    def test_block_with_mismatched_shape(self, block):
        a = np.array([0, 0])
        b = np.eye(2)
        assert_raises(ValueError, block, [a, b])
        assert_raises(ValueError, block, [b, a])

        to_block = [[np.ones((2,3)), np.ones((2,2))],
                    [np.ones((2,2)), np.ones((2,2))]]
        assert_raises(ValueError, block, to_block)
    def test_no_lists(self, block):
        assert_equal(block(1), np.array(1))
        assert_equal(block(np.eye(3)), np.eye(3))

    def test_invalid_nesting(self, block):
        msg = 'depths are mismatched'
        assert_raises_regex(ValueError, msg, block, [1, [2]])
        assert_raises_regex(ValueError, msg, block, [1, []])
        assert_raises_regex(ValueError, msg, block, [[1], 2])
        assert_raises_regex(ValueError, msg, block, [[], 2])
        assert_raises_regex(ValueError, msg, block, [
            [[1], [2]],
            [[3, 4]],
            [5]  # missing brackets
        ])

    def test_empty_lists(self, block):
        assert_raises_regex(ValueError, 'empty', block, [])
        assert_raises_regex(ValueError, 'empty', block, [[]])
        assert_raises_regex(ValueError, 'empty', block, [[1], []])

    def test_tuple(self, block):
        assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
        assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])

    def test_different_ndims(self, block):
        a = 1.
        b = 2 * np.ones((1, 2))
        c = 3 * np.ones((1, 1, 3))

        result = block([a, b, c])
        expected = np.array([[[1., 2., 2., 3., 3., 3.]]])

        assert_equal(result, expected)

    def test_different_ndims_depths(self, block):
        a = 1.
        b = 2 * np.ones((1, 2))
        c = 3 * np.ones((1, 2, 3))

        result = block([[a, b], [c]])
        expected = np.array([[[1., 2., 2.],
                              [3., 3., 3.],
                              [3., 3., 3.]]])

        assert_equal(result, expected)

    def test_block_memory_order(self, block):
        # 3D
        arr_c = np.zeros((3,)*3, order='C')
        arr_f = np.zeros((3,)*3, order='F')

        b_c = [[[arr_c, arr_c],
                [arr_c, arr_c]],
               [[arr_c, arr_c],
                [arr_c, arr_c]]]

        b_f = [[[arr_f, arr_f],
                [arr_f, arr_f]],
               [[arr_f, arr_f],
                [arr_f, arr_f]]]

        assert block(b_c).flags['C_CONTIGUOUS']
        assert block(b_f).flags['F_CONTIGUOUS']

        arr_c = np.zeros((3, 3), order='C')
        arr_f = np.zeros((3, 3), order='F')
        # 2D
        b_c = [[arr_c, arr_c],
               [arr_c, arr_c]]

        b_f = [[arr_f, arr_f],
               [arr_f, arr_f]]

        assert block(b_c).flags['C_CONTIGUOUS']
        assert block(b_f).flags['F_CONTIGUOUS']


def test_block_dispatcher():
    class ArrayLike:
        pass
    a = ArrayLike()
    b = ArrayLike()
    c = ArrayLike()
    assert_equal(list(_block_dispatcher(a)), [a])
    assert_equal(list(_block_dispatcher([a])), [a])
    assert_equal(list(_block_dispatcher([a, b])), [a, b])
    assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
    # don't recurse into non-lists
    assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
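
For reference, the layout rule exercised throughout TestBlock in one small usage sketch:

    import numpy as np

    A = np.ones((2, 2))
    B = 2 * np.ones((2, 2))
    # Nested lists are assembled innermost-first: inner lists are joined along
    # the last axis, the outer list along the second-to-last axis.
    M = np.block([[A, B],
                  [B, A]])
    assert M.shape == (4, 4)
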
env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (180 Bytes).

env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-310.pyc
ADDED
Binary file (5.08 kB).

env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-310.pyc
ADDED
Binary file (11.3 kB).

env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/test_helper.py
ADDED
@@ -0,0 +1,167 @@
1 |
+
"""Test functions for fftpack.helper module
|
2 |
+
|
3 |
+
Copied from fftpack.helper by Pearu Peterson, October 2005
|
4 |
+
|
5 |
+
"""
|
6 |
+
import numpy as np
|
7 |
+
from numpy.testing import assert_array_almost_equal
|
8 |
+
from numpy import fft, pi
|
9 |
+
|
10 |
+
|
11 |
+
class TestFFTShift:
|
12 |
+
|
13 |
+
def test_definition(self):
|
14 |
+
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
|
15 |
+
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
|
16 |
+
assert_array_almost_equal(fft.fftshift(x), y)
|
17 |
+
assert_array_almost_equal(fft.ifftshift(y), x)
|
18 |
+
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
|
19 |
+
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
|
20 |
+
assert_array_almost_equal(fft.fftshift(x), y)
|
21 |
+
assert_array_almost_equal(fft.ifftshift(y), x)
|
22 |
+
|
23 |
+
def test_inverse(self):
|
24 |
+
for n in [1, 4, 9, 100, 211]:
|
25 |
+
x = np.random.random((n,))
|
26 |
+
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
|
27 |
+
|
28 |
+
def test_axes_keyword(self):
|
29 |
+
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
|
30 |
+
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
|
31 |
+
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
|
32 |
+
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
|
33 |
+
fft.fftshift(freqs, axes=(0,)))
|
34 |
+
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
|
35 |
+
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
|
36 |
+
fft.ifftshift(shifted, axes=(0,)))
|
37 |
+
|
38 |
+
assert_array_almost_equal(fft.fftshift(freqs), shifted)
|
39 |
+
assert_array_almost_equal(fft.ifftshift(shifted), freqs)
|
40 |
+
|
41 |
+
def test_uneven_dims(self):
|
42 |
+
""" Test 2D input, which has uneven dimension sizes """
|
43 |
+
freqs = [
|
44 |
+
[0, 1],
|
45 |
+
[2, 3],
|
46 |
+
[4, 5]
|
47 |
+
]
|
48 |
+
|
49 |
+
# shift in dimension 0
|
50 |
+
shift_dim0 = [
|
51 |
+
[4, 5],
|
52 |
+
[0, 1],
|
53 |
+
[2, 3]
|
54 |
+
]
|
55 |
+
assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
|
56 |
+
+        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
+        assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
+        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
+
+        # shift in dimension 1
+        shift_dim1 = [
+            [1, 0],
+            [3, 2],
+            [5, 4]
+        ]
+        assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
+        assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
+
+        # shift in both dimensions
+        shift_dim_both = [
+            [5, 4],
+            [1, 0],
+            [3, 2]
+        ]
+        assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
+        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
+        assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
+        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
+
+        # axes=None (default) shift in all dimensions
+        assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
+        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
+        assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
+        assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
+
+    def test_equal_to_original(self):
+        """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
+        from numpy.core import asarray, concatenate, arange, take
+
+        def original_fftshift(x, axes=None):
+            """ How fftshift was implemented in v1.14 """
+            tmp = asarray(x)
+            ndim = tmp.ndim
+            if axes is None:
+                axes = list(range(ndim))
+            elif isinstance(axes, int):
+                axes = (axes,)
+            y = tmp
+            for k in axes:
+                n = tmp.shape[k]
+                p2 = (n + 1) // 2
+                mylist = concatenate((arange(p2, n), arange(p2)))
+                y = take(y, mylist, k)
+            return y
+
+        def original_ifftshift(x, axes=None):
+            """ How ifftshift was implemented in v1.14 """
+            tmp = asarray(x)
+            ndim = tmp.ndim
+            if axes is None:
+                axes = list(range(ndim))
+            elif isinstance(axes, int):
+                axes = (axes,)
+            y = tmp
+            for k in axes:
+                n = tmp.shape[k]
+                p2 = n - (n + 1) // 2
+                mylist = concatenate((arange(p2, n), arange(p2)))
+                y = take(y, mylist, k)
+            return y
+
+        # create possible 2d array combinations and try all possible keywords
+        # compare output to original functions
+        for i in range(16):
+            for j in range(16):
+                for axes_keyword in [0, 1, None, (0,), (0, 1)]:
+                    inp = np.random.rand(i, j)
+
+                    assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
+                                              original_fftshift(inp, axes_keyword))
+
+                    assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
+                                              original_ifftshift(inp, axes_keyword))
+
+
+class TestFFTFreq:
+
+    def test_definition(self):
+        x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
+        assert_array_almost_equal(9*fft.fftfreq(9), x)
+        assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
+        x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
+        assert_array_almost_equal(10*fft.fftfreq(10), x)
+        assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
+
+
+class TestRFFTFreq:
+
+    def test_definition(self):
+        x = [0, 1, 2, 3, 4]
+        assert_array_almost_equal(9*fft.rfftfreq(9), x)
+        assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
+        x = [0, 1, 2, 3, 4, 5]
+        assert_array_almost_equal(10*fft.rfftfreq(10), x)
+        assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
+
+
+class TestIRFFTN:
+
+    def test_not_last_axis_success(self):
+        ar, ai = np.random.random((2, 16, 8, 32))
+        a = ar + 1j*ai
+
+        axes = (-2,)
+
+        # Should not raise error
+        fft.irfftn(a, axes=axes)
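Aside (not part of the diff): the `original_fftshift` helper reproduced above builds an index list with `concatenate` and `take`; the effect is the same as rolling each selected axis forward by half its length, and `ifftshift` rolls it back. A minimal sketch of that equivalence, using a small hypothetical `freqs2d` array rather than anything from the test file:

    import numpy as np

    freqs2d = np.array([[0, 1], [2, 3], [4, 5]])  # hypothetical example input

    # fftshift over all axes is a roll by n // 2 along each axis;
    # ifftshift undoes it with a roll by -(n // 2).
    rolled = freqs2d
    for axis in (0, 1):
        rolled = np.roll(rolled, freqs2d.shape[axis] // 2, axis=axis)

    assert np.array_equal(rolled, np.fft.fftshift(freqs2d))
    assert np.array_equal(np.fft.ifftshift(rolled), freqs2d)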
env-llmeval/lib/python3.10/site-packages/numpy/fft/tests/test_pocketfft.py
ADDED
@@ -0,0 +1,308 @@
+import numpy as np
+import pytest
+from numpy.random import random
+from numpy.testing import (
+        assert_array_equal, assert_raises, assert_allclose, IS_WASM
+        )
+import threading
+import queue
+
+
+def fft1(x):
+    L = len(x)
+    phase = -2j * np.pi * (np.arange(L) / L)
+    phase = np.arange(L).reshape(-1, 1) * phase
+    return np.sum(x*np.exp(phase), axis=1)
+
+
+class TestFFTShift:
+
+    def test_fft_n(self):
+        assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
+
+
+class TestFFT1D:
+
+    def test_identity(self):
+        maxlen = 512
+        x = random(maxlen) + 1j*random(maxlen)
+        xr = random(maxlen)
+        for i in range(1, maxlen):
+            assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
+                            atol=1e-12)
+            assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
+                            xr[0:i], atol=1e-12)
+
+    def test_fft(self):
+        x = random(30) + 1j*random(30)
+        assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
+        assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
+        assert_allclose(fft1(x) / np.sqrt(30),
+                        np.fft.fft(x, norm="ortho"), atol=1e-6)
+        assert_allclose(fft1(x) / 30.,
+                        np.fft.fft(x, norm="forward"), atol=1e-6)
+
+    @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
+    def test_ifft(self, norm):
+        x = random(30) + 1j*random(30)
+        assert_allclose(
+            x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
+            atol=1e-6)
+        # Ensure we get the correct error message
+        with pytest.raises(ValueError,
+                           match='Invalid number of FFT data points'):
+            np.fft.ifft([], norm=norm)
+
+    def test_fft2(self):
+        x = random((30, 20)) + 1j*random((30, 20))
+        assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
+                        np.fft.fft2(x), atol=1e-6)
+        assert_allclose(np.fft.fft2(x),
+                        np.fft.fft2(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
+                        np.fft.fft2(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.fft2(x) / (30. * 20.),
+                        np.fft.fft2(x, norm="forward"), atol=1e-6)
+
+    def test_ifft2(self):
+        x = random((30, 20)) + 1j*random((30, 20))
+        assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
+                        np.fft.ifft2(x), atol=1e-6)
+        assert_allclose(np.fft.ifft2(x),
+                        np.fft.ifft2(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
+                        np.fft.ifft2(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.ifft2(x) * (30. * 20.),
+                        np.fft.ifft2(x, norm="forward"), atol=1e-6)
+
+    def test_fftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        assert_allclose(
+            np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
+            np.fft.fftn(x), atol=1e-6)
+        assert_allclose(np.fft.fftn(x),
+                        np.fft.fftn(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
+                        np.fft.fftn(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
+                        np.fft.fftn(x, norm="forward"), atol=1e-6)
+
+    def test_ifftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        assert_allclose(
+            np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
+            np.fft.ifftn(x), atol=1e-6)
+        assert_allclose(np.fft.ifftn(x),
+                        np.fft.ifftn(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+                        np.fft.ifftn(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
+                        np.fft.ifftn(x, norm="forward"), atol=1e-6)
+
+    def test_rfft(self):
+        x = random(30)
+        for n in [x.size, 2*x.size]:
+            for norm in [None, 'backward', 'ortho', 'forward']:
+                assert_allclose(
+                    np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+                    np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
+            assert_allclose(
+                np.fft.rfft(x, n=n),
+                np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
+            assert_allclose(
+                np.fft.rfft(x, n=n) / np.sqrt(n),
+                np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
+            assert_allclose(
+                np.fft.rfft(x, n=n) / n,
+                np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
+
+    def test_irfft(self):
+        x = random(30)
+        assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
+        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
+                        norm="backward"), atol=1e-6)
+        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
+                        norm="ortho"), atol=1e-6)
+        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
+                        norm="forward"), atol=1e-6)
+
+    def test_rfft2(self):
+        x = random((30, 20))
+        assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
+        assert_allclose(np.fft.rfft2(x),
+                        np.fft.rfft2(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
+                        np.fft.rfft2(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.rfft2(x) / (30. * 20.),
+                        np.fft.rfft2(x, norm="forward"), atol=1e-6)
+
+    def test_irfft2(self):
+        x = random((30, 20))
+        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
+        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
+                        norm="backward"), atol=1e-6)
+        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
+                        norm="ortho"), atol=1e-6)
+        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
+                        norm="forward"), atol=1e-6)
+
+    def test_rfftn(self):
+        x = random((30, 20, 10))
+        assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
+        assert_allclose(np.fft.rfftn(x),
+                        np.fft.rfftn(x, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
+                        np.fft.rfftn(x, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
+                        np.fft.rfftn(x, norm="forward"), atol=1e-6)
+
+    def test_irfftn(self):
+        x = random((30, 20, 10))
+        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
+        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
+                        norm="backward"), atol=1e-6)
+        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
+                        norm="ortho"), atol=1e-6)
+        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
+                        norm="forward"), atol=1e-6)
+
+    def test_hfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
+        assert_allclose(np.fft.hfft(x_herm),
+                        np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
+        assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
+                        np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
+        assert_allclose(np.fft.hfft(x_herm) / 30.,
+                        np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
+
+    def test_ihfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
+        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+                        norm="backward"), norm="backward"), atol=1e-6)
+        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+                        norm="ortho"), norm="ortho"), atol=1e-6)
+        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+                        norm="forward"), norm="forward"), atol=1e-6)
+
+    @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
+                                    np.fft.rfftn, np.fft.irfftn])
+    def test_axes(self, op):
+        x = random((30, 20, 10))
+        axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+        for a in axes:
+            op_tr = op(np.transpose(x, a))
+            tr_op = np.transpose(op(x, axes=a), a)
+            assert_allclose(op_tr, tr_op, atol=1e-6)
+
+    def test_all_1d_norm_preserving(self):
+        # verify that round-trip transforms are norm-preserving
+        x = random(30)
+        x_norm = np.linalg.norm(x)
+        n = x.size * 2
+        func_pairs = [(np.fft.fft, np.fft.ifft),
+                      (np.fft.rfft, np.fft.irfft),
+                      # hfft: order so the first function takes x.size samples
+                      # (necessary for comparison to x_norm above)
+                      (np.fft.ihfft, np.fft.hfft),
+                      ]
+        for forw, back in func_pairs:
+            for n in [x.size, 2*x.size]:
+                for norm in [None, 'backward', 'ortho', 'forward']:
+                    tmp = forw(x, n=n, norm=norm)
+                    tmp = back(tmp, n=n, norm=norm)
+                    assert_allclose(x_norm,
+                                    np.linalg.norm(tmp), atol=1e-6)
+
+    @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
+                                       np.longdouble])
+    def test_dtypes(self, dtype):
+        # make sure that all input precisions are accepted and internally
+        # converted to 64bit
+        x = random(30).astype(dtype)
+        assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
+        assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
+
+
+@pytest.mark.parametrize(
+        "dtype",
+        [np.float32, np.float64, np.complex64, np.complex128])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+        "fft",
+        [np.fft.fft, np.fft.fft2, np.fft.fftn,
+         np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+    # Check that FFT/IFFT produces identical results for C, Fortran and
+    # non contiguous arrays
+    rng = np.random.RandomState(42)
+    X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+    # See discussion in pull/14178
+    _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
+    if order == 'F':
+        Y = np.asfortranarray(X)
+    else:
+        # Make a non contiguous array
+        Y = X[::-1]
+        X = np.ascontiguousarray(X[::-1])
+
+    if fft.__name__.endswith('fft'):
+        for axis in range(3):
+            X_res = fft(X, axis=axis)
+            Y_res = fft(Y, axis=axis)
+            assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+    elif fft.__name__.endswith(('fft2', 'fftn')):
+        axes = [(0, 1), (1, 2), (0, 2)]
+        if fft.__name__.endswith('fftn'):
+            axes.extend([(0,), (1,), (2,), None])
+        for ax in axes:
+            X_res = fft(X, axes=ax)
+            Y_res = fft(Y, axes=ax)
+            assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+    else:
+        raise ValueError()
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
+class TestFFTThreadSafe:
+    threads = 16
+    input_shape = (800, 200)
+
+    def _test_mtsame(self, func, *args):
+        def worker(args, q):
+            q.put(func(*args))
+
+        q = queue.Queue()
+        expected = func(*args)
+
+        # Spin off a bunch of threads to call the same function simultaneously
+        t = [threading.Thread(target=worker, args=(args, q))
+             for i in range(self.threads)]
+        [x.start() for x in t]
+
+        [x.join() for x in t]
+        # Make sure all threads returned the correct value
+        for i in range(self.threads):
+            assert_array_equal(q.get(timeout=5), expected,
+                               'Function returned wrong value in multithreaded context')
+
+    def test_fft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.fft, a)
+
+    def test_ifft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.ifft, a)
+
+    def test_rfft(self):
+        a = np.ones(self.input_shape)
+        self._test_mtsame(np.fft.rfft, a)
+
+    def test_irfft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.irfft, a)
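Aside (not part of the diff): the `norm` relationships exercised by `test_fft` and `test_rfft` above reduce to a single scale factor on the forward transform, which can be checked in isolation. A short sketch of that reading, using only public `np.fft` calls:

    import numpy as np

    x = np.random.random(30) + 1j * np.random.random(30)
    n = x.size

    # "backward" (the default) leaves the forward transform unscaled,
    # "ortho" divides it by sqrt(n), and "forward" divides it by n.
    f_backward = np.fft.fft(x, norm="backward")
    assert np.allclose(np.fft.fft(x, norm="ortho"), f_backward / np.sqrt(n))
    assert np.allclose(np.fft.fft(x, norm="forward"), f_backward / n)

    # Round-tripping with a matching norm recovers the input.
    for norm in ("backward", "ortho", "forward"):
        assert np.allclose(np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), x)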